lithium-codegen-s390.cc revision f91f0611dbaf29ca0f1d4aecb357ce243a19d2fa
// Copyright 2014 the V8 project authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/s390/lithium-codegen-s390.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ std(DoubleRegister::from_code(save_iterator.Current()),
           MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ld(DoubleRegister::from_code(save_iterator.Current()),
          MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // r3: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)
  }

  int prologue_offset = masm_->pc_offset();
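
  // Note: a nonzero prologue_offset means code was already emitted before the
  // prologue proper (for optimizing compiles, the entry-hook call above); ip
  // still holds the function entry, so the AddP below advances it to the
  // prologue's own start address.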
  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry. Need to add
    // 4 bytes for the size of AHI/AGHI that AddP expands into.
    prologue_offset += sizeof(FourByteInstr);
    __ AddP(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
    if (FLAG_debug_code) {
      __ Push(r2, r3);
      __ mov(r2, Operand(slots * kPointerSize));
      __ mov(r3, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
      __ lay(r2, MemOperand(r2, -kPointerSize));
      __ CmpP(r2, Operand::Zero());
      __ bne(&loop);
      __ Pop(r2, r3);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}

void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r3.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(r3);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
        FastNewFunctionContextStub stub(isolate());
        __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
               Operand(slots));
        __ CallStub(&stub);
        // Result of FastNewFunctionContextStub is always in new space.
        need_write_barrier = false;
      } else {
        __ push(r3);
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both r2 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ LoadRR(cp, r2);
    __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r2, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ StoreP(r2, target);
        // Update the write barrier. This clobbers r5 and r2.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r2, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ lay(sp, MemOperand(sp, -slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}

bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets in halfwords to the table can be encoded in the
  // 32-bit signed immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
  // TODO(joransiu): The Int24 condition can likely be relaxed for S390
  if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushCommonFrame();
        __ b(r14, &needs_frame);
      } else {
        __ b(r14, &call_deopt_entry);
      }
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ AddP(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}

DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
  return DoubleRegister::from_code(code);
}

Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}

Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}

void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}

DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}

bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}

int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}

Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}

double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}

MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

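// A deoptimization translation records, for each value in the unoptimized
// environment, where that value currently lives in optimized code (register,
// stack slot, or literal). Note that WriteTranslation below recurses on
// outer() first, so the commands for enclosing frames are emitted before
// those of the innermost frame.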
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}

void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;

    // Store the condition on the stack if necessary
    if (cond != al) {
      Label done;
      __ LoadImmP(scratch, Operand::Zero());
      __ b(NegateCondition(cond), &done, Label::kNear);
      __ LoadImmP(scratch, Operand(1));
      __ bind(&done);
      __ push(scratch);
    }

    Label done;
    __ Push(r3);
    __ mov(scratch, Operand(count));
    __ LoadW(r3, MemOperand(scratch));
    __ Sub32(r3, r3, Operand(1));
    __ Cmp32(r3, Operand::Zero());
    __ bne(&no_deopt, Label::kNear);

    __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);

    __ b(&done);

    __ bind(&no_deopt);
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ bind(&done);

    if (cond != al) {
      cond = ne;
      __ CmpP(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label /*, cr*/);
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            DeoptimizeReason deopt_reason, CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ CmpP(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative, Label::kNear);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ LoadComplementRR(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ LoadComplementRR(dividend, dividend);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ mov(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
    }
    __ b(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ mov(dividend, Operand::Zero());
  }
  __ bind(&done);
}

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ Mul(result, result, ip);
  __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}

void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Label done;

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for kMinInt % -1, dr will return undefined, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(left_reg, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(right_reg, Operand(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
    } else {
      __ b(ne, &no_overflow_possible, Label::kNear);
      __ mov(result_reg, Operand::Zero());
      __ b(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Divide instruction dr will implicitly use register pair
  // r0 & r1 below.
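  // srda shifts the 64-bit value held in the even/odd pair r0:r1 right
  // arithmetically; loading the dividend into r0 and shifting by 32 thus
  // leaves the sign-extended dividend in r0:r1, the input format dr expects
  // (dr then puts the remainder in r0 and the quotient in r1).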
  DCHECK(!left_reg.is(r1));
  DCHECK(!right_reg.is(r1));
  DCHECK(!result_reg.is(r1));
  __ LoadRR(r0, left_reg);
  __ srda(r0, Operand(32));
  __ dr(r0, right_reg);  // R0:R1 = R1 / divisor - R0 remainder

  __ LoadAndTestP_ExtendSrc(result_reg, r0);  // Copy remainder to result reg

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, Label::kNear);
    __ Cmp32(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
  }

  __ bind(&done);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ LoadComplementRR(result, dividend);
    return;
  }
  if (shift == 0) {
    __ LoadRR(result, dividend);
  } else {
    if (shift == 1) {
      __ ShiftRight(result, dividend, Operand(31));
    } else {
      __ ShiftRightArith(result, dividend, Operand(31));
      __ ShiftRight(result, result, Operand(32 - shift));
    }
    __ AddP(result, dividend, result);
    __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
    __ lgfr(result, result);
#endif
  }
  if (divisor < 0) __ LoadComplementRR(result, result);
}

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ Mul(scratch, result, ip);
    __ Cmp32(scratch, dividend);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&dividend_not_min_int, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ LoadAndTestP_ExtendSrc(result, r1);  // Move quotient to result register

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ Cmp32(r0, Operand::Zero());
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
  }
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ ShiftRightArith(result, dividend, Operand(shift));
#if V8_TARGET_ARCH_S390X
      __ lgfr(result, result);
#endif
    }
    return;
  }

// If the divisor is negative, we have to negate and handle edge cases.
#if V8_TARGET_ARCH_S390X
  if (divisor == -1 && can_overflow) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }
#endif

  __ LoadComplementRR(result, dividend);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
  }

// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_S390X
  if (!can_overflow) {
#endif
    if (shift) {
      __ ShiftRightArithP(result, result, Operand(shift));
    }
    return;
#if !V8_TARGET_ARCH_S390X
  }
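
  // Note: on 64-bit targets the 32-bit negation above cannot overflow the
  // 64-bit register (the kMinInt / -1 case was already rejected eagerly),
  // so the overflow recovery from here to the matching #endif is compiled
  // only for non-64-bit builds.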
  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
    return;
  }

  Label overflow_label, done;
  __ b(overflow, &overflow_label, Label::kNear);
  __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
  __ lgfr(result, result);
#endif
  __ b(&done, Label::kNear);
  __ bind(&overflow_label);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ LoadComplementRR(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Cmp32(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ b(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ SubP(result, result, Operand(1));
  __ bind(&done);
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }
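
  // Flooring and truncating division agree unless the operands' signs differ
  // and the division is inexact; the sign and remainder checks after the
  // divide below subtract 1 from the quotient in exactly that case.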
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
    } else {
      __ bne(&no_overflow_possible, Label::kNear);
      __ LoadRR(result, dividend);
    }
    __ bind(&no_overflow_possible);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ lr(result, r1);  // Move quotient to result register

  Label done;
  Register scratch = scratch0();
  // If both operands have the same sign then we are done.
  __ Xor(scratch, dividend, divisor);
  __ ltr(scratch, scratch);  // use 32 bit version LoadAndTestRR even in 64 bit
  __ bge(&done, Label::kNear);

  // If there is no remainder then we are done.
  __ lr(scratch, result);
  __ msr(scratch, divisor);
  __ Cmp32(dividend, scratch);
  __ beq(&done, Label::kNear);

  // We performed a truncating division. Correct the result.
  __ Sub32(result, result, Operand(1));
  __ bind(&done);
}

void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use madbr as the intermediate value is not rounded
  // to proper precision
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ adbr(result, addend);
}

void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use msdbr as the intermediate value is not rounded
  // to proper precision
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ sdbr(result, minuend);
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
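
    // Multiplications by +/-2^k and by 2^k +/- 1 are strength-reduced in the
    // switch below to shifts plus an add/sub and a possible negation, e.g.
    // 3 * x => (x << 1) + x and -4 * x => -(x << 2); all other constants
    // fall through to a generic MulP.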
    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a null constant will be handled separately.
      // If constant is negative and left is null, the result should be -0.
      __ CmpP(left, Operand::Zero());
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (can_overflow) {
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ LoadComplementRR(result, left);
            DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
#if V8_TARGET_ARCH_S390X
          } else {
            __ LoadComplementRR(result, left);
            __ TestIfInt32(result, r0);
            DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
          }
#endif
        } else {
          __ LoadComplementRR(result, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ Cmp32(left, Operand::Zero());
#if V8_TARGET_ARCH_S390X
          } else {
            __ Cmp32(left, Operand::Zero());
          }
#endif
          DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
        }
        __ LoadImmP(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ ShiftLeftP(result, left, Operand(shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ LoadComplementRR(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ ShiftLeftP(scratch, left, Operand(shift));
          __ AddP(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ LoadComplementRR(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ ShiftLeftP(scratch, left, Operand(shift));
          __ SubP(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ LoadComplementRR(result, result);
        } else {
          // Generate standard code.
          __ Move(result, left);
          __ MulP(result, Operand(constant));
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

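    // For overflow-checked multiplies, the 64-bit path computes the full
    // product with msgr and uses TestIfInt32 to verify that it fits in an
    // int32, while the 31-bit path uses mr_z, which (per the r0:scratch
    // comments below) leaves the 64-bit product split across a register
    // pair whose high word is then checked the same way.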
    if (can_overflow) {
#if V8_TARGET_ARCH_S390X
      // result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ SmiUntag(scratch, right);
        __ msgr(result, scratch);
      } else {
        __ LoadRR(result, left);
        __ msgr(result, right);
      }
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiTag(result);
      }
#else
      // r0:scratch = scratch * right
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(scratch, left);
        __ mr_z(r0, right);
        __ LoadRR(result, scratch);
      } else {
        // r0:scratch = scratch * right
        __ LoadRR(scratch, left);
        __ mr_z(r0, right);
        __ LoadRR(result, scratch);
      }
      __ TestIfInt32(r0, result, scratch);
      DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
#endif
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
#if V8_TARGET_ARCH_S390X
      if (instr->hydrogen()->representation().IsSmi()) {
#endif
        __ XorP(r0, left, right);
        __ LoadAndTestRR(r0, r0);
        __ bge(&done, Label::kNear);
#if V8_TARGET_ARCH_S390X
      } else {
        __ XorP(r0, left, right);
        __ Cmp32(r0, Operand::Zero());
        __ bge(&done, Label::kNear);
      }
#endif
      // Bail out if the result is minus zero.
      __ CmpP(result, Operand::Zero());
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
      __ bind(&done);
    }
  }
}

void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());

  if (right_op->IsConstantOperand()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ AndP(result, left, Operand(ToOperand(right_op)));
        break;
      case Token::BIT_OR:
        __ OrP(result, left, Operand(ToOperand(right_op)));
        break;
      case Token::BIT_XOR:
        __ XorP(result, left, Operand(ToOperand(right_op)));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right_op->IsStackSlot()) {
    // Reg-Mem instruction clobbers, so copy src to dst first.
    if (!left.is(result)) __ LoadRR(result, left);
    switch (instr->op()) {
      case Token::BIT_AND:
        __ AndP(result, ToMemOperand(right_op));
        break;
      case Token::BIT_OR:
        __ OrP(result, ToMemOperand(right_op));
        break;
      case Token::BIT_XOR:
        __ XorP(result, ToMemOperand(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    DCHECK(right_op->IsRegister());

    switch (instr->op()) {
      case Token::BIT_AND:
        __ AndP(result, left, ToRegister(right_op));
        break;
      case Token::BIT_OR:
        __ OrP(result, left, ToRegister(right_op));
        break;
      case Token::BIT_XOR:
        __ XorP(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
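  // JavaScript shift counts are taken modulo 32, hence the 0x1F masking of
  // the right operand in both the register and constant paths below.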
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        // rotate_right(a, b) == rotate_left(a, 32 - b)
        __ LoadComplementRR(scratch, scratch);
        __ rll(result, left, scratch, Operand(32));
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        break;
      case Token::SAR:
        __ ShiftRightArith(result, left, scratch);
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        break;
      case Token::SHR:
        __ ShiftRight(result, left, scratch);
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        if (instr->can_deopt()) {
#if V8_TARGET_ARCH_S390X
          __ ltgfr(result, result /*, SetRC*/);
#else
          __ ltr(result, result);  // Set the <,==,> condition
#endif
          DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
        }
        break;
      case Token::SHL:
        __ ShiftLeft(result, left, scratch);
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rll(result, left, Operand(32 - shift_count));
#if V8_TARGET_ARCH_S390X
          __ lgfr(result, result);
#endif
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ ShiftRightArith(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_S390X
          __ lgfr(result, result);
#endif
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ ShiftRight(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_S390X
          __ lgfr(result, result);
#endif
        } else {
          if (instr->can_deopt()) {
            __ Cmp32(left, Operand::Zero());
            DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ ShiftLeftP(result, left, Operand(shift_count));
#else
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ ShiftLeft(result, left, Operand(shift_count - 1));
#if V8_TARGET_ARCH_S390X
              __ lgfr(result, result);
#endif
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
          } else {
            __ ShiftLeft(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_S390X
            __ lgfr(result, result);
#endif
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
                     instr->hydrogen()->representation().IsExternal());

#if V8_TARGET_ARCH_S390X
  // The overflow detection needs to be tested on the lower 32-bits.
  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
  // to set the CC overflow bit properly. The result is then sign-extended.
  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#else
  bool checkOverflow = true;
#endif

  if (right->IsConstantOperand()) {
    if (!isInteger || !checkOverflow)
      __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
    else
      __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
  } else if (right->IsRegister()) {
    if (!isInteger)
      __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
    else if (!checkOverflow)
      __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
                        ToRegister(right));
    else
      __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
  } else {
    if (!left->Equals(instr->result()))
      __ LoadRR(ToRegister(result), ToRegister(left));

    MemOperand mem = ToMemOperand(right);
    if (!isInteger) {
      __ SubP(ToRegister(result), mem);
    } else {
#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
      // We want to read the 32-bits directly from memory
      MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
#else
      MemOperand Upper32Mem = ToMemOperand(right);
#endif
      if (checkOverflow) {
        __ Sub32(ToRegister(result), Upper32Mem);
      } else {
        __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
      }
    }
  }

#if V8_TARGET_ARCH_S390X
  if (isInteger && checkOverflow)
    __ lgfr(ToRegister(result), ToRegister(result));
#endif
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  }
}

void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
         right->IsConstantOperand());

#if V8_TARGET_ARCH_S390X
  // The overflow detection needs to be tested on the lower 32-bits.
  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
  // to set the CC overflow bit properly. The result is then sign-extended.
1720 bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1721#else 1722 bool checkOverflow = true; 1723#endif 1724 1725 Operand right_operand = ToOperand(right); 1726 __ mov(r0, right_operand); 1727 1728 if (!checkOverflow) { 1729 __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left)); 1730 } else { 1731 __ Sub32(ToRegister(result), r0, ToRegister(left)); 1732 } 1733} 1734 1735void LCodeGen::DoConstantI(LConstantI* instr) { 1736 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1737} 1738 1739void LCodeGen::DoConstantS(LConstantS* instr) { 1740 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value()); 1741} 1742 1743void LCodeGen::DoConstantD(LConstantD* instr) { 1744 DCHECK(instr->result()->IsDoubleRegister()); 1745 DoubleRegister result = ToDoubleRegister(instr->result()); 1746 uint64_t bits = instr->bits(); 1747 __ LoadDoubleLiteral(result, bits, scratch0()); 1748} 1749 1750void LCodeGen::DoConstantE(LConstantE* instr) { 1751 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1752} 1753 1754void LCodeGen::DoConstantT(LConstantT* instr) { 1755 Handle<Object> object = instr->value(isolate()); 1756 AllowDeferredHandleDereference smi_check; 1757 __ Move(ToRegister(instr->result()), object); 1758} 1759 1760MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index, 1761 String::Encoding encoding) { 1762 if (index->IsConstantOperand()) { 1763 int offset = ToInteger32(LConstantOperand::cast(index)); 1764 if (encoding == String::TWO_BYTE_ENCODING) { 1765 offset *= kUC16Size; 1766 } 1767 STATIC_ASSERT(kCharSize == 1); 1768 return FieldMemOperand(string, SeqString::kHeaderSize + offset); 1769 } 1770 Register scratch = scratch0(); 1771 DCHECK(!scratch.is(string)); 1772 DCHECK(!scratch.is(ToRegister(index))); 1773 // TODO(joransiu) : Fold Add into FieldMemOperand 1774 if (encoding == String::ONE_BYTE_ENCODING) { 1775 __ AddP(scratch, string, ToRegister(index)); 1776 } else { 1777 STATIC_ASSERT(kUC16Size == 2); 1778 __ ShiftLeftP(scratch, ToRegister(index), Operand(1)); 1779 __ AddP(scratch, string, scratch); 1780 } 1781 return FieldMemOperand(scratch, SeqString::kHeaderSize); 1782} 1783 1784void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1785 String::Encoding encoding = instr->hydrogen()->encoding(); 1786 Register string = ToRegister(instr->string()); 1787 Register result = ToRegister(instr->result()); 1788 1789 if (FLAG_debug_code) { 1790 Register scratch = scratch0(); 1791 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 1792 __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 1793 1794 __ AndP(scratch, scratch, 1795 Operand(kStringRepresentationMask | kStringEncodingMask)); 1796 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1797 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1798 __ CmpP(scratch, 1799 Operand(encoding == String::ONE_BYTE_ENCODING ? 
one_byte_seq_type 1800 : two_byte_seq_type)); 1801 __ Check(eq, kUnexpectedStringType); 1802 } 1803 1804 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1805 if (encoding == String::ONE_BYTE_ENCODING) { 1806 __ llc(result, operand); 1807 } else { 1808 __ llh(result, operand); 1809 } 1810} 1811 1812void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { 1813 String::Encoding encoding = instr->hydrogen()->encoding(); 1814 Register string = ToRegister(instr->string()); 1815 Register value = ToRegister(instr->value()); 1816 1817 if (FLAG_debug_code) { 1818 Register index = ToRegister(instr->index()); 1819 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1820 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1821 int encoding_mask = 1822 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING 1823 ? one_byte_seq_type 1824 : two_byte_seq_type; 1825 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); 1826 } 1827 1828 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1829 if (encoding == String::ONE_BYTE_ENCODING) { 1830 __ stc(value, operand); 1831 } else { 1832 __ sth(value, operand); 1833 } 1834} 1835 1836void LCodeGen::DoAddI(LAddI* instr) { 1837 LOperand* left = instr->left(); 1838 LOperand* right = instr->right(); 1839 LOperand* result = instr->result(); 1840 bool isInteger = !(instr->hydrogen()->representation().IsSmi() || 1841 instr->hydrogen()->representation().IsExternal()); 1842#if V8_TARGET_ARCH_S390X 1843 // The overflow detection needs to be tested on the lower 32-bits. 1844 // As a result, on 64-bit, we need to force 32-bit arithmetic operations 1845 // to set the CC overflow bit properly. The result is then sign-extended. 
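  // For example, 0x7FFFFFFF + 1 overflows as a 32-bit addition, so Add32 sets
  // the overflow condition code; a 64-bit AddP would quietly produce
  // 0x80000000 and the deoptimization check below would never fire.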
1846  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1847#else
1848  bool checkOverflow = true;
1849#endif
1850
1851  if (right->IsConstantOperand()) {
1852    if (!isInteger || !checkOverflow)
1853      __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
1854    else
1855      __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
1856  } else if (right->IsRegister()) {
1857    if (!isInteger)
1858      __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
1859    else if (!checkOverflow)
1860      __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
1861                        ToRegister(right));
1862    else
1863      __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
1864  } else {
1865    if (!left->Equals(instr->result()))
1866      __ LoadRR(ToRegister(result), ToRegister(left));
1867
1868    MemOperand mem = ToMemOperand(right);
1869    if (!isInteger) {
1870      __ AddP(ToRegister(result), mem);
1871    } else {
1872#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
1873      // We want to read the 32-bits directly from memory
1874      MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
1875#else
1876      MemOperand Upper32Mem = ToMemOperand(right);
1877#endif
1878      if (checkOverflow) {
1879        __ Add32(ToRegister(result), Upper32Mem);
1880      } else {
1881        __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
1882      }
1883    }
1884  }
1885
1886#if V8_TARGET_ARCH_S390X
1887  if (isInteger && checkOverflow)
1888    __ lgfr(ToRegister(result), ToRegister(result));
1889#endif
1890  // Deoptimize on overflow.
1891  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1892    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1893  }
1894}
1895
1896void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1897  LOperand* left = instr->left();
1898  LOperand* right = instr->right();
1899  HMathMinMax::Operation operation = instr->hydrogen()->operation();
1900  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
1901  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1902    Register left_reg = ToRegister(left);
1903    Register right_reg = EmitLoadRegister(right, ip);
1904    Register result_reg = ToRegister(instr->result());
1905    Label return_left, done;
1906#if V8_TARGET_ARCH_S390X
1907    if (instr->hydrogen_value()->representation().IsSmi()) {
1908#endif
1909      __ CmpP(left_reg, right_reg);
1910#if V8_TARGET_ARCH_S390X
1911    } else {
1912      __ Cmp32(left_reg, right_reg);
1913    }
1914#endif
1915    __ b(cond, &return_left, Label::kNear);
1916    __ Move(result_reg, right_reg);
1917    __ b(&done, Label::kNear);
1918    __ bind(&return_left);
1919    __ Move(result_reg, left_reg);
1920    __ bind(&done);
1921  } else {
1922    DCHECK(instr->hydrogen()->representation().IsDouble());
1923    DoubleRegister left_reg = ToDoubleRegister(left);
1924    DoubleRegister right_reg = ToDoubleRegister(right);
1925    DoubleRegister result_reg = ToDoubleRegister(instr->result());
1926    Label check_nan_left, check_zero, return_left, return_right, done;
1927    __ cdbr(left_reg, right_reg);
1928    __ bunordered(&check_nan_left, Label::kNear);
1929    __ beq(&check_zero);
1930    __ b(cond, &return_left, Label::kNear);
1931    __ b(&return_right, Label::kNear);
1932
1933    __ bind(&check_zero);
1934    __ lzdr(kDoubleRegZero);
1935    __ cdbr(left_reg, kDoubleRegZero);
1936    __ bne(&return_left, Label::kNear);  // left == right != 0.
1937
1938    // At this point, both left and right are either 0 or -0.
1939    // N.B.
The following works because +0 + -0 == +0 1940 if (operation == HMathMinMax::kMathMin) { 1941 // For min we want logical-or of sign bit: -(-L + -R) 1942 __ lcdbr(left_reg, left_reg); 1943 __ ldr(result_reg, left_reg); 1944 if (left_reg.is(right_reg)) { 1945 __ adbr(result_reg, right_reg); 1946 } else { 1947 __ sdbr(result_reg, right_reg); 1948 } 1949 __ lcdbr(result_reg, result_reg); 1950 } else { 1951 // For max we want logical-and of sign bit: (L + R) 1952 __ ldr(result_reg, left_reg); 1953 __ adbr(result_reg, right_reg); 1954 } 1955 __ b(&done, Label::kNear); 1956 1957 __ bind(&check_nan_left); 1958 __ cdbr(left_reg, left_reg); 1959 __ bunordered(&return_left, Label::kNear); // left == NaN. 1960 1961 __ bind(&return_right); 1962 if (!right_reg.is(result_reg)) { 1963 __ ldr(result_reg, right_reg); 1964 } 1965 __ b(&done, Label::kNear); 1966 1967 __ bind(&return_left); 1968 if (!left_reg.is(result_reg)) { 1969 __ ldr(result_reg, left_reg); 1970 } 1971 __ bind(&done); 1972 } 1973} 1974 1975void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1976 DoubleRegister left = ToDoubleRegister(instr->left()); 1977 DoubleRegister right = ToDoubleRegister(instr->right()); 1978 DoubleRegister result = ToDoubleRegister(instr->result()); 1979 // All operations except MOD are computed in-place. 1980 DCHECK(instr->op() == Token::MOD || left.is(result)); 1981 switch (instr->op()) { 1982 case Token::ADD: 1983 __ adbr(result, right); 1984 break; 1985 case Token::SUB: 1986 __ sdbr(result, right); 1987 break; 1988 case Token::MUL: 1989 __ mdbr(result, right); 1990 break; 1991 case Token::DIV: 1992 __ ddbr(result, right); 1993 break; 1994 case Token::MOD: { 1995 __ PrepareCallCFunction(0, 2, scratch0()); 1996 __ MovToFloatParameters(left, right); 1997 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), 1998 0, 2); 1999 // Move the result in the double result register. 
2000 __ MovFromFloatResult(result); 2001 break; 2002 } 2003 default: 2004 UNREACHABLE(); 2005 break; 2006 } 2007} 2008 2009void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2010 DCHECK(ToRegister(instr->context()).is(cp)); 2011 DCHECK(ToRegister(instr->left()).is(r3)); 2012 DCHECK(ToRegister(instr->right()).is(r2)); 2013 DCHECK(ToRegister(instr->result()).is(r2)); 2014 2015 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code(); 2016 CallCode(code, RelocInfo::CODE_TARGET, instr); 2017} 2018 2019template <class InstrType> 2020void LCodeGen::EmitBranch(InstrType instr, Condition cond) { 2021 int left_block = instr->TrueDestination(chunk_); 2022 int right_block = instr->FalseDestination(chunk_); 2023 2024 int next_block = GetNextEmittedBlock(); 2025 2026 if (right_block == left_block || cond == al) { 2027 EmitGoto(left_block); 2028 } else if (left_block == next_block) { 2029 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block)); 2030 } else if (right_block == next_block) { 2031 __ b(cond, chunk_->GetAssemblyLabel(left_block)); 2032 } else { 2033 __ b(cond, chunk_->GetAssemblyLabel(left_block)); 2034 __ b(chunk_->GetAssemblyLabel(right_block)); 2035 } 2036} 2037 2038template <class InstrType> 2039void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) { 2040 int true_block = instr->TrueDestination(chunk_); 2041 __ b(cond, chunk_->GetAssemblyLabel(true_block)); 2042} 2043 2044template <class InstrType> 2045void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) { 2046 int false_block = instr->FalseDestination(chunk_); 2047 __ b(cond, chunk_->GetAssemblyLabel(false_block)); 2048} 2049 2050void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); } 2051 2052void LCodeGen::DoBranch(LBranch* instr) { 2053 Representation r = instr->hydrogen()->value()->representation(); 2054 DoubleRegister dbl_scratch = double_scratch0(); 2055 2056 if (r.IsInteger32()) { 2057 DCHECK(!info()->IsStub()); 2058 Register reg = ToRegister(instr->value()); 2059 __ Cmp32(reg, Operand::Zero()); 2060 EmitBranch(instr, ne); 2061 } else if (r.IsSmi()) { 2062 DCHECK(!info()->IsStub()); 2063 Register reg = ToRegister(instr->value()); 2064 __ CmpP(reg, Operand::Zero()); 2065 EmitBranch(instr, ne); 2066 } else if (r.IsDouble()) { 2067 DCHECK(!info()->IsStub()); 2068 DoubleRegister reg = ToDoubleRegister(instr->value()); 2069 __ lzdr(kDoubleRegZero); 2070 __ cdbr(reg, kDoubleRegZero); 2071 // Test the double value. Zero and NaN are false. 2072 Condition lt_gt = static_cast<Condition>(lt | gt); 2073 2074 EmitBranch(instr, lt_gt); 2075 } else { 2076 DCHECK(r.IsTagged()); 2077 Register reg = ToRegister(instr->value()); 2078 HType type = instr->hydrogen()->value()->type(); 2079 if (type.IsBoolean()) { 2080 DCHECK(!info()->IsStub()); 2081 __ CompareRoot(reg, Heap::kTrueValueRootIndex); 2082 EmitBranch(instr, eq); 2083 } else if (type.IsSmi()) { 2084 DCHECK(!info()->IsStub()); 2085 __ CmpP(reg, Operand::Zero()); 2086 EmitBranch(instr, ne); 2087 } else if (type.IsJSArray()) { 2088 DCHECK(!info()->IsStub()); 2089 EmitBranch(instr, al); 2090 } else if (type.IsHeapNumber()) { 2091 DCHECK(!info()->IsStub()); 2092 __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2093 // Test the double value. Zero and NaN are false. 
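      // cdbr sets the condition code to eq for a zero value and to unordered
      // for NaN, so branching on lt|gt accepts exactly the truthy doubles.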
2094      __ lzdr(kDoubleRegZero);
2095      __ cdbr(dbl_scratch, kDoubleRegZero);
2096      Condition lt_gt = static_cast<Condition>(lt | gt);
2097      EmitBranch(instr, lt_gt);
2098    } else if (type.IsString()) {
2099      DCHECK(!info()->IsStub());
2100      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2101      __ CmpP(ip, Operand::Zero());
2102      EmitBranch(instr, ne);
2103    } else {
2104      ToBooleanICStub::Types expected =
2105          instr->hydrogen()->expected_input_types();
2106      // Avoid deopts in the case where we've never executed this path before.
2107      if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
2108
2109      if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
2110        // undefined -> false.
2111        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2112        __ beq(instr->FalseLabel(chunk_));
2113      }
2114      if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
2115        // Boolean -> its value.
2116        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2117        __ beq(instr->TrueLabel(chunk_));
2118        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2119        __ beq(instr->FalseLabel(chunk_));
2120      }
2121      if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
2122        // 'null' -> false.
2123        __ CompareRoot(reg, Heap::kNullValueRootIndex);
2124        __ beq(instr->FalseLabel(chunk_));
2125      }
2126
2127      if (expected.Contains(ToBooleanICStub::SMI)) {
2128        // Smis: 0 -> false, all other -> true.
2129        __ CmpP(reg, Operand::Zero());
2130        __ beq(instr->FalseLabel(chunk_));
2131        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2132      } else if (expected.NeedsMap()) {
2133        // If we need a map later and have a Smi -> deopt.
2134        __ TestIfSmi(reg);
2135        DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
2136      }
2137
2138      const Register map = scratch0();
2139      if (expected.NeedsMap()) {
2140        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2141
2142        if (expected.CanBeUndetectable()) {
2143          // Undetectable -> false.
2144          __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
2145                Operand(1 << Map::kIsUndetectable));
2146          __ bne(instr->FalseLabel(chunk_));
2147        }
2148      }
2149
2150      if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
2151        // spec object -> true.
2152        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2153        __ bge(instr->TrueLabel(chunk_));
2154      }
2155
2156      if (expected.Contains(ToBooleanICStub::STRING)) {
2157        // String value -> false iff empty.
2158        Label not_string;
2159        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2160        __ bge(&not_string, Label::kNear);
2161        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2162        __ CmpP(ip, Operand::Zero());
2163        __ bne(instr->TrueLabel(chunk_));
2164        __ b(instr->FalseLabel(chunk_));
2165        __ bind(&not_string);
2166      }
2167
2168      if (expected.Contains(ToBooleanICStub::SYMBOL)) {
2169        // Symbol value -> true.
2170        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2171        __ beq(instr->TrueLabel(chunk_));
2172      }
2173
2174      if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
2175        // SIMD value -> true.
2177        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
2178        __ beq(instr->TrueLabel(chunk_));
2179      }
2180
2181      if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
2182        // heap number -> false iff +0, -0, or NaN.
2183        Label not_heap_number;
2184        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2185        __ bne(&not_heap_number, Label::kNear);
2186        __ LoadDouble(dbl_scratch,
2187                      FieldMemOperand(reg, HeapNumber::kValueOffset));
2188        __ lzdr(kDoubleRegZero);
2189        __ cdbr(dbl_scratch, kDoubleRegZero);
2190        __ bunordered(instr->FalseLabel(chunk_));  // NaN -> false.
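        // -0.0 compares equal to +0.0 under IEEE 754, so the eq branch below
        // folds both signed zeros into the false case.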
2191        __ beq(instr->FalseLabel(chunk_));  // +0, -0 -> false.
2192        __ b(instr->TrueLabel(chunk_));
2193        __ bind(&not_heap_number);
2194      }
2195
2196      if (!expected.IsGeneric()) {
2197        // We've seen something for the first time -> deopt.
2198        // This can only happen if we are not generic already.
2199        DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
2200      }
2201    }
2202  }
2203}
2204
2205void LCodeGen::EmitGoto(int block) {
2206  if (!IsNextEmittedBlock(block)) {
2207    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2208  }
2209}
2210
2211void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2212
2213Condition LCodeGen::TokenToCondition(Token::Value op) {
2214  Condition cond = kNoCondition;
2215  switch (op) {
2216    case Token::EQ:
2217    case Token::EQ_STRICT:
2218      cond = eq;
2219      break;
2220    case Token::NE:
2221    case Token::NE_STRICT:
2222      cond = ne;
2223      break;
2224    case Token::LT:
2225      cond = lt;
2226      break;
2227    case Token::GT:
2228      cond = gt;
2229      break;
2230    case Token::LTE:
2231      cond = le;
2232      break;
2233    case Token::GTE:
2234      cond = ge;
2235      break;
2236    case Token::IN:
2237    case Token::INSTANCEOF:
2238    default:
2239      UNREACHABLE();
2240  }
2241  return cond;
2242}
2243
2244void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2245  LOperand* left = instr->left();
2246  LOperand* right = instr->right();
2247  bool is_unsigned =
2248      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2249      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2250  Condition cond = TokenToCondition(instr->op());
2251
2252  if (left->IsConstantOperand() && right->IsConstantOperand()) {
2253    // We can statically evaluate the comparison.
2254    double left_val = ToDouble(LConstantOperand::cast(left));
2255    double right_val = ToDouble(LConstantOperand::cast(right));
2256    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2257                         ? instr->TrueDestination(chunk_)
2258                         : instr->FalseDestination(chunk_);
2259    EmitGoto(next_block);
2260  } else {
2261    if (instr->is_double()) {
2262      // Compare left and right operands as doubles and load the
2263      // resulting flags into the normal status register.
2264      __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
2265      // If a NaN is involved, i.e. the result is unordered,
2266      // jump to false block label.
2267      __ bunordered(instr->FalseLabel(chunk_));
2268    } else {
2269      if (right->IsConstantOperand()) {
2270        int32_t value = ToInteger32(LConstantOperand::cast(right));
2271        if (instr->hydrogen_value()->representation().IsSmi()) {
2272          if (is_unsigned) {
2273            __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2274          } else {
2275            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2276          }
2277        } else {
2278          if (is_unsigned) {
2279            __ CmpLogical32(ToRegister(left), ToOperand(right));
2280          } else {
2281            __ Cmp32(ToRegister(left), ToOperand(right));
2282          }
2283        }
2284      } else if (left->IsConstantOperand()) {
2285        int32_t value = ToInteger32(LConstantOperand::cast(left));
2286        if (instr->hydrogen_value()->representation().IsSmi()) {
2287          if (is_unsigned) {
2288            __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2289          } else {
2290            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2291          }
2292        } else {
2293          if (is_unsigned) {
2294            __ CmpLogical32(ToRegister(right), ToOperand(left));
2295          } else {
2296            __ Cmp32(ToRegister(right), ToOperand(left));
2297          }
2298        }
2299        // We commuted the operands, so commute the condition.
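        // For example, with a constant left operand, (5 < x) was emitted
        // above as a compare of x against 5, so lt must flip to gt.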
2300 cond = CommuteCondition(cond); 2301 } else if (instr->hydrogen_value()->representation().IsSmi()) { 2302 if (is_unsigned) { 2303 __ CmpLogicalP(ToRegister(left), ToRegister(right)); 2304 } else { 2305 __ CmpP(ToRegister(left), ToRegister(right)); 2306 } 2307 } else { 2308 if (is_unsigned) { 2309 __ CmpLogical32(ToRegister(left), ToRegister(right)); 2310 } else { 2311 __ Cmp32(ToRegister(left), ToRegister(right)); 2312 } 2313 } 2314 } 2315 EmitBranch(instr, cond); 2316 } 2317} 2318 2319void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { 2320 Register left = ToRegister(instr->left()); 2321 Register right = ToRegister(instr->right()); 2322 2323 __ CmpP(left, right); 2324 EmitBranch(instr, eq); 2325} 2326 2327void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { 2328 if (instr->hydrogen()->representation().IsTagged()) { 2329 Register input_reg = ToRegister(instr->object()); 2330 __ CmpP(input_reg, Operand(factory()->the_hole_value())); 2331 EmitBranch(instr, eq); 2332 return; 2333 } 2334 2335 DoubleRegister input_reg = ToDoubleRegister(instr->object()); 2336 __ cdbr(input_reg, input_reg); 2337 EmitFalseBranch(instr, ordered); 2338 2339 Register scratch = scratch0(); 2340 // Convert to GPR and examine the upper 32 bits 2341 __ lgdr(scratch, input_reg); 2342 __ srlg(scratch, scratch, Operand(32)); 2343 __ Cmp32(scratch, Operand(kHoleNanUpper32)); 2344 EmitBranch(instr, eq); 2345} 2346 2347Condition LCodeGen::EmitIsString(Register input, Register temp1, 2348 Label* is_not_string, 2349 SmiCheck check_needed = INLINE_SMI_CHECK) { 2350 if (check_needed == INLINE_SMI_CHECK) { 2351 __ JumpIfSmi(input, is_not_string); 2352 } 2353 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); 2354 2355 return lt; 2356} 2357 2358void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { 2359 Register reg = ToRegister(instr->value()); 2360 Register temp1 = ToRegister(instr->temp()); 2361 2362 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() 2363 ? 
OMIT_SMI_CHECK 2364 : INLINE_SMI_CHECK; 2365 Condition true_cond = 2366 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); 2367 2368 EmitBranch(instr, true_cond); 2369} 2370 2371void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { 2372 Register input_reg = EmitLoadRegister(instr->value(), ip); 2373 __ TestIfSmi(input_reg); 2374 EmitBranch(instr, eq); 2375} 2376 2377void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2378 Register input = ToRegister(instr->value()); 2379 Register temp = ToRegister(instr->temp()); 2380 2381 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2382 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2383 } 2384 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2385 __ tm(FieldMemOperand(temp, Map::kBitFieldOffset), 2386 Operand(1 << Map::kIsUndetectable)); 2387 EmitBranch(instr, ne); 2388} 2389 2390static Condition ComputeCompareCondition(Token::Value op) { 2391 switch (op) { 2392 case Token::EQ_STRICT: 2393 case Token::EQ: 2394 return eq; 2395 case Token::LT: 2396 return lt; 2397 case Token::GT: 2398 return gt; 2399 case Token::LTE: 2400 return le; 2401 case Token::GTE: 2402 return ge; 2403 default: 2404 UNREACHABLE(); 2405 return kNoCondition; 2406 } 2407} 2408 2409void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { 2410 DCHECK(ToRegister(instr->context()).is(cp)); 2411 DCHECK(ToRegister(instr->left()).is(r3)); 2412 DCHECK(ToRegister(instr->right()).is(r2)); 2413 2414 Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code(); 2415 CallCode(code, RelocInfo::CODE_TARGET, instr); 2416 __ CompareRoot(r2, Heap::kTrueValueRootIndex); 2417 EmitBranch(instr, eq); 2418} 2419 2420static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { 2421 InstanceType from = instr->from(); 2422 InstanceType to = instr->to(); 2423 if (from == FIRST_TYPE) return to; 2424 DCHECK(from == to || to == LAST_TYPE); 2425 return from; 2426} 2427 2428static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { 2429 InstanceType from = instr->from(); 2430 InstanceType to = instr->to(); 2431 if (from == to) return eq; 2432 if (to == LAST_TYPE) return ge; 2433 if (from == FIRST_TYPE) return le; 2434 UNREACHABLE(); 2435 return eq; 2436} 2437 2438void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { 2439 Register scratch = scratch0(); 2440 Register input = ToRegister(instr->value()); 2441 2442 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2443 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2444 } 2445 2446 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); 2447 EmitBranch(instr, BranchCondition(instr->hydrogen())); 2448} 2449 2450void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2451 Register input = ToRegister(instr->value()); 2452 Register result = ToRegister(instr->result()); 2453 2454 __ AssertString(input); 2455 2456 __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset)); 2457 __ IndexFromHash(result, result); 2458} 2459 2460void LCodeGen::DoHasCachedArrayIndexAndBranch( 2461 LHasCachedArrayIndexAndBranch* instr) { 2462 Register input = ToRegister(instr->value()); 2463 Register scratch = scratch0(); 2464 2465 __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset)); 2466 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask)); 2467 __ AndP(r0, scratch); 2468 EmitBranch(instr, eq); 2469} 2470 2471// Branches to a label or falls through with the answer in flags. 
Trashes 2472// the temp registers, but not the input. 2473void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, 2474 Handle<String> class_name, Register input, 2475 Register temp, Register temp2) { 2476 DCHECK(!input.is(temp)); 2477 DCHECK(!input.is(temp2)); 2478 DCHECK(!temp.is(temp2)); 2479 2480 __ JumpIfSmi(input, is_false); 2481 2482 __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE); 2483 STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE); 2484 if (String::Equals(isolate()->factory()->Function_string(), class_name)) { 2485 __ bge(is_true); 2486 } else { 2487 __ bge(is_false); 2488 } 2489 2490 // Check if the constructor in the map is a function. 2491 Register instance_type = ip; 2492 __ GetMapConstructor(temp, temp, temp2, instance_type); 2493 2494 // Objects with a non-function constructor have class 'Object'. 2495 __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE)); 2496 if (String::Equals(isolate()->factory()->Object_string(), class_name)) { 2497 __ bne(is_true); 2498 } else { 2499 __ bne(is_false); 2500 } 2501 2502 // temp now contains the constructor function. Grab the 2503 // instance class name from there. 2504 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2505 __ LoadP(temp, 2506 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); 2507 // The class name we are testing against is internalized since it's a literal. 2508 // The name in the constructor is internalized because of the way the context 2509 // is booted. This routine isn't expected to work for random API-created 2510 // classes and it doesn't have to because you can't access it with natives 2511 // syntax. Since both sides are internalized it is sufficient to use an 2512 // identity comparison. 2513 __ CmpP(temp, Operand(class_name)); 2514 // End with the answer in flags. 2515} 2516 2517void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { 2518 Register input = ToRegister(instr->value()); 2519 Register temp = scratch0(); 2520 Register temp2 = ToRegister(instr->temp()); 2521 Handle<String> class_name = instr->hydrogen()->class_name(); 2522 2523 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2524 class_name, input, temp, temp2); 2525 2526 EmitBranch(instr, eq); 2527} 2528 2529void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2530 Register reg = ToRegister(instr->value()); 2531 Register temp = ToRegister(instr->temp()); 2532 2533 __ mov(temp, Operand(instr->map())); 2534 __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); 2535 EmitBranch(instr, eq); 2536} 2537 2538void LCodeGen::DoHasInPrototypeChainAndBranch( 2539 LHasInPrototypeChainAndBranch* instr) { 2540 Register const object = ToRegister(instr->object()); 2541 Register const object_map = scratch0(); 2542 Register const object_instance_type = ip; 2543 Register const object_prototype = object_map; 2544 Register const prototype = ToRegister(instr->prototype()); 2545 2546 // The {object} must be a spec object. It's sufficient to know that {object} 2547 // is not a smi, since all other non-spec objects have {null} prototypes and 2548 // will be ruled out below. 2549 if (instr->hydrogen()->ObjectNeedsSmiCheck()) { 2550 __ TestIfSmi(object); 2551 EmitFalseBranch(instr, eq); 2552 } 2553 // Loop through the {object}s prototype chain looking for the {prototype}. 2554 __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); 2555 Label loop; 2556 __ bind(&loop); 2557 2558 // Deoptimize if the object needs to be access checked. 
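  // Access-checked objects (e.g. cross-origin window proxies) need a runtime
  // security check that this inlined prototype walk cannot perform.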
2559 __ LoadlB(object_instance_type, 2560 FieldMemOperand(object_map, Map::kBitFieldOffset)); 2561 __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0); 2562 DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0); 2563 // Deoptimize for proxies. 2564 __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); 2565 DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy); 2566 __ LoadP(object_prototype, 2567 FieldMemOperand(object_map, Map::kPrototypeOffset)); 2568 __ CompareRoot(object_prototype, Heap::kNullValueRootIndex); 2569 EmitFalseBranch(instr, eq); 2570 __ CmpP(object_prototype, prototype); 2571 EmitTrueBranch(instr, eq); 2572 __ LoadP(object_map, 2573 FieldMemOperand(object_prototype, HeapObject::kMapOffset)); 2574 __ b(&loop); 2575} 2576 2577void LCodeGen::DoCmpT(LCmpT* instr) { 2578 DCHECK(ToRegister(instr->context()).is(cp)); 2579 Token::Value op = instr->op(); 2580 2581 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2582 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2583 // This instruction also signals no smi code inlined 2584 __ CmpP(r2, Operand::Zero()); 2585 2586 Condition condition = ComputeCompareCondition(op); 2587 Label true_value, done; 2588 2589 __ b(condition, &true_value, Label::kNear); 2590 2591 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); 2592 __ b(&done, Label::kNear); 2593 2594 __ bind(&true_value); 2595 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); 2596 2597 __ bind(&done); 2598} 2599 2600void LCodeGen::DoReturn(LReturn* instr) { 2601 if (FLAG_trace && info()->IsOptimizing()) { 2602 // Push the return value on the stack as the parameter. 2603 // Runtime::TraceExit returns its parameter in r2. We're leaving the code 2604 // managed by the register allocator and tearing down the frame, it's 2605 // safe to write to the context register. 2606 __ push(r2); 2607 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2608 __ CallRuntime(Runtime::kTraceExit); 2609 } 2610 if (info()->saves_caller_doubles()) { 2611 RestoreCallerDoubles(); 2612 } 2613 if (instr->has_constant_parameter_count()) { 2614 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2615 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 2616 if (NeedsEagerFrame()) { 2617 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta); 2618 } else if (sp_delta != 0) { 2619 // TODO(joransiu): Clean this up into Macro Assembler 2620 if (sp_delta >= 0 && sp_delta < 4096) 2621 __ la(sp, MemOperand(sp, sp_delta)); 2622 else 2623 __ lay(sp, MemOperand(sp, sp_delta)); 2624 } 2625 } else { 2626 DCHECK(info()->IsStub()); // Functions would need to drop one more value. 2627 Register reg = ToRegister(instr->parameter_count()); 2628 // The argument count parameter is a smi 2629 if (NeedsEagerFrame()) { 2630 masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); 2631 } 2632 __ SmiToPtrArrayOffset(r0, reg); 2633 __ AddP(sp, sp, r0); 2634 } 2635 2636 __ Ret(); 2637} 2638 2639template <class T> 2640void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 2641 Register vector_register = ToRegister(instr->temp_vector()); 2642 Register slot_register = LoadDescriptor::SlotRegister(); 2643 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister())); 2644 DCHECK(slot_register.is(r2)); 2645 2646 AllowDeferredHandleDereference vector_structure_check; 2647 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); 2648 __ Move(vector_register, vector); 2649 // No need to allocate this register. 
2650 FeedbackVectorSlot slot = instr->hydrogen()->slot(); 2651 int index = vector->GetIndex(slot); 2652 __ LoadSmiLiteral(slot_register, Smi::FromInt(index)); 2653} 2654 2655template <class T> 2656void LCodeGen::EmitVectorStoreICRegisters(T* instr) { 2657 Register vector_register = ToRegister(instr->temp_vector()); 2658 Register slot_register = ToRegister(instr->temp_slot()); 2659 2660 AllowDeferredHandleDereference vector_structure_check; 2661 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector(); 2662 __ Move(vector_register, vector); 2663 FeedbackVectorSlot slot = instr->hydrogen()->slot(); 2664 int index = vector->GetIndex(slot); 2665 __ LoadSmiLiteral(slot_register, Smi::FromInt(index)); 2666} 2667 2668void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 2669 DCHECK(ToRegister(instr->context()).is(cp)); 2670 DCHECK(ToRegister(instr->result()).is(r2)); 2671 2672 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr); 2673 Handle<Code> ic = 2674 CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode()) 2675 .code(); 2676 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2677} 2678 2679void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2680 Register context = ToRegister(instr->context()); 2681 Register result = ToRegister(instr->result()); 2682 __ LoadP(result, ContextMemOperand(context, instr->slot_index())); 2683 if (instr->hydrogen()->RequiresHoleCheck()) { 2684 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 2685 if (instr->hydrogen()->DeoptimizesOnHole()) { 2686 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); 2687 } else { 2688 Label skip; 2689 __ bne(&skip, Label::kNear); 2690 __ mov(result, Operand(factory()->undefined_value())); 2691 __ bind(&skip); 2692 } 2693 } 2694} 2695 2696void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2697 Register context = ToRegister(instr->context()); 2698 Register value = ToRegister(instr->value()); 2699 Register scratch = scratch0(); 2700 MemOperand target = ContextMemOperand(context, instr->slot_index()); 2701 2702 Label skip_assignment; 2703 2704 if (instr->hydrogen()->RequiresHoleCheck()) { 2705 __ LoadP(scratch, target); 2706 __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex); 2707 if (instr->hydrogen()->DeoptimizesOnHole()) { 2708 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); 2709 } else { 2710 __ bne(&skip_assignment); 2711 } 2712 } 2713 2714 __ StoreP(value, target); 2715 if (instr->hydrogen()->NeedsWriteBarrier()) { 2716 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() 2717 ? 
OMIT_SMI_CHECK 2718 : INLINE_SMI_CHECK; 2719 __ RecordWriteContextSlot(context, target.offset(), value, scratch, 2720 GetLinkRegisterState(), kSaveFPRegs, 2721 EMIT_REMEMBERED_SET, check_needed); 2722 } 2723 2724 __ bind(&skip_assignment); 2725} 2726 2727void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2728 HObjectAccess access = instr->hydrogen()->access(); 2729 int offset = access.offset(); 2730 Register object = ToRegister(instr->object()); 2731 2732 if (access.IsExternalMemory()) { 2733 Register result = ToRegister(instr->result()); 2734 MemOperand operand = MemOperand(object, offset); 2735 __ LoadRepresentation(result, operand, access.representation(), r0); 2736 return; 2737 } 2738 2739 if (instr->hydrogen()->representation().IsDouble()) { 2740 DCHECK(access.IsInobject()); 2741 DoubleRegister result = ToDoubleRegister(instr->result()); 2742 __ ld(result, FieldMemOperand(object, offset)); 2743 return; 2744 } 2745 2746 Register result = ToRegister(instr->result()); 2747 if (!access.IsInobject()) { 2748 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 2749 object = result; 2750 } 2751 2752 Representation representation = access.representation(); 2753 2754#if V8_TARGET_ARCH_S390X 2755 // 64-bit Smi optimization 2756 if (representation.IsSmi() && 2757 instr->hydrogen()->representation().IsInteger32()) { 2758 // Read int value directly from upper half of the smi. 2759 offset = SmiWordOffset(offset); 2760 representation = Representation::Integer32(); 2761 } 2762#endif 2763 2764 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation, 2765 r0); 2766} 2767 2768void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 2769 DCHECK(ToRegister(instr->context()).is(cp)); 2770 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 2771 DCHECK(ToRegister(instr->result()).is(r2)); 2772 2773 // Name is always in r4. 2774 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 2775 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr); 2776 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code(); 2777 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2778} 2779 2780void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { 2781 Register scratch = scratch0(); 2782 Register function = ToRegister(instr->function()); 2783 Register result = ToRegister(instr->result()); 2784 2785 // Get the prototype or initial map from the function. 2786 __ LoadP(result, 2787 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2788 2789 // Check that the function has a prototype or an initial map. 2790 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 2791 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); 2792 2793 // If the function does not have an initial map, we're done. 2794 Label done; 2795 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); 2796 __ bne(&done, Label::kNear); 2797 2798 // Get the prototype from the initial map. 2799 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); 2800 2801 // All done. 2802 __ bind(&done); 2803} 2804 2805void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 2806 Register result = ToRegister(instr->result()); 2807 __ LoadRoot(result, instr->index()); 2808} 2809 2810void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 2811 Register arguments = ToRegister(instr->arguments()); 2812 Register result = ToRegister(instr->result()); 2813 // There are two words between the frame pointer and the last argument. 
2814 // Subtracting from length accounts for one of them add one more. 2815 if (instr->length()->IsConstantOperand()) { 2816 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 2817 if (instr->index()->IsConstantOperand()) { 2818 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 2819 int index = (const_length - const_index) + 1; 2820 __ LoadP(result, MemOperand(arguments, index * kPointerSize)); 2821 } else { 2822 Register index = ToRegister(instr->index()); 2823 __ SubP(result, index, Operand(const_length + 1)); 2824 __ LoadComplementRR(result, result); 2825 __ ShiftLeftP(result, result, Operand(kPointerSizeLog2)); 2826 __ LoadP(result, MemOperand(arguments, result)); 2827 } 2828 } else if (instr->index()->IsConstantOperand()) { 2829 Register length = ToRegister(instr->length()); 2830 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 2831 int loc = const_index - 1; 2832 if (loc != 0) { 2833 __ SubP(result, length, Operand(loc)); 2834 __ ShiftLeftP(result, result, Operand(kPointerSizeLog2)); 2835 __ LoadP(result, MemOperand(arguments, result)); 2836 } else { 2837 __ ShiftLeftP(result, length, Operand(kPointerSizeLog2)); 2838 __ LoadP(result, MemOperand(arguments, result)); 2839 } 2840 } else { 2841 Register length = ToRegister(instr->length()); 2842 Register index = ToRegister(instr->index()); 2843 __ SubP(result, length, index); 2844 __ AddP(result, result, Operand(1)); 2845 __ ShiftLeftP(result, result, Operand(kPointerSizeLog2)); 2846 __ LoadP(result, MemOperand(arguments, result)); 2847 } 2848} 2849 2850void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 2851 Register external_pointer = ToRegister(instr->elements()); 2852 Register key = no_reg; 2853 ElementsKind elements_kind = instr->elements_kind(); 2854 bool key_is_constant = instr->key()->IsConstantOperand(); 2855 int constant_key = 0; 2856 if (key_is_constant) { 2857 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 2858 if (constant_key & 0xF0000000) { 2859 Abort(kArrayIndexConstantValueTooBig); 2860 } 2861 } else { 2862 key = ToRegister(instr->key()); 2863 } 2864 int element_size_shift = ElementsKindToShiftSize(elements_kind); 2865 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 2866 bool keyMaybeNegative = instr->hydrogen()->IsDehoisted(); 2867 int base_offset = instr->base_offset(); 2868 bool use_scratch = false; 2869 2870 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { 2871 DoubleRegister result = ToDoubleRegister(instr->result()); 2872 if (key_is_constant) { 2873 base_offset += constant_key << element_size_shift; 2874 if (!is_int20(base_offset)) { 2875 __ mov(scratch0(), Operand(base_offset)); 2876 base_offset = 0; 2877 use_scratch = true; 2878 } 2879 } else { 2880 __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi, 2881 keyMaybeNegative); 2882 use_scratch = true; 2883 } 2884 if (elements_kind == FLOAT32_ELEMENTS) { 2885 if (!use_scratch) { 2886 __ ldeb(result, MemOperand(external_pointer, base_offset)); 2887 } else { 2888 __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset)); 2889 } 2890 } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS 2891 if (!use_scratch) { 2892 __ ld(result, MemOperand(external_pointer, base_offset)); 2893 } else { 2894 __ ld(result, MemOperand(scratch0(), external_pointer, base_offset)); 2895 } 2896 } 2897 } else { 2898 Register result = ToRegister(instr->result()); 2899 MemOperand mem_operand = 2900 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, 2901 constant_key, element_size_shift, base_offset, 2902 keyMaybeNegative); 2903 switch (elements_kind) { 2904 case INT8_ELEMENTS: 2905 __ LoadB(result, mem_operand); 2906 break; 2907 case UINT8_ELEMENTS: 2908 case UINT8_CLAMPED_ELEMENTS: 2909 __ LoadlB(result, mem_operand); 2910 break; 2911 case INT16_ELEMENTS: 2912 __ LoadHalfWordP(result, mem_operand); 2913 break; 2914 case UINT16_ELEMENTS: 2915 __ LoadLogicalHalfWordP(result, mem_operand); 2916 break; 2917 case INT32_ELEMENTS: 2918 __ LoadW(result, mem_operand, r0); 2919 break; 2920 case UINT32_ELEMENTS: 2921 __ LoadlW(result, mem_operand, r0); 2922 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 2923 __ CmpLogical32(result, Operand(0x80000000)); 2924 DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue); 2925 } 2926 break; 2927 case FLOAT32_ELEMENTS: 2928 case FLOAT64_ELEMENTS: 2929 case FAST_HOLEY_DOUBLE_ELEMENTS: 2930 case FAST_HOLEY_ELEMENTS: 2931 case FAST_HOLEY_SMI_ELEMENTS: 2932 case FAST_DOUBLE_ELEMENTS: 2933 case FAST_ELEMENTS: 2934 case FAST_SMI_ELEMENTS: 2935 case DICTIONARY_ELEMENTS: 2936 case FAST_SLOPPY_ARGUMENTS_ELEMENTS: 2937 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: 2938 case FAST_STRING_WRAPPER_ELEMENTS: 2939 case SLOW_STRING_WRAPPER_ELEMENTS: 2940 case NO_ELEMENTS: 2941 UNREACHABLE(); 2942 break; 2943 } 2944 } 2945} 2946 2947void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 2948 Register elements = ToRegister(instr->elements()); 2949 bool key_is_constant = instr->key()->IsConstantOperand(); 2950 Register key = no_reg; 2951 DoubleRegister result = ToDoubleRegister(instr->result()); 2952 Register scratch = scratch0(); 2953 2954 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 2955 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 2956 bool keyMaybeNegative = instr->hydrogen()->IsDehoisted(); 2957 int constant_key = 0; 2958 if (key_is_constant) { 2959 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 2960 if (constant_key & 0xF0000000) { 2961 Abort(kArrayIndexConstantValueTooBig); 2962 } 2963 } else { 2964 key = ToRegister(instr->key()); 2965 } 2966 2967 bool use_scratch = false; 2968 intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize; 2969 if (!key_is_constant) { 2970 use_scratch = true; 2971 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi, 2972 keyMaybeNegative); 2973 } 2974 2975 // Memory references support up to 20-bits signed displacement in RXY form 2976 // Include Register::kExponentOffset in check, so we are guaranteed not to 2977 // overflow displacement later. 
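  // (is_int20 mirrors the 20-bit signed displacement field of RXY-form
  // instructions, i.e. offsets in the range -524288 through 524287.)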
2978 if (!is_int20(base_offset + Register::kExponentOffset)) { 2979 use_scratch = true; 2980 if (key_is_constant) { 2981 __ mov(scratch, Operand(base_offset)); 2982 } else { 2983 __ AddP(scratch, Operand(base_offset)); 2984 } 2985 base_offset = 0; 2986 } 2987 2988 if (!use_scratch) { 2989 __ ld(result, MemOperand(elements, base_offset)); 2990 } else { 2991 __ ld(result, MemOperand(scratch, elements, base_offset)); 2992 } 2993 2994 if (instr->hydrogen()->RequiresHoleCheck()) { 2995 if (!use_scratch) { 2996 __ LoadlW(r0, 2997 MemOperand(elements, base_offset + Register::kExponentOffset)); 2998 } else { 2999 __ LoadlW(r0, MemOperand(scratch, elements, 3000 base_offset + Register::kExponentOffset)); 3001 } 3002 __ Cmp32(r0, Operand(kHoleNanUpper32)); 3003 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); 3004 } 3005} 3006 3007void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3008 HLoadKeyed* hinstr = instr->hydrogen(); 3009 Register elements = ToRegister(instr->elements()); 3010 Register result = ToRegister(instr->result()); 3011 Register scratch = scratch0(); 3012 int offset = instr->base_offset(); 3013 3014 if (instr->key()->IsConstantOperand()) { 3015 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3016 offset += ToInteger32(const_operand) * kPointerSize; 3017 } else { 3018 Register key = ToRegister(instr->key()); 3019 // Even though the HLoadKeyed instruction forces the input 3020 // representation for the key to be an integer, the input gets replaced 3021 // during bound check elimination with the index argument to the bounds 3022 // check, which can be tagged, so that case must be handled here, too. 3023 if (hinstr->key()->representation().IsSmi()) { 3024 __ SmiToPtrArrayOffset(scratch, key); 3025 } else { 3026 __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2)); 3027 } 3028 } 3029 3030 bool requires_hole_check = hinstr->RequiresHoleCheck(); 3031 Representation representation = hinstr->representation(); 3032 3033#if V8_TARGET_ARCH_S390X 3034 // 64-bit Smi optimization 3035 if (representation.IsInteger32() && 3036 hinstr->elements_kind() == FAST_SMI_ELEMENTS) { 3037 DCHECK(!requires_hole_check); 3038 // Read int value directly from upper half of the smi. 3039 offset = SmiWordOffset(offset); 3040 } 3041#endif 3042 3043 if (instr->key()->IsConstantOperand()) { 3044 __ LoadRepresentation(result, MemOperand(elements, offset), representation, 3045 r1); 3046 } else { 3047 __ LoadRepresentation(result, MemOperand(scratch, elements, offset), 3048 representation, r1); 3049 } 3050 3051 // Check for the hole value. 3052 if (requires_hole_check) { 3053 if (IsFastSmiElementsKind(hinstr->elements_kind())) { 3054 __ TestIfSmi(result); 3055 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); 3056 } else { 3057 __ CompareRoot(result, Heap::kTheHoleValueRootIndex); 3058 DeoptimizeIf(eq, instr, DeoptimizeReason::kHole); 3059 } 3060 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) { 3061 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS); 3062 Label done; 3063 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3064 __ CmpP(result, scratch); 3065 __ bne(&done); 3066 if (info()->IsStub()) { 3067 // A stub can safely convert the hole to undefined only if the array 3068 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise 3069 // it needs to bail out. 
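      // The protector cell is invalidated once elements are installed on
      // Array.prototype or Object.prototype, at which point a hole may no
      // longer be treated as undefined.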
3070 __ LoadRoot(result, Heap::kArrayProtectorRootIndex); 3071 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset)); 3072 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0); 3073 DeoptimizeIf(ne, instr, DeoptimizeReason::kHole); 3074 } 3075 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 3076 __ bind(&done); 3077 } 3078} 3079 3080void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3081 if (instr->is_fixed_typed_array()) { 3082 DoLoadKeyedExternalArray(instr); 3083 } else if (instr->hydrogen()->representation().IsDouble()) { 3084 DoLoadKeyedFixedDoubleArray(instr); 3085 } else { 3086 DoLoadKeyedFixedArray(instr); 3087 } 3088} 3089 3090MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base, 3091 bool key_is_constant, bool key_is_smi, 3092 int constant_key, 3093 int element_size_shift, 3094 int base_offset, 3095 bool keyMaybeNegative) { 3096 Register scratch = scratch0(); 3097 3098 if (key_is_constant) { 3099 int offset = (base_offset + (constant_key << element_size_shift)); 3100 if (!is_int20(offset)) { 3101 __ mov(scratch, Operand(offset)); 3102 return MemOperand(base, scratch); 3103 } else { 3104 return MemOperand(base, 3105 (constant_key << element_size_shift) + base_offset); 3106 } 3107 } 3108 3109 bool needs_shift = 3110 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0)); 3111 3112 if (needs_shift) { 3113 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi, 3114 keyMaybeNegative); 3115 } else { 3116 scratch = key; 3117 } 3118 3119 if (!is_int20(base_offset)) { 3120 __ AddP(scratch, Operand(base_offset)); 3121 base_offset = 0; 3122 } 3123 return MemOperand(scratch, base, base_offset); 3124} 3125 3126void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3127 DCHECK(ToRegister(instr->context()).is(cp)); 3128 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister())); 3129 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister())); 3130 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr); 3131 3132 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code(); 3133 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3134} 3135 3136void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3137 Register scratch = scratch0(); 3138 Register result = ToRegister(instr->result()); 3139 3140 if (instr->hydrogen()->from_inlined()) { 3141 __ lay(result, MemOperand(sp, -2 * kPointerSize)); 3142 } else if (instr->hydrogen()->arguments_adaptor()) { 3143 // Check if the calling frame is an arguments adaptor frame. 3144 Label done, adapted; 3145 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3146 __ LoadP( 3147 result, 3148 MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset)); 3149 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); 3150 3151 // Result is the frame pointer for the frame if not adapted and for the real 3152 // frame below the adaptor frame if adapted. 3153 __ beq(&adapted, Label::kNear); 3154 __ LoadRR(result, fp); 3155 __ b(&done, Label::kNear); 3156 3157 __ bind(&adapted); 3158 __ LoadRR(result, scratch); 3159 __ bind(&done); 3160 } else { 3161 __ LoadRR(result, fp); 3162 } 3163} 3164 3165void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3166 Register elem = ToRegister(instr->elements()); 3167 Register result = ToRegister(instr->result()); 3168 3169 Label done; 3170 3171 // If no arguments adaptor frame the number of arguments is fixed. 
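  // DoArgumentsElements yields fp itself when no adaptor frame is present,
  // so equality with fp below means the static parameter count applies.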
3172 __ CmpP(fp, elem); 3173 __ mov(result, Operand(scope()->num_parameters())); 3174 __ beq(&done, Label::kNear); 3175 3176 // Arguments adaptor frame present. Get argument length from there. 3177 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3178 __ LoadP(result, 3179 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3180 __ SmiUntag(result); 3181 3182 // Argument length is in result register. 3183 __ bind(&done); 3184} 3185 3186void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3187 Register receiver = ToRegister(instr->receiver()); 3188 Register function = ToRegister(instr->function()); 3189 Register result = ToRegister(instr->result()); 3190 Register scratch = scratch0(); 3191 3192 // If the receiver is null or undefined, we have to pass the global 3193 // object as a receiver to normal functions. Values have to be 3194 // passed unchanged to builtins and strict-mode functions. 3195 Label global_object, result_in_receiver; 3196 3197 if (!instr->hydrogen()->known_function()) { 3198 // Do not transform the receiver to object for strict mode 3199 // functions or builtins. 3200 __ LoadP(scratch, 3201 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3202 __ LoadlW(scratch, FieldMemOperand( 3203 scratch, SharedFunctionInfo::kCompilerHintsOffset)); 3204 __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) | 3205 (1 << SharedFunctionInfo::kNativeBit))); 3206 __ bne(&result_in_receiver, Label::kNear); 3207 } 3208 3209 // Normal function. Replace undefined or null with global receiver. 3210 __ CompareRoot(receiver, Heap::kNullValueRootIndex); 3211 __ beq(&global_object, Label::kNear); 3212 __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex); 3213 __ beq(&global_object, Label::kNear); 3214 3215 // Deoptimize if the receiver is not a JS object. 3216 __ TestIfSmi(receiver); 3217 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); 3218 __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE); 3219 DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject); 3220 3221 __ b(&result_in_receiver, Label::kNear); 3222 __ bind(&global_object); 3223 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3224 __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX)); 3225 __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX)); 3226 3227 if (result.is(receiver)) { 3228 __ bind(&result_in_receiver); 3229 } else { 3230 Label result_ok; 3231 __ b(&result_ok, Label::kNear); 3232 __ bind(&result_in_receiver); 3233 __ LoadRR(result, receiver); 3234 __ bind(&result_ok); 3235 } 3236} 3237 3238void LCodeGen::DoApplyArguments(LApplyArguments* instr) { 3239 Register receiver = ToRegister(instr->receiver()); 3240 Register function = ToRegister(instr->function()); 3241 Register length = ToRegister(instr->length()); 3242 Register elements = ToRegister(instr->elements()); 3243 Register scratch = scratch0(); 3244 DCHECK(receiver.is(r2)); // Used for parameter count. 3245 DCHECK(function.is(r3)); // Required by InvokeFunction. 3246 DCHECK(ToRegister(instr->result()).is(r2)); 3247 3248 // Copy the arguments to this function possibly from the 3249 // adaptor frame below it. 3250 const uint32_t kArgumentsLimit = 1 * KB; 3251 __ CmpLogicalP(length, Operand(kArgumentsLimit)); 3252 DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments); 3253 3254 // Push the receiver and use the register to keep the original 3255 // number of arguments. 
3256 __ push(receiver); 3257 __ LoadRR(receiver, length); 3258 // The arguments are at a one pointer size offset from elements. 3259 __ AddP(elements, Operand(1 * kPointerSize)); 3260 3261 // Loop through the arguments pushing them onto the execution 3262 // stack. 3263 Label invoke, loop; 3264 // length is a small non-negative integer, due to the test above. 3265 __ CmpP(length, Operand::Zero()); 3266 __ beq(&invoke, Label::kNear); 3267 __ bind(&loop); 3268 __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2)); 3269 __ LoadP(scratch, MemOperand(elements, r1)); 3270 __ push(scratch); 3271 __ BranchOnCount(length, &loop); 3272 3273 __ bind(&invoke); 3274 3275 InvokeFlag flag = CALL_FUNCTION; 3276 if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) { 3277 DCHECK(!info()->saves_caller_doubles()); 3278 // TODO(ishell): drop current frame before pushing arguments to the stack. 3279 flag = JUMP_FUNCTION; 3280 ParameterCount actual(r2); 3281 // It is safe to use r5, r6 and r7 as scratch registers here given that 3282 // 1) we are not going to return to caller function anyway, 3283 // 2) r5 (new.target) will be initialized below. 3284 PrepareForTailCall(actual, r5, r6, r7); 3285 } 3286 3287 DCHECK(instr->HasPointerMap()); 3288 LPointerMap* pointers = instr->pointer_map(); 3289 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); 3290 // The number of arguments is stored in receiver which is r2, as expected 3291 // by InvokeFunction. 3292 ParameterCount actual(receiver); 3293 __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator); 3294} 3295 3296void LCodeGen::DoPushArgument(LPushArgument* instr) { 3297 LOperand* argument = instr->value(); 3298 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { 3299 Abort(kDoPushArgumentNotImplementedForDoubleType); 3300 } else { 3301 Register argument_reg = EmitLoadRegister(argument, ip); 3302 __ push(argument_reg); 3303 } 3304} 3305 3306void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); } 3307 3308void LCodeGen::DoThisFunction(LThisFunction* instr) { 3309 Register result = ToRegister(instr->result()); 3310 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 3311} 3312 3313void LCodeGen::DoContext(LContext* instr) { 3314 // If there is a non-return use, the context must be moved to a register. 3315 Register result = ToRegister(instr->result()); 3316 if (info()->IsOptimizing()) { 3317 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3318 } else { 3319 // If there is no frame, the context must be in cp. 
3320 DCHECK(result.is(cp)); 3321 } 3322} 3323 3324void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3325 DCHECK(ToRegister(instr->context()).is(cp)); 3326 __ Move(scratch0(), instr->hydrogen()->pairs()); 3327 __ push(scratch0()); 3328 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags())); 3329 __ push(scratch0()); 3330 __ Move(scratch0(), instr->hydrogen()->feedback_vector()); 3331 __ push(scratch0()); 3332 CallRuntime(Runtime::kDeclareGlobals, instr); 3333} 3334 3335void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3336 int formal_parameter_count, int arity, 3337 bool is_tail_call, LInstruction* instr) { 3338 bool dont_adapt_arguments = 3339 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3340 bool can_invoke_directly = 3341 dont_adapt_arguments || formal_parameter_count == arity; 3342 3343 Register function_reg = r3; 3344 3345 LPointerMap* pointers = instr->pointer_map(); 3346 3347 if (can_invoke_directly) { 3348 // Change context. 3349 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); 3350 3351 // Always initialize new target and number of actual arguments. 3352 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); 3353 __ mov(r2, Operand(arity)); 3354 3355 bool is_self_call = function.is_identical_to(info()->closure()); 3356 3357 // Invoke function. 3358 if (is_self_call) { 3359 Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location())); 3360 if (is_tail_call) { 3361 __ Jump(self, RelocInfo::CODE_TARGET); 3362 } else { 3363 __ Call(self, RelocInfo::CODE_TARGET); 3364 } 3365 } else { 3366 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); 3367 if (is_tail_call) { 3368 __ JumpToJSEntry(ip); 3369 } else { 3370 __ CallJSEntry(ip); 3371 } 3372 } 3373 3374 if (!is_tail_call) { 3375 // Set up deoptimization. 3376 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3377 } 3378 } else { 3379 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3380 ParameterCount actual(arity); 3381 ParameterCount expected(formal_parameter_count); 3382 InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION; 3383 __ InvokeFunction(function_reg, expected, actual, flag, generator); 3384 } 3385} 3386 3387void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3388 DCHECK(instr->context() != NULL); 3389 DCHECK(ToRegister(instr->context()).is(cp)); 3390 Register input = ToRegister(instr->value()); 3391 Register result = ToRegister(instr->result()); 3392 Register scratch = scratch0(); 3393 3394 // Deoptimize if not a heap number. 3395 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3396 __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); 3397 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); 3398 3399 Label done; 3400 Register exponent = scratch0(); 3401 scratch = no_reg; 3402 __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3403 // Check the sign of the argument. If the argument is positive, just 3404 // return it. 3405 __ Cmp32(exponent, Operand::Zero()); 3406 // Move the input to the result if necessary. 3407 __ Move(result, input); 3408 __ bge(&done); 3409 3410 // Input is negative. Reverse its sign. 3411 // Preserve the value of all registers. 3412 { 3413 PushSafepointRegistersScope scope(this); 3414 3415 // Registers were saved at the safepoint, so we can use 3416 // many scratch registers. 3417 Register tmp1 = input.is(r3) ? r2 : r3; 3418 Register tmp2 = input.is(r4) ? 
r2 : r4; 3419 Register tmp3 = input.is(r5) ? r2 : r5; 3420 Register tmp4 = input.is(r6) ? r2 : r6; 3421 3422 // exponent: floating point exponent value. 3423 3424 Label allocated, slow; 3425 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); 3426 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); 3427 __ b(&allocated); 3428 3429 // Slow case: Call the runtime system to do the number allocation. 3430 __ bind(&slow); 3431 3432 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, 3433 instr->context()); 3434 // Set the pointer to the new heap number in tmp. 3435 if (!tmp1.is(r2)) __ LoadRR(tmp1, r2); 3436 // Restore input_reg after call to runtime. 3437 __ LoadFromSafepointRegisterSlot(input, input); 3438 __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3439 3440 __ bind(&allocated); 3441 // exponent: floating point exponent value. 3442 // tmp1: allocated heap number. 3443 3444 // Clear the sign bit. 3445 __ nilf(exponent, Operand(~HeapNumber::kSignMask)); 3446 __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 3447 __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 3448 __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 3449 3450 __ StoreToSafepointRegisterSlot(tmp1, result); 3451 } 3452 3453 __ bind(&done); 3454} 3455 3456void LCodeGen::EmitMathAbs(LMathAbs* instr) { 3457 Register input = ToRegister(instr->value()); 3458 Register result = ToRegister(instr->result()); 3459 Label done; 3460 __ CmpP(input, Operand::Zero()); 3461 __ Move(result, input); 3462 __ bge(&done, Label::kNear); 3463 __ LoadComplementRR(result, result); 3464 // Deoptimize on overflow. 3465 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0); 3466 __ bind(&done); 3467} 3468 3469#if V8_TARGET_ARCH_S390X 3470void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { 3471 Register input = ToRegister(instr->value()); 3472 Register result = ToRegister(instr->result()); 3473 Label done; 3474 __ Cmp32(input, Operand::Zero()); 3475 __ Move(result, input); 3476 __ bge(&done, Label::kNear); 3477 3478 // Deoptimize on overflow. 3479 __ Cmp32(input, Operand(0x80000000)); 3480 DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow); 3481 3482 __ LoadComplementRR(result, result); 3483 __ bind(&done); 3484} 3485#endif 3486 3487void LCodeGen::DoMathAbs(LMathAbs* instr) { 3488 // Class for deferred case. 3489 class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode { 3490 public: 3491 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3492 : LDeferredCode(codegen), instr_(instr) {} 3493 void Generate() override { 3494 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3495 } 3496 LInstruction* instr() override { return instr_; } 3497 3498 private: 3499 LMathAbs* instr_; 3500 }; 3501 3502 Representation r = instr->hydrogen()->value()->representation(); 3503 if (r.IsDouble()) { 3504 DoubleRegister input = ToDoubleRegister(instr->value()); 3505 DoubleRegister result = ToDoubleRegister(instr->result()); 3506 __ lpdbr(result, input); 3507#if V8_TARGET_ARCH_S390X 3508 } else if (r.IsInteger32()) { 3509 EmitInteger32MathAbs(instr); 3510 } else if (r.IsSmi()) { 3511#else 3512 } else if (r.IsSmiOrInteger32()) { 3513#endif 3514 EmitMathAbs(instr); 3515 } else { 3516 // Representation is tagged. 3517 DeferredMathAbsTaggedHeapNumber* deferred = 3518 new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3519 Register input = ToRegister(instr->value()); 3520 // Smi check. 
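    // A Smi keeps the low tag bit clear, so the check below only has to
    // test bit 0. Illustrative encodings (31-bit Smis assumed on 32-bit
    // targets):
    //
    //   5           -> 0b1010 (5 << 1), bit 0 clear        -> inline path
    //   heap object -> bit 0 set (kHeapObjectTag)          -> deferred path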
3521 __ JumpIfNotSmi(input, deferred->entry()); 3522 // If smi, handle it directly. 3523 EmitMathAbs(instr); 3524 __ bind(deferred->exit()); 3525 } 3526} 3527 3528void LCodeGen::DoMathFloor(LMathFloor* instr) { 3529 DoubleRegister input = ToDoubleRegister(instr->value()); 3530 Register result = ToRegister(instr->result()); 3531 Register input_high = scratch0(); 3532 Register scratch = ip; 3533 Label done, exact; 3534 3535 __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, 3536 &exact); 3537 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); 3538 3539 __ bind(&exact); 3540 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3541 // Test for -0. 3542 __ CmpP(result, Operand::Zero()); 3543 __ bne(&done, Label::kNear); 3544 __ Cmp32(input_high, Operand::Zero()); 3545 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); 3546 } 3547 __ bind(&done); 3548} 3549 3550void LCodeGen::DoMathRound(LMathRound* instr) { 3551 DoubleRegister input = ToDoubleRegister(instr->value()); 3552 Register result = ToRegister(instr->result()); 3553 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3554 DoubleRegister input_plus_dot_five = double_scratch1; 3555 Register scratch1 = scratch0(); 3556 Register scratch2 = ip; 3557 DoubleRegister dot_five = double_scratch0(); 3558 Label convert, done; 3559 3560 __ LoadDoubleLiteral(dot_five, 0.5, r0); 3561 __ lpdbr(double_scratch1, input); 3562 __ cdbr(double_scratch1, dot_five); 3563 DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN); 3564 // If input is in [-0.5, -0], the result is -0. 3565 // If input is in [+0, +0.5[, the result is +0. 3566 // If the input is +0.5, the result is 1. 3567 __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5]. 3568 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3569 // [-0.5, -0] (negative) yields minus zero. 3570 __ TestDoubleSign(input, scratch1); 3571 DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero); 3572 } 3573 Label return_zero; 3574 __ cdbr(input, dot_five); 3575 __ bne(&return_zero, Label::kNear); 3576 __ LoadImmP(result, Operand(1)); // +0.5. 3577 __ b(&done, Label::kNear); 3578 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on 3579 // flag kBailoutOnMinusZero. 3580 __ bind(&return_zero); 3581 __ LoadImmP(result, Operand::Zero()); 3582 __ b(&done, Label::kNear); 3583 3584 __ bind(&convert); 3585 __ ldr(input_plus_dot_five, input); 3586 __ adbr(input_plus_dot_five, dot_five); 3587 // Reuse dot_five (double_scratch0) as we no longer need this value. 
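  // The convert path computes floor(input + 0.5), which matches
  // Math.round's ties-toward-+infinity behaviour. Worked examples
  // (illustrative only):
  //
  //   round(2.5)  = floor(3.0)  =  3
  //   round(-2.5) = floor(-2.0) = -2   (not -3)
  //   round(0.7)  = floor(1.2)  =  1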
3588 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, 3589 double_scratch0(), &done, &done); 3590 DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN); 3591 __ bind(&done); 3592} 3593 3594void LCodeGen::DoMathFround(LMathFround* instr) { 3595 DoubleRegister input_reg = ToDoubleRegister(instr->value()); 3596 DoubleRegister output_reg = ToDoubleRegister(instr->result()); 3597 3598 // Round double to float 3599 __ ledbr(output_reg, input_reg); 3600 // Extend from float to double 3601 __ ldebr(output_reg, output_reg); 3602} 3603 3604void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 3605 DoubleRegister input = ToDoubleRegister(instr->value()); 3606 DoubleRegister result = ToDoubleRegister(instr->result()); 3607 __ sqdbr(result, input); 3608} 3609 3610void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3611 DoubleRegister input = ToDoubleRegister(instr->value()); 3612 DoubleRegister result = ToDoubleRegister(instr->result()); 3613 DoubleRegister temp = double_scratch0(); 3614 3615 // Note that according to ECMA-262 15.8.2.13: 3616 // Math.pow(-Infinity, 0.5) == Infinity 3617 // Math.sqrt(-Infinity) == NaN 3618 Label skip, done; 3619 3620 __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0()); 3621 __ cdbr(input, temp); 3622 __ bne(&skip, Label::kNear); 3623 __ lcdbr(result, temp); 3624 __ b(&done, Label::kNear); 3625 3626 // Add +0 to convert -0 to +0. 3627 __ bind(&skip); 3628 __ ldr(result, input); 3629 __ lzdr(kDoubleRegZero); 3630 __ adbr(result, kDoubleRegZero); 3631 __ sqdbr(result, result); 3632 __ bind(&done); 3633} 3634 3635void LCodeGen::DoPower(LPower* instr) { 3636 Representation exponent_type = instr->hydrogen()->right()->representation(); 3637 // Having marked this as a call, we can use any registers. 3638 // Just make sure that the input/output registers are the expected ones. 
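  // Dispatch summary for the stub selection below, with the register
  // expectations asserted by the DCHECKs that follow (base in d1, result
  // in d3 in every case):
  //
  //   Smi or Tagged exponent -> MathPowStub::TAGGED
  //   Integer32 exponent     -> MathPowStub::INTEGER
  //   Double exponent (d2)   -> MathPowStub::DOUBLE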
3639 Register tagged_exponent = MathPowTaggedDescriptor::exponent(); 3640 DCHECK(!instr->right()->IsDoubleRegister() || 3641 ToDoubleRegister(instr->right()).is(d2)); 3642 DCHECK(!instr->right()->IsRegister() || 3643 ToRegister(instr->right()).is(tagged_exponent)); 3644 DCHECK(ToDoubleRegister(instr->left()).is(d1)); 3645 DCHECK(ToDoubleRegister(instr->result()).is(d3)); 3646 3647 if (exponent_type.IsSmi()) { 3648 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3649 __ CallStub(&stub); 3650 } else if (exponent_type.IsTagged()) { 3651 Label no_deopt; 3652 __ JumpIfSmi(tagged_exponent, &no_deopt); 3653 __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); 3654 __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex); 3655 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); 3656 __ bind(&no_deopt); 3657 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3658 __ CallStub(&stub); 3659 } else if (exponent_type.IsInteger32()) { 3660 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3661 __ CallStub(&stub); 3662 } else { 3663 DCHECK(exponent_type.IsDouble()); 3664 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3665 __ CallStub(&stub); 3666 } 3667} 3668 3669void LCodeGen::DoMathCos(LMathCos* instr) { 3670 __ PrepareCallCFunction(0, 1, scratch0()); 3671 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 3672 __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1); 3673 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 3674} 3675 3676void LCodeGen::DoMathSin(LMathSin* instr) { 3677 __ PrepareCallCFunction(0, 1, scratch0()); 3678 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 3679 __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1); 3680 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 3681} 3682 3683void LCodeGen::DoMathExp(LMathExp* instr) { 3684 __ PrepareCallCFunction(0, 1, scratch0()); 3685 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 3686 __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1); 3687 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 3688} 3689 3690void LCodeGen::DoMathLog(LMathLog* instr) { 3691 __ PrepareCallCFunction(0, 1, scratch0()); 3692 __ MovToFloatParameter(ToDoubleRegister(instr->value())); 3693 __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1); 3694 __ MovFromFloatResult(ToDoubleRegister(instr->result())); 3695} 3696 3697void LCodeGen::DoMathClz32(LMathClz32* instr) { 3698 Register input = ToRegister(instr->value()); 3699 Register result = ToRegister(instr->result()); 3700 Label done; 3701 __ llgfr(result, input); 3702 __ flogr(r0, result); 3703 __ LoadRR(result, r0); 3704 __ CmpP(r0, Operand::Zero()); 3705 __ beq(&done, Label::kNear); 3706 __ SubP(result, Operand(32)); 3707 __ bind(&done); 3708} 3709 3710void LCodeGen::PrepareForTailCall(const ParameterCount& actual, 3711 Register scratch1, Register scratch2, 3712 Register scratch3) { 3713#if DEBUG 3714 if (actual.is_reg()) { 3715 DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3)); 3716 } else { 3717 DCHECK(!AreAliased(scratch1, scratch2, scratch3)); 3718 } 3719#endif 3720 if (FLAG_code_comments) { 3721 if (actual.is_reg()) { 3722 Comment(";;; PrepareForTailCall, actual: %s {", 3723 RegisterConfiguration::Crankshaft()->GetGeneralRegisterName( 3724 actual.reg().code())); 3725 } else { 3726 Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate()); 3727 } 3728 } 3729 3730 // Check if next frame is an arguments adaptor frame. 
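  // Sketch of the walk below (offsets per StandardFrameConstants):
  //
  //   scratch2 = [fp + kCallerFPOffset]        ; caller's frame pointer
  //   scratch3 = [scratch2 + kContextOffset]   ; caller's marker slot
  //   adaptor frame iff scratch3 == Smi(StackFrame::ARGUMENTS_ADAPTOR)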
3731 Register caller_args_count_reg = scratch1; 3732 Label no_arguments_adaptor, formal_parameter_count_loaded; 3733 __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3734 __ LoadP(scratch3, 3735 MemOperand(scratch2, StandardFrameConstants::kContextOffset)); 3736 __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); 3737 __ bne(&no_arguments_adaptor); 3738 3739 // Drop current frame and load arguments count from arguments adaptor frame. 3740 __ LoadRR(fp, scratch2); 3741 __ LoadP(caller_args_count_reg, 3742 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3743 __ SmiUntag(caller_args_count_reg); 3744 __ b(&formal_parameter_count_loaded); 3745 3746 __ bind(&no_arguments_adaptor); 3747 // Load caller's formal parameter count 3748 __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count())); 3749 3750 __ bind(&formal_parameter_count_loaded); 3751 __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3); 3752 3753 Comment(";;; }"); 3754} 3755 3756void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { 3757 HInvokeFunction* hinstr = instr->hydrogen(); 3758 DCHECK(ToRegister(instr->context()).is(cp)); 3759 DCHECK(ToRegister(instr->function()).is(r3)); 3760 DCHECK(instr->HasPointerMap()); 3761 3762 bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow; 3763 3764 if (is_tail_call) { 3765 DCHECK(!info()->saves_caller_doubles()); 3766 ParameterCount actual(instr->arity()); 3767 // It is safe to use r5, r6 and r7 as scratch registers here given that 3768 // 1) we are not going to return to caller function anyway, 3769 // 2) r5 (new.target) will be initialized below. 3770 PrepareForTailCall(actual, r5, r6, r7); 3771 } 3772 3773 Handle<JSFunction> known_function = hinstr->known_function(); 3774 if (known_function.is_null()) { 3775 LPointerMap* pointers = instr->pointer_map(); 3776 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3777 ParameterCount actual(instr->arity()); 3778 InvokeFlag flag = is_tail_call ? 
JUMP_FUNCTION : CALL_FUNCTION; 3779 __ InvokeFunction(r3, no_reg, actual, flag, generator); 3780 } else { 3781 CallKnownFunction(known_function, hinstr->formal_parameter_count(), 3782 instr->arity(), is_tail_call, instr); 3783 } 3784} 3785 3786void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 3787 DCHECK(ToRegister(instr->result()).is(r2)); 3788 3789 if (instr->hydrogen()->IsTailCall()) { 3790 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL); 3791 3792 if (instr->target()->IsConstantOperand()) { 3793 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3794 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3795 __ Jump(code, RelocInfo::CODE_TARGET); 3796 } else { 3797 DCHECK(instr->target()->IsRegister()); 3798 Register target = ToRegister(instr->target()); 3799 __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); 3800 __ JumpToJSEntry(ip); 3801 } 3802 } else { 3803 LPointerMap* pointers = instr->pointer_map(); 3804 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3805 3806 if (instr->target()->IsConstantOperand()) { 3807 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3808 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3809 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 3810 __ Call(code, RelocInfo::CODE_TARGET); 3811 } else { 3812 DCHECK(instr->target()->IsRegister()); 3813 Register target = ToRegister(instr->target()); 3814 generator.BeforeCall(__ CallSize(target)); 3815 __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); 3816 __ CallJSEntry(ip); 3817 } 3818 generator.AfterCall(); 3819 } 3820} 3821 3822void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 3823 DCHECK(ToRegister(instr->context()).is(cp)); 3824 DCHECK(ToRegister(instr->constructor()).is(r3)); 3825 DCHECK(ToRegister(instr->result()).is(r2)); 3826 3827 __ mov(r2, Operand(instr->arity())); 3828 __ Move(r4, instr->hydrogen()->site()); 3829 3830 ElementsKind kind = instr->hydrogen()->elements_kind(); 3831 AllocationSiteOverrideMode override_mode = 3832 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 3833 ? 
DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to create a holey array;
      // look at the first argument.
      __ LoadP(r7, MemOperand(sp, 0));
      __ CmpP(r7, Operand::Zero());
      __ beq(&packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ b(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}

void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lay(code_object,
         MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(code_object,
            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
}

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lay(result, MemOperand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lay(result, MemOperand(base, offset));
  }
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ StoreRepresentation(value, operand, representation, r0);
    return;
  }

  __ AssertNotSmi(object);

#if V8_TARGET_ARCH_S390X
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsInteger32(LConstantOperand::cast(instr->value())));
#else
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
#endif
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!hinstr->has_transition());
    DCHECK(!hinstr->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    DCHECK(offset >= 0);
    __ std(value, FieldMemOperand(object, offset));
    return;
  }

  if (hinstr->has_transition()) {
    Handle<Map> transition = hinstr->transition_map();
    AddDeprecationDependency(transition);
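    // The new map is installed before the field itself is written, and
    // there is no safepoint between the two stores, so the GC can never
    // observe the half-updated object. A sketch of the ordering below
    // (illustration, not generated code):
    //
    //   object->map = transition_map;    // + map write barrier if needed
    //   object->field[offset] = value;   // + field write barrier if needed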
3929 __ mov(scratch, Operand(transition)); 3930 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0); 3931 if (hinstr->NeedsWriteBarrierForMap()) { 3932 Register temp = ToRegister(instr->temp()); 3933 // Update the write barrier for the map field. 3934 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(), 3935 kSaveFPRegs); 3936 } 3937 } 3938 3939 // Do the store. 3940 Register record_dest = object; 3941 Register record_value = no_reg; 3942 Register record_scratch = scratch; 3943#if V8_TARGET_ARCH_S390X 3944 if (FLAG_unbox_double_fields && representation.IsDouble()) { 3945 DCHECK(access.IsInobject()); 3946 DoubleRegister value = ToDoubleRegister(instr->value()); 3947 __ std(value, FieldMemOperand(object, offset)); 3948 if (hinstr->NeedsWriteBarrier()) { 3949 record_value = ToRegister(instr->value()); 3950 } 3951 } else { 3952 if (representation.IsSmi() && 3953 hinstr->value()->representation().IsInteger32()) { 3954 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); 3955 // 64-bit Smi optimization 3956 // Store int value directly to upper half of the smi. 3957 offset = SmiWordOffset(offset); 3958 representation = Representation::Integer32(); 3959 } 3960#endif 3961 if (access.IsInobject()) { 3962 Register value = ToRegister(instr->value()); 3963 MemOperand operand = FieldMemOperand(object, offset); 3964 __ StoreRepresentation(value, operand, representation, r0); 3965 record_value = value; 3966 } else { 3967 Register value = ToRegister(instr->value()); 3968 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3969 MemOperand operand = FieldMemOperand(scratch, offset); 3970 __ StoreRepresentation(value, operand, representation, r0); 3971 record_dest = scratch; 3972 record_value = value; 3973 record_scratch = object; 3974 } 3975#if V8_TARGET_ARCH_S390X 3976 } 3977#endif 3978 3979 if (hinstr->NeedsWriteBarrier()) { 3980 __ RecordWriteField(record_dest, offset, record_value, record_scratch, 3981 GetLinkRegisterState(), kSaveFPRegs, 3982 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(), 3983 hinstr->PointersToHereCheckForValue()); 3984 } 3985} 3986 3987void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 3988 DCHECK(ToRegister(instr->context()).is(cp)); 3989 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 3990 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 3991 3992 EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr); 3993 3994 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name())); 3995 Handle<Code> ic = 3996 CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode()) 3997 .code(); 3998 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3999} 4000 4001void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 4002 Representation representation = instr->hydrogen()->length()->representation(); 4003 DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); 4004 DCHECK(representation.IsSmiOrInteger32()); 4005 4006 Condition cc = instr->hydrogen()->allow_equality() ? 
lt : le; 4007 if (instr->length()->IsConstantOperand()) { 4008 int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); 4009 Register index = ToRegister(instr->index()); 4010 if (representation.IsSmi()) { 4011 __ CmpLogicalP(index, Operand(Smi::FromInt(length))); 4012 } else { 4013 __ CmpLogical32(index, Operand(length)); 4014 } 4015 cc = CommuteCondition(cc); 4016 } else if (instr->index()->IsConstantOperand()) { 4017 int32_t index = ToInteger32(LConstantOperand::cast(instr->index())); 4018 Register length = ToRegister(instr->length()); 4019 if (representation.IsSmi()) { 4020 __ CmpLogicalP(length, Operand(Smi::FromInt(index))); 4021 } else { 4022 __ CmpLogical32(length, Operand(index)); 4023 } 4024 } else { 4025 Register index = ToRegister(instr->index()); 4026 Register length = ToRegister(instr->length()); 4027 if (representation.IsSmi()) { 4028 __ CmpLogicalP(length, index); 4029 } else { 4030 __ CmpLogical32(length, index); 4031 } 4032 } 4033 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 4034 Label done; 4035 __ b(NegateCondition(cc), &done, Label::kNear); 4036 __ stop("eliminated bounds check failed"); 4037 __ bind(&done); 4038 } else { 4039 DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds); 4040 } 4041} 4042 4043void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4044 Register external_pointer = ToRegister(instr->elements()); 4045 Register key = no_reg; 4046 ElementsKind elements_kind = instr->elements_kind(); 4047 bool key_is_constant = instr->key()->IsConstantOperand(); 4048 int constant_key = 0; 4049 if (key_is_constant) { 4050 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4051 if (constant_key & 0xF0000000) { 4052 Abort(kArrayIndexConstantValueTooBig); 4053 } 4054 } else { 4055 key = ToRegister(instr->key()); 4056 } 4057 int element_size_shift = ElementsKindToShiftSize(elements_kind); 4058 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); 4059 bool keyMaybeNegative = instr->hydrogen()->IsDehoisted(); 4060 int base_offset = instr->base_offset(); 4061 4062 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) { 4063 Register address = scratch0(); 4064 DoubleRegister value(ToDoubleRegister(instr->value())); 4065 if (key_is_constant) { 4066 if (constant_key != 0) { 4067 base_offset += constant_key << element_size_shift; 4068 if (!is_int20(base_offset)) { 4069 __ mov(address, Operand(base_offset)); 4070 __ AddP(address, external_pointer); 4071 } else { 4072 __ AddP(address, external_pointer, Operand(base_offset)); 4073 } 4074 base_offset = 0; 4075 } else { 4076 address = external_pointer; 4077 } 4078 } else { 4079 __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi, 4080 keyMaybeNegative); 4081 __ AddP(address, external_pointer); 4082 } 4083 if (elements_kind == FLOAT32_ELEMENTS) { 4084 __ ledbr(double_scratch0(), value); 4085 __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset)); 4086 } else { // Storing doubles, not floats. 
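      // (The FLOAT32_ELEMENTS case narrowed the value with ledbr above;
      // a double element can be stored without any conversion.)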
      __ StoreDouble(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand =
        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
                            constant_key, element_size_shift, base_offset,
                            keyMaybeNegative);
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        if (key_is_constant) {
          __ StoreByte(value, mem_operand, r0);
        } else {
          __ StoreByte(value, mem_operand);
        }
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        if (key_is_constant) {
          __ StoreHalfWord(value, mem_operand, r0);
        } else {
          __ StoreHalfWord(value, mem_operand);
        }
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        if (key_is_constant) {
          __ StoreW(value, mem_operand, r0);
        } else {
          __ StoreW(value, mem_operand);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  bool use_scratch = false;
  intptr_t address_offset = base_offset;

  if (key_is_constant) {
    // Memory references support a 20-bit signed displacement in RXY form.
    if (!is_int20(address_offset)) {
      __ mov(scratch, Operand(address_offset));
      address_offset = 0;
      use_scratch = true;
    }
  } else {
    use_scratch = true;
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
                          keyMaybeNegative);
    // Memory references support a 20-bit signed displacement in RXY form.
    if (!is_int20(address_offset)) {
      __ AddP(scratch, Operand(address_offset));
      address_offset = 0;
    }
  }

  if (instr->NeedsCanonicalization()) {
    // Turn a potential sNaN value into a qNaN.
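    // Illustrative IEEE-754 bit patterns (64-bit; the quiet bit is the top
    // mantissa bit):
    //
    //   signalling NaN: 0x7FF4000000000000
    //   quiet NaN:      0x7FF8000000000000
    //
    // Canonicalizing presumably keeps stored NaNs from aliasing the special
    // NaN pattern that encodes the hole in holey double arrays.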
    __ CanonicalizeNaN(double_scratch, value);
    DCHECK(address_offset >= 0);
    if (use_scratch)
      __ std(double_scratch, MemOperand(scratch, elements, address_offset));
    else
      __ std(double_scratch, MemOperand(elements, address_offset));
  } else {
    if (use_scratch)
      __ std(value, MemOperand(scratch, elements, address_offset));
    else
      __ std(value, MemOperand(elements, address_offset));
  }
}

void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
  Register scratch = scratch0();
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!hinstr->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(scratch, key);
    } else {
      if (instr->hydrogen()->IsDehoisted() ||
          !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
#if V8_TARGET_ARCH_S390X
        // If array access is dehoisted, the key, being an int32, can contain
        // a negative value and needs to be sign-extended to 64 bits for the
        // memory access.
        __ lgfr(key, key);
#endif
        __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
      } else {
        // Small optimization to reduce pathlength. After the bounds check,
        // the key is guaranteed to be non-negative. Leverage RISBG,
        // which also performs zero-extension.
        __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
                 Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
                 true);
      }
    }
  }

  Representation representation = hinstr->value()->representation();

#if V8_TARGET_ARCH_S390X
  // 64-bit Smi optimization
  if (representation.IsInteger32()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif

  if (instr->key()->IsConstantOperand()) {
    __ StoreRepresentation(value, MemOperand(elements, offset), representation,
                           scratch);
  } else {
    __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
                           representation, r0);
  }

  if (hinstr->NeedsWriteBarrier()) {
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    // Compute the address of the modified element and store it into the key
    // register.
4267 if (instr->key()->IsConstantOperand()) { 4268 __ lay(key, MemOperand(elements, offset)); 4269 } else { 4270 __ lay(key, MemOperand(scratch, elements, offset)); 4271 } 4272 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, 4273 EMIT_REMEMBERED_SET, check_needed, 4274 hinstr->PointersToHereCheckForValue()); 4275 } 4276} 4277 4278void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { 4279 // By cases: external, fast double 4280 if (instr->is_fixed_typed_array()) { 4281 DoStoreKeyedExternalArray(instr); 4282 } else if (instr->hydrogen()->value()->representation().IsDouble()) { 4283 DoStoreKeyedFixedDoubleArray(instr); 4284 } else { 4285 DoStoreKeyedFixedArray(instr); 4286 } 4287} 4288 4289void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4290 DCHECK(ToRegister(instr->context()).is(cp)); 4291 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4292 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); 4293 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4294 4295 EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr); 4296 4297 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode( 4298 isolate(), instr->language_mode()) 4299 .code(); 4300 CallCode(ic, RelocInfo::CODE_TARGET, instr); 4301} 4302 4303void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) { 4304 class DeferredMaybeGrowElements final : public LDeferredCode { 4305 public: 4306 DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr) 4307 : LDeferredCode(codegen), instr_(instr) {} 4308 void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); } 4309 LInstruction* instr() override { return instr_; } 4310 4311 private: 4312 LMaybeGrowElements* instr_; 4313 }; 4314 4315 Register result = r2; 4316 DeferredMaybeGrowElements* deferred = 4317 new (zone()) DeferredMaybeGrowElements(this, instr); 4318 LOperand* key = instr->key(); 4319 LOperand* current_capacity = instr->current_capacity(); 4320 4321 DCHECK(instr->hydrogen()->key()->representation().IsInteger32()); 4322 DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32()); 4323 DCHECK(key->IsConstantOperand() || key->IsRegister()); 4324 DCHECK(current_capacity->IsConstantOperand() || 4325 current_capacity->IsRegister()); 4326 4327 if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) { 4328 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 4329 int32_t constant_capacity = 4330 ToInteger32(LConstantOperand::cast(current_capacity)); 4331 if (constant_key >= constant_capacity) { 4332 // Deferred case. 
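      // (Key and capacity are both compile-time constants here, so the
      // grow decision is static and the deferred stub call is entered
      // unconditionally.)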
4333 __ b(deferred->entry()); 4334 } 4335 } else if (key->IsConstantOperand()) { 4336 int32_t constant_key = ToInteger32(LConstantOperand::cast(key)); 4337 __ Cmp32(ToRegister(current_capacity), Operand(constant_key)); 4338 __ ble(deferred->entry()); 4339 } else if (current_capacity->IsConstantOperand()) { 4340 int32_t constant_capacity = 4341 ToInteger32(LConstantOperand::cast(current_capacity)); 4342 __ Cmp32(ToRegister(key), Operand(constant_capacity)); 4343 __ bge(deferred->entry()); 4344 } else { 4345 __ Cmp32(ToRegister(key), ToRegister(current_capacity)); 4346 __ bge(deferred->entry()); 4347 } 4348 4349 if (instr->elements()->IsRegister()) { 4350 __ Move(result, ToRegister(instr->elements())); 4351 } else { 4352 __ LoadP(result, ToMemOperand(instr->elements())); 4353 } 4354 4355 __ bind(deferred->exit()); 4356} 4357 4358void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) { 4359 // TODO(3095996): Get rid of this. For now, we need to make the 4360 // result register contain a valid pointer because it is already 4361 // contained in the register pointer map. 4362 Register result = r2; 4363 __ LoadImmP(result, Operand::Zero()); 4364 4365 // We have to call a stub. 4366 { 4367 PushSafepointRegistersScope scope(this); 4368 if (instr->object()->IsRegister()) { 4369 __ Move(result, ToRegister(instr->object())); 4370 } else { 4371 __ LoadP(result, ToMemOperand(instr->object())); 4372 } 4373 4374 LOperand* key = instr->key(); 4375 if (key->IsConstantOperand()) { 4376 LConstantOperand* constant_key = LConstantOperand::cast(key); 4377 int32_t int_key = ToInteger32(constant_key); 4378 if (Smi::IsValid(int_key)) { 4379 __ LoadSmiLiteral(r5, Smi::FromInt(int_key)); 4380 } else { 4381 // We should never get here at runtime because there is a smi check on 4382 // the key before this point. 4383 __ stop("expected smi"); 4384 } 4385 } else { 4386 __ SmiTag(r5, ToRegister(key)); 4387 } 4388 4389 GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind()); 4390 __ CallStub(&stub); 4391 RecordSafepointWithLazyDeopt( 4392 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 4393 __ StoreToSafepointRegisterSlot(result, result); 4394 } 4395 4396 // Deopt on smi, which means the elements array changed to dictionary mode. 4397 __ TestIfSmi(result); 4398 DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0); 4399} 4400 4401void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4402 Register object_reg = ToRegister(instr->object()); 4403 Register scratch = scratch0(); 4404 4405 Handle<Map> from_map = instr->original_map(); 4406 Handle<Map> to_map = instr->transitioned_map(); 4407 ElementsKind from_kind = instr->from_kind(); 4408 ElementsKind to_kind = instr->to_kind(); 4409 4410 Label not_applicable; 4411 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4412 __ CmpP(scratch, Operand(from_map)); 4413 __ bne(¬_applicable); 4414 4415 if (IsSimpleMapChangeTransition(from_kind, to_kind)) { 4416 Register new_map_reg = ToRegister(instr->new_map_temp()); 4417 __ mov(new_map_reg, Operand(to_map)); 4418 __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); 4419 // Write barrier. 
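    // (A map store is a pointer store like any other: the incremental
    // marker may already have visited object_reg, so the new map pointer
    // presumably still has to be recorded.)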
4420 __ RecordWriteForMap(object_reg, new_map_reg, scratch, 4421 GetLinkRegisterState(), kDontSaveFPRegs); 4422 } else { 4423 DCHECK(ToRegister(instr->context()).is(cp)); 4424 DCHECK(object_reg.is(r2)); 4425 PushSafepointRegistersScope scope(this); 4426 __ Move(r3, to_map); 4427 TransitionElementsKindStub stub(isolate(), from_kind, to_kind); 4428 __ CallStub(&stub); 4429 RecordSafepointWithRegisters(instr->pointer_map(), 0, 4430 Safepoint::kLazyDeopt); 4431 } 4432 __ bind(¬_applicable); 4433} 4434 4435void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { 4436 Register object = ToRegister(instr->object()); 4437 Register temp1 = ToRegister(instr->temp1()); 4438 Register temp2 = ToRegister(instr->temp2()); 4439 Label no_memento_found; 4440 __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found); 4441 DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound); 4442 __ bind(&no_memento_found); 4443} 4444 4445void LCodeGen::DoStringAdd(LStringAdd* instr) { 4446 DCHECK(ToRegister(instr->context()).is(cp)); 4447 DCHECK(ToRegister(instr->left()).is(r3)); 4448 DCHECK(ToRegister(instr->right()).is(r2)); 4449 StringAddStub stub(isolate(), instr->hydrogen()->flags(), 4450 instr->hydrogen()->pretenure_flag()); 4451 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4452} 4453 4454void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { 4455 class DeferredStringCharCodeAt final : public LDeferredCode { 4456 public: 4457 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) 4458 : LDeferredCode(codegen), instr_(instr) {} 4459 void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); } 4460 LInstruction* instr() override { return instr_; } 4461 4462 private: 4463 LStringCharCodeAt* instr_; 4464 }; 4465 4466 DeferredStringCharCodeAt* deferred = 4467 new (zone()) DeferredStringCharCodeAt(this, instr); 4468 4469 StringCharLoadGenerator::Generate( 4470 masm(), ToRegister(instr->string()), ToRegister(instr->index()), 4471 ToRegister(instr->result()), deferred->entry()); 4472 __ bind(deferred->exit()); 4473} 4474 4475void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { 4476 Register string = ToRegister(instr->string()); 4477 Register result = ToRegister(instr->result()); 4478 Register scratch = scratch0(); 4479 4480 // TODO(3095996): Get rid of this. For now, we need to make the 4481 // result register contain a valid pointer because it is already 4482 // contained in the register pointer map. 4483 __ LoadImmP(result, Operand::Zero()); 4484 4485 PushSafepointRegistersScope scope(this); 4486 __ push(string); 4487 // Push the index as a smi. This is safe because of the checks in 4488 // DoStringCharCodeAt above. 
4489 if (instr->index()->IsConstantOperand()) { 4490 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 4491 __ LoadSmiLiteral(scratch, Smi::FromInt(const_index)); 4492 __ push(scratch); 4493 } else { 4494 Register index = ToRegister(instr->index()); 4495 __ SmiTag(index); 4496 __ push(index); 4497 } 4498 CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, 4499 instr->context()); 4500 __ AssertSmi(r2); 4501 __ SmiUntag(r2); 4502 __ StoreToSafepointRegisterSlot(r2, result); 4503} 4504 4505void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { 4506 class DeferredStringCharFromCode final : public LDeferredCode { 4507 public: 4508 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) 4509 : LDeferredCode(codegen), instr_(instr) {} 4510 void Generate() override { 4511 codegen()->DoDeferredStringCharFromCode(instr_); 4512 } 4513 LInstruction* instr() override { return instr_; } 4514 4515 private: 4516 LStringCharFromCode* instr_; 4517 }; 4518 4519 DeferredStringCharFromCode* deferred = 4520 new (zone()) DeferredStringCharFromCode(this, instr); 4521 4522 DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); 4523 Register char_code = ToRegister(instr->char_code()); 4524 Register result = ToRegister(instr->result()); 4525 DCHECK(!char_code.is(result)); 4526 4527 __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode)); 4528 __ bgt(deferred->entry()); 4529 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); 4530 __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2)); 4531 __ AddP(result, r0); 4532 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize)); 4533 __ CompareRoot(result, Heap::kUndefinedValueRootIndex); 4534 __ beq(deferred->entry()); 4535 __ bind(deferred->exit()); 4536} 4537 4538void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { 4539 Register char_code = ToRegister(instr->char_code()); 4540 Register result = ToRegister(instr->result()); 4541 4542 // TODO(3095996): Get rid of this. For now, we need to make the 4543 // result register contain a valid pointer because it is already 4544 // contained in the register pointer map. 
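  // (Zero works because it is Smi(0): a valid tagged value that the GC can
  // safely scan while the runtime call below is in progress.)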
  __ LoadImmP(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r2, result);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ LoadP(scratch, ToMemOperand(input));
    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
  } else {
    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
  }
}

void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
}

void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
#if V8_TARGET_ARCH_S390X
  __ SmiTag(dst, src);
#else
  // Add src to itself to detect Smi overflow.
  __ Add32(dst, src, src);
  __ b(overflow, deferred->entry());
#endif
  __ bind(deferred->exit());
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
  __ CmpLogicalP(input, Operand(Smi::kMaxValue));
  __ bgt(deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
                                     LOperand* temp1, LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
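    // Worked example of the recovery below for the dst == src case
    // (32-bit; HeapNumber::kSignMask == 0x80000000):
    //
    //   original src        = 0x40000000  (2^30, not a valid Smi)
    //   dst = src + src     = 0x80000000  (tagging overflowed)
    //   SmiUntag: dst >> 1  = 0xC0000000  (arithmetic shift)
    //   xor kSignMask       = 0x40000000  (original value recovered)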
4644 if (dst.is(src)) { 4645 __ SmiUntag(src, dst); 4646 __ xilf(src, Operand(HeapNumber::kSignMask)); 4647 } 4648 __ ConvertIntToDouble(src, dbl_scratch); 4649 } else { 4650 __ ConvertUnsignedIntToDouble(src, dbl_scratch); 4651 } 4652 4653 if (FLAG_inline_new) { 4654 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); 4655 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); 4656 __ b(&done); 4657 } 4658 4659 // Slow case: Call the runtime system to do the number allocation. 4660 __ bind(&slow); 4661 { 4662 // TODO(3095996): Put a valid pointer value in the stack slot where the 4663 // result register is stored, as this register is in the pointer map, but 4664 // contains an integer value. 4665 __ LoadImmP(dst, Operand::Zero()); 4666 4667 // Preserve the value of all registers. 4668 PushSafepointRegistersScope scope(this); 4669 // Reset the context register. 4670 if (!dst.is(cp)) { 4671 __ LoadImmP(cp, Operand::Zero()); 4672 } 4673 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4674 RecordSafepointWithRegisters(instr->pointer_map(), 0, 4675 Safepoint::kNoLazyDeopt); 4676 __ StoreToSafepointRegisterSlot(r2, dst); 4677 } 4678 4679 // Done. Put the value in dbl_scratch into the value of the allocated heap 4680 // number. 4681 __ bind(&done); 4682 __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); 4683} 4684 4685void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4686 class DeferredNumberTagD final : public LDeferredCode { 4687 public: 4688 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4689 : LDeferredCode(codegen), instr_(instr) {} 4690 void Generate() override { codegen()->DoDeferredNumberTagD(instr_); } 4691 LInstruction* instr() override { return instr_; } 4692 4693 private: 4694 LNumberTagD* instr_; 4695 }; 4696 4697 DoubleRegister input_reg = ToDoubleRegister(instr->value()); 4698 Register scratch = scratch0(); 4699 Register reg = ToRegister(instr->result()); 4700 Register temp1 = ToRegister(instr->temp()); 4701 Register temp2 = ToRegister(instr->temp2()); 4702 4703 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr); 4704 if (FLAG_inline_new) { 4705 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 4706 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); 4707 } else { 4708 __ b(deferred->entry()); 4709 } 4710 __ bind(deferred->exit()); 4711 __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); 4712} 4713 4714void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4715 // TODO(3095996): Get rid of this. For now, we need to make the 4716 // result register contain a valid pointer because it is already 4717 // contained in the register pointer map. 4718 Register reg = ToRegister(instr->result()); 4719 __ LoadImmP(reg, Operand::Zero()); 4720 4721 PushSafepointRegistersScope scope(this); 4722 // Reset the context register. 
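  // (cp may hold a stale context here; Smi(0) is a GC-safe placeholder for
  // the duration of the runtime call.)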
4723 if (!reg.is(cp)) { 4724 __ LoadImmP(cp, Operand::Zero()); 4725 } 4726 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4727 RecordSafepointWithRegisters(instr->pointer_map(), 0, 4728 Safepoint::kNoLazyDeopt); 4729 __ StoreToSafepointRegisterSlot(r2, reg); 4730} 4731 4732void LCodeGen::DoSmiTag(LSmiTag* instr) { 4733 HChange* hchange = instr->hydrogen(); 4734 Register input = ToRegister(instr->value()); 4735 Register output = ToRegister(instr->result()); 4736 if (hchange->CheckFlag(HValue::kCanOverflow) && 4737 hchange->value()->CheckFlag(HValue::kUint32)) { 4738 __ TestUnsignedSmiCandidate(input, r0); 4739 DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0); 4740 } 4741#if !V8_TARGET_ARCH_S390X 4742 if (hchange->CheckFlag(HValue::kCanOverflow) && 4743 !hchange->value()->CheckFlag(HValue::kUint32)) { 4744 __ SmiTagCheckOverflow(output, input, r0); 4745 DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0); 4746 } else { 4747#endif 4748 __ SmiTag(output, input); 4749#if !V8_TARGET_ARCH_S390X 4750 } 4751#endif 4752} 4753 4754void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4755 Register input = ToRegister(instr->value()); 4756 Register result = ToRegister(instr->result()); 4757 if (instr->needs_check()) { 4758 __ tmll(input, Operand(kHeapObjectTag)); 4759 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0); 4760 __ SmiUntag(result, input); 4761 } else { 4762 __ SmiUntag(result, input); 4763 } 4764} 4765 4766void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, 4767 DoubleRegister result_reg, 4768 NumberUntagDMode mode) { 4769 bool can_convert_undefined_to_nan = 4770 instr->hydrogen()->can_convert_undefined_to_nan(); 4771 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); 4772 4773 Register scratch = scratch0(); 4774 DCHECK(!result_reg.is(double_scratch0())); 4775 4776 Label convert, load_smi, done; 4777 4778 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4779 // Smi check. 4780 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4781 4782 // Heap number map check. 4783 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4784 __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex)); 4785 4786 if (can_convert_undefined_to_nan) { 4787 __ bne(&convert, Label::kNear); 4788 } else { 4789 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber); 4790 } 4791 // load heap number 4792 __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4793 if (deoptimize_on_minus_zero) { 4794 __ TestDoubleIsMinusZero(result_reg, scratch, ip); 4795 DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero); 4796 } 4797 __ b(&done, Label::kNear); 4798 if (can_convert_undefined_to_nan) { 4799 __ bind(&convert); 4800 // Convert undefined (and hole) to NaN. 
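      // (Matches ES ToNumber: Number(undefined) is NaN. Anything that is
      // neither a Smi, a heap number, nor undefined deopts below.)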
4801 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); 4802 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined); 4803 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4804 __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4805 __ b(&done, Label::kNear); 4806 } 4807 } else { 4808 __ SmiUntag(scratch, input_reg); 4809 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4810 } 4811 // Smi to double register conversion 4812 __ bind(&load_smi); 4813 // scratch: untagged value of input_reg 4814 __ ConvertIntToDouble(scratch, result_reg); 4815 __ bind(&done); 4816} 4817 4818void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { 4819 Register input_reg = ToRegister(instr->value()); 4820 Register scratch1 = scratch0(); 4821 Register scratch2 = ToRegister(instr->temp()); 4822 DoubleRegister double_scratch = double_scratch0(); 4823 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); 4824 4825 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); 4826 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); 4827 4828 Label done; 4829 4830 // Heap number map check. 4831 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4832 __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex); 4833 4834 if (instr->truncating()) { 4835 // Performs a truncating conversion of a floating point number as used by 4836 // the JS bitwise operations. 4837 Label no_heap_number, check_bools, check_false; 4838 __ bne(&no_heap_number, Label::kNear); 4839 __ LoadRR(scratch2, input_reg); 4840 __ TruncateHeapNumberToI(input_reg, scratch2); 4841 __ b(&done, Label::kNear); 4842 4843 // Check for Oddballs. Undefined/False is converted to zero and True to one 4844 // for truncating conversions. 4845 __ bind(&no_heap_number); 4846 __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); 4847 __ bne(&check_bools); 4848 __ LoadImmP(input_reg, Operand::Zero()); 4849 __ b(&done, Label::kNear); 4850 4851 __ bind(&check_bools); 4852 __ CompareRoot(input_reg, Heap::kTrueValueRootIndex); 4853 __ bne(&check_false, Label::kNear); 4854 __ LoadImmP(input_reg, Operand(1)); 4855 __ b(&done, Label::kNear); 4856 4857 __ bind(&check_false); 4858 __ CompareRoot(input_reg, Heap::kFalseValueRootIndex); 4859 DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean); 4860 __ LoadImmP(input_reg, Operand::Zero()); 4861 } else { 4862 // Deoptimize if we don't have a heap number. 
    DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);

    __ ld(double_scratch2,
          FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Preserve the heap number pointer in scratch2 for the minus-zero
      // check below.
      __ LoadRR(scratch2, input_reg);
    }
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
                             double_scratch);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ CmpP(input_reg, Operand::Zero());
      __ bne(&done, Label::kNear);
      __ TestHeapNumberSign(scratch2, scratch1);
      DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    }
  }
  __ bind(&done);
}

void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);

    // Branch to deferred code if the input is a HeapObject.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
                              ? NUMBER_CANDIDATE_IS_SMI
                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}

void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
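    // TryDoubleToInt32Exact leaves eq set only when the double round-trips
    // through int32 exactly; ne (taken below) covers lost precision and NaN.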
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ CmpP(result_reg, Operand::Zero());
      __ bne(&done, Label::kNear);
      __ TestDoubleSign(double_input, scratch1);
      DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
      __ bind(&done);
    }
  }
}

void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ CmpP(result_reg, Operand::Zero());
      __ bne(&done, Label::kNear);
      __ TestDoubleSign(double_input, scratch1);
      DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
      __ bind(&done);
    }
  }
#if V8_TARGET_ARCH_S390X
  __ SmiTag(result_reg);
#else
  __ SmiTagCheckOverflow(result_reg, r0);
  DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
}

void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ TestIfSmi(ToRegister(input));
  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ TestIfSmi(ToRegister(input));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
  }
}

void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
                      Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
    } else {
      DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
      // Omit the check for the last type.
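      // (No instance type can exceed LAST_TYPE, so the upper-bound compare
      // would always succeed.)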
      if (last != LAST_TYPE) {
        __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
                          Operand(last));
        DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ AndP(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr,
                   DeoptimizeReason::kWrongInstanceType);
    } else {
      __ AndP(scratch, Operand(mask));
      __ CmpP(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
    }
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
  } else {
    __ CmpP(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ LoadImmP(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r2, temp);
  }
  __ TestIfSmi(temp);
  DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map, &success);
    __ beq(&success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map, &success);
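  // For the last map there is no early-out: a mismatch either jumps to the
  // deferred migration path or deoptimizes.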
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ bne(deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
  }

  __ bind(&success);
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}

void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ CmpP(scratch, Operand(factory()->heap_number_map()));
  __ beq(&heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ CmpP(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
  __ LoadImmP(result_reg, Operand::Zero());
  __ b(&done, Label::kNear);

  // Heap number.
  __ bind(&heap_number);
  __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ b(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}

void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
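  // Try an inline bump-pointer allocation first; the deferred path below
  // calls into the runtime when the inline allocation fails.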
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }

  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ LoadIntLiteral(scratch, size);
    } else {
      scratch = ToRegister(instr->size());
    }
    __ lay(scratch, MemOperand(scratch, -kPointerSize));
    Label loop;
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ bind(&loop);
    __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
#if V8_TARGET_ARCH_S390X
    __ lay(scratch, MemOperand(scratch, -kPointerSize));
#else
    // TODO(joransiu): Improve the following sequence.
    // Need to use AHI instead of LAY, as the top nibble is not set with LAY,
    // causing an incorrect result with the signed compare.
    __ AddP(scratch, Operand(-kPointerSize));
#endif
    __ CmpP(scratch, Operand::Zero());
    __ bge(&loop);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
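  // (A Smi zero is a safe placeholder: the GC will not mistake it for a
  // stale heap pointer when it scans the safepoint's pointer map.)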
  __ LoadSmiLiteral(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
#if !V8_TARGET_ARCH_S390X
    if (size >= 0 && size <= Smi::kMaxValue) {
#endif
      __ Push(Smi::FromInt(size));
#if !V8_TARGET_ARCH_S390X
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
#endif
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r2, result);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags =
          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
    }
    // If the allocation-folding dominator's allocation triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    Register top_address = scratch0();
    __ SubP(r2, r2, Operand(kHeapObjectTag));
    __ mov(top_address, Operand(allocation_top));
    __ StoreP(r2, MemOperand(top_address));
    __ AddP(r2, r2, Operand(kHeapObjectTag));
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  }
}

void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r5));
  DCHECK(ToRegister(instr->result()).is(r2));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r2, Operand(isolate()->factory()->number_string()));
  __ b(&end);
  __ bind(&do_call);
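  // Smis were answered with "number" above; everything else is classified
  // by the TypeofStub.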
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}

void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}

Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(false_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(scratch, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(scratch, Operand(1 << Map::kIsCallable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
    __ blt(false_label);
    // Check for callable or undetectable objects => false.
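    // (typeof answers "function" for callable objects and "undefined" for
    // undetectable ones, so both bits must be clear for "object".)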
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(r0, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)          \
  } else if (String::Equals(type_name, factory->type##_string())) {    \
    __ JumpIfSmi(input, false_label);                                  \
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);              \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}

void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % 2);
      while (padding_size > 0) {
        __ nop();
        padding_size -= 2;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}

void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
    __ bge(&done, Label::kNear);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new (zone()) DeferredStackCheck(this, instr);
    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
    __ blt(deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
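  // (CheckEnumCache falls through only when the enum cache is usable;
  // otherwise it branches to call_runtime below.)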
  __ bind(&call_runtime);
  __ push(r2);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache, Label::kNear);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ CmpP(result, Operand::Zero());
  DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);

  __ bind(&done);
}

void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ CmpP(map, scratch0());
  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}

void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ LoadImmP(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r2, result);
}

void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry());
  __ ShiftRightArithP(index, index, Operand(1));

  __ CmpP(index, Operand::Zero());
  __ blt(&out_of_object, Label::kNear);

  __ SmiToPtrArrayOffset(r0, index);
  __ AddP(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done, Label::kNear);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Out-of-object property slot k is encoded as index -(k + 1).
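  // (E.g. slot 0 arrives here as untagged index -1 and resolves to the
  // first word after the FixedArray header.)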
  __ SmiToPtrArrayOffset(r0, index);
  __ SubP(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8