code-generator-mips64.cc revision 21efce637eb329c94f1323b6a2334a1c977e1a9d
1// Copyright 2014 the V8 project authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include "src/ast/scopes.h" 6#include "src/compiler/code-generator.h" 7#include "src/compiler/code-generator-impl.h" 8#include "src/compiler/gap-resolver.h" 9#include "src/compiler/node-matchers.h" 10#include "src/compiler/osr.h" 11#include "src/mips/macro-assembler-mips.h" 12 13namespace v8 { 14namespace internal { 15namespace compiler { 16 17#define __ masm()-> 18 19 20// TODO(plind): Possibly avoid using these lithium names. 21#define kScratchReg kLithiumScratchReg 22#define kScratchReg2 kLithiumScratchReg2 23#define kScratchDoubleReg kLithiumScratchDouble 24 25 26// TODO(plind): consider renaming these macros. 27#define TRACE_MSG(msg) \ 28 PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ 29 __LINE__) 30 31#define TRACE_UNIMPL() \ 32 PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \ 33 __LINE__) 34 35 36// Adds Mips-specific methods to convert InstructionOperands. 37class MipsOperandConverter final : public InstructionOperandConverter { 38 public: 39 MipsOperandConverter(CodeGenerator* gen, Instruction* instr) 40 : InstructionOperandConverter(gen, instr) {} 41 42 FloatRegister OutputSingleRegister(size_t index = 0) { 43 return ToSingleRegister(instr_->OutputAt(index)); 44 } 45 46 FloatRegister InputSingleRegister(size_t index) { 47 return ToSingleRegister(instr_->InputAt(index)); 48 } 49 50 FloatRegister ToSingleRegister(InstructionOperand* op) { 51 // Single (Float) and Double register namespace is same on MIPS, 52 // both are typedefs of FPURegister. 53 return ToDoubleRegister(op); 54 } 55 56 DoubleRegister InputOrZeroDoubleRegister(size_t index) { 57 if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; 58 59 return InputDoubleRegister(index); 60 } 61 62 DoubleRegister InputOrZeroSingleRegister(size_t index) { 63 if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; 64 65 return InputSingleRegister(index); 66 } 67 68 Operand InputImmediate(size_t index) { 69 Constant constant = ToConstant(instr_->InputAt(index)); 70 switch (constant.type()) { 71 case Constant::kInt32: 72 return Operand(constant.ToInt32()); 73 case Constant::kInt64: 74 return Operand(constant.ToInt64()); 75 case Constant::kFloat32: 76 return Operand( 77 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED)); 78 case Constant::kFloat64: 79 return Operand( 80 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED)); 81 case Constant::kExternalReference: 82 case Constant::kHeapObject: 83 // TODO(plind): Maybe we should handle ExtRef & HeapObj here? 84 // maybe not done on arm due to const pool ?? 85 break; 86 case Constant::kRpoNumber: 87 UNREACHABLE(); // TODO(titzer): RPO immediates on mips? 88 break; 89 } 90 UNREACHABLE(); 91 return Operand(zero_reg); 92 } 93 94 Operand InputOperand(size_t index) { 95 InstructionOperand* op = instr_->InputAt(index); 96 if (op->IsRegister()) { 97 return Operand(ToRegister(op)); 98 } 99 return InputImmediate(index); 100 } 101 102 MemOperand MemoryOperand(size_t* first_index) { 103 const size_t index = *first_index; 104 switch (AddressingModeField::decode(instr_->opcode())) { 105 case kMode_None: 106 break; 107 case kMode_MRI: 108 *first_index += 2; 109 return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); 110 case kMode_MRR: 111 // TODO(plind): r6 address mode, to be implemented ... 112 UNREACHABLE(); 113 } 114 UNREACHABLE(); 115 return MemOperand(no_reg); 116 } 117 118 MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } 119 120 MemOperand ToMemOperand(InstructionOperand* op) const { 121 DCHECK_NOT_NULL(op); 122 DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); 123 return SlotToMemOperand(AllocatedOperand::cast(op)->index()); 124 } 125 126 MemOperand SlotToMemOperand(int slot) const { 127 FrameOffset offset = frame_access_state()->GetFrameOffset(slot); 128 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); 129 } 130}; 131 132 133static inline bool HasRegisterInput(Instruction* instr, size_t index) { 134 return instr->InputAt(index)->IsRegister(); 135} 136 137 138namespace { 139 140class OutOfLineLoadSingle final : public OutOfLineCode { 141 public: 142 OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result) 143 : OutOfLineCode(gen), result_(result) {} 144 145 void Generate() final { 146 __ Move(result_, std::numeric_limits<float>::quiet_NaN()); 147 } 148 149 private: 150 FloatRegister const result_; 151}; 152 153 154class OutOfLineLoadDouble final : public OutOfLineCode { 155 public: 156 OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result) 157 : OutOfLineCode(gen), result_(result) {} 158 159 void Generate() final { 160 __ Move(result_, std::numeric_limits<double>::quiet_NaN()); 161 } 162 163 private: 164 DoubleRegister const result_; 165}; 166 167 168class OutOfLineLoadInteger final : public OutOfLineCode { 169 public: 170 OutOfLineLoadInteger(CodeGenerator* gen, Register result) 171 : OutOfLineCode(gen), result_(result) {} 172 173 void Generate() final { __ mov(result_, zero_reg); } 174 175 private: 176 Register const result_; 177}; 178 179 180class OutOfLineRound : public OutOfLineCode { 181 public: 182 OutOfLineRound(CodeGenerator* gen, DoubleRegister result) 183 : OutOfLineCode(gen), result_(result) {} 184 185 void Generate() final { 186 // Handle rounding to zero case where sign has to be preserved. 187 // High bits of double input already in kScratchReg. 188 __ dsrl(at, kScratchReg, 31); 189 __ dsll(at, at, 31); 190 __ mthc1(at, result_); 191 } 192 193 private: 194 DoubleRegister const result_; 195}; 196 197 198class OutOfLineRound32 : public OutOfLineCode { 199 public: 200 OutOfLineRound32(CodeGenerator* gen, DoubleRegister result) 201 : OutOfLineCode(gen), result_(result) {} 202 203 void Generate() final { 204 // Handle rounding to zero case where sign has to be preserved. 205 // High bits of float input already in kScratchReg. 206 __ srl(at, kScratchReg, 31); 207 __ sll(at, at, 31); 208 __ mtc1(at, result_); 209 } 210 211 private: 212 DoubleRegister const result_; 213}; 214 215 216class OutOfLineRecordWrite final : public OutOfLineCode { 217 public: 218 OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, 219 Register value, Register scratch0, Register scratch1, 220 RecordWriteMode mode) 221 : OutOfLineCode(gen), 222 object_(object), 223 index_(index), 224 value_(value), 225 scratch0_(scratch0), 226 scratch1_(scratch1), 227 mode_(mode), 228 must_save_lr_(!gen->frame_access_state()->has_frame()) {} 229 230 void Generate() final { 231 if (mode_ > RecordWriteMode::kValueIsPointer) { 232 __ JumpIfSmi(value_, exit()); 233 } 234 __ CheckPageFlag(value_, scratch0_, 235 MemoryChunk::kPointersToHereAreInterestingMask, eq, 236 exit()); 237 RememberedSetAction const remembered_set_action = 238 mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET 239 : OMIT_REMEMBERED_SET; 240 SaveFPRegsMode const save_fp_mode = 241 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; 242 if (must_save_lr_) { 243 // We need to save and restore ra if the frame was elided. 244 __ Push(ra); 245 } 246 RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_, 247 remembered_set_action, save_fp_mode); 248 __ Daddu(scratch1_, object_, index_); 249 __ CallStub(&stub); 250 if (must_save_lr_) { 251 __ Pop(ra); 252 } 253 } 254 255 private: 256 Register const object_; 257 Register const index_; 258 Register const value_; 259 Register const scratch0_; 260 Register const scratch1_; 261 RecordWriteMode const mode_; 262 bool must_save_lr_; 263}; 264 265 266Condition FlagsConditionToConditionCmp(FlagsCondition condition) { 267 switch (condition) { 268 case kEqual: 269 return eq; 270 case kNotEqual: 271 return ne; 272 case kSignedLessThan: 273 return lt; 274 case kSignedGreaterThanOrEqual: 275 return ge; 276 case kSignedLessThanOrEqual: 277 return le; 278 case kSignedGreaterThan: 279 return gt; 280 case kUnsignedLessThan: 281 return lo; 282 case kUnsignedGreaterThanOrEqual: 283 return hs; 284 case kUnsignedLessThanOrEqual: 285 return ls; 286 case kUnsignedGreaterThan: 287 return hi; 288 case kUnorderedEqual: 289 case kUnorderedNotEqual: 290 break; 291 default: 292 break; 293 } 294 UNREACHABLE(); 295 return kNoCondition; 296} 297 298 299Condition FlagsConditionToConditionTst(FlagsCondition condition) { 300 switch (condition) { 301 case kNotEqual: 302 return ne; 303 case kEqual: 304 return eq; 305 default: 306 break; 307 } 308 UNREACHABLE(); 309 return kNoCondition; 310} 311 312 313Condition FlagsConditionToConditionOvf(FlagsCondition condition) { 314 switch (condition) { 315 case kOverflow: 316 return ne; 317 case kNotOverflow: 318 return eq; 319 default: 320 break; 321 } 322 UNREACHABLE(); 323 return kNoCondition; 324} 325 326 327FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate, 328 FlagsCondition condition) { 329 switch (condition) { 330 case kEqual: 331 predicate = true; 332 return EQ; 333 case kNotEqual: 334 predicate = false; 335 return EQ; 336 case kUnsignedLessThan: 337 predicate = true; 338 return OLT; 339 case kUnsignedGreaterThanOrEqual: 340 predicate = false; 341 return ULT; 342 case kUnsignedLessThanOrEqual: 343 predicate = true; 344 return OLE; 345 case kUnsignedGreaterThan: 346 predicate = false; 347 return ULE; 348 case kUnorderedEqual: 349 case kUnorderedNotEqual: 350 predicate = true; 351 break; 352 default: 353 predicate = true; 354 break; 355 } 356 UNREACHABLE(); 357 return kNoFPUCondition; 358} 359 360} // namespace 361 362#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \ 363 do { \ 364 auto result = i.Output##width##Register(); \ 365 auto ool = new (zone()) OutOfLineLoad##width(this, result); \ 366 if (instr->InputAt(0)->IsRegister()) { \ 367 auto offset = i.InputRegister(0); \ 368 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \ 369 __ And(kScratchReg, offset, Operand(0xffffffff)); \ 370 __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \ 371 __ asm_instr(result, MemOperand(kScratchReg, 0)); \ 372 } else { \ 373 int offset = static_cast<int>(i.InputOperand(0).immediate()); \ 374 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \ 375 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \ 376 } \ 377 __ bind(ool->exit()); \ 378 } while (0) 379 380#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \ 381 do { \ 382 auto result = i.OutputRegister(); \ 383 auto ool = new (zone()) OutOfLineLoadInteger(this, result); \ 384 if (instr->InputAt(0)->IsRegister()) { \ 385 auto offset = i.InputRegister(0); \ 386 __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \ 387 __ And(kScratchReg, offset, Operand(0xffffffff)); \ 388 __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \ 389 __ asm_instr(result, MemOperand(kScratchReg, 0)); \ 390 } else { \ 391 int offset = static_cast<int>(i.InputOperand(0).immediate()); \ 392 __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \ 393 __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \ 394 } \ 395 __ bind(ool->exit()); \ 396 } while (0) 397 398#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \ 399 do { \ 400 Label done; \ 401 if (instr->InputAt(0)->IsRegister()) { \ 402 auto offset = i.InputRegister(0); \ 403 auto value = i.Input##width##Register(2); \ 404 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \ 405 __ And(kScratchReg, offset, Operand(0xffffffff)); \ 406 __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \ 407 __ asm_instr(value, MemOperand(kScratchReg, 0)); \ 408 } else { \ 409 int offset = static_cast<int>(i.InputOperand(0).immediate()); \ 410 auto value = i.Input##width##Register(2); \ 411 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \ 412 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \ 413 } \ 414 __ bind(&done); \ 415 } while (0) 416 417#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \ 418 do { \ 419 Label done; \ 420 if (instr->InputAt(0)->IsRegister()) { \ 421 auto offset = i.InputRegister(0); \ 422 auto value = i.InputRegister(2); \ 423 __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \ 424 __ And(kScratchReg, offset, Operand(0xffffffff)); \ 425 __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \ 426 __ asm_instr(value, MemOperand(kScratchReg, 0)); \ 427 } else { \ 428 int offset = static_cast<int>(i.InputOperand(0).immediate()); \ 429 auto value = i.InputRegister(2); \ 430 __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \ 431 __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \ 432 } \ 433 __ bind(&done); \ 434 } while (0) 435 436#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \ 437 if (kArchVariant == kMips64r6) { \ 438 __ cfc1(kScratchReg, FCSR); \ 439 __ li(at, Operand(mode_##mode)); \ 440 __ ctc1(at, FCSR); \ 441 __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \ 442 __ ctc1(kScratchReg, FCSR); \ 443 } else { \ 444 auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister()); \ 445 Label done; \ 446 __ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \ 447 __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \ 448 HeapNumber::kExponentBits); \ 449 __ Branch(USE_DELAY_SLOT, &done, hs, at, \ 450 Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \ 451 __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \ 452 __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \ 453 __ dmfc1(at, i.OutputDoubleRegister()); \ 454 __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \ 455 __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \ 456 __ bind(ool->exit()); \ 457 __ bind(&done); \ 458 } 459 460#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode) \ 461 if (kArchVariant == kMips64r6) { \ 462 __ cfc1(kScratchReg, FCSR); \ 463 __ li(at, Operand(mode_##mode)); \ 464 __ ctc1(at, FCSR); \ 465 __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \ 466 __ ctc1(kScratchReg, FCSR); \ 467 } else { \ 468 int32_t kFloat32ExponentBias = 127; \ 469 int32_t kFloat32MantissaBits = 23; \ 470 int32_t kFloat32ExponentBits = 8; \ 471 auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \ 472 Label done; \ 473 __ mfc1(kScratchReg, i.InputDoubleRegister(0)); \ 474 __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits); \ 475 __ Branch(USE_DELAY_SLOT, &done, hs, at, \ 476 Operand(kFloat32ExponentBias + kFloat32MantissaBits)); \ 477 __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \ 478 __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \ 479 __ mfc1(at, i.OutputDoubleRegister()); \ 480 __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \ 481 __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \ 482 __ bind(ool->exit()); \ 483 __ bind(&done); \ 484 } 485 486#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ 487 do { \ 488 __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ 489 __ sync(); \ 490 } while (0) 491 492#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ 493 do { \ 494 __ sync(); \ 495 __ asm_instr(i.InputRegister(2), i.MemoryOperand()); \ 496 __ sync(); \ 497 } while (0) 498 499#define ASSEMBLE_IEEE754_BINOP(name) \ 500 do { \ 501 FrameScope scope(masm(), StackFrame::MANUAL); \ 502 __ PrepareCallCFunction(0, 2, kScratchReg); \ 503 __ MovToFloatParameters(i.InputDoubleRegister(0), \ 504 i.InputDoubleRegister(1)); \ 505 __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \ 506 0, 2); \ 507 /* Move the result in the double result register. */ \ 508 __ MovFromFloatResult(i.OutputDoubleRegister()); \ 509 } while (0) 510 511#define ASSEMBLE_IEEE754_UNOP(name) \ 512 do { \ 513 FrameScope scope(masm(), StackFrame::MANUAL); \ 514 __ PrepareCallCFunction(0, 1, kScratchReg); \ 515 __ MovToFloatParameter(i.InputDoubleRegister(0)); \ 516 __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \ 517 0, 1); \ 518 /* Move the result in the double result register. */ \ 519 __ MovFromFloatResult(i.OutputDoubleRegister()); \ 520 } while (0) 521 522void CodeGenerator::AssembleDeconstructFrame() { 523 __ mov(sp, fp); 524 __ Pop(ra, fp); 525} 526 527void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) { 528 int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta); 529 if (sp_slot_delta > 0) { 530 __ daddiu(sp, sp, sp_slot_delta * kPointerSize); 531 } 532 frame_access_state()->SetFrameAccessToDefault(); 533} 534 535 536void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) { 537 int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta); 538 if (sp_slot_delta < 0) { 539 __ Dsubu(sp, sp, Operand(-sp_slot_delta * kPointerSize)); 540 frame_access_state()->IncreaseSPDelta(-sp_slot_delta); 541 } 542 if (frame_access_state()->has_frame()) { 543 __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); 544 __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 545 } 546 frame_access_state()->SetFrameAccessToSP(); 547} 548 549void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg, 550 Register scratch1, 551 Register scratch2, 552 Register scratch3) { 553 DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3)); 554 Label done; 555 556 // Check if current frame is an arguments adaptor frame. 557 __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset)); 558 __ Branch(&done, ne, scratch3, 559 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 560 561 // Load arguments count from current arguments adaptor frame (note, it 562 // does not include receiver). 563 Register caller_args_count_reg = scratch1; 564 __ ld(caller_args_count_reg, 565 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset)); 566 __ SmiUntag(caller_args_count_reg); 567 568 ParameterCount callee_args_count(args_reg); 569 __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2, 570 scratch3); 571 __ bind(&done); 572} 573 574// Assembles an instruction after register allocation, producing machine code. 575CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( 576 Instruction* instr) { 577 MipsOperandConverter i(this, instr); 578 InstructionCode opcode = instr->opcode(); 579 ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode); 580 switch (arch_opcode) { 581 case kArchCallCodeObject: { 582 EnsureSpaceForLazyDeopt(); 583 if (instr->InputAt(0)->IsImmediate()) { 584 __ Call(Handle<Code>::cast(i.InputHeapObject(0)), 585 RelocInfo::CODE_TARGET); 586 } else { 587 __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag); 588 __ Call(at); 589 } 590 RecordCallPosition(instr); 591 frame_access_state()->ClearSPDelta(); 592 break; 593 } 594 case kArchTailCallCodeObjectFromJSFunction: 595 case kArchTailCallCodeObject: { 596 int stack_param_delta = i.InputInt32(instr->InputCount() - 1); 597 AssembleDeconstructActivationRecord(stack_param_delta); 598 if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) { 599 AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, 600 i.TempRegister(0), i.TempRegister(1), 601 i.TempRegister(2)); 602 } 603 if (instr->InputAt(0)->IsImmediate()) { 604 __ Jump(Handle<Code>::cast(i.InputHeapObject(0)), 605 RelocInfo::CODE_TARGET); 606 } else { 607 __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag); 608 __ Jump(at); 609 } 610 frame_access_state()->ClearSPDelta(); 611 break; 612 } 613 case kArchTailCallAddress: { 614 int stack_param_delta = i.InputInt32(instr->InputCount() - 1); 615 AssembleDeconstructActivationRecord(stack_param_delta); 616 CHECK(!instr->InputAt(0)->IsImmediate()); 617 __ Jump(i.InputRegister(0)); 618 frame_access_state()->ClearSPDelta(); 619 break; 620 } 621 case kArchCallJSFunction: { 622 EnsureSpaceForLazyDeopt(); 623 Register func = i.InputRegister(0); 624 if (FLAG_debug_code) { 625 // Check the function's context matches the context argument. 626 __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); 627 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg)); 628 } 629 __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); 630 __ Call(at); 631 RecordCallPosition(instr); 632 frame_access_state()->ClearSPDelta(); 633 break; 634 } 635 case kArchTailCallJSFunctionFromJSFunction: 636 case kArchTailCallJSFunction: { 637 Register func = i.InputRegister(0); 638 if (FLAG_debug_code) { 639 // Check the function's context matches the context argument. 640 __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); 641 __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg)); 642 } 643 int stack_param_delta = i.InputInt32(instr->InputCount() - 1); 644 AssembleDeconstructActivationRecord(stack_param_delta); 645 if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) { 646 AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister, 647 i.TempRegister(0), i.TempRegister(1), 648 i.TempRegister(2)); 649 } 650 __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); 651 __ Jump(at); 652 frame_access_state()->ClearSPDelta(); 653 break; 654 } 655 case kArchPrepareCallCFunction: { 656 int const num_parameters = MiscField::decode(instr->opcode()); 657 __ PrepareCallCFunction(num_parameters, kScratchReg); 658 // Frame alignment requires using FP-relative frame addressing. 659 frame_access_state()->SetFrameAccessToFP(); 660 break; 661 } 662 case kArchPrepareTailCall: 663 AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1)); 664 break; 665 case kArchCallCFunction: { 666 int const num_parameters = MiscField::decode(instr->opcode()); 667 if (instr->InputAt(0)->IsImmediate()) { 668 ExternalReference ref = i.InputExternalReference(0); 669 __ CallCFunction(ref, num_parameters); 670 } else { 671 Register func = i.InputRegister(0); 672 __ CallCFunction(func, num_parameters); 673 } 674 frame_access_state()->SetFrameAccessToDefault(); 675 frame_access_state()->ClearSPDelta(); 676 break; 677 } 678 case kArchJmp: 679 AssembleArchJump(i.InputRpo(0)); 680 break; 681 case kArchLookupSwitch: 682 AssembleArchLookupSwitch(instr); 683 break; 684 case kArchTableSwitch: 685 AssembleArchTableSwitch(instr); 686 break; 687 case kArchDebugBreak: 688 __ stop("kArchDebugBreak"); 689 break; 690 case kArchComment: { 691 Address comment_string = i.InputExternalReference(0).address(); 692 __ RecordComment(reinterpret_cast<const char*>(comment_string)); 693 break; 694 } 695 case kArchNop: 696 case kArchThrowTerminator: 697 // don't emit code for nops. 698 break; 699 case kArchDeoptimize: { 700 int deopt_state_id = 701 BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore()); 702 Deoptimizer::BailoutType bailout_type = 703 Deoptimizer::BailoutType(MiscField::decode(instr->opcode())); 704 CodeGenResult result = 705 AssembleDeoptimizerCall(deopt_state_id, bailout_type); 706 if (result != kSuccess) return result; 707 break; 708 } 709 case kArchRet: 710 AssembleReturn(); 711 break; 712 case kArchStackPointer: 713 __ mov(i.OutputRegister(), sp); 714 break; 715 case kArchFramePointer: 716 __ mov(i.OutputRegister(), fp); 717 break; 718 case kArchParentFramePointer: 719 if (frame_access_state()->has_frame()) { 720 __ ld(i.OutputRegister(), MemOperand(fp, 0)); 721 } else { 722 __ mov(i.OutputRegister(), fp); 723 } 724 break; 725 case kArchTruncateDoubleToI: 726 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0)); 727 break; 728 case kArchStoreWithWriteBarrier: { 729 RecordWriteMode mode = 730 static_cast<RecordWriteMode>(MiscField::decode(instr->opcode())); 731 Register object = i.InputRegister(0); 732 Register index = i.InputRegister(1); 733 Register value = i.InputRegister(2); 734 Register scratch0 = i.TempRegister(0); 735 Register scratch1 = i.TempRegister(1); 736 auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value, 737 scratch0, scratch1, mode); 738 __ Daddu(at, object, index); 739 __ sd(value, MemOperand(at)); 740 __ CheckPageFlag(object, scratch0, 741 MemoryChunk::kPointersFromHereAreInterestingMask, ne, 742 ool->entry()); 743 __ bind(ool->exit()); 744 break; 745 } 746 case kArchStackSlot: { 747 FrameOffset offset = 748 frame_access_state()->GetFrameOffset(i.InputInt32(0)); 749 __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp, 750 Operand(offset.offset())); 751 break; 752 } 753 case kIeee754Float64Atan: 754 ASSEMBLE_IEEE754_UNOP(atan); 755 break; 756 case kIeee754Float64Atan2: 757 ASSEMBLE_IEEE754_BINOP(atan2); 758 break; 759 case kIeee754Float64Atanh: 760 ASSEMBLE_IEEE754_UNOP(atanh); 761 break; 762 case kIeee754Float64Cos: 763 ASSEMBLE_IEEE754_UNOP(cos); 764 break; 765 case kIeee754Float64Cbrt: 766 ASSEMBLE_IEEE754_UNOP(cbrt); 767 break; 768 case kIeee754Float64Exp: 769 ASSEMBLE_IEEE754_UNOP(exp); 770 break; 771 case kIeee754Float64Expm1: 772 ASSEMBLE_IEEE754_UNOP(expm1); 773 break; 774 case kIeee754Float64Log: 775 ASSEMBLE_IEEE754_UNOP(log); 776 break; 777 case kIeee754Float64Log1p: 778 ASSEMBLE_IEEE754_UNOP(log1p); 779 break; 780 case kIeee754Float64Log2: 781 ASSEMBLE_IEEE754_UNOP(log2); 782 break; 783 case kIeee754Float64Log10: 784 ASSEMBLE_IEEE754_UNOP(log10); 785 break; 786 case kIeee754Float64Sin: 787 ASSEMBLE_IEEE754_UNOP(sin); 788 break; 789 case kIeee754Float64Tan: 790 ASSEMBLE_IEEE754_UNOP(tan); 791 break; 792 case kMips64Add: 793 __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 794 break; 795 case kMips64Dadd: 796 __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 797 break; 798 case kMips64DaddOvf: 799 // Pseudo-instruction used for overflow/branch. No opcode emitted here. 800 break; 801 case kMips64Sub: 802 __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 803 break; 804 case kMips64Dsub: 805 __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 806 break; 807 case kMips64DsubOvf: 808 // Pseudo-instruction used for overflow/branch. No opcode emitted here. 809 break; 810 case kMips64Mul: 811 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 812 break; 813 case kMips64MulHigh: 814 __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 815 break; 816 case kMips64MulHighU: 817 __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 818 break; 819 case kMips64DMulHigh: 820 __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 821 break; 822 case kMips64Div: 823 __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 824 if (kArchVariant == kMips64r6) { 825 __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 826 } else { 827 __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); 828 } 829 break; 830 case kMips64DivU: 831 __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 832 if (kArchVariant == kMips64r6) { 833 __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 834 } else { 835 __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); 836 } 837 break; 838 case kMips64Mod: 839 __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 840 break; 841 case kMips64ModU: 842 __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 843 break; 844 case kMips64Dmul: 845 __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 846 break; 847 case kMips64Ddiv: 848 __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 849 if (kArchVariant == kMips64r6) { 850 __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 851 } else { 852 __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); 853 } 854 break; 855 case kMips64DdivU: 856 __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 857 if (kArchVariant == kMips64r6) { 858 __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 859 } else { 860 __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); 861 } 862 break; 863 case kMips64Dmod: 864 __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 865 break; 866 case kMips64DmodU: 867 __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 868 break; 869 case kMips64Dlsa: 870 DCHECK(instr->InputAt(2)->IsImmediate()); 871 __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), 872 i.InputInt8(2)); 873 break; 874 case kMips64Lsa: 875 DCHECK(instr->InputAt(2)->IsImmediate()); 876 __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), 877 i.InputInt8(2)); 878 break; 879 case kMips64And: 880 __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 881 break; 882 case kMips64Or: 883 __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 884 break; 885 case kMips64Nor: 886 if (instr->InputAt(1)->IsRegister()) { 887 __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 888 } else { 889 DCHECK(i.InputOperand(1).immediate() == 0); 890 __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); 891 } 892 break; 893 case kMips64Xor: 894 __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 895 break; 896 case kMips64Clz: 897 __ Clz(i.OutputRegister(), i.InputRegister(0)); 898 break; 899 case kMips64Dclz: 900 __ dclz(i.OutputRegister(), i.InputRegister(0)); 901 break; 902 case kMips64Ctz: { 903 Register reg1 = kScratchReg; 904 Register reg2 = kScratchReg2; 905 Label skip_for_zero; 906 Label end; 907 // Branch if the operand is zero 908 __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg)); 909 // Find the number of bits before the last bit set to 1. 910 __ Subu(reg2, zero_reg, i.InputRegister(0)); 911 __ And(reg2, reg2, i.InputRegister(0)); 912 __ clz(reg2, reg2); 913 // Get the number of bits after the last bit set to 1. 914 __ li(reg1, 0x1F); 915 __ Subu(i.OutputRegister(), reg1, reg2); 916 __ Branch(&end); 917 __ bind(&skip_for_zero); 918 // If the operand is zero, return word length as the result. 919 __ li(i.OutputRegister(), 0x20); 920 __ bind(&end); 921 } break; 922 case kMips64Dctz: { 923 Register reg1 = kScratchReg; 924 Register reg2 = kScratchReg2; 925 Label skip_for_zero; 926 Label end; 927 // Branch if the operand is zero 928 __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg)); 929 // Find the number of bits before the last bit set to 1. 930 __ Dsubu(reg2, zero_reg, i.InputRegister(0)); 931 __ And(reg2, reg2, i.InputRegister(0)); 932 __ dclz(reg2, reg2); 933 // Get the number of bits after the last bit set to 1. 934 __ li(reg1, 0x3F); 935 __ Subu(i.OutputRegister(), reg1, reg2); 936 __ Branch(&end); 937 __ bind(&skip_for_zero); 938 // If the operand is zero, return word length as the result. 939 __ li(i.OutputRegister(), 0x40); 940 __ bind(&end); 941 } break; 942 case kMips64Popcnt: { 943 Register reg1 = kScratchReg; 944 Register reg2 = kScratchReg2; 945 uint32_t m1 = 0x55555555; 946 uint32_t m2 = 0x33333333; 947 uint32_t m4 = 0x0f0f0f0f; 948 uint32_t m8 = 0x00ff00ff; 949 uint32_t m16 = 0x0000ffff; 950 951 // Put count of ones in every 2 bits into those 2 bits. 952 __ li(at, m1); 953 __ dsrl(reg1, i.InputRegister(0), 1); 954 __ And(reg2, i.InputRegister(0), at); 955 __ And(reg1, reg1, at); 956 __ Daddu(reg1, reg1, reg2); 957 958 // Put count of ones in every 4 bits into those 4 bits. 959 __ li(at, m2); 960 __ dsrl(reg2, reg1, 2); 961 __ And(reg2, reg2, at); 962 __ And(reg1, reg1, at); 963 __ Daddu(reg1, reg1, reg2); 964 965 // Put count of ones in every 8 bits into those 8 bits. 966 __ li(at, m4); 967 __ dsrl(reg2, reg1, 4); 968 __ And(reg2, reg2, at); 969 __ And(reg1, reg1, at); 970 __ Daddu(reg1, reg1, reg2); 971 972 // Put count of ones in every 16 bits into those 16 bits. 973 __ li(at, m8); 974 __ dsrl(reg2, reg1, 8); 975 __ And(reg2, reg2, at); 976 __ And(reg1, reg1, at); 977 __ Daddu(reg1, reg1, reg2); 978 979 // Calculate total number of ones. 980 __ li(at, m16); 981 __ dsrl(reg2, reg1, 16); 982 __ And(reg2, reg2, at); 983 __ And(reg1, reg1, at); 984 __ Daddu(i.OutputRegister(), reg1, reg2); 985 } break; 986 case kMips64Dpopcnt: { 987 Register reg1 = kScratchReg; 988 Register reg2 = kScratchReg2; 989 uint64_t m1 = 0x5555555555555555; 990 uint64_t m2 = 0x3333333333333333; 991 uint64_t m4 = 0x0f0f0f0f0f0f0f0f; 992 uint64_t m8 = 0x00ff00ff00ff00ff; 993 uint64_t m16 = 0x0000ffff0000ffff; 994 uint64_t m32 = 0x00000000ffffffff; 995 996 // Put count of ones in every 2 bits into those 2 bits. 997 __ li(at, m1); 998 __ dsrl(reg1, i.InputRegister(0), 1); 999 __ and_(reg2, i.InputRegister(0), at); 1000 __ and_(reg1, reg1, at); 1001 __ Daddu(reg1, reg1, reg2); 1002 1003 // Put count of ones in every 4 bits into those 4 bits. 1004 __ li(at, m2); 1005 __ dsrl(reg2, reg1, 2); 1006 __ and_(reg2, reg2, at); 1007 __ and_(reg1, reg1, at); 1008 __ Daddu(reg1, reg1, reg2); 1009 1010 // Put count of ones in every 8 bits into those 8 bits. 1011 __ li(at, m4); 1012 __ dsrl(reg2, reg1, 4); 1013 __ and_(reg2, reg2, at); 1014 __ and_(reg1, reg1, at); 1015 __ Daddu(reg1, reg1, reg2); 1016 1017 // Put count of ones in every 16 bits into those 16 bits. 1018 __ li(at, m8); 1019 __ dsrl(reg2, reg1, 8); 1020 __ and_(reg2, reg2, at); 1021 __ and_(reg1, reg1, at); 1022 __ Daddu(reg1, reg1, reg2); 1023 1024 // Put count of ones in every 32 bits into those 32 bits. 1025 __ li(at, m16); 1026 __ dsrl(reg2, reg1, 16); 1027 __ and_(reg2, reg2, at); 1028 __ and_(reg1, reg1, at); 1029 __ Daddu(reg1, reg1, reg2); 1030 1031 // Calculate total number of ones. 1032 __ li(at, m32); 1033 __ dsrl32(reg2, reg1, 0); 1034 __ and_(reg2, reg2, at); 1035 __ and_(reg1, reg1, at); 1036 __ Daddu(i.OutputRegister(), reg1, reg2); 1037 } break; 1038 case kMips64Shl: 1039 if (instr->InputAt(1)->IsRegister()) { 1040 __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 1041 } else { 1042 int64_t imm = i.InputOperand(1).immediate(); 1043 __ sll(i.OutputRegister(), i.InputRegister(0), 1044 static_cast<uint16_t>(imm)); 1045 } 1046 break; 1047 case kMips64Shr: 1048 if (instr->InputAt(1)->IsRegister()) { 1049 __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 1050 } else { 1051 int64_t imm = i.InputOperand(1).immediate(); 1052 __ srl(i.OutputRegister(), i.InputRegister(0), 1053 static_cast<uint16_t>(imm)); 1054 } 1055 break; 1056 case kMips64Sar: 1057 if (instr->InputAt(1)->IsRegister()) { 1058 __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 1059 } else { 1060 int64_t imm = i.InputOperand(1).immediate(); 1061 __ sra(i.OutputRegister(), i.InputRegister(0), 1062 static_cast<uint16_t>(imm)); 1063 } 1064 break; 1065 case kMips64Ext: 1066 __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), 1067 i.InputInt8(2)); 1068 break; 1069 case kMips64Ins: 1070 if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { 1071 __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); 1072 } else { 1073 __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), 1074 i.InputInt8(2)); 1075 } 1076 break; 1077 case kMips64Dext: { 1078 int16_t pos = i.InputInt8(1); 1079 int16_t size = i.InputInt8(2); 1080 if (size > 0 && size <= 32 && pos >= 0 && pos < 32) { 1081 __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), 1082 i.InputInt8(2)); 1083 } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) { 1084 __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), 1085 i.InputInt8(2)); 1086 } else { 1087 DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64); 1088 __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), 1089 i.InputInt8(2)); 1090 } 1091 break; 1092 } 1093 case kMips64Dins: 1094 if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { 1095 __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); 1096 } else { 1097 __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), 1098 i.InputInt8(2)); 1099 } 1100 break; 1101 case kMips64Dshl: 1102 if (instr->InputAt(1)->IsRegister()) { 1103 __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 1104 } else { 1105 int64_t imm = i.InputOperand(1).immediate(); 1106 if (imm < 32) { 1107 __ dsll(i.OutputRegister(), i.InputRegister(0), 1108 static_cast<uint16_t>(imm)); 1109 } else { 1110 __ dsll32(i.OutputRegister(), i.InputRegister(0), 1111 static_cast<uint16_t>(imm - 32)); 1112 } 1113 } 1114 break; 1115 case kMips64Dshr: 1116 if (instr->InputAt(1)->IsRegister()) { 1117 __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 1118 } else { 1119 int64_t imm = i.InputOperand(1).immediate(); 1120 if (imm < 32) { 1121 __ dsrl(i.OutputRegister(), i.InputRegister(0), 1122 static_cast<uint16_t>(imm)); 1123 } else { 1124 __ dsrl32(i.OutputRegister(), i.InputRegister(0), 1125 static_cast<uint16_t>(imm - 32)); 1126 } 1127 } 1128 break; 1129 case kMips64Dsar: 1130 if (instr->InputAt(1)->IsRegister()) { 1131 __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); 1132 } else { 1133 int64_t imm = i.InputOperand(1).immediate(); 1134 if (imm < 32) { 1135 __ dsra(i.OutputRegister(), i.InputRegister(0), imm); 1136 } else { 1137 __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32); 1138 } 1139 } 1140 break; 1141 case kMips64Ror: 1142 __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 1143 break; 1144 case kMips64Dror: 1145 __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); 1146 break; 1147 case kMips64Tst: 1148 // Pseudo-instruction used for cmp/branch. No opcode emitted here. 1149 break; 1150 case kMips64Cmp: 1151 // Pseudo-instruction used for cmp/branch. No opcode emitted here. 1152 break; 1153 case kMips64Mov: 1154 // TODO(plind): Should we combine mov/li like this, or use separate instr? 1155 // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType 1156 if (HasRegisterInput(instr, 0)) { 1157 __ mov(i.OutputRegister(), i.InputRegister(0)); 1158 } else { 1159 __ li(i.OutputRegister(), i.InputOperand(0)); 1160 } 1161 break; 1162 1163 case kMips64CmpS: 1164 // Psuedo-instruction used for FP cmp/branch. No opcode emitted here. 1165 break; 1166 case kMips64AddS: 1167 // TODO(plind): add special case: combine mult & add. 1168 __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1169 i.InputDoubleRegister(1)); 1170 break; 1171 case kMips64SubS: 1172 __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1173 i.InputDoubleRegister(1)); 1174 break; 1175 case kMips64SubPreserveNanS: 1176 __ SubNanPreservePayloadAndSign_s(i.OutputDoubleRegister(), 1177 i.InputDoubleRegister(0), 1178 i.InputDoubleRegister(1)); 1179 break; 1180 case kMips64MulS: 1181 // TODO(plind): add special case: right op is -1.0, see arm port. 1182 __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1183 i.InputDoubleRegister(1)); 1184 break; 1185 case kMips64DivS: 1186 __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1187 i.InputDoubleRegister(1)); 1188 break; 1189 case kMips64ModS: { 1190 // TODO(bmeurer): We should really get rid of this special instruction, 1191 // and generate a CallAddress instruction instead. 1192 FrameScope scope(masm(), StackFrame::MANUAL); 1193 __ PrepareCallCFunction(0, 2, kScratchReg); 1194 __ MovToFloatParameters(i.InputDoubleRegister(0), 1195 i.InputDoubleRegister(1)); 1196 // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate()) 1197 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), 1198 0, 2); 1199 // Move the result in the double result register. 1200 __ MovFromFloatResult(i.OutputSingleRegister()); 1201 break; 1202 } 1203 case kMips64AbsS: 1204 __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); 1205 break; 1206 case kMips64SqrtS: { 1207 __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1208 break; 1209 } 1210 case kMips64MaxS: 1211 __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1212 i.InputDoubleRegister(1)); 1213 break; 1214 case kMips64MinS: 1215 __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1216 i.InputDoubleRegister(1)); 1217 break; 1218 case kMips64CmpD: 1219 // Psuedo-instruction used for FP cmp/branch. No opcode emitted here. 1220 break; 1221 case kMips64AddD: 1222 // TODO(plind): add special case: combine mult & add. 1223 __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1224 i.InputDoubleRegister(1)); 1225 break; 1226 case kMips64SubD: 1227 __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1228 i.InputDoubleRegister(1)); 1229 break; 1230 case kMips64SubPreserveNanD: 1231 __ SubNanPreservePayloadAndSign_d(i.OutputDoubleRegister(), 1232 i.InputDoubleRegister(0), 1233 i.InputDoubleRegister(1)); 1234 break; 1235 case kMips64MulD: 1236 // TODO(plind): add special case: right op is -1.0, see arm port. 1237 __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1238 i.InputDoubleRegister(1)); 1239 break; 1240 case kMips64DivD: 1241 __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1242 i.InputDoubleRegister(1)); 1243 break; 1244 case kMips64ModD: { 1245 // TODO(bmeurer): We should really get rid of this special instruction, 1246 // and generate a CallAddress instruction instead. 1247 FrameScope scope(masm(), StackFrame::MANUAL); 1248 __ PrepareCallCFunction(0, 2, kScratchReg); 1249 __ MovToFloatParameters(i.InputDoubleRegister(0), 1250 i.InputDoubleRegister(1)); 1251 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), 1252 0, 2); 1253 // Move the result in the double result register. 1254 __ MovFromFloatResult(i.OutputDoubleRegister()); 1255 break; 1256 } 1257 case kMips64AbsD: 1258 __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1259 break; 1260 case kMips64SqrtD: { 1261 __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1262 break; 1263 } 1264 case kMips64MaxD: 1265 __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1266 i.InputDoubleRegister(1)); 1267 break; 1268 case kMips64MinD: 1269 __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1270 i.InputDoubleRegister(1)); 1271 break; 1272 case kMips64Float64RoundDown: { 1273 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor); 1274 break; 1275 } 1276 case kMips64Float32RoundDown: { 1277 ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor); 1278 break; 1279 } 1280 case kMips64Float64RoundTruncate: { 1281 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc); 1282 break; 1283 } 1284 case kMips64Float32RoundTruncate: { 1285 ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc); 1286 break; 1287 } 1288 case kMips64Float64RoundUp: { 1289 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil); 1290 break; 1291 } 1292 case kMips64Float32RoundUp: { 1293 ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil); 1294 break; 1295 } 1296 case kMips64Float64RoundTiesEven: { 1297 ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round); 1298 break; 1299 } 1300 case kMips64Float32RoundTiesEven: { 1301 ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round); 1302 break; 1303 } 1304 case kMips64Float64Max: { 1305 // (b < a) ? a : b 1306 if (kArchVariant == kMips64r6) { 1307 __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1), 1308 i.InputDoubleRegister(0)); 1309 __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1), 1310 i.InputDoubleRegister(0)); 1311 } else { 1312 __ c_d(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); 1313 // Left operand is result, passthrough if false. 1314 __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); 1315 } 1316 break; 1317 } 1318 case kMips64Float64Min: { 1319 // (a < b) ? a : b 1320 if (kArchVariant == kMips64r6) { 1321 __ cmp_d(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1322 i.InputDoubleRegister(1)); 1323 __ sel_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1), 1324 i.InputDoubleRegister(0)); 1325 } else { 1326 __ c_d(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0)); 1327 // Right operand is result, passthrough if false. 1328 __ movt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); 1329 } 1330 break; 1331 } 1332 case kMips64Float64SilenceNaN: 1333 __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); 1334 break; 1335 case kMips64Float32Max: { 1336 // (b < a) ? a : b 1337 if (kArchVariant == kMips64r6) { 1338 __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(1), 1339 i.InputDoubleRegister(0)); 1340 __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1), 1341 i.InputDoubleRegister(0)); 1342 } else { 1343 __ c_s(OLT, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); 1344 // Left operand is result, passthrough if false. 1345 __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); 1346 } 1347 break; 1348 } 1349 case kMips64Float32Min: { 1350 // (a < b) ? a : b 1351 if (kArchVariant == kMips64r6) { 1352 __ cmp_s(OLT, i.OutputDoubleRegister(), i.InputDoubleRegister(0), 1353 i.InputDoubleRegister(1)); 1354 __ sel_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1), 1355 i.InputDoubleRegister(0)); 1356 } else { 1357 __ c_s(OLT, i.InputDoubleRegister(1), i.InputDoubleRegister(0)); 1358 // Right operand is result, passthrough if false. 1359 __ movt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(1)); 1360 } 1361 break; 1362 } 1363 case kMips64CvtSD: 1364 __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0)); 1365 break; 1366 case kMips64CvtDS: 1367 __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0)); 1368 break; 1369 case kMips64CvtDW: { 1370 FPURegister scratch = kScratchDoubleReg; 1371 __ mtc1(i.InputRegister(0), scratch); 1372 __ cvt_d_w(i.OutputDoubleRegister(), scratch); 1373 break; 1374 } 1375 case kMips64CvtSW: { 1376 FPURegister scratch = kScratchDoubleReg; 1377 __ mtc1(i.InputRegister(0), scratch); 1378 __ cvt_s_w(i.OutputDoubleRegister(), scratch); 1379 break; 1380 } 1381 case kMips64CvtSUw: { 1382 __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0)); 1383 break; 1384 } 1385 case kMips64CvtSL: { 1386 FPURegister scratch = kScratchDoubleReg; 1387 __ dmtc1(i.InputRegister(0), scratch); 1388 __ cvt_s_l(i.OutputDoubleRegister(), scratch); 1389 break; 1390 } 1391 case kMips64CvtDL: { 1392 FPURegister scratch = kScratchDoubleReg; 1393 __ dmtc1(i.InputRegister(0), scratch); 1394 __ cvt_d_l(i.OutputDoubleRegister(), scratch); 1395 break; 1396 } 1397 case kMips64CvtDUw: { 1398 __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0)); 1399 break; 1400 } 1401 case kMips64CvtDUl: { 1402 __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0)); 1403 break; 1404 } 1405 case kMips64CvtSUl: { 1406 __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0)); 1407 break; 1408 } 1409 case kMips64FloorWD: { 1410 FPURegister scratch = kScratchDoubleReg; 1411 __ floor_w_d(scratch, i.InputDoubleRegister(0)); 1412 __ mfc1(i.OutputRegister(), scratch); 1413 break; 1414 } 1415 case kMips64CeilWD: { 1416 FPURegister scratch = kScratchDoubleReg; 1417 __ ceil_w_d(scratch, i.InputDoubleRegister(0)); 1418 __ mfc1(i.OutputRegister(), scratch); 1419 break; 1420 } 1421 case kMips64RoundWD: { 1422 FPURegister scratch = kScratchDoubleReg; 1423 __ round_w_d(scratch, i.InputDoubleRegister(0)); 1424 __ mfc1(i.OutputRegister(), scratch); 1425 break; 1426 } 1427 case kMips64TruncWD: { 1428 FPURegister scratch = kScratchDoubleReg; 1429 // Other arches use round to zero here, so we follow. 1430 __ trunc_w_d(scratch, i.InputDoubleRegister(0)); 1431 __ mfc1(i.OutputRegister(), scratch); 1432 break; 1433 } 1434 case kMips64FloorWS: { 1435 FPURegister scratch = kScratchDoubleReg; 1436 __ floor_w_s(scratch, i.InputDoubleRegister(0)); 1437 __ mfc1(i.OutputRegister(), scratch); 1438 break; 1439 } 1440 case kMips64CeilWS: { 1441 FPURegister scratch = kScratchDoubleReg; 1442 __ ceil_w_s(scratch, i.InputDoubleRegister(0)); 1443 __ mfc1(i.OutputRegister(), scratch); 1444 break; 1445 } 1446 case kMips64RoundWS: { 1447 FPURegister scratch = kScratchDoubleReg; 1448 __ round_w_s(scratch, i.InputDoubleRegister(0)); 1449 __ mfc1(i.OutputRegister(), scratch); 1450 break; 1451 } 1452 case kMips64TruncWS: { 1453 FPURegister scratch = kScratchDoubleReg; 1454 __ trunc_w_s(scratch, i.InputDoubleRegister(0)); 1455 __ mfc1(i.OutputRegister(), scratch); 1456 break; 1457 } 1458 case kMips64TruncLS: { 1459 FPURegister scratch = kScratchDoubleReg; 1460 Register tmp_fcsr = kScratchReg; 1461 Register result = kScratchReg2; 1462 1463 bool load_status = instr->OutputCount() > 1; 1464 if (load_status) { 1465 // Save FCSR. 1466 __ cfc1(tmp_fcsr, FCSR); 1467 // Clear FPU flags. 1468 __ ctc1(zero_reg, FCSR); 1469 } 1470 // Other arches use round to zero here, so we follow. 1471 __ trunc_l_s(scratch, i.InputDoubleRegister(0)); 1472 __ dmfc1(i.OutputRegister(), scratch); 1473 if (load_status) { 1474 __ cfc1(result, FCSR); 1475 // Check for overflow and NaNs. 1476 __ andi(result, result, 1477 (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); 1478 __ Slt(result, zero_reg, result); 1479 __ xori(result, result, 1); 1480 __ mov(i.OutputRegister(1), result); 1481 // Restore FCSR 1482 __ ctc1(tmp_fcsr, FCSR); 1483 } 1484 break; 1485 } 1486 case kMips64TruncLD: { 1487 FPURegister scratch = kScratchDoubleReg; 1488 Register tmp_fcsr = kScratchReg; 1489 Register result = kScratchReg2; 1490 1491 bool load_status = instr->OutputCount() > 1; 1492 if (load_status) { 1493 // Save FCSR. 1494 __ cfc1(tmp_fcsr, FCSR); 1495 // Clear FPU flags. 1496 __ ctc1(zero_reg, FCSR); 1497 } 1498 // Other arches use round to zero here, so we follow. 1499 __ trunc_l_d(scratch, i.InputDoubleRegister(0)); 1500 __ dmfc1(i.OutputRegister(0), scratch); 1501 if (load_status) { 1502 __ cfc1(result, FCSR); 1503 // Check for overflow and NaNs. 1504 __ andi(result, result, 1505 (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask)); 1506 __ Slt(result, zero_reg, result); 1507 __ xori(result, result, 1); 1508 __ mov(i.OutputRegister(1), result); 1509 // Restore FCSR 1510 __ ctc1(tmp_fcsr, FCSR); 1511 } 1512 break; 1513 } 1514 case kMips64TruncUwD: { 1515 FPURegister scratch = kScratchDoubleReg; 1516 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function. 1517 __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch); 1518 break; 1519 } 1520 case kMips64TruncUwS: { 1521 FPURegister scratch = kScratchDoubleReg; 1522 // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function. 1523 __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch); 1524 break; 1525 } 1526 case kMips64TruncUlS: { 1527 FPURegister scratch = kScratchDoubleReg; 1528 Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; 1529 // TODO(plind): Fix wrong param order of Trunc_ul_s() macro-asm function. 1530 __ Trunc_ul_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch, 1531 result); 1532 break; 1533 } 1534 case kMips64TruncUlD: { 1535 FPURegister scratch = kScratchDoubleReg; 1536 Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg; 1537 // TODO(plind): Fix wrong param order of Trunc_ul_d() macro-asm function. 1538 __ Trunc_ul_d(i.InputDoubleRegister(0), i.OutputRegister(0), scratch, 1539 result); 1540 break; 1541 } 1542 case kMips64BitcastDL: 1543 __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0)); 1544 break; 1545 case kMips64BitcastLD: 1546 __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister()); 1547 break; 1548 case kMips64Float64ExtractLowWord32: 1549 __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); 1550 break; 1551 case kMips64Float64ExtractHighWord32: 1552 __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0)); 1553 break; 1554 case kMips64Float64InsertLowWord32: 1555 __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); 1556 break; 1557 case kMips64Float64InsertHighWord32: 1558 __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1)); 1559 break; 1560 // ... more basic instructions ... 1561 1562 case kMips64Lbu: 1563 __ lbu(i.OutputRegister(), i.MemoryOperand()); 1564 break; 1565 case kMips64Lb: 1566 __ lb(i.OutputRegister(), i.MemoryOperand()); 1567 break; 1568 case kMips64Sb: 1569 __ sb(i.InputRegister(2), i.MemoryOperand()); 1570 break; 1571 case kMips64Lhu: 1572 __ lhu(i.OutputRegister(), i.MemoryOperand()); 1573 break; 1574 case kMips64Lh: 1575 __ lh(i.OutputRegister(), i.MemoryOperand()); 1576 break; 1577 case kMips64Sh: 1578 __ sh(i.InputRegister(2), i.MemoryOperand()); 1579 break; 1580 case kMips64Lw: 1581 __ lw(i.OutputRegister(), i.MemoryOperand()); 1582 break; 1583 case kMips64Lwu: 1584 __ lwu(i.OutputRegister(), i.MemoryOperand()); 1585 break; 1586 case kMips64Ld: 1587 __ ld(i.OutputRegister(), i.MemoryOperand()); 1588 break; 1589 case kMips64Sw: 1590 __ sw(i.InputRegister(2), i.MemoryOperand()); 1591 break; 1592 case kMips64Sd: 1593 __ sd(i.InputRegister(2), i.MemoryOperand()); 1594 break; 1595 case kMips64Lwc1: { 1596 __ lwc1(i.OutputSingleRegister(), i.MemoryOperand()); 1597 break; 1598 } 1599 case kMips64Swc1: { 1600 size_t index = 0; 1601 MemOperand operand = i.MemoryOperand(&index); 1602 __ swc1(i.InputSingleRegister(index), operand); 1603 break; 1604 } 1605 case kMips64Ldc1: 1606 __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand()); 1607 break; 1608 case kMips64Sdc1: 1609 __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand()); 1610 break; 1611 case kMips64Push: 1612 if (instr->InputAt(0)->IsFPRegister()) { 1613 __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); 1614 __ Subu(sp, sp, Operand(kDoubleSize)); 1615 frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize); 1616 } else { 1617 __ Push(i.InputRegister(0)); 1618 frame_access_state()->IncreaseSPDelta(1); 1619 } 1620 break; 1621 case kMips64StackClaim: { 1622 __ Dsubu(sp, sp, Operand(i.InputInt32(0))); 1623 frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize); 1624 break; 1625 } 1626 case kMips64StoreToStackSlot: { 1627 if (instr->InputAt(0)->IsFPRegister()) { 1628 __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); 1629 } else { 1630 __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); 1631 } 1632 break; 1633 } 1634 case kCheckedLoadInt8: 1635 ASSEMBLE_CHECKED_LOAD_INTEGER(lb); 1636 break; 1637 case kCheckedLoadUint8: 1638 ASSEMBLE_CHECKED_LOAD_INTEGER(lbu); 1639 break; 1640 case kCheckedLoadInt16: 1641 ASSEMBLE_CHECKED_LOAD_INTEGER(lh); 1642 break; 1643 case kCheckedLoadUint16: 1644 ASSEMBLE_CHECKED_LOAD_INTEGER(lhu); 1645 break; 1646 case kCheckedLoadWord32: 1647 ASSEMBLE_CHECKED_LOAD_INTEGER(lw); 1648 break; 1649 case kCheckedLoadWord64: 1650 ASSEMBLE_CHECKED_LOAD_INTEGER(ld); 1651 break; 1652 case kCheckedLoadFloat32: 1653 ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1); 1654 break; 1655 case kCheckedLoadFloat64: 1656 ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1); 1657 break; 1658 case kCheckedStoreWord8: 1659 ASSEMBLE_CHECKED_STORE_INTEGER(sb); 1660 break; 1661 case kCheckedStoreWord16: 1662 ASSEMBLE_CHECKED_STORE_INTEGER(sh); 1663 break; 1664 case kCheckedStoreWord32: 1665 ASSEMBLE_CHECKED_STORE_INTEGER(sw); 1666 break; 1667 case kCheckedStoreWord64: 1668 ASSEMBLE_CHECKED_STORE_INTEGER(sd); 1669 break; 1670 case kCheckedStoreFloat32: 1671 ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1); 1672 break; 1673 case kCheckedStoreFloat64: 1674 ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1); 1675 break; 1676 case kAtomicLoadInt8: 1677 ASSEMBLE_ATOMIC_LOAD_INTEGER(lb); 1678 break; 1679 case kAtomicLoadUint8: 1680 ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu); 1681 break; 1682 case kAtomicLoadInt16: 1683 ASSEMBLE_ATOMIC_LOAD_INTEGER(lh); 1684 break; 1685 case kAtomicLoadUint16: 1686 ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu); 1687 break; 1688 case kAtomicLoadWord32: 1689 ASSEMBLE_ATOMIC_LOAD_INTEGER(lw); 1690 break; 1691 case kAtomicStoreWord8: 1692 ASSEMBLE_ATOMIC_STORE_INTEGER(sb); 1693 break; 1694 case kAtomicStoreWord16: 1695 ASSEMBLE_ATOMIC_STORE_INTEGER(sh); 1696 break; 1697 case kAtomicStoreWord32: 1698 ASSEMBLE_ATOMIC_STORE_INTEGER(sw); 1699 break; 1700 } 1701 return kSuccess; 1702} // NOLINT(readability/fn_size) 1703 1704 1705#define UNSUPPORTED_COND(opcode, condition) \ 1706 OFStream out(stdout); \ 1707 out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \ 1708 UNIMPLEMENTED(); 1709 1710static bool convertCondition(FlagsCondition condition, Condition& cc) { 1711 switch (condition) { 1712 case kEqual: 1713 cc = eq; 1714 return true; 1715 case kNotEqual: 1716 cc = ne; 1717 return true; 1718 case kUnsignedLessThan: 1719 cc = lt; 1720 return true; 1721 case kUnsignedGreaterThanOrEqual: 1722 cc = uge; 1723 return true; 1724 case kUnsignedLessThanOrEqual: 1725 cc = le; 1726 return true; 1727 case kUnsignedGreaterThan: 1728 cc = ugt; 1729 return true; 1730 default: 1731 break; 1732 } 1733 return false; 1734} 1735 1736 1737// Assembles branches after an instruction. 1738void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { 1739 MipsOperandConverter i(this, instr); 1740 Label* tlabel = branch->true_label; 1741 Label* flabel = branch->false_label; 1742 Condition cc = kNoCondition; 1743 // MIPS does not have condition code flags, so compare and branch are 1744 // implemented differently than on the other arch's. The compare operations 1745 // emit mips psuedo-instructions, which are handled here by branch 1746 // instructions that do the actual comparison. Essential that the input 1747 // registers to compare pseudo-op are not modified before this branch op, as 1748 // they are tested here. 1749 1750 if (instr->arch_opcode() == kMips64Tst) { 1751 cc = FlagsConditionToConditionTst(branch->condition); 1752 __ And(at, i.InputRegister(0), i.InputOperand(1)); 1753 __ Branch(tlabel, cc, at, Operand(zero_reg)); 1754 } else if (instr->arch_opcode() == kMips64Dadd || 1755 instr->arch_opcode() == kMips64Dsub) { 1756 cc = FlagsConditionToConditionOvf(branch->condition); 1757 __ dsra32(kScratchReg, i.OutputRegister(), 0); 1758 __ sra(at, i.OutputRegister(), 31); 1759 __ Branch(tlabel, cc, at, Operand(kScratchReg)); 1760 } else if (instr->arch_opcode() == kMips64DaddOvf) { 1761 switch (branch->condition) { 1762 case kOverflow: 1763 __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0), 1764 i.InputOperand(1), tlabel, flabel); 1765 break; 1766 case kNotOverflow: 1767 __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0), 1768 i.InputOperand(1), flabel, tlabel); 1769 break; 1770 default: 1771 UNSUPPORTED_COND(kMips64DaddOvf, branch->condition); 1772 break; 1773 } 1774 } else if (instr->arch_opcode() == kMips64DsubOvf) { 1775 switch (branch->condition) { 1776 case kOverflow: 1777 __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0), 1778 i.InputOperand(1), tlabel, flabel); 1779 break; 1780 case kNotOverflow: 1781 __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0), 1782 i.InputOperand(1), flabel, tlabel); 1783 break; 1784 default: 1785 UNSUPPORTED_COND(kMips64DsubOvf, branch->condition); 1786 break; 1787 } 1788 } else if (instr->arch_opcode() == kMips64Cmp) { 1789 cc = FlagsConditionToConditionCmp(branch->condition); 1790 __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); 1791 } else if (instr->arch_opcode() == kMips64CmpS) { 1792 if (!convertCondition(branch->condition, cc)) { 1793 UNSUPPORTED_COND(kMips64CmpS, branch->condition); 1794 } 1795 FPURegister left = i.InputOrZeroSingleRegister(0); 1796 FPURegister right = i.InputOrZeroSingleRegister(1); 1797 if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) && 1798 !__ IsDoubleZeroRegSet()) { 1799 __ Move(kDoubleRegZero, 0.0); 1800 } 1801 __ BranchF32(tlabel, nullptr, cc, left, right); 1802 } else if (instr->arch_opcode() == kMips64CmpD) { 1803 if (!convertCondition(branch->condition, cc)) { 1804 UNSUPPORTED_COND(kMips64CmpD, branch->condition); 1805 } 1806 FPURegister left = i.InputOrZeroDoubleRegister(0); 1807 FPURegister right = i.InputOrZeroDoubleRegister(1); 1808 if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) && 1809 !__ IsDoubleZeroRegSet()) { 1810 __ Move(kDoubleRegZero, 0.0); 1811 } 1812 __ BranchF64(tlabel, nullptr, cc, left, right); 1813 } else { 1814 PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", 1815 instr->arch_opcode()); 1816 UNIMPLEMENTED(); 1817 } 1818 if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel. 1819} 1820 1821 1822void CodeGenerator::AssembleArchJump(RpoNumber target) { 1823 if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target)); 1824} 1825 1826 1827// Assembles boolean materializations after an instruction. 1828void CodeGenerator::AssembleArchBoolean(Instruction* instr, 1829 FlagsCondition condition) { 1830 MipsOperandConverter i(this, instr); 1831 Label done; 1832 1833 // Materialize a full 32-bit 1 or 0 value. The result register is always the 1834 // last output of the instruction. 1835 Label false_value; 1836 DCHECK_NE(0u, instr->OutputCount()); 1837 Register result = i.OutputRegister(instr->OutputCount() - 1); 1838 Condition cc = kNoCondition; 1839 // MIPS does not have condition code flags, so compare and branch are 1840 // implemented differently than on the other arch's. The compare operations 1841 // emit mips pseudo-instructions, which are checked and handled here. 1842 1843 if (instr->arch_opcode() == kMips64Tst) { 1844 cc = FlagsConditionToConditionTst(condition); 1845 __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); 1846 __ Sltu(result, zero_reg, kScratchReg); 1847 if (cc == eq) { 1848 // Sltu produces 0 for equality, invert the result. 1849 __ xori(result, result, 1); 1850 } 1851 return; 1852 } else if (instr->arch_opcode() == kMips64Dadd || 1853 instr->arch_opcode() == kMips64Dsub) { 1854 cc = FlagsConditionToConditionOvf(condition); 1855 // Check for overflow creates 1 or 0 for result. 1856 __ dsrl32(kScratchReg, i.OutputRegister(), 31); 1857 __ srl(at, i.OutputRegister(), 31); 1858 __ xor_(result, kScratchReg, at); 1859 if (cc == eq) // Toggle result for not overflow. 1860 __ xori(result, result, 1); 1861 return; 1862 } else if (instr->arch_opcode() == kMips64DaddOvf || 1863 instr->arch_opcode() == kMips64DsubOvf) { 1864 Label flabel, tlabel; 1865 switch (instr->arch_opcode()) { 1866 case kMips64DaddOvf: 1867 __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0), 1868 i.InputOperand(1), &flabel); 1869 1870 break; 1871 case kMips64DsubOvf: 1872 __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0), 1873 i.InputOperand(1), &flabel); 1874 break; 1875 default: 1876 UNREACHABLE(); 1877 break; 1878 } 1879 __ li(result, 1); 1880 __ Branch(&tlabel); 1881 __ bind(&flabel); 1882 __ li(result, 0); 1883 __ bind(&tlabel); 1884 } else if (instr->arch_opcode() == kMips64Cmp) { 1885 cc = FlagsConditionToConditionCmp(condition); 1886 switch (cc) { 1887 case eq: 1888 case ne: { 1889 Register left = i.InputRegister(0); 1890 Operand right = i.InputOperand(1); 1891 Register select; 1892 if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) { 1893 // Pass left operand if right is zero. 1894 select = left; 1895 } else { 1896 __ Dsubu(kScratchReg, left, right); 1897 select = kScratchReg; 1898 } 1899 __ Sltu(result, zero_reg, select); 1900 if (cc == eq) { 1901 // Sltu produces 0 for equality, invert the result. 1902 __ xori(result, result, 1); 1903 } 1904 } break; 1905 case lt: 1906 case ge: { 1907 Register left = i.InputRegister(0); 1908 Operand right = i.InputOperand(1); 1909 __ Slt(result, left, right); 1910 if (cc == ge) { 1911 __ xori(result, result, 1); 1912 } 1913 } break; 1914 case gt: 1915 case le: { 1916 Register left = i.InputRegister(1); 1917 Operand right = i.InputOperand(0); 1918 __ Slt(result, left, right); 1919 if (cc == le) { 1920 __ xori(result, result, 1); 1921 } 1922 } break; 1923 case lo: 1924 case hs: { 1925 Register left = i.InputRegister(0); 1926 Operand right = i.InputOperand(1); 1927 __ Sltu(result, left, right); 1928 if (cc == hs) { 1929 __ xori(result, result, 1); 1930 } 1931 } break; 1932 case hi: 1933 case ls: { 1934 Register left = i.InputRegister(1); 1935 Operand right = i.InputOperand(0); 1936 __ Sltu(result, left, right); 1937 if (cc == ls) { 1938 __ xori(result, result, 1); 1939 } 1940 } break; 1941 default: 1942 UNREACHABLE(); 1943 } 1944 return; 1945 } else if (instr->arch_opcode() == kMips64CmpD || 1946 instr->arch_opcode() == kMips64CmpS) { 1947 FPURegister left = i.InputOrZeroDoubleRegister(0); 1948 FPURegister right = i.InputOrZeroDoubleRegister(1); 1949 if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) && 1950 !__ IsDoubleZeroRegSet()) { 1951 __ Move(kDoubleRegZero, 0.0); 1952 } 1953 bool predicate; 1954 FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition); 1955 if (kArchVariant != kMips64r6) { 1956 __ li(result, Operand(1)); 1957 if (instr->arch_opcode() == kMips64CmpD) { 1958 __ c(cc, D, left, right); 1959 } else { 1960 DCHECK(instr->arch_opcode() == kMips64CmpS); 1961 __ c(cc, S, left, right); 1962 } 1963 if (predicate) { 1964 __ Movf(result, zero_reg); 1965 } else { 1966 __ Movt(result, zero_reg); 1967 } 1968 } else { 1969 if (instr->arch_opcode() == kMips64CmpD) { 1970 __ cmp(cc, L, kDoubleCompareReg, left, right); 1971 } else { 1972 DCHECK(instr->arch_opcode() == kMips64CmpS); 1973 __ cmp(cc, W, kDoubleCompareReg, left, right); 1974 } 1975 __ dmfc1(result, kDoubleCompareReg); 1976 __ andi(result, result, 1); // Cmp returns all 1's/0's, use only LSB. 1977 1978 if (!predicate) // Toggle result for not equal. 1979 __ xori(result, result, 1); 1980 } 1981 return; 1982 } else { 1983 PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n", 1984 instr->arch_opcode()); 1985 TRACE_UNIMPL(); 1986 UNIMPLEMENTED(); 1987 } 1988} 1989 1990 1991void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) { 1992 MipsOperandConverter i(this, instr); 1993 Register input = i.InputRegister(0); 1994 for (size_t index = 2; index < instr->InputCount(); index += 2) { 1995 __ li(at, Operand(i.InputInt32(index + 0))); 1996 __ beq(input, at, GetLabel(i.InputRpo(index + 1))); 1997 } 1998 __ nop(); // Branch delay slot of the last beq. 1999 AssembleArchJump(i.InputRpo(1)); 2000} 2001 2002void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { 2003 MipsOperandConverter i(this, instr); 2004 Register input = i.InputRegister(0); 2005 size_t const case_count = instr->InputCount() - 2; 2006 2007 __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count)); 2008 __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) { 2009 return GetLabel(i.InputRpo(index + 2)); 2010 }); 2011} 2012 2013CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( 2014 int deoptimization_id, Deoptimizer::BailoutType bailout_type) { 2015 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( 2016 isolate(), deoptimization_id, bailout_type); 2017 if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts; 2018 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); 2019 return kSuccess; 2020} 2021 2022void CodeGenerator::FinishFrame(Frame* frame) { 2023 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); 2024 2025 const RegList saves_fpu = descriptor->CalleeSavedFPRegisters(); 2026 if (saves_fpu != 0) { 2027 int count = base::bits::CountPopulation32(saves_fpu); 2028 DCHECK(kNumCalleeSavedFPU == count); 2029 frame->AllocateSavedCalleeRegisterSlots(count * 2030 (kDoubleSize / kPointerSize)); 2031 } 2032 2033 const RegList saves = descriptor->CalleeSavedRegisters(); 2034 if (saves != 0) { 2035 int count = base::bits::CountPopulation32(saves); 2036 DCHECK(kNumCalleeSaved == count + 1); 2037 frame->AllocateSavedCalleeRegisterSlots(count); 2038 } 2039} 2040 2041void CodeGenerator::AssembleConstructFrame() { 2042 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); 2043 if (frame_access_state()->has_frame()) { 2044 if (descriptor->IsCFunctionCall()) { 2045 __ Push(ra, fp); 2046 __ mov(fp, sp); 2047 } else if (descriptor->IsJSFunctionCall()) { 2048 __ Prologue(this->info()->GeneratePreagedPrologue()); 2049 } else { 2050 __ StubPrologue(info()->GetOutputStackFrameType()); 2051 } 2052 } 2053 2054 int shrink_slots = frame()->GetSpillSlotCount(); 2055 2056 if (info()->is_osr()) { 2057 // TurboFan OSR-compiled functions cannot be entered directly. 2058 __ Abort(kShouldNotDirectlyEnterOsrFunction); 2059 2060 // Unoptimized code jumps directly to this entrypoint while the unoptimized 2061 // frame is still on the stack. Optimized code uses OSR values directly from 2062 // the unoptimized frame. Thus, all that needs to be done is to allocate the 2063 // remaining stack slots. 2064 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --"); 2065 osr_pc_offset_ = __ pc_offset(); 2066 shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots(); 2067 } 2068 2069 if (shrink_slots > 0) { 2070 __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize)); 2071 } 2072 2073 const RegList saves_fpu = descriptor->CalleeSavedFPRegisters(); 2074 if (saves_fpu != 0) { 2075 // Save callee-saved FPU registers. 2076 __ MultiPushFPU(saves_fpu); 2077 DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu)); 2078 } 2079 2080 const RegList saves = descriptor->CalleeSavedRegisters(); 2081 if (saves != 0) { 2082 // Save callee-saved registers. 2083 __ MultiPush(saves); 2084 DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1); 2085 } 2086} 2087 2088 2089void CodeGenerator::AssembleReturn() { 2090 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor(); 2091 2092 // Restore GP registers. 2093 const RegList saves = descriptor->CalleeSavedRegisters(); 2094 if (saves != 0) { 2095 __ MultiPop(saves); 2096 } 2097 2098 // Restore FPU registers. 2099 const RegList saves_fpu = descriptor->CalleeSavedFPRegisters(); 2100 if (saves_fpu != 0) { 2101 __ MultiPopFPU(saves_fpu); 2102 } 2103 2104 if (descriptor->IsCFunctionCall()) { 2105 AssembleDeconstructFrame(); 2106 } else if (frame_access_state()->has_frame()) { 2107 // Canonicalize JSFunction return sites for now. 2108 if (return_label_.is_bound()) { 2109 __ Branch(&return_label_); 2110 return; 2111 } else { 2112 __ bind(&return_label_); 2113 AssembleDeconstructFrame(); 2114 } 2115 } 2116 int pop_count = static_cast<int>(descriptor->StackParameterCount()); 2117 if (pop_count != 0) { 2118 __ DropAndRet(pop_count); 2119 } else { 2120 __ Ret(); 2121 } 2122} 2123 2124 2125void CodeGenerator::AssembleMove(InstructionOperand* source, 2126 InstructionOperand* destination) { 2127 MipsOperandConverter g(this, nullptr); 2128 // Dispatch on the source and destination operand kinds. Not all 2129 // combinations are possible. 2130 if (source->IsRegister()) { 2131 DCHECK(destination->IsRegister() || destination->IsStackSlot()); 2132 Register src = g.ToRegister(source); 2133 if (destination->IsRegister()) { 2134 __ mov(g.ToRegister(destination), src); 2135 } else { 2136 __ sd(src, g.ToMemOperand(destination)); 2137 } 2138 } else if (source->IsStackSlot()) { 2139 DCHECK(destination->IsRegister() || destination->IsStackSlot()); 2140 MemOperand src = g.ToMemOperand(source); 2141 if (destination->IsRegister()) { 2142 __ ld(g.ToRegister(destination), src); 2143 } else { 2144 Register temp = kScratchReg; 2145 __ ld(temp, src); 2146 __ sd(temp, g.ToMemOperand(destination)); 2147 } 2148 } else if (source->IsConstant()) { 2149 Constant src = g.ToConstant(source); 2150 if (destination->IsRegister() || destination->IsStackSlot()) { 2151 Register dst = 2152 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg; 2153 switch (src.type()) { 2154 case Constant::kInt32: 2155 if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) { 2156 __ li(dst, Operand(src.ToInt32(), src.rmode())); 2157 } else { 2158 __ li(dst, Operand(src.ToInt32())); 2159 } 2160 break; 2161 case Constant::kFloat32: 2162 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED)); 2163 break; 2164 case Constant::kInt64: 2165 if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE || 2166 src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) { 2167 __ li(dst, Operand(src.ToInt64(), src.rmode())); 2168 } else { 2169 DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE); 2170 __ li(dst, Operand(src.ToInt64())); 2171 } 2172 break; 2173 case Constant::kFloat64: 2174 __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED)); 2175 break; 2176 case Constant::kExternalReference: 2177 __ li(dst, Operand(src.ToExternalReference())); 2178 break; 2179 case Constant::kHeapObject: { 2180 Handle<HeapObject> src_object = src.ToHeapObject(); 2181 Heap::RootListIndex index; 2182 int slot; 2183 if (IsMaterializableFromFrame(src_object, &slot)) { 2184 __ ld(dst, g.SlotToMemOperand(slot)); 2185 } else if (IsMaterializableFromRoot(src_object, &index)) { 2186 __ LoadRoot(dst, index); 2187 } else { 2188 __ li(dst, src_object); 2189 } 2190 break; 2191 } 2192 case Constant::kRpoNumber: 2193 UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64. 2194 break; 2195 } 2196 if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination)); 2197 } else if (src.type() == Constant::kFloat32) { 2198 if (destination->IsFPStackSlot()) { 2199 MemOperand dst = g.ToMemOperand(destination); 2200 __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32()))); 2201 __ sw(at, dst); 2202 } else { 2203 FloatRegister dst = g.ToSingleRegister(destination); 2204 __ Move(dst, src.ToFloat32()); 2205 } 2206 } else { 2207 DCHECK_EQ(Constant::kFloat64, src.type()); 2208 DoubleRegister dst = destination->IsFPRegister() 2209 ? g.ToDoubleRegister(destination) 2210 : kScratchDoubleReg; 2211 __ Move(dst, src.ToFloat64()); 2212 if (destination->IsFPStackSlot()) { 2213 __ sdc1(dst, g.ToMemOperand(destination)); 2214 } 2215 } 2216 } else if (source->IsFPRegister()) { 2217 FPURegister src = g.ToDoubleRegister(source); 2218 if (destination->IsFPRegister()) { 2219 FPURegister dst = g.ToDoubleRegister(destination); 2220 __ Move(dst, src); 2221 } else { 2222 DCHECK(destination->IsFPStackSlot()); 2223 __ sdc1(src, g.ToMemOperand(destination)); 2224 } 2225 } else if (source->IsFPStackSlot()) { 2226 DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot()); 2227 MemOperand src = g.ToMemOperand(source); 2228 if (destination->IsFPRegister()) { 2229 __ ldc1(g.ToDoubleRegister(destination), src); 2230 } else { 2231 FPURegister temp = kScratchDoubleReg; 2232 __ ldc1(temp, src); 2233 __ sdc1(temp, g.ToMemOperand(destination)); 2234 } 2235 } else { 2236 UNREACHABLE(); 2237 } 2238} 2239 2240 2241void CodeGenerator::AssembleSwap(InstructionOperand* source, 2242 InstructionOperand* destination) { 2243 MipsOperandConverter g(this, nullptr); 2244 // Dispatch on the source and destination operand kinds. Not all 2245 // combinations are possible. 2246 if (source->IsRegister()) { 2247 // Register-register. 2248 Register temp = kScratchReg; 2249 Register src = g.ToRegister(source); 2250 if (destination->IsRegister()) { 2251 Register dst = g.ToRegister(destination); 2252 __ Move(temp, src); 2253 __ Move(src, dst); 2254 __ Move(dst, temp); 2255 } else { 2256 DCHECK(destination->IsStackSlot()); 2257 MemOperand dst = g.ToMemOperand(destination); 2258 __ mov(temp, src); 2259 __ ld(src, dst); 2260 __ sd(temp, dst); 2261 } 2262 } else if (source->IsStackSlot()) { 2263 DCHECK(destination->IsStackSlot()); 2264 Register temp_0 = kScratchReg; 2265 Register temp_1 = kScratchReg2; 2266 MemOperand src = g.ToMemOperand(source); 2267 MemOperand dst = g.ToMemOperand(destination); 2268 __ ld(temp_0, src); 2269 __ ld(temp_1, dst); 2270 __ sd(temp_0, dst); 2271 __ sd(temp_1, src); 2272 } else if (source->IsFPRegister()) { 2273 FPURegister temp = kScratchDoubleReg; 2274 FPURegister src = g.ToDoubleRegister(source); 2275 if (destination->IsFPRegister()) { 2276 FPURegister dst = g.ToDoubleRegister(destination); 2277 __ Move(temp, src); 2278 __ Move(src, dst); 2279 __ Move(dst, temp); 2280 } else { 2281 DCHECK(destination->IsFPStackSlot()); 2282 MemOperand dst = g.ToMemOperand(destination); 2283 __ Move(temp, src); 2284 __ ldc1(src, dst); 2285 __ sdc1(temp, dst); 2286 } 2287 } else if (source->IsFPStackSlot()) { 2288 DCHECK(destination->IsFPStackSlot()); 2289 Register temp_0 = kScratchReg; 2290 FPURegister temp_1 = kScratchDoubleReg; 2291 MemOperand src0 = g.ToMemOperand(source); 2292 MemOperand src1(src0.rm(), src0.offset() + kIntSize); 2293 MemOperand dst0 = g.ToMemOperand(destination); 2294 MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize); 2295 __ ldc1(temp_1, dst0); // Save destination in temp_1. 2296 __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. 2297 __ sw(temp_0, dst0); 2298 __ lw(temp_0, src1); 2299 __ sw(temp_0, dst1); 2300 __ sdc1(temp_1, src0); 2301 } else { 2302 // No other combinations are possible. 2303 UNREACHABLE(); 2304 } 2305} 2306 2307 2308void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { 2309 // On 64-bit MIPS we emit the jump tables inline. 2310 UNREACHABLE(); 2311} 2312 2313 2314void CodeGenerator::EnsureSpaceForLazyDeopt() { 2315 if (!info()->ShouldEnsureSpaceForLazyDeopt()) { 2316 return; 2317 } 2318 2319 int space_needed = Deoptimizer::patch_size(); 2320 // Ensure that we have enough space after the previous lazy-bailout 2321 // instruction for patching the code here. 2322 int current_pc = masm()->pc_offset(); 2323 if (current_pc < last_lazy_deopt_pc_ + space_needed) { 2324 // Block tramoline pool emission for duration of padding. 2325 v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( 2326 masm()); 2327 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 2328 DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize); 2329 while (padding_size > 0) { 2330 __ nop(); 2331 padding_size -= v8::internal::Assembler::kInstrSize; 2332 } 2333 } 2334} 2335 2336#undef __ 2337 2338} // namespace compiler 2339} // namespace internal 2340} // namespace v8 2341