1// Copyright (c) 1994-2006 Sun Microsystems Inc. 2// All Rights Reserved. 3// 4// Redistribution and use in source and binary forms, with or without 5// modification, are permitted provided that the following conditions are 6// met: 7// 8// - Redistributions of source code must retain the above copyright notice, 9// this list of conditions and the following disclaimer. 10// 11// - Redistribution in binary form must reproduce the above copyright 12// notice, this list of conditions and the following disclaimer in the 13// documentation and/or other materials provided with the distribution. 14// 15// - Neither the name of Sun Microsystems or the names of contributors may 16// be used to endorse or promote products derived from this software without 17// specific prior written permission. 18// 19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31// The original source code covered by the above license above has been 32// modified significantly by Google Inc. 33// Copyright 2012 the V8 project authors. All rights reserved. 
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
#define V8_MIPS_ASSEMBLER_MIPS_H_

#include <stdio.h>

#include <set>

#include "src/assembler.h"
#include "src/mips/constants-mips.h"

namespace v8 {
namespace internal {

// clang-format off

// X-macro listing all 32 general-purpose registers in ascending code order
// (zero_reg = 0 ... ra = 31). Expanded below to generate Register::Code and
// the per-register constants.
#define GENERAL_REGISTERS(V)                                   \
  V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3)        \
  V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7)              \
  V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9)  \
  V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)

// Subset of the general registers available to the register allocator.
#define ALLOCATABLE_GENERAL_REGISTERS(V)  \
  V(v0) V(v1) V(a0) V(a1) V(a2) V(a3)     \
  V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7)

// All 32 FPU (coprocessor 1) registers.
#define DOUBLE_REGISTERS(V)                               \
  V(f0)  V(f1)  V(f2)  V(f3)  V(f4)  V(f5)  V(f6)  V(f7)  \
  V(f8)  V(f9)  V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
  V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
  V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)

// On this port floats and doubles share the same register file (O32 ABI).
#define FLOAT_REGISTERS DOUBLE_REGISTERS

// Allocatable double registers: even-numbered only, since doubles occupy
// even/odd register pairs under O32 (f28/f30 are reserved, see below).
#define ALLOCATABLE_DOUBLE_REGISTERS(V)           \
  V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
  V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
// clang-format on

// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
// compatible with int, which has caused code-generation bugs.
//
// 2) We would prefer to use a class instead of a struct but we don't like
// the register initialization to depend on the particular initialization
// order (which appears to be different on OS X, Linux, and Windows for the
// installed versions of C++ we tried). Using a struct permits C-style
// "initialization". Also, the Register objects cannot be const as this
// forces initialization stubs in MSVC, making us dependent on initialization
// order.
// 3) By not using an enum, we are possibly preventing the compiler from
// doing certain constant folds, which may significantly reduce the
// code generated for some assembly instructions (because they boil down
// to a few constants). If this is a problem, we could change the code
// such that we use an enum in optimized mode, and the struct in debug
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.


// -----------------------------------------------------------------------------
// Implementation of Register and FPURegister.

// A general-purpose CPU register, represented as a plain struct wrapping its
// numeric code so it supports C-style static initialization (see rationale
// above).
struct Register {
  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.

  // Register codes, generated from GENERAL_REGISTERS in list order.
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    GENERAL_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
    kAfterLast,
    kCode_no_reg = -1
  };

  static const int kNumRegisters = Code::kAfterLast;

  // Word offsets of the mantissa and exponent halves of a 64-bit double in
  // memory; the two words swap places on big-endian targets.
#if defined(V8_TARGET_LITTLE_ENDIAN)
  static const int kMantissaOffset = 0;
  static const int kExponentOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
  static const int kMantissaOffset = 4;
  static const int kExponentOffset = 0;
#else
#error Unknown endianness
#endif


  // Builds a Register from a code; the code must be in range.
  static Register from_code(int code) {
    DCHECK(code >= 0);
    DCHECK(code < kNumRegisters);
    Register r = {code};
    return r;
  }
  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
  bool is(Register reg) const { return reg_code == reg.reg_code; }
  int code() const {
    DCHECK(is_valid());
    return reg_code;
  }
  // Single-bit mask for this register: 1 << code.
  int bit() const {
    DCHECK(is_valid());
    return 1 << reg_code;
  }

  // Unfortunately we can't make this private in a struct.
  int reg_code;
};

// s7: context register
// s3: lithium scratch
// s4: lithium scratch2
#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};


int ToNumber(Register reg);

Register ToRegister(int num);

static const bool kSimpleFPAliasing = true;

// Coprocessor register.
struct FPURegister {
  // Register codes, generated from DOUBLE_REGISTERS in list order.
  enum Code {
#define REGISTER_CODE(R) kCode_##R,
    DOUBLE_REGISTERS(REGISTER_CODE)
#undef REGISTER_CODE
    kAfterLast,
    kCode_no_reg = -1
  };

  static const int kMaxNumRegisters = Code::kAfterLast;

  inline static int NumRegisters();

  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
  // number of Double regs (64-bit regs, or FPU-reg-pairs).

  bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
  bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
  FPURegister low() const {
    // Find low reg of a Double-reg pair, which is the reg itself.
    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
    FPURegister reg;
    reg.reg_code = reg_code;
    DCHECK(reg.is_valid());
    return reg;
  }
  FPURegister high() const {
    // Find high reg of a Double-reg pair, which is reg + 1.
    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
    FPURegister reg;
    reg.reg_code = reg_code + 1;
    DCHECK(reg.is_valid());
    return reg;
  }

  int code() const {
    DCHECK(is_valid());
    return reg_code;
  }
  // Single-bit mask for this register: 1 << code.
  int bit() const {
    DCHECK(is_valid());
    return 1 << reg_code;
  }

  // Builds an FPURegister from a code. NOTE(review): unlike
  // Register::from_code, the code is not range-checked here.
  static FPURegister from_code(int code) {
    FPURegister r = {code};
    return r;
  }
  void setcode(int f) {
    reg_code = f;
    DCHECK(is_valid());
  }
  // Unfortunately we can't make this private in a struct.
  int reg_code;
};

// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
//  f28: 0.0
//  f30: scratch register.

// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
// on f0 really uses f0 and f1.
// (Modern mips hardware also supports 32 64-bit registers, via setting
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
// but it is not in common use. Someday we will want to support this in v8.)

// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
typedef FPURegister FloatRegister;

typedef FPURegister DoubleRegister;

// TODO(mips) Define SIMD registers.
typedef FPURegister Simd128Register;

const DoubleRegister no_freg = {-1};

const DoubleRegister f0 = {0};  // Return value in hard float mode.
const DoubleRegister f1 = {1};
const DoubleRegister f2 = {2};
const DoubleRegister f3 = {3};
const DoubleRegister f4 = {4};
const DoubleRegister f5 = {5};
const DoubleRegister f6 = {6};
const DoubleRegister f7 = {7};
const DoubleRegister f8 = {8};
const DoubleRegister f9 = {9};
const DoubleRegister f10 = {10};
const DoubleRegister f11 = {11};
const DoubleRegister f12 = {12};  // Arg 0 in hard float mode.
const DoubleRegister f13 = {13};
const DoubleRegister f14 = {14};  // Arg 1 in hard float mode.
const DoubleRegister f15 = {15};
const DoubleRegister f16 = {16};
const DoubleRegister f17 = {17};
const DoubleRegister f18 = {18};
const DoubleRegister f19 = {19};
const DoubleRegister f20 = {20};
const DoubleRegister f21 = {21};
const DoubleRegister f22 = {22};
const DoubleRegister f23 = {23};
const DoubleRegister f24 = {24};
const DoubleRegister f25 = {25};
const DoubleRegister f26 = {26};
const DoubleRegister f27 = {27};
const DoubleRegister f28 = {28};
const DoubleRegister f29 = {29};
const DoubleRegister f30 = {30};
const DoubleRegister f31 = {31};

// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
// complains otherwise when a compilation unit that includes this header
// doesn't use the variables.
#define kRootRegister s6
#define cp s7
#define kLithiumScratchReg s3
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
// We use the last non-callee saved odd register for O32 ABI
#define kDoubleCompareReg f19

// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
struct FPUControlRegister {
  // Only the FCSR code counts as valid, since it is the only control
  // register this port implements.
  bool is_valid() const { return reg_code == kFCSRRegister; }
  bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
  int code() const {
    DCHECK(is_valid());
    return reg_code;
  }
  // Single-bit mask for this register: 1 << code.
  int bit() const {
    DCHECK(is_valid());
    return 1 << reg_code;
  }
  void setcode(int f) {
    reg_code = f;
    DCHECK(is_valid());
  }
  // Unfortunately we can't make this private in a struct.
  int reg_code;
};

const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };

// -----------------------------------------------------------------------------
// Machine instruction Operands.

// Class Operand represents a shifter operand in data processing instructions.
// It holds either a register or an immediate (with relocation mode); exactly
// one of the two is meaningful, discriminated by is_reg().
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int32_t immediate,
                          RelocInfo::Mode rmode = RelocInfo::NONE32));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  // Immediate payload; must not be called on a register operand.
  inline int32_t immediate() const {
    DCHECK(!is_reg());
    return imm32_;
  }

  Register rm() const { return rm_; }

 private:
  Register rm_;
  int32_t imm32_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};


// On MIPS we have only one addressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
  // Immediate value attached to offset.
355 enum OffsetAddend { 356 offset_minus_one = -1, 357 offset_zero = 0 358 }; 359 360 explicit MemOperand(Register rn, int32_t offset = 0); 361 explicit MemOperand(Register rn, int32_t unit, int32_t multiplier, 362 OffsetAddend offset_addend = offset_zero); 363 int32_t offset() const { return offset_; } 364 365 bool OffsetIsInt16Encodable() const { 366 return is_int16(offset_); 367 } 368 369 private: 370 int32_t offset_; 371 372 friend class Assembler; 373}; 374 375 376class Assembler : public AssemblerBase { 377 public: 378 // Create an assembler. Instructions and relocation information are emitted 379 // into a buffer, with the instructions starting from the beginning and the 380 // relocation information starting from the end of the buffer. See CodeDesc 381 // for a detailed comment on the layout (globals.h). 382 // 383 // If the provided buffer is NULL, the assembler allocates and grows its own 384 // buffer, and buffer_size determines the initial buffer size. The buffer is 385 // owned by the assembler and deallocated upon destruction of the assembler. 386 // 387 // If the provided buffer is not NULL, the assembler uses the provided buffer 388 // for code generation and assumes its size to be buffer_size. If the buffer 389 // is too small, a fatal error occurs. No deallocation of the buffer is done 390 // upon destruction of the assembler. 391 Assembler(Isolate* isolate, void* buffer, int buffer_size); 392 virtual ~Assembler() { } 393 394 // GetCode emits any pending (non-emitted) code and fills the descriptor 395 // desc. GetCode() is idempotent; it returns the same result if no other 396 // Assembler functions are invoked in between GetCode() calls. 397 void GetCode(CodeDesc* desc); 398 399 // Label operations & relative jumps (PPUM Appendix D). 400 // 401 // Takes a branch opcode (cc) and a label (L) and generates 402 // either a backward branch or a forward branch and links it 403 // to the label fixup chain. 
Usage: 404 // 405 // Label L; // unbound label 406 // j(cc, &L); // forward branch to unbound label 407 // bind(&L); // bind label to the current pc 408 // j(cc, &L); // backward branch to bound label 409 // bind(&L); // illegal: a label may be bound only once 410 // 411 // Note: The same Label can be used for forward and backward branches 412 // but it may be bound only once. 413 void bind(Label* L); // Binds an unbound label L to current code position. 414 415 enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 }; 416 417 // Determines if Label is bound and near enough so that branch instruction 418 // can be used to reach it, instead of jump instruction. 419 bool is_near(Label* L); 420 bool is_near(Label* L, OffsetSize bits); 421 bool is_near_branch(Label* L); 422 inline bool is_near_pre_r6(Label* L) { 423 DCHECK(!IsMipsArchVariant(kMips32r6)); 424 return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize; 425 } 426 inline bool is_near_r6(Label* L) { 427 DCHECK(IsMipsArchVariant(kMips32r6)); 428 return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize; 429 } 430 431 int BranchOffset(Instr instr); 432 433 // Returns the branch offset to the given label from the current code 434 // position. Links the label to the current position if it is still unbound. 435 // Manages the jump elimination optimization if the second parameter is true. 
436 int32_t branch_offset_helper(Label* L, OffsetSize bits); 437 inline int32_t branch_offset(Label* L) { 438 return branch_offset_helper(L, OffsetSize::kOffset16); 439 } 440 inline int32_t branch_offset21(Label* L) { 441 return branch_offset_helper(L, OffsetSize::kOffset21); 442 } 443 inline int32_t branch_offset26(Label* L) { 444 return branch_offset_helper(L, OffsetSize::kOffset26); 445 } 446 inline int32_t shifted_branch_offset(Label* L) { 447 return branch_offset(L) >> 2; 448 } 449 inline int32_t shifted_branch_offset21(Label* L) { 450 return branch_offset21(L) >> 2; 451 } 452 inline int32_t shifted_branch_offset26(Label* L) { 453 return branch_offset26(L) >> 2; 454 } 455 uint32_t jump_address(Label* L); 456 457 // Puts a labels target address at the given position. 458 // The high 8 bits are set to zero. 459 void label_at_put(Label* L, int at_offset); 460 461 // Read/Modify the code target address in the branch/call instruction at pc. 462 static Address target_address_at(Address pc); 463 static void set_target_address_at( 464 Isolate* isolate, Address pc, Address target, 465 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED); 466 // On MIPS there is no Constant Pool so we skip that parameter. 467 INLINE(static Address target_address_at(Address pc, Address constant_pool)) { 468 return target_address_at(pc); 469 } 470 INLINE(static void set_target_address_at( 471 Isolate* isolate, Address pc, Address constant_pool, Address target, 472 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) { 473 set_target_address_at(isolate, pc, target, icache_flush_mode); 474 } 475 INLINE(static Address target_address_at(Address pc, Code* code)) { 476 Address constant_pool = code ? 
code->constant_pool() : NULL; 477 return target_address_at(pc, constant_pool); 478 } 479 INLINE(static void set_target_address_at( 480 Isolate* isolate, Address pc, Code* code, Address target, 481 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) { 482 Address constant_pool = code ? code->constant_pool() : NULL; 483 set_target_address_at(isolate, pc, constant_pool, target, 484 icache_flush_mode); 485 } 486 487 // Return the code target address at a call site from the return address 488 // of that call in the instruction stream. 489 inline static Address target_address_from_return_address(Address pc); 490 491 static void QuietNaN(HeapObject* nan); 492 493 // This sets the branch destination (which gets loaded at the call address). 494 // This is for calls and branches within generated code. The serializer 495 // has already deserialized the lui/ori instructions etc. 496 inline static void deserialization_set_special_target_at( 497 Isolate* isolate, Address instruction_payload, Code* code, 498 Address target) { 499 set_target_address_at( 500 isolate, 501 instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code, 502 target); 503 } 504 505 // This sets the internal reference at the pc. 506 inline static void deserialization_set_target_internal_reference_at( 507 Isolate* isolate, Address pc, Address target, 508 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE); 509 510 // Size of an instruction. 511 static const int kInstrSize = sizeof(Instr); 512 513 // Difference between address of current opcode and target address offset. 514 static const int kBranchPCOffset = 4; 515 516 // Here we are patching the address in the LUI/ORI instruction pair. 
517 // These values are used in the serialization process and must be zero for 518 // MIPS platform, as Code, Embedded Object or External-reference pointers 519 // are split across two consecutive instructions and don't exist separately 520 // in the code, so the serializer should not step forwards in memory after 521 // a target is resolved and written. 522 static const int kSpecialTargetSize = 0; 523 524 // Number of consecutive instructions used to store 32bit constant. This 525 // constant is used in RelocInfo::target_address_address() function to tell 526 // serializer address of the instruction that follows LUI/ORI instruction 527 // pair. 528 static const int kInstructionsFor32BitConstant = 2; 529 530 // Distance between the instruction referring to the address of the call 531 // target and the return address. 532#ifdef _MIPS_ARCH_MIPS32R6 533 static const int kCallTargetAddressOffset = 3 * kInstrSize; 534#else 535 static const int kCallTargetAddressOffset = 4 * kInstrSize; 536#endif 537 538 // Distance between start of patched debug break slot and the emitted address 539 // to jump to. 540 static const int kPatchDebugBreakSlotAddressOffset = 4 * kInstrSize; 541 542 // Difference between address of current opcode and value read from pc 543 // register. 544 static const int kPcLoadDelta = 4; 545 546#ifdef _MIPS_ARCH_MIPS32R6 547 static const int kDebugBreakSlotInstructions = 3; 548#else 549 static const int kDebugBreakSlotInstructions = 4; 550#endif 551 static const int kDebugBreakSlotLength = 552 kDebugBreakSlotInstructions * kInstrSize; 553 554 555 // --------------------------------------------------------------------------- 556 // Code generation. 557 558 // Insert the smallest number of nop instructions 559 // possible to align the pc offset to a multiple 560 // of m. m must be a power of 2 (>= 4). 561 void Align(int m); 562 // Insert the smallest number of zero bytes possible to align the pc offset 563 // to a mulitple of m. 
m must be a power of 2 (>= 2). 564 void DataAlign(int m); 565 // Aligns code to something that's optimal for a jump target for the platform. 566 void CodeTargetAlign(); 567 568 // Different nop operations are used by the code generator to detect certain 569 // states of the generated code. 570 enum NopMarkerTypes { 571 NON_MARKING_NOP = 0, 572 DEBUG_BREAK_NOP, 573 // IC markers. 574 PROPERTY_ACCESS_INLINED, 575 PROPERTY_ACCESS_INLINED_CONTEXT, 576 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, 577 // Helper values. 578 LAST_CODE_MARKER, 579 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, 580 // Code aging 581 CODE_AGE_MARKER_NOP = 6, 582 CODE_AGE_SEQUENCE_NOP 583 }; 584 585 // Type == 0 is the default non-marking nop. For mips this is a 586 // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero 587 // marking, to avoid conflict with ssnop and ehb instructions. 588 void nop(unsigned int type = 0) { 589 DCHECK(type < 32); 590 Register nop_rt_reg = (type == 0) ? zero_reg : at; 591 sll(zero_reg, nop_rt_reg, type, true); 592 } 593 594 595 // --------Branch-and-jump-instructions---------- 596 // We don't use likely variant of instructions. 
597 void b(int16_t offset); 598 inline void b(Label* L) { b(shifted_branch_offset(L)); } 599 void bal(int16_t offset); 600 inline void bal(Label* L) { bal(shifted_branch_offset(L)); } 601 void bc(int32_t offset); 602 inline void bc(Label* L) { bc(shifted_branch_offset26(L)); } 603 void balc(int32_t offset); 604 inline void balc(Label* L) { balc(shifted_branch_offset26(L)); } 605 606 void beq(Register rs, Register rt, int16_t offset); 607 inline void beq(Register rs, Register rt, Label* L) { 608 beq(rs, rt, shifted_branch_offset(L)); 609 } 610 void bgez(Register rs, int16_t offset); 611 void bgezc(Register rt, int16_t offset); 612 inline void bgezc(Register rt, Label* L) { 613 bgezc(rt, shifted_branch_offset(L)); 614 } 615 void bgeuc(Register rs, Register rt, int16_t offset); 616 inline void bgeuc(Register rs, Register rt, Label* L) { 617 bgeuc(rs, rt, shifted_branch_offset(L)); 618 } 619 void bgec(Register rs, Register rt, int16_t offset); 620 inline void bgec(Register rs, Register rt, Label* L) { 621 bgec(rs, rt, shifted_branch_offset(L)); 622 } 623 void bgezal(Register rs, int16_t offset); 624 void bgezalc(Register rt, int16_t offset); 625 inline void bgezalc(Register rt, Label* L) { 626 bgezalc(rt, shifted_branch_offset(L)); 627 } 628 void bgezall(Register rs, int16_t offset); 629 inline void bgezall(Register rs, Label* L) { 630 bgezall(rs, branch_offset(L) >> 2); 631 } 632 void bgtz(Register rs, int16_t offset); 633 void bgtzc(Register rt, int16_t offset); 634 inline void bgtzc(Register rt, Label* L) { 635 bgtzc(rt, shifted_branch_offset(L)); 636 } 637 void blez(Register rs, int16_t offset); 638 void blezc(Register rt, int16_t offset); 639 inline void blezc(Register rt, Label* L) { 640 blezc(rt, shifted_branch_offset(L)); 641 } 642 void bltz(Register rs, int16_t offset); 643 void bltzc(Register rt, int16_t offset); 644 inline void bltzc(Register rt, Label* L) { 645 bltzc(rt, shifted_branch_offset(L)); 646 } 647 void bltuc(Register rs, Register rt, int16_t 
offset); 648 inline void bltuc(Register rs, Register rt, Label* L) { 649 bltuc(rs, rt, shifted_branch_offset(L)); 650 } 651 void bltc(Register rs, Register rt, int16_t offset); 652 inline void bltc(Register rs, Register rt, Label* L) { 653 bltc(rs, rt, shifted_branch_offset(L)); 654 } 655 void bltzal(Register rs, int16_t offset); 656 void blezalc(Register rt, int16_t offset); 657 inline void blezalc(Register rt, Label* L) { 658 blezalc(rt, shifted_branch_offset(L)); 659 } 660 void bltzalc(Register rt, int16_t offset); 661 inline void bltzalc(Register rt, Label* L) { 662 bltzalc(rt, shifted_branch_offset(L)); 663 } 664 void bgtzalc(Register rt, int16_t offset); 665 inline void bgtzalc(Register rt, Label* L) { 666 bgtzalc(rt, shifted_branch_offset(L)); 667 } 668 void beqzalc(Register rt, int16_t offset); 669 inline void beqzalc(Register rt, Label* L) { 670 beqzalc(rt, shifted_branch_offset(L)); 671 } 672 void beqc(Register rs, Register rt, int16_t offset); 673 inline void beqc(Register rs, Register rt, Label* L) { 674 beqc(rs, rt, shifted_branch_offset(L)); 675 } 676 void beqzc(Register rs, int32_t offset); 677 inline void beqzc(Register rs, Label* L) { 678 beqzc(rs, shifted_branch_offset21(L)); 679 } 680 void bnezalc(Register rt, int16_t offset); 681 inline void bnezalc(Register rt, Label* L) { 682 bnezalc(rt, shifted_branch_offset(L)); 683 } 684 void bnec(Register rs, Register rt, int16_t offset); 685 inline void bnec(Register rs, Register rt, Label* L) { 686 bnec(rs, rt, shifted_branch_offset(L)); 687 } 688 void bnezc(Register rt, int32_t offset); 689 inline void bnezc(Register rt, Label* L) { 690 bnezc(rt, shifted_branch_offset21(L)); 691 } 692 void bne(Register rs, Register rt, int16_t offset); 693 inline void bne(Register rs, Register rt, Label* L) { 694 bne(rs, rt, shifted_branch_offset(L)); 695 } 696 void bovc(Register rs, Register rt, int16_t offset); 697 inline void bovc(Register rs, Register rt, Label* L) { 698 bovc(rs, rt, shifted_branch_offset(L)); 699 } 
700 void bnvc(Register rs, Register rt, int16_t offset); 701 inline void bnvc(Register rs, Register rt, Label* L) { 702 bnvc(rs, rt, shifted_branch_offset(L)); 703 } 704 705 // Never use the int16_t b(l)cond version with a branch offset 706 // instead of using the Label* version. 707 708 // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits. 709 void j(int32_t target); 710 void jal(int32_t target); 711 void jalr(Register rs, Register rd = ra); 712 void jr(Register target); 713 void jic(Register rt, int16_t offset); 714 void jialc(Register rt, int16_t offset); 715 716 717 // -------Data-processing-instructions--------- 718 719 // Arithmetic. 720 void addu(Register rd, Register rs, Register rt); 721 void subu(Register rd, Register rs, Register rt); 722 void mult(Register rs, Register rt); 723 void multu(Register rs, Register rt); 724 void div(Register rs, Register rt); 725 void divu(Register rs, Register rt); 726 void div(Register rd, Register rs, Register rt); 727 void divu(Register rd, Register rs, Register rt); 728 void mod(Register rd, Register rs, Register rt); 729 void modu(Register rd, Register rs, Register rt); 730 void mul(Register rd, Register rs, Register rt); 731 void muh(Register rd, Register rs, Register rt); 732 void mulu(Register rd, Register rs, Register rt); 733 void muhu(Register rd, Register rs, Register rt); 734 735 void addiu(Register rd, Register rs, int32_t j); 736 737 // Logical. 738 void and_(Register rd, Register rs, Register rt); 739 void or_(Register rd, Register rs, Register rt); 740 void xor_(Register rd, Register rs, Register rt); 741 void nor(Register rd, Register rs, Register rt); 742 743 void andi(Register rd, Register rs, int32_t j); 744 void ori(Register rd, Register rs, int32_t j); 745 void xori(Register rd, Register rs, int32_t j); 746 void lui(Register rd, int32_t j); 747 void aui(Register rs, Register rt, int32_t j); 748 749 // Shifts. 
750 // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop 751 // and may cause problems in normal code. coming_from_nop makes sure this 752 // doesn't happen. 753 void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false); 754 void sllv(Register rd, Register rt, Register rs); 755 void srl(Register rd, Register rt, uint16_t sa); 756 void srlv(Register rd, Register rt, Register rs); 757 void sra(Register rt, Register rd, uint16_t sa); 758 void srav(Register rt, Register rd, Register rs); 759 void rotr(Register rd, Register rt, uint16_t sa); 760 void rotrv(Register rd, Register rt, Register rs); 761 762 // ------------Memory-instructions------------- 763 764 void lb(Register rd, const MemOperand& rs); 765 void lbu(Register rd, const MemOperand& rs); 766 void lh(Register rd, const MemOperand& rs); 767 void lhu(Register rd, const MemOperand& rs); 768 void lw(Register rd, const MemOperand& rs); 769 void lwl(Register rd, const MemOperand& rs); 770 void lwr(Register rd, const MemOperand& rs); 771 void sb(Register rd, const MemOperand& rs); 772 void sh(Register rd, const MemOperand& rs); 773 void sw(Register rd, const MemOperand& rs); 774 void swl(Register rd, const MemOperand& rs); 775 void swr(Register rd, const MemOperand& rs); 776 777 778 // ---------PC-Relative-instructions----------- 779 780 void addiupc(Register rs, int32_t imm19); 781 void lwpc(Register rs, int32_t offset19); 782 void auipc(Register rs, int16_t imm16); 783 void aluipc(Register rs, int16_t imm16); 784 785 786 // ----------------Prefetch-------------------- 787 788 void pref(int32_t hint, const MemOperand& rs); 789 790 791 // -------------Misc-instructions-------------- 792 793 // Break / Trap instructions. 
  // ---------- Breakpoint and trap instructions ----------
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(const char* msg, uint32_t code = kMaxStopCode);
  // Conditional traps: raise a trap exception when the rs/rt comparison holds.
  void tge(Register rs, Register rt, uint16_t code);   // Trap if >=.
  void tgeu(Register rs, Register rt, uint16_t code);  // Trap if >= unsigned.
  void tlt(Register rs, Register rt, uint16_t code);   // Trap if <.
  void tltu(Register rs, Register rt, uint16_t code);  // Trap if < unsigned.
  void teq(Register rs, Register rt, uint16_t code);   // Trap if ==.
  void tne(Register rs, Register rt, uint16_t code);   // Trap if !=.

  // Memory barrier instruction.
  void sync();

  // Move from HI/LO register.
  void mfhi(Register rd);
  void mflo(Register rd);

  // Set on less than.
  void slt(Register rd, Register rs, Register rt);
  void sltu(Register rd, Register rs, Register rt);
  void slti(Register rd, Register rs, int32_t j);
  void sltiu(Register rd, Register rs, int32_t j);

  // Conditional move.
  void movz(Register rd, Register rs, Register rt);
  void movn(Register rd, Register rs, Register rt);
  void movt(Register rd, Register rs, uint16_t cc = 0);
  void movf(Register rd, Register rs, uint16_t cc = 0);

  // MIPSr6 select instructions.  The fmt parameter of the generic forms
  // chooses the precision for the FPU variants; _s/_d are the
  // single/double-precision shorthands.
  void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void seleqz(Register rd, Register rs, Register rt);
  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
  void selnez(Register rd, Register rs, Register rt);
  void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
  void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);

  // FPU conditional moves, keyed either on an integer register (movz/movn)
  // or on an FPU condition code (movt/movf).
  void movz_s(FPURegister fd, FPURegister fs, Register rt);
  void movz_d(FPURegister fd, FPURegister fs, Register rt);
  void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movn_s(FPURegister fd, FPURegister fs, Register rt);
  void movn_d(FPURegister fd, FPURegister fs, Register rt);

  // Bit twiddling.
  void clz(Register rd, Register rs);
  // ins_/ext_ encode the MIPS ins/ext bit-field instructions (the trailing
  // underscore keeps the names from colliding with other identifiers).
  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void bitswap(Register rd, Register rt);
  void align(Register rd, Register rs, Register rt, uint8_t bp);

  void wsbh(Register rd, Register rt);
  void seh(Register rd, Register rt);
  void seb(Register rd, Register rt);

  // --------Coprocessor-instructions----------------

  // Load, store, and move.
  void lwc1(FPURegister fd, const MemOperand& src);
  void ldc1(FPURegister fd, const MemOperand& src);

  void swc1(FPURegister fs, const MemOperand& dst);
  void sdc1(FPURegister fs, const MemOperand& dst);

  void mtc1(Register rt, FPURegister fs);
  void mthc1(Register rt, FPURegister fs);

  void mfc1(Register rt, FPURegister fs);
  void mfhc1(Register rt, FPURegister fs);

  // Move to/from the FPU control registers.
  void ctc1(Register rt, FPUControlRegister fs);
  void cfc1(Register rt, FPUControlRegister fs);

  // Arithmetic.
  void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void abs_s(FPURegister fd, FPURegister fs);
  void abs_d(FPURegister fd, FPURegister fs);
  void mov_d(FPURegister fd, FPURegister fs);
  void mov_s(FPURegister fd, FPURegister fs);
  void neg_s(FPURegister fd, FPURegister fs);
  void neg_d(FPURegister fd, FPURegister fs);
  void sqrt_s(FPURegister fd, FPURegister fs);
  void sqrt_d(FPURegister fd, FPURegister fs);
  void rsqrt_s(FPURegister fd, FPURegister fs);
  void rsqrt_d(FPURegister fd, FPURegister fs);
  void recip_d(FPURegister fd, FPURegister fs);
  void recip_s(FPURegister fd, FPURegister fs);

  // Conversion.
  // FP -> 32-bit integer ("w") with the rounding mode named by the mnemonic.
  void cvt_w_s(FPURegister fd, FPURegister fs);
  void cvt_w_d(FPURegister fd, FPURegister fs);
  void trunc_w_s(FPURegister fd, FPURegister fs);
  void trunc_w_d(FPURegister fd, FPURegister fs);
  void round_w_s(FPURegister fd, FPURegister fs);
  void round_w_d(FPURegister fd, FPURegister fs);
  void floor_w_s(FPURegister fd, FPURegister fs);
  void floor_w_d(FPURegister fd, FPURegister fs);
  void ceil_w_s(FPURegister fd, FPURegister fs);
  void ceil_w_d(FPURegister fd, FPURegister fs);
  void rint_s(FPURegister fd, FPURegister fs);
  void rint_d(FPURegister fd, FPURegister fs);
  void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);

  // FP -> 64-bit integer ("l") conversions.
  void cvt_l_s(FPURegister fd, FPURegister fs);
  void cvt_l_d(FPURegister fd, FPURegister fs);
  void trunc_l_s(FPURegister fd, FPURegister fs);
  void trunc_l_d(FPURegister fd, FPURegister fs);
  void round_l_s(FPURegister fd, FPURegister fs);
  void round_l_d(FPURegister fd, FPURegister fs);
  void floor_l_s(FPURegister fd, FPURegister fs);
  void floor_l_d(FPURegister fd, FPURegister fs);
  void ceil_l_s(FPURegister fd, FPURegister fs);
  void ceil_l_d(FPURegister fd, FPURegister fs);

  void class_s(FPURegister fd, FPURegister fs);
  void class_d(FPURegister fd, FPURegister fs);

  // MIPSr6 min/max family; the "a" variants compare absolute values.
  void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void min_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void min_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_d(FPURegister fd, FPURegister fs, FPURegister ft);

  // Integer -> FP conversions.
  void cvt_s_w(FPURegister fd, FPURegister fs);
  void cvt_s_l(FPURegister fd, FPURegister fs);
  void cvt_s_d(FPURegister fd, FPURegister fs);

  void cvt_d_w(FPURegister fd, FPURegister fs);
  void cvt_d_l(FPURegister fd, FPURegister fs);
  void cvt_d_s(FPURegister fd, FPURegister fs);

  // Conditions and branches for MIPSr6.
  void cmp(FPUCondition cond, SecondaryField fmt,
           FPURegister fd, FPURegister ft, FPURegister fs);
  void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
  void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);

  // Branch on FPU comparison result; the Label overloads convert the label
  // into a shifted branch offset.
  void bc1eqz(int16_t offset, FPURegister ft);
  inline void bc1eqz(Label* L, FPURegister ft) {
    bc1eqz(shifted_branch_offset(L), ft);
  }
  void bc1nez(int16_t offset, FPURegister ft);
  inline void bc1nez(Label* L, FPURegister ft) {
    bc1nez(shifted_branch_offset(L), ft);
  }

  // Conditions and branches for non MIPSr6.
  void c(FPUCondition cond, SecondaryField fmt,
         FPURegister ft, FPURegister fs, uint16_t cc = 0);
  void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
  void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);

  void bc1f(int16_t offset, uint16_t cc = 0);
  inline void bc1f(Label* L, uint16_t cc = 0) {
    bc1f(shifted_branch_offset(L), cc);
  }
  void bc1t(int16_t offset, uint16_t cc = 0);
  inline void bc1t(Label* L, uint16_t cc = 0) {
    bc1t(shifted_branch_offset(L), cc);
  }
  void fcmp(FPURegister src1, const double src2, FPUCondition cond);

  // Check the code size generated from label to here.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Check the number of instructions generated from label to here.
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }

  // Class for scoping postponing the trampoline pool generation.
  // RAII: construction blocks trampoline-pool emission, destruction unblocks
  // it.  Scopes may nest (the underlying state is a nesting counter).
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() {
      assem_->EndBlockTrampolinePool();
    }

   private:
    Assembler* assem_;  // Not owned.

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };

  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable.
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() {
      assem_->EndBlockGrowBuffer();
    }

   private:
    Assembler* assem_;  // Not owned.

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };

  // Debugging.

  // Mark generator continuation.
  void RecordGeneratorContinuation();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot(RelocInfo::Mode mode);

  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    // Only one AST id may be pending at a time.
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  // Returns the pending AST id; one must have been recorded first.
  TypeFeedbackId RecordedAstId() {
    DCHECK(!recorded_ast_id_.IsNone());
    return recorded_ast_id_;
  }

  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(const int reason, int raw_position, int id);

  // Patch an internal reference at pc after code has moved by pc_delta bytes.
  static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                       intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream. Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  // dp emits a pointer-sized datum; on MIPS32 that is one word, hence dd.
  void dp(uintptr_t data) { dd(data); }
  void dd(Label* label);

  AssemblerPositionsRecorder* positions_recorder() {
    return &positions_recorder_;
  }

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions, either at an absolute address or at an offset
  // into the assembly buffer.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }

  // Check if an instruction is a branch of some kind.
  // Instruction classification predicates, used when decoding and patching
  // the instruction stream.
  static bool IsBranch(Instr instr);
  static bool IsBc(Instr instr);
  static bool IsBzc(Instr instr);
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);
  static bool IsBeqzc(Instr instr);
  static bool IsBnezc(Instr instr);
  static bool IsBeqc(Instr instr);
  static bool IsBnec(Instr instr);
  static bool IsJicOrJialc(Instr instr);

  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

  static bool IsJal(Instr instr);
  static bool IsJr(Instr instr);
  static bool IsJalr(Instr instr);

  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

  // Field accessors.  The *Reg variants wrap the register number in a
  // Register object; the plain/Field pairs presumably return the value
  // unshifted vs. still in its bit position — confirm in the definitions.
  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);

  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
  static int16_t GetJicOrJialcOffset(Instr instr);
  static int16_t GetLuiOffset(Instr instr);
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
  // Combine the offsets of a lui/jic pair into the 32-bit target address,
  // and the inverse unpacking operations (out-params are by reference).
  static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
  static void UnpackTargetAddress(uint32_t address, int16_t& lui_offset,
                                  int16_t& jic_offset);
  static void UnpackTargetAddressUnsigned(uint32_t address,
                                          uint32_t& lui_offset,
                                          uint32_t& jic_offset);

  static bool IsAndImmediate(Instr instr);
  static bool IsEmittedConstant(Instr instr);

  // Emit the trampoline pool if one is due.
  void CheckTrampolinePool();

  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }

  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }

  inline int UnboundLabelsCount() { return unbound_labels_count_; }

 protected:
  // Load Scaled Address instruction.
  void lsa(Register rd, Register rt, Register rs, uint8_t sa);

  // Helpers.
  void LoadRegPlusOffsetToAt(const MemOperand& src);

  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int pos, bool is_internal);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int pos, int target_pos, bool is_internal);

  // Say if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Block the emission of the trampoline pool before pc_offset.
  void BlockTrampolinePoolBefore(int pc_offset) {
    // Keep whichever blocking point reaches furthest.
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  // Begin/end a (nestable, counter-based) trampoline-pool blocking region.
  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }

  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  // Split a double into its low and high 32-bit halves.
  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  // Unlike trampoline-pool blocking, this is not nestable (DCHECKed).
  void StartBlockGrowBuffer() {
    DCHECK(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
    DCHECK(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

  // MIPSr6 compact branches may not sit in the "forbidden slot" directly
  // after another compact branch; pad with a nop in that case.
  void EmitForbiddenSlotInstruction() {
    if (IsPrevInstrCompactBranch()) {
      nop();
    }
  }

  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);

  inline void CheckBuffer();

 private:
  inline static void set_target_internal_reference_encoded_at(Address pc,
                                                              Address target);

  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static const int kBufferCheckInterval = 1*KB/2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;


  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Readable constants for compact branch handling in emit().
  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };

  // Code emission.
  void GrowBuffer();
  inline void emit(Instr x,
                   CompactBranchType is_compact_branch = CompactBranchType::NO);
  inline void emit(uint64_t x);
  inline void CheckForEmitInForbiddenSlot();
  template <typename T>
  inline void EmitHelper(T x);
  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);

  // Instruction generation.
  // We have 3 different kind of encoding layout on MIPS.
  // However due to many different types of objects encoded in the same fields
  // we have quite a few aliases for each mode.
  // Using the same structure to refer to Register and FPURegister would spare a
  // few aliases, but mixing both does not look clean to me.
  // Anyway we could surely implement this differently.

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        Register rd,
                        uint16_t sa = 0,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        Register rs,
                        Register rt,
                        uint16_t msb,
                        uint16_t lsb,
                        SecondaryField func);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        FPURegister fr,
                        FPURegister ft,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPURegister fs,
                        FPURegister fd,
                        SecondaryField func = NULLSF);

  void GenInstrRegister(Opcode opcode,
                        SecondaryField fmt,
                        Register rt,
                        FPUControlRegister fs,
                        SecondaryField func = NULLSF);

  // Immediate-format emitters; is_compact_branch marks emissions that are
  // compact branches so the forbidden-slot state can be tracked.
  void GenInstrImmediate(
      Opcode opcode, Register rs, Register rt, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register r1, FPURegister r2, int32_t j,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(
      Opcode opcode, Register rs, int32_t offset21,
      CompactBranchType is_compact_branch = CompactBranchType::NO);
  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
  void GenInstrImmediate(
      Opcode opcode, int32_t offset26,
      CompactBranchType is_compact_branch = CompactBranchType::NO);


  void GenInstrJump(Opcode opcode,
                    uint32_t address);


  // Labels.
  void print(Label* L);
  void bind_to(Label* L, int pos);
  void next(Label* L, bool is_internal);

  // One trampoline consists of:
  // - space for trampoline slots,
  // - space for labels.
  //
  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots precedes space for labels. Each label is of one
  // instruction size, so total amount for labels is equal to
  // label_count * kInstrSize.
  class Trampoline {
   public:
    // Default: an empty trampoline with no free slots.
    Trampoline() {
      start_ = 0;
      next_slot_ = 0;
      free_slot_count_ = 0;
      end_ = 0;
    }
    Trampoline(int start, int slot_count) {
      start_ = start;
      next_slot_ = start;
      free_slot_count_ = slot_count;
      end_ = start + slot_count * kTrampolineSlotsSize;
    }
    int start() {
      return start_;
    }
    int end() {
      return end_;
    }
    // Returns the next free slot position and advances past it, or
    // kInvalidSlotPos when the trampoline is exhausted.
    int take_slot() {
      int trampoline_slot = kInvalidSlotPos;
      if (free_slot_count_ <= 0) {
        // We have run out of space on trampolines.
        // Make sure we fail in debug mode, so we become aware of each case
        // when this happens.
        DCHECK(0);
        // Internal exception will be caught.
      } else {
        trampoline_slot = next_slot_;
        free_slot_count_--;
        next_slot_ += kTrampolineSlotsSize;
      }
      return trampoline_slot;
    }

   private:
    int start_;
    int end_;
    int next_slot_;
    int free_slot_count_;
  };

  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // If trampoline is emitted, generated code is becoming large. As this is
  // already a slow case which can possibly break our code generation for the
  // extreme case, we use this information to trigger different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
#ifdef _MIPS_ARCH_MIPS32R6
  static const int kTrampolineSlotsSize = 2 * kInstrSize;
#else
  static const int kTrampolineSlotsSize = 4 * kInstrSize;
#endif
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
  static const int kInvalidSlotPos = -1;

  // Internal reference positions, required for unbounded internal reference
  // labels.
  std::set<int> internal_reference_positions_;

  // Tracks whether the previously emitted instruction was a compact branch,
  // for forbidden-slot handling.
  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
  bool prev_instr_compact_branch_ = false;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  friend class RegExpMacroAssemblerMIPS;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockTrampolinePoolScope;

  AssemblerPositionsRecorder positions_recorder_;
  friend class AssemblerPositionsRecorder;
  friend class EnsureSpace;
};


// Triggers a buffer check (and possible growth) on construction, so
// subsequent emission has room; used before emitting code.
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};

}  // namespace internal
}  // namespace v8

#endif  // V8_MIPS_ASSEMBLER_MIPS_H_