1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
// The original source code covered by the license above has been
// modified significantly by Google Inc.
33// Copyright 2012 the V8 project authors. All rights reserved.
34
35
36#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
37#define V8_MIPS_ASSEMBLER_MIPS_H_
38
39#include <stdio.h>
40
41#include <set>
42
43#include "src/assembler.h"
44#include "src/mips64/constants-mips64.h"
45
46namespace v8 {
47namespace internal {
48
// clang-format off

// All 32 general-purpose registers, listed in encoding order: expanding
// V(name) in sequence assigns each register its numeric code 0..31
// (e.g. s7 is code 23 — see Register::kCpRegister below).
#define GENERAL_REGISTERS(V)                              \
  V(zero_reg)  V(at)  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3)  \
  V(a4)  V(a5)  V(a6)  V(a7)  V(t0)  V(t1)  V(t2)  V(t3)  \
  V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  V(t8)  V(t9) \
  V(k0)  V(k1)  V(gp)  V(sp)  V(fp)  V(ra)

// Subset of the general registers available to the register allocator.
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3) \
  V(a4)  V(a5)  V(a6)  V(a7)  V(t0)  V(t1)  V(t2) V(s7)

// All 32 FPU (coprocessor 1) registers, in encoding order.
#define DOUBLE_REGISTERS(V)                               \
  V(f0)  V(f1)  V(f2)  V(f3)  V(f4)  V(f5)  V(f6)  V(f7)  \
  V(f8)  V(f9)  V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
  V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
  V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)

// Floats and doubles name the same register file on this port.
#define FLOAT_REGISTERS DOUBLE_REGISTERS

// Even-numbered FPU registers available to the allocator. f28 and f30 are
// excluded: they are reserved (see kDoubleRegZero / kLithiumScratchDouble).
#define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
  V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
  V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
// clang-format on
72
73// CPU Registers.
74//
75// 1) We would prefer to use an enum, but enum values are assignment-
76// compatible with int, which has caused code-generation bugs.
77//
78// 2) We would prefer to use a class instead of a struct but we don't like
79// the register initialization to depend on the particular initialization
80// order (which appears to be different on OS X, Linux, and Windows for the
81// installed versions of C++ we tried). Using a struct permits C-style
82// "initialization". Also, the Register objects cannot be const as this
83// forces initialization stubs in MSVC, making us dependent on initialization
84// order.
85//
86// 3) By not using an enum, we are possibly preventing the compiler from
87// doing certain constant folds, which may significantly reduce the
88// code generated for some assembly instructions (because they boil down
89// to a few constants). If this is a problem, we could change the code
90// such that we use an enum in optimized mode, and the struct in debug
91// mode. This way we get the compile-time error checking in debug mode
92// and best performance in optimized code.
93
94
95// -----------------------------------------------------------------------------
96// Implementation of Register and FPURegister.
97
98struct Register {
99  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.
100
101#if defined(V8_TARGET_LITTLE_ENDIAN)
102  static const int kMantissaOffset = 0;
103  static const int kExponentOffset = 4;
104#elif defined(V8_TARGET_BIG_ENDIAN)
105  static const int kMantissaOffset = 4;
106  static const int kExponentOffset = 0;
107#else
108#error Unknown endianness
109#endif
110
111  enum Code {
112#define REGISTER_CODE(R) kCode_##R,
113    GENERAL_REGISTERS(REGISTER_CODE)
114#undef REGISTER_CODE
115        kAfterLast,
116    kCode_no_reg = -1
117  };
118
119  static const int kNumRegisters = Code::kAfterLast;
120
121  static Register from_code(int code) {
122    DCHECK(code >= 0);
123    DCHECK(code < kNumRegisters);
124    Register r = { code };
125    return r;
126  }
127
128  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
129  bool is(Register reg) const { return reg_code == reg.reg_code; }
130  int code() const {
131    DCHECK(is_valid());
132    return reg_code;
133  }
134  int bit() const {
135    DCHECK(is_valid());
136    return 1 << reg_code;
137  }
138
139  // Unfortunately we can't make this private in a struct.
140  int reg_code;
141};
142
// s7: context register
// s3: lithium scratch
// s4: lithium scratch2
// Declare a constant for every general register (e.g. `const Register a0`).
#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
// Sentinel meaning "no register".
const Register no_reg = {Register::kCode_no_reg};


// Conversion helpers between Register values and their numeric codes
// (implementations live outside this header).
int ToNumber(Register reg);

Register ToRegister(int num);

// A float register aliases the double register with the same code
// (FLOAT_REGISTERS and DOUBLE_REGISTERS expand to the same list above).
static const bool kSimpleFPAliasing = true;
157
158// Coprocessor register.
159struct FPURegister {
160  enum Code {
161#define REGISTER_CODE(R) kCode_##R,
162    DOUBLE_REGISTERS(REGISTER_CODE)
163#undef REGISTER_CODE
164        kAfterLast,
165    kCode_no_reg = -1
166  };
167
168  static const int kMaxNumRegisters = Code::kAfterLast;
169
170  inline static int NumRegisters();
171
172  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
173  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
174  // number of Double regs (64-bit regs, or FPU-reg-pairs).
175
176  bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
177  bool is(FPURegister reg) const { return reg_code == reg.reg_code; }
178  FPURegister low() const {
179    // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
180    // Find low reg of a Double-reg pair, which is the reg itself.
181    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
182    FPURegister reg;
183    reg.reg_code = reg_code;
184    DCHECK(reg.is_valid());
185    return reg;
186  }
187  FPURegister high() const {
188    // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
189    // Find high reg of a Doubel-reg pair, which is reg + 1.
190    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
191    FPURegister reg;
192    reg.reg_code = reg_code + 1;
193    DCHECK(reg.is_valid());
194    return reg;
195  }
196
197  int code() const {
198    DCHECK(is_valid());
199    return reg_code;
200  }
201  int bit() const {
202    DCHECK(is_valid());
203    return 1 << reg_code;
204  }
205
206  static FPURegister from_code(int code) {
207    FPURegister r = {code};
208    return r;
209  }
210  void setcode(int f) {
211    reg_code = f;
212    DCHECK(is_valid());
213  }
214  // Unfortunately we can't make this private in a struct.
215  int reg_code;
216};
217
218// A few double registers are reserved: one as a scratch register and one to
219// hold 0.0.
220//  f28: 0.0
221//  f30: scratch register.
222
// NOTE(review): The paragraph below describes the FR=0 (O32-style) paired
// register model and appears to be carried over from the 32-bit MIPS port;
// this file targets MIPS64 (see the N64 ABI remark further down and the
// mips64 constants include) — confirm which FPU register model this port
// actually assumes.
//
// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
// 32-bit registers, f0 through f31. When used as 'double' they are used
// in pairs, starting with the even numbered register. So a double operation
// on f0 really uses f0 and f1.
// (Modern mips hardware also supports 32 64-bit registers, via setting
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
// but it is not in common use. Someday we will want to support this in v8.)

// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
typedef FPURegister FloatRegister;

typedef FPURegister DoubleRegister;

// TODO(mips64) Define SIMD registers.
typedef FPURegister Simd128Register;

// Sentinel meaning "no FPU register".
const DoubleRegister no_freg = {-1};

// One constant per FPU register, named after the hardware register.
const DoubleRegister f0 = {0};  // Return value in hard float mode.
const DoubleRegister f1 = {1};
const DoubleRegister f2 = {2};
const DoubleRegister f3 = {3};
const DoubleRegister f4 = {4};
const DoubleRegister f5 = {5};
const DoubleRegister f6 = {6};
const DoubleRegister f7 = {7};
const DoubleRegister f8 = {8};
const DoubleRegister f9 = {9};
const DoubleRegister f10 = {10};
const DoubleRegister f11 = {11};
const DoubleRegister f12 = {12};  // Arg 0 in hard float mode.
const DoubleRegister f13 = {13};
const DoubleRegister f14 = {14};  // Arg 1 in hard float mode.
const DoubleRegister f15 = {15};
const DoubleRegister f16 = {16};
const DoubleRegister f17 = {17};
const DoubleRegister f18 = {18};
const DoubleRegister f19 = {19};
const DoubleRegister f20 = {20};
const DoubleRegister f21 = {21};
const DoubleRegister f22 = {22};
const DoubleRegister f23 = {23};
const DoubleRegister f24 = {24};
const DoubleRegister f25 = {25};
const DoubleRegister f26 = {26};
const DoubleRegister f27 = {27};
const DoubleRegister f28 = {28};  // Reserved: holds 0.0 (kDoubleRegZero).
const DoubleRegister f29 = {29};
const DoubleRegister f30 = {30};  // Reserved: scratch (kLithiumScratchDouble).
const DoubleRegister f31 = {31};
273
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
// complains otherwise when a compilation unit that includes this header
// doesn't use the variables.
#define kRootRegister s6
#define cp s7
#define kLithiumScratchReg s3
#define kLithiumScratchReg2 s4
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips64r6 for compare operations.
// We use the last non-callee saved odd register for N64 ABI
#define kDoubleCompareReg f23
288
289// FPU (coprocessor 1) control registers.
290// Currently only FCSR (#31) is implemented.
291struct FPUControlRegister {
292  bool is_valid() const { return reg_code == kFCSRRegister; }
293  bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
294  int code() const {
295    DCHECK(is_valid());
296    return reg_code;
297  }
298  int bit() const {
299    DCHECK(is_valid());
300    return 1 << reg_code;
301  }
302  void setcode(int f) {
303    reg_code = f;
304    DCHECK(is_valid());
305  }
306  // Unfortunately we can't make this private in a struct.
307  int reg_code;
308};
309
// Invalid sentinel and the (only supported) FCSR control register.
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
312
313// -----------------------------------------------------------------------------
314// Machine instruction Operands.
315const int kSmiShift = kSmiTagSize + kSmiShiftSize;
316const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
// Class Operand represents a shifter operand in data processing instructions.
// An Operand is either an immediate (with an optional relocation mode) or a
// register; is_reg() distinguishes the two.
class Operand BASE_EMBEDDED {
 public:
  // Immediate.
  INLINE(explicit Operand(int64_t immediate,
         RelocInfo::Mode rmode = RelocInfo::NONE64));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  // The immediate payload; only meaningful for non-register operands.
  inline int64_t immediate() const {
    DCHECK(!is_reg());
    return imm64_;
  }

  // The register payload (rm_ is no_reg for immediate operands).
  Register rm() const { return rm_; }

 private:
  Register rm_;
  int64_t imm64_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};
351
352
// On MIPS we have only one adressing mode with base_reg + offset.
// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
  // Immediate value attached to offset.
  enum OffsetAddend {
    offset_minus_one = -1,
    offset_zero = 0
  };

  // Base register plus a byte offset.
  explicit MemOperand(Register rn, int32_t offset = 0);
  // Base register plus unit * multiplier (+ optional addend) as the offset.
  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
                      OffsetAddend offset_addend = offset_zero);
  int32_t offset() const { return offset_; }

  // True when the offset fits the signed 16-bit immediate field of the
  // MIPS load/store encodings.
  bool OffsetIsInt16Encodable() const {
    return is_int16(offset_);
  }

 private:
  int32_t offset_;

  friend class Assembler;
};
377
378
class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* isolate, void* buffer, int buffer_size);
  virtual ~Assembler() { }

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  void GetCode(CodeDesc* desc);

  // Label operations & relative jumps (PPUM Appendix D).
  //
  // Takes a branch opcode (cc) and a label (L) and generates
  // either a backward branch or a forward branch and links it
  // to the label fixup chain. Usage:
  //
  // Label L;    // unbound label
  // j(cc, &L);  // forward branch to unbound label
  // bind(&L);   // bind label to the current pc
  // j(cc, &L);  // backward branch to bound label
  // bind(&L);   // illegal: a label may be bound only once
  //
  // Note: The same Label can be used for forward and backward branches
  // but it may be bound only once.
  void bind(Label* L);  // Binds an unbound label L to current code position.

  // Branch-offset field widths supported by the ISA: 26-bit (bc/balc),
  // 21-bit (beqzc/bnezc) and the classic 16-bit encodings.
  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };

  // Determines if Label is bound and near enough so that branch instruction
  // can be used to reach it, instead of jump instruction.
  bool is_near(Label* L);
  bool is_near(Label* L, OffsetSize bits);
  bool is_near_branch(Label* L);
  // Pre-r6 variant: the label must be within classic branch reach.
  inline bool is_near_pre_r6(Label* L) {
    DCHECK(!(kArchVariant == kMips64r6));
    return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
  }
  // r6 variant: compact branches have a larger reach.
  inline bool is_near_r6(Label* L) {
    DCHECK(kArchVariant == kMips64r6);
    return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
  }
433
  // Maximum branch offset for the given branch instruction encoding —
  // presumably in bytes; confirm against the implementation.
  int BranchOffset(Instr instr);

  // Returns the branch offset to the given label from the current code
  // position. Links the label to the current position if it is still unbound.
  // The OffsetSize argument selects which branch-offset field width the
  // resulting offset must fit into.
  int32_t branch_offset_helper(Label* L, OffsetSize bits);
  // Byte offsets for the three supported offset-field widths.
  inline int32_t branch_offset(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset16);
  }
  inline int32_t branch_offset21(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset21);
  }
  inline int32_t branch_offset26(Label* L) {
    return branch_offset_helper(L, OffsetSize::kOffset26);
  }
  // The "shifted" variants scale the byte offset down to an instruction
  // (word) offset, the form encoded in branch instructions.
  inline int32_t shifted_branch_offset(Label* L) {
    return branch_offset(L) >> 2;
  }
  inline int32_t shifted_branch_offset21(Label* L) {
    return branch_offset21(L) >> 2;
  }
  inline int32_t shifted_branch_offset26(Label* L) {
    return branch_offset26(L) >> 2;
  }
  uint64_t jump_address(Label* L);
  uint64_t jump_offset(Label* L);
460
  // Puts a labels target address at the given position.
  // The high 8 bits are set to zero.
  void label_at_put(Label* L, int at_offset);

  // Read/Modify the code target address in the branch/call instruction at pc.
  static Address target_address_at(Address pc);
  static void set_target_address_at(
      Isolate* isolate, Address pc, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  // On MIPS there is no Constant Pool so we skip that parameter.
  // These overloads exist for interface parity with ports that do have a
  // constant pool; the constant_pool argument is ignored here.
  INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
    return target_address_at(pc);
  }
  INLINE(static void set_target_address_at(
      Isolate* isolate, Address pc, Address constant_pool, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
    set_target_address_at(isolate, pc, target, icache_flush_mode);
  }
  INLINE(static Address target_address_at(Address pc, Code* code)) {
    Address constant_pool = code ? code->constant_pool() : NULL;
    return target_address_at(pc, constant_pool);
  }
  INLINE(static void set_target_address_at(
      Isolate* isolate, Address pc, Code* code, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
    Address constant_pool = code ? code->constant_pool() : NULL;
    set_target_address_at(isolate, pc, constant_pool, target,
                          icache_flush_mode);
  }

  // Return the code target address at a call site from the return address
  // of that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

  static void JumpLabelToJumpRegister(Address pc);

  static void QuietNaN(HeapObject* nan);
498
  // This sets the branch destination (which gets loaded at the call address).
  // This is for calls and branches within generated code.  The serializer
  // has already deserialized the lui/ori instructions etc.
  inline static void deserialization_set_special_target_at(
      Isolate* isolate, Address instruction_payload, Code* code,
      Address target) {
    // The payload pointer is just past the constant-load sequence; step back
    // over its kInstructionsFor64BitConstant instructions before patching.
    set_target_address_at(
        isolate,
        instruction_payload - kInstructionsFor64BitConstant * kInstrSize, code,
        target);
  }
510
511  // This sets the internal reference at the pc.
512  inline static void deserialization_set_target_internal_reference_at(
513      Isolate* isolate, Address pc, Address target,
514      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
515
516  // Size of an instruction.
517  static const int kInstrSize = sizeof(Instr);
518
519  // Difference between address of current opcode and target address offset.
520  static const int kBranchPCOffset = 4;
521
522  // Here we are patching the address in the LUI/ORI instruction pair.
523  // These values are used in the serialization process and must be zero for
524  // MIPS platform, as Code, Embedded Object or External-reference pointers
525  // are split across two consecutive instructions and don't exist separately
526  // in the code, so the serializer should not step forwards in memory after
527  // a target is resolved and written.
528  static const int kSpecialTargetSize = 0;
529
530  // Number of consecutive instructions used to store 32bit/64bit constant.
531  // This constant was used in RelocInfo::target_address_address() function
532  // to tell serializer address of the instruction that follows
533  // LUI/ORI instruction pair.
534  static const int kInstructionsFor32BitConstant = 2;
535  static const int kInstructionsFor64BitConstant = 4;
536
537  // Distance between the instruction referring to the address of the call
538  // target and the return address.
539#ifdef _MIPS_ARCH_MIPS64R6
540  static const int kCallTargetAddressOffset = 5 * kInstrSize;
541#else
542  static const int kCallTargetAddressOffset = 6 * kInstrSize;
543#endif
544
545  // Distance between start of patched debug break slot and the emitted address
546  // to jump to.
547  static const int kPatchDebugBreakSlotAddressOffset = 6 * kInstrSize;
548
549  // Difference between address of current opcode and value read from pc
550  // register.
551  static const int kPcLoadDelta = 4;
552
553#ifdef _MIPS_ARCH_MIPS64R6
554  static const int kDebugBreakSlotInstructions = 5;
555#else
556  static const int kDebugBreakSlotInstructions = 6;
557#endif
558  static const int kDebugBreakSlotLength =
559      kDebugBreakSlotInstructions * kInstrSize;
560
561
562  // ---------------------------------------------------------------------------
563  // Code generation.
564
565  // Insert the smallest number of nop instructions
566  // possible to align the pc offset to a multiple
567  // of m. m must be a power of 2 (>= 4).
568  void Align(int m);
569  // Insert the smallest number of zero bytes possible to align the pc offset
570  // to a mulitple of m. m must be a power of 2 (>= 2).
571  void DataAlign(int m);
572  // Aligns code to something that's optimal for a jump target for the platform.
573  void CodeTargetAlign();
574
  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    NON_MARKING_NOP = 0,
    DEBUG_BREAK_NOP,
    // IC markers.
    PROPERTY_ACCESS_INLINED,
    PROPERTY_ACCESS_INLINED_CONTEXT,
    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
    // Helper values.
    LAST_CODE_MARKER,
    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
    // Code aging
    CODE_AGE_MARKER_NOP = 6,
    CODE_AGE_SEQUENCE_NOP
  };

  // Type == 0 is the default non-marking nop. For mips this is a
  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
  // marking, to avoid conflict with ssnop and ehb instructions.
  void nop(unsigned int type = 0) {
    // The marker is encoded in sll's shift-amount field, hence < 32.
    DCHECK(type < 32);
    Register nop_rt_reg = (type == 0) ? zero_reg : at;
    // coming_from_nop=true: this sll spelling is reserved as nop on purpose.
    sll(zero_reg, nop_rt_reg, type, true);
  }
600
601
  // --------Branch-and-jump-instructions----------
  // We don't use likely variant of instructions.
  //
  // The raw int16_t/int32_t overloads take an offset already scaled to
  // instruction words; the Label* overloads compute it via
  // shifted_branch_offset*(). Prefer the Label* forms (see note below).
  void b(int16_t offset);
  inline void b(Label* L) { b(shifted_branch_offset(L)); }
  void bal(int16_t offset);
  inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
  void bc(int32_t offset);
  inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
  void balc(int32_t offset);
  inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }

  void beq(Register rs, Register rt, int16_t offset);
  inline void beq(Register rs, Register rt, Label* L) {
    beq(rs, rt, shifted_branch_offset(L));
  }
  void bgez(Register rs, int16_t offset);
  void bgezc(Register rt, int16_t offset);
  inline void bgezc(Register rt, Label* L) {
    bgezc(rt, shifted_branch_offset(L));
  }
  void bgeuc(Register rs, Register rt, int16_t offset);
  inline void bgeuc(Register rs, Register rt, Label* L) {
    bgeuc(rs, rt, shifted_branch_offset(L));
  }
  void bgec(Register rs, Register rt, int16_t offset);
  inline void bgec(Register rs, Register rt, Label* L) {
    bgec(rs, rt, shifted_branch_offset(L));
  }
  void bgezal(Register rs, int16_t offset);
  void bgezalc(Register rt, int16_t offset);
  inline void bgezalc(Register rt, Label* L) {
    bgezalc(rt, shifted_branch_offset(L));
  }
635  void bgezall(Register rs, int16_t offset);
636  inline void bgezall(Register rs, Label* L) {
637    bgezall(rs, branch_offset(L) >> 2);
638  }
  void bgtz(Register rs, int16_t offset);
  void bgtzc(Register rt, int16_t offset);
  inline void bgtzc(Register rt, Label* L) {
    bgtzc(rt, shifted_branch_offset(L));
  }
  void blez(Register rs, int16_t offset);
  void blezc(Register rt, int16_t offset);
  inline void blezc(Register rt, Label* L) {
    blezc(rt, shifted_branch_offset(L));
  }
  void bltz(Register rs, int16_t offset);
  void bltzc(Register rt, int16_t offset);
  inline void bltzc(Register rt, Label* L) {
    bltzc(rt, shifted_branch_offset(L));
  }
  void bltuc(Register rs, Register rt, int16_t offset);
  inline void bltuc(Register rs, Register rt, Label* L) {
    bltuc(rs, rt, shifted_branch_offset(L));
  }
  void bltc(Register rs, Register rt, int16_t offset);
  inline void bltc(Register rs, Register rt, Label* L) {
    bltc(rs, rt, shifted_branch_offset(L));
  }
  void bltzal(Register rs, int16_t offset);
  void blezalc(Register rt, int16_t offset);
  inline void blezalc(Register rt, Label* L) {
    blezalc(rt, shifted_branch_offset(L));
  }
  void bltzalc(Register rt, int16_t offset);
  inline void bltzalc(Register rt, Label* L) {
    bltzalc(rt, shifted_branch_offset(L));
  }
  void bgtzalc(Register rt, int16_t offset);
  inline void bgtzalc(Register rt, Label* L) {
    bgtzalc(rt, shifted_branch_offset(L));
  }
  void beqzalc(Register rt, int16_t offset);
  inline void beqzalc(Register rt, Label* L) {
    beqzalc(rt, shifted_branch_offset(L));
  }
  void beqc(Register rs, Register rt, int16_t offset);
  inline void beqc(Register rs, Register rt, Label* L) {
    beqc(rs, rt, shifted_branch_offset(L));
  }
  // beqzc/bnezc use the wider 21-bit offset encoding.
  void beqzc(Register rs, int32_t offset);
  inline void beqzc(Register rs, Label* L) {
    beqzc(rs, shifted_branch_offset21(L));
  }
  void bnezalc(Register rt, int16_t offset);
  inline void bnezalc(Register rt, Label* L) {
    bnezalc(rt, shifted_branch_offset(L));
  }
  void bnec(Register rs, Register rt, int16_t offset);
  inline void bnec(Register rs, Register rt, Label* L) {
    bnec(rs, rt, shifted_branch_offset(L));
  }
  void bnezc(Register rt, int32_t offset);
  inline void bnezc(Register rt, Label* L) {
    bnezc(rt, shifted_branch_offset21(L));
  }
  void bne(Register rs, Register rt, int16_t offset);
  inline void bne(Register rs, Register rt, Label* L) {
    bne(rs, rt, shifted_branch_offset(L));
  }
  // Branch on (no) overflow, r6 only.
  void bovc(Register rs, Register rt, int16_t offset);
  inline void bovc(Register rs, Register rt, Label* L) {
    bovc(rs, rt, shifted_branch_offset(L));
  }
  void bnvc(Register rs, Register rt, int16_t offset);
  inline void bnvc(Register rs, Register rt, Label* L) {
    bnvc(rs, rt, shifted_branch_offset(L));
  }
711
712  // Never use the int16_t b(l)cond version with a branch offset
713  // instead of using the Label* version.
714
715  // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
716  void j(int64_t target);
717  void jal(int64_t target);
718  void j(Label* target);
719  void jal(Label* target);
720  void jalr(Register rs, Register rd = ra);
721  void jr(Register target);
722  void jic(Register rt, int16_t offset);
723  void jialc(Register rt, int16_t offset);
724
725
726  // -------Data-processing-instructions---------
727
728  // Arithmetic.
729  void addu(Register rd, Register rs, Register rt);
730  void subu(Register rd, Register rs, Register rt);
731
732  void div(Register rs, Register rt);
733  void divu(Register rs, Register rt);
734  void ddiv(Register rs, Register rt);
735  void ddivu(Register rs, Register rt);
736  void div(Register rd, Register rs, Register rt);
737  void divu(Register rd, Register rs, Register rt);
738  void ddiv(Register rd, Register rs, Register rt);
739  void ddivu(Register rd, Register rs, Register rt);
740  void mod(Register rd, Register rs, Register rt);
741  void modu(Register rd, Register rs, Register rt);
742  void dmod(Register rd, Register rs, Register rt);
743  void dmodu(Register rd, Register rs, Register rt);
744
745  void mul(Register rd, Register rs, Register rt);
746  void muh(Register rd, Register rs, Register rt);
747  void mulu(Register rd, Register rs, Register rt);
748  void muhu(Register rd, Register rs, Register rt);
749  void mult(Register rs, Register rt);
750  void multu(Register rs, Register rt);
751  void dmul(Register rd, Register rs, Register rt);
752  void dmuh(Register rd, Register rs, Register rt);
753  void dmulu(Register rd, Register rs, Register rt);
754  void dmuhu(Register rd, Register rs, Register rt);
755  void daddu(Register rd, Register rs, Register rt);
756  void dsubu(Register rd, Register rs, Register rt);
757  void dmult(Register rs, Register rt);
758  void dmultu(Register rs, Register rt);
759
760  void addiu(Register rd, Register rs, int32_t j);
761  void daddiu(Register rd, Register rs, int32_t j);
762
763  // Logical.
764  void and_(Register rd, Register rs, Register rt);
765  void or_(Register rd, Register rs, Register rt);
766  void xor_(Register rd, Register rs, Register rt);
767  void nor(Register rd, Register rs, Register rt);
768
769  void andi(Register rd, Register rs, int32_t j);
770  void ori(Register rd, Register rs, int32_t j);
771  void xori(Register rd, Register rs, int32_t j);
772  void lui(Register rd, int32_t j);
773  void aui(Register rt, Register rs, int32_t j);
774  void daui(Register rt, Register rs, int32_t j);
775  void dahi(Register rs, int32_t j);
776  void dati(Register rs, int32_t j);
777
778  // Shifts.
779  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
780  // and may cause problems in normal code. coming_from_nop makes sure this
781  // doesn't happen.
782  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
783  void sllv(Register rd, Register rt, Register rs);
784  void srl(Register rd, Register rt, uint16_t sa);
785  void srlv(Register rd, Register rt, Register rs);
786  void sra(Register rt, Register rd, uint16_t sa);
787  void srav(Register rt, Register rd, Register rs);
788  void rotr(Register rd, Register rt, uint16_t sa);
789  void rotrv(Register rd, Register rt, Register rs);
790  void dsll(Register rd, Register rt, uint16_t sa);
791  void dsllv(Register rd, Register rt, Register rs);
792  void dsrl(Register rd, Register rt, uint16_t sa);
793  void dsrlv(Register rd, Register rt, Register rs);
794  void drotr(Register rd, Register rt, uint16_t sa);
795  void drotr32(Register rd, Register rt, uint16_t sa);
796  void drotrv(Register rd, Register rt, Register rs);
797  void dsra(Register rt, Register rd, uint16_t sa);
798  void dsrav(Register rd, Register rt, Register rs);
799  void dsll32(Register rt, Register rd, uint16_t sa);
800  void dsrl32(Register rt, Register rd, uint16_t sa);
801  void dsra32(Register rt, Register rd, uint16_t sa);
802
  // ------------Memory-instructions-------------

  // Loads: byte/halfword (signed and unsigned), word (signed and unsigned),
  // and the unaligned word-load halves lwl/lwr.
  void lb(Register rd, const MemOperand& rs);
  void lbu(Register rd, const MemOperand& rs);
  void lh(Register rd, const MemOperand& rs);
  void lhu(Register rd, const MemOperand& rs);
  void lw(Register rd, const MemOperand& rs);
  void lwu(Register rd, const MemOperand& rs);
  void lwl(Register rd, const MemOperand& rs);
  void lwr(Register rd, const MemOperand& rs);
  // Stores: byte, halfword, word, and the unaligned word-store halves swl/swr.
  void sb(Register rd, const MemOperand& rs);
  void sh(Register rd, const MemOperand& rs);
  void sw(Register rd, const MemOperand& rs);
  void swl(Register rd, const MemOperand& rs);
  void swr(Register rd, const MemOperand& rs);
  // 64-bit (doubleword) loads/stores, including the unaligned halves.
  void ldl(Register rd, const MemOperand& rs);
  void ldr(Register rd, const MemOperand& rs);
  void sdl(Register rd, const MemOperand& rs);
  void sdr(Register rd, const MemOperand& rs);
  void ld(Register rd, const MemOperand& rs);
  void sd(Register rd, const MemOperand& rs);


  // ---------PC-Relative-instructions-----------

  // PC-relative address computation and loads; the immediate parameter name
  // encodes the field width in bits (e.g. offset19).
  void addiupc(Register rs, int32_t imm19);
  void lwpc(Register rs, int32_t offset19);
  void lwupc(Register rs, int32_t offset19);
  void ldpc(Register rs, int32_t offset18);
  void auipc(Register rs, int16_t imm16);
  void aluipc(Register rs, int16_t imm16);


  // ----------------Prefetch--------------------

  // Prefetch memory at rs with the given hint code.
  void pref(int32_t hint, const MemOperand& rs);
839
840
  // -------------Misc-instructions--------------

  // Break / Trap instructions.
  void break_(uint32_t code, bool break_as_stop = false);
  void stop(const char* msg, uint32_t code = kMaxStopCode);
  // Conditional traps comparing rs against rt (ge/geu/lt/ltu/eq/ne).
  void tge(Register rs, Register rt, uint16_t code);
  void tgeu(Register rs, Register rt, uint16_t code);
  void tlt(Register rs, Register rt, uint16_t code);
  void tltu(Register rs, Register rt, uint16_t code);
  void teq(Register rs, Register rt, uint16_t code);
  void tne(Register rs, Register rt, uint16_t code);

  // Memory barrier instruction.
  void sync();

  // Move from HI/LO register.
  void mfhi(Register rd);
  void mflo(Register rd);

  // Set on less than.
  void slt(Register rd, Register rs, Register rt);
  void sltu(Register rd, Register rs, Register rt);
  void slti(Register rd, Register rs, int32_t j);
  void sltiu(Register rd, Register rs, int32_t j);

  // Conditional move.
  void movz(Register rd, Register rs, Register rt);
  void movn(Register rd, Register rs, Register rt);
  void movt(Register rd, Register rs, uint16_t cc = 0);
  void movf(Register rd, Register rs, uint16_t cc = 0);

  // FPU select instructions; the fmt overloads take the operand format as a
  // SecondaryField, the _s/_d forms are single/double shorthands.
  void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void seleqz(Register rd, Register rs, Register rt);
  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
  // NOTE(review): the GPR selnez names its parameters (rs, rt, rd) while
  // seleqz uses (rd, rs, rt) — presumably only the names differ; confirm
  // against the definition.
  void selnez(Register rs, Register rt, Register rd);
  void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
              FPURegister ft);
  void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);

  // FPU conditional moves keyed on a GPR (movz/movn) or an FPU condition
  // code bit (movt/movf).
  void movz_s(FPURegister fd, FPURegister fs, Register rt);
  void movz_d(FPURegister fd, FPURegister fs, Register rt);
  void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
  void movn_s(FPURegister fd, FPURegister fs, Register rt);
  void movn_d(FPURegister fd, FPURegister fs, Register rt);
  // Bit twiddling.
  void clz(Register rd, Register rs);
  void dclz(Register rd, Register rs);
  // Bit-field insert/extract; trailing underscore avoids clashing with
  // C library identifiers.
  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
  void dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
  void dins_(Register rt, Register rs, uint16_t pos, uint16_t size);
  void bitswap(Register rd, Register rt);
  void dbitswap(Register rd, Register rt);
  void align(Register rd, Register rs, Register rt, uint8_t bp);
  void dalign(Register rd, Register rs, Register rt, uint8_t bp);

  // Byte/halfword swap and sign-extension helpers.
  void wsbh(Register rd, Register rt);
  void dsbh(Register rd, Register rt);
  void dshd(Register rd, Register rt);
  void seh(Register rd, Register rt);
  void seb(Register rd, Register rt);
913
  // --------Coprocessor-instructions----------------

  // Load, store, and move.
  void lwc1(FPURegister fd, const MemOperand& src);
  void ldc1(FPURegister fd, const MemOperand& src);

  void swc1(FPURegister fs, const MemOperand& dst);
  void sdc1(FPURegister fs, const MemOperand& dst);

  // Moves between GPRs and FPU registers (mthc1/mfhc1 access the high word,
  // dmtc1/dmfc1 the full 64 bits).
  void mtc1(Register rt, FPURegister fs);
  void mthc1(Register rt, FPURegister fs);
  void dmtc1(Register rt, FPURegister fs);

  void mfc1(Register rt, FPURegister fs);
  void mfhc1(Register rt, FPURegister fs);
  void dmfc1(Register rt, FPURegister fs);

  // Moves to/from FPU control registers.
  void ctc1(Register rt, FPUControlRegister fs);
  void cfc1(Register rt, FPUControlRegister fs);

  // Arithmetic.
  void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
  void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void abs_s(FPURegister fd, FPURegister fs);
  void abs_d(FPURegister fd, FPURegister fs);
  void mov_d(FPURegister fd, FPURegister fs);
  void mov_s(FPURegister fd, FPURegister fs);
  void neg_s(FPURegister fd, FPURegister fs);
  void neg_d(FPURegister fd, FPURegister fs);
  void sqrt_s(FPURegister fd, FPURegister fs);
  void sqrt_d(FPURegister fd, FPURegister fs);
  void rsqrt_s(FPURegister fd, FPURegister fs);
  void rsqrt_d(FPURegister fd, FPURegister fs);
  void recip_d(FPURegister fd, FPURegister fs);
  void recip_s(FPURegister fd, FPURegister fs);

  // Conversion.
  // Float-to-32-bit-integer conversions with the four rounding behaviors
  // (trunc/round/floor/ceil) plus the FCSR-controlled cvt.
  void cvt_w_s(FPURegister fd, FPURegister fs);
  void cvt_w_d(FPURegister fd, FPURegister fs);
  void trunc_w_s(FPURegister fd, FPURegister fs);
  void trunc_w_d(FPURegister fd, FPURegister fs);
  void round_w_s(FPURegister fd, FPURegister fs);
  void round_w_d(FPURegister fd, FPURegister fs);
  void floor_w_s(FPURegister fd, FPURegister fs);
  void floor_w_d(FPURegister fd, FPURegister fs);
  void ceil_w_s(FPURegister fd, FPURegister fs);
  void ceil_w_d(FPURegister fd, FPURegister fs);
  void rint_s(FPURegister fd, FPURegister fs);
  void rint_d(FPURegister fd, FPURegister fs);
  void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);


  // Float-to-64-bit-integer conversions, same rounding variants as above.
  void cvt_l_s(FPURegister fd, FPURegister fs);
  void cvt_l_d(FPURegister fd, FPURegister fs);
  void trunc_l_s(FPURegister fd, FPURegister fs);
  void trunc_l_d(FPURegister fd, FPURegister fs);
  void round_l_s(FPURegister fd, FPURegister fs);
  void round_l_d(FPURegister fd, FPURegister fs);
  void floor_l_s(FPURegister fd, FPURegister fs);
  void floor_l_d(FPURegister fd, FPURegister fs);
  void ceil_l_s(FPURegister fd, FPURegister fs);
  void ceil_l_d(FPURegister fd, FPURegister fs);

  // Classify the value in fs (NaN/infinity/zero/subnormal, etc.).
  void class_s(FPURegister fd, FPURegister fs);
  void class_d(FPURegister fd, FPURegister fs);

  // Min/max and their "absolute value" variants (mina/maxa).
  void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
  void min_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void min_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void max_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void mina_d(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_s(FPURegister fd, FPURegister fs, FPURegister ft);
  void maxa_d(FPURegister fd, FPURegister fs, FPURegister ft);

  // Integer-to-single conversions (from 32-bit word, 64-bit long, double).
  void cvt_s_w(FPURegister fd, FPURegister fs);
  void cvt_s_l(FPURegister fd, FPURegister fs);
  void cvt_s_d(FPURegister fd, FPURegister fs);

  // Integer-to-double conversions (from 32-bit word, 64-bit long, single).
  void cvt_d_w(FPURegister fd, FPURegister fs);
  void cvt_d_l(FPURegister fd, FPURegister fs);
  void cvt_d_s(FPURegister fd, FPURegister fs);
1007
  // Conditions and branches for MIPSr6.
  // cmp writes an all-ones/all-zeros mask into fd instead of a cc bit.
  void cmp(FPUCondition cond, SecondaryField fmt,
         FPURegister fd, FPURegister ft, FPURegister fs);
  void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
  void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);

  // Branch if ft compares (not) equal to zero; the Label overloads compute
  // the encoded branch offset from the label position.
  void bc1eqz(int16_t offset, FPURegister ft);
  inline void bc1eqz(Label* L, FPURegister ft) {
    bc1eqz(shifted_branch_offset(L), ft);
  }
  void bc1nez(int16_t offset, FPURegister ft);
  inline void bc1nez(Label* L, FPURegister ft) {
    bc1nez(shifted_branch_offset(L), ft);
  }

  // Conditions and branches for non MIPSr6.
  // c sets condition-code bit cc; bc1f/bc1t branch on that bit.
  void c(FPUCondition cond, SecondaryField fmt,
         FPURegister ft, FPURegister fs, uint16_t cc = 0);
  void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
  void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);

  void bc1f(int16_t offset, uint16_t cc = 0);
  inline void bc1f(Label* L, uint16_t cc = 0) {
    bc1f(shifted_branch_offset(L), cc);
  }
  void bc1t(int16_t offset, uint16_t cc = 0);
  inline void bc1t(Label* L, uint16_t cc = 0) {
    bc1t(shifted_branch_offset(L), cc);
  }
  // Compare src1 against the immediate double src2 under condition cond.
  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
1038
1039  // Check the code size generated from label to here.
1040  int SizeOfCodeGeneratedSince(Label* label) {
1041    return pc_offset() - label->pos();
1042  }
1043
1044  // Check the number of instructions generated from label to here.
1045  int InstructionsGeneratedSince(Label* label) {
1046    return SizeOfCodeGeneratedSince(label) / kInstrSize;
1047  }
1048
  // Class for scoping postponing the trampoline pool generation.
  // RAII guard: increments the assembler's trampoline-pool blocking nesting
  // count on construction and decrements it on destruction, so pool emission
  // is suppressed for the lifetime of the scope. Nestable.
  class BlockTrampolinePoolScope {
   public:
    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockTrampolinePool();
    }
    ~BlockTrampolinePoolScope() {
      assem_->EndBlockTrampolinePool();
    }

   private:
    // Not owned; must outlive the scope.
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
  };
1064
  // Class for postponing the assembly buffer growth. Typically used for
  // sequences of instructions that must be emitted as a unit, before
  // buffer growth (and relocation) can occur.
  // This blocking scope is not nestable (StartBlockGrowBuffer DCHECKs that
  // growth is not already blocked).
  class BlockGrowBufferScope {
   public:
    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockGrowBuffer();
    }
    ~BlockGrowBufferScope() {
      assem_->EndBlockGrowBuffer();
    }

   private:
    // Not owned; must outlive the scope.
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
  };
1083
1084  // Debugging.
1085
  // Mark generator continuation.
  void RecordGeneratorContinuation();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot(RelocInfo::Mode mode);

  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  // Requires that no id is currently recorded (DCHECKed).
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  // Returns the previously recorded AST id; one must have been set (DCHECKed).
  TypeFeedbackId RecordedAstId() {
    DCHECK(!recorded_ast_id_.IsNone());
    return recorded_ast_id_;
  }

  // Resets the recorded AST id to the "none" sentinel.
  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }

  // Record a comment relocation entry that can be used by a disassembler.
  // Use --code-comments to enable.
  void RecordComment(const char* msg);

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(const int reason, int raw_position, int id);

  // Relocate an internal reference at pc by pc_delta bytes; returns the
  // number of instructions patched.
  static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
                                       intptr_t pc_delta);

  // Writes a single byte or word of data in the code stream.  Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  // Pointer-sized data; on this 64-bit target it forwards to dq.
  void dp(uintptr_t data) { dq(data); }
  // Emits the (eventual) position of the label as data.
  void dd(Label* label);
1124
  // Accessor for the source-position recorder attached to this assembler.
  AssemblerPositionsRecorder* positions_recorder() {
    return &positions_recorder_;
  }

  // Postpone the generation of the trampoline pool for the specified number of
  // instructions.
  void BlockTrampolinePoolFor(int instructions);

  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer.
  // Instructions grow upward from pc_; relocation info grows downward from
  // reloc_info_writer.pos(), so the gap between them is the free space.
  inline intptr_t available_space() const {
    return reloc_info_writer.pos() - pc_;
  }

  // Read/patch instructions.
  // The static overloads take an absolute pc; the member overloads take a
  // byte offset into the assembler's own buffer.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }
1152
  // Check if an instruction is a branch of some kind.
  static bool IsBranch(Instr instr);
  static bool IsBc(Instr instr);
  static bool IsBzc(Instr instr);

  // Predicates for specific (compact) branch opcodes.
  static bool IsBeq(Instr instr);
  static bool IsBne(Instr instr);
  static bool IsBeqzc(Instr instr);
  static bool IsBnezc(Instr instr);
  static bool IsBeqc(Instr instr);
  static bool IsBnec(Instr instr);


  // Predicates for jump and immediate-materialization instructions.
  static bool IsJump(Instr instr);
  static bool IsJ(Instr instr);
  static bool IsLui(Instr instr);
  static bool IsOri(Instr instr);

  static bool IsJal(Instr instr);
  static bool IsJr(Instr instr);
  static bool IsJalr(Instr instr);

  // Recognizers for canonical code patterns (nop variants, stack push/pop,
  // frame-pointer-relative loads/stores).
  static bool IsNop(Instr instr, unsigned int type);
  static bool IsPop(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsLwRegFpOffset(Instr instr);
  static bool IsSwRegFpOffset(Instr instr);
  static bool IsLwRegFpNegOffset(Instr instr);
  static bool IsSwRegFpNegOffset(Instr instr);

  // Decode register operands of an encoded instruction.
  static Register GetRtReg(Instr instr);
  static Register GetRsReg(Instr instr);
  static Register GetRdReg(Instr instr);

  // Field extractors: the plain forms return the field value shifted down to
  // bit 0; the *Field forms return it still in place within the instruction.
  static uint32_t GetRt(Instr instr);
  static uint32_t GetRtField(Instr instr);
  static uint32_t GetRs(Instr instr);
  static uint32_t GetRsField(Instr instr);
  static uint32_t GetRd(Instr instr);
  static uint32_t GetRdField(Instr instr);
  static uint32_t GetSa(Instr instr);
  static uint32_t GetSaField(Instr instr);
  static uint32_t GetOpcodeField(Instr instr);
  static uint32_t GetFunction(Instr instr);
  static uint32_t GetFunctionField(Instr instr);
  static uint32_t GetImmediate16(Instr instr);
  static uint32_t GetLabelConst(Instr instr);

  // Helpers used when patching branches and frame-access instructions.
  static int32_t GetBranchOffset(Instr instr);
  static bool IsLw(Instr instr);
  static int16_t GetLwOffset(Instr instr);
  static Instr SetLwOffset(Instr instr, int16_t offset);

  static bool IsSw(Instr instr);
  static Instr SetSwOffset(Instr instr, int16_t offset);
  static bool IsAddImmediate(Instr instr);
  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);

  static bool IsAndImmediate(Instr instr);
  static bool IsEmittedConstant(Instr instr);

  // Emit the trampoline pool now if one is due.
  void CheckTrampolinePool();

  // Constant-pool patching hook required by the shared Assembler interface;
  // this port has no embedded constant pool, so it must never be called.
  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }

  // True if the last emitted instruction was a compact branch (whose
  // forbidden slot must not contain certain instructions).
  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }

  // Number of labels bound to this assembler that are still unbound.
  inline int UnboundLabelsCount() { return unbound_labels_count_; }
1226
 protected:
  // Load Scaled Address instructions.
  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
  void dlsa(Register rd, Register rt, Register rs, uint8_t sa);

  // Helpers.
  // Materializes base-register-plus-offset of src into the at register.
  void LoadRegPlusOffsetToAt(const MemOperand& src);

  // Relocation for a type-recording IC has the AST id added to it.  This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  // Patches an encoded (lui/ori-style) internal reference at pc.
  inline static void set_target_internal_reference_encoded_at(Address pc,
                                                              Address target);

  // Free bytes between the instruction stream and the reloc-info stream.
  int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }

  // Decode branch instruction at pos and return branch target pos.
  int target_at(int pos, bool is_internal);

  // Patch branch instruction at pos to branch to given branch target pos.
  void target_at_put(int pos, int target_pos, bool is_internal);

  // Say if we need to relocate with this mode.
  bool MustUseReg(RelocInfo::Mode rmode);

  // Record reloc info for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Block the emission of the trampoline pool before pc_offset.
  // Only moves the threshold forward, never back.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  // Nestable blocking of trampoline pool emission; the pool is blocked
  // whenever the nesting count is positive.
  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }

  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  // True if taking a trampoline slot failed (see Trampoline::take_slot).
  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  // Splits a double into its low and high 32-bit words.
  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  // Not nestable: starting while already blocked is a DCHECK failure.
  void StartBlockGrowBuffer() {
    DCHECK(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  void EndBlockGrowBuffer() {
    DCHECK(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

  // Emits a nop if the previous instruction was a compact branch, so that
  // the next instruction does not land in the forbidden slot.
  void EmitForbiddenSlotInstruction() {
    if (IsPrevInstrCompactBranch()) {
      nop();
    }
  }

  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
1307
 private:
  // Buffer size and constant pool distance are checked together at regular
  // intervals of kBufferCheckInterval emitted bytes.
  static const int kBufferCheckInterval = 1*KB/2;

  // Code generation.
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries.
  static const int kGap = 32;


  // Repeated checking whether the trampoline pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated.
  static const int kCheckConstIntervalInst = 32;
  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;

  int next_buffer_check_;  // pc offset of next buffer check.

  // Emission of the trampoline pool may be blocked in some code sequences.
  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_trampoline_pool_before_;  // Block emission before this pc offset.

  // Keep track of the last emitted pool to guarantee a maximal distance.
  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.

  // Automatic growth of the assembly buffer may be blocked for some sequences.
  bool block_buffer_growth_;  // Block growth when true.

  // Relocation information generation.
  // Each relocation is encoded as a variable size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // The bound position, before this we cannot do instruction elimination.
  int last_bound_pos_;

  // Readable constants for compact branch handling in emit()
  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };

  // Code emission.
  // CheckBuffer grows the buffer when space runs low; emit appends one
  // encoded instruction (or a 64-bit datum) via the EmitHelper overloads.
  inline void CheckBuffer();
  void GrowBuffer();
  inline void emit(Instr x,
                   CompactBranchType is_compact_branch = CompactBranchType::NO);
  inline void emit(uint64_t x);
  inline void CheckForEmitInForbiddenSlot();
  template <typename T>
  inline void EmitHelper(T x);
  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
1359  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
1360
1361  // Instruction generation.
1362  // We have 3 different kind of encoding layout on MIPS.
1363  // However due to many different types of objects encoded in the same fields
1364  // we have quite a few aliases for each mode.
1365  // Using the same structure to refer to Register and FPURegister would spare a
1366  // few aliases, but mixing both does not look clean to me.
1367  // Anyway we could surely implement this differently.
1368
1369  void GenInstrRegister(Opcode opcode,
1370                        Register rs,
1371                        Register rt,
1372                        Register rd,
1373                        uint16_t sa = 0,
1374                        SecondaryField func = NULLSF);
1375
1376  void GenInstrRegister(Opcode opcode,
1377                        Register rs,
1378                        Register rt,
1379                        uint16_t msb,
1380                        uint16_t lsb,
1381                        SecondaryField func);
1382
1383  void GenInstrRegister(Opcode opcode,
1384                        SecondaryField fmt,
1385                        FPURegister ft,
1386                        FPURegister fs,
1387                        FPURegister fd,
1388                        SecondaryField func = NULLSF);
1389
1390  void GenInstrRegister(Opcode opcode,
1391                        FPURegister fr,
1392                        FPURegister ft,
1393                        FPURegister fs,
1394                        FPURegister fd,
1395                        SecondaryField func = NULLSF);
1396
1397  void GenInstrRegister(Opcode opcode,
1398                        SecondaryField fmt,
1399                        Register rt,
1400                        FPURegister fs,
1401                        FPURegister fd,
1402                        SecondaryField func = NULLSF);
1403
1404  void GenInstrRegister(Opcode opcode,
1405                        SecondaryField fmt,
1406                        Register rt,
1407                        FPUControlRegister fs,
1408                        SecondaryField func = NULLSF);
1409
1410
1411  void GenInstrImmediate(
1412      Opcode opcode, Register rs, Register rt, int32_t j,
1413      CompactBranchType is_compact_branch = CompactBranchType::NO);
1414  void GenInstrImmediate(
1415      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
1416      CompactBranchType is_compact_branch = CompactBranchType::NO);
1417  void GenInstrImmediate(
1418      Opcode opcode, Register r1, FPURegister r2, int32_t j,
1419      CompactBranchType is_compact_branch = CompactBranchType::NO);
1420  void GenInstrImmediate(
1421      Opcode opcode, Register rs, int32_t offset21,
1422      CompactBranchType is_compact_branch = CompactBranchType::NO);
1423  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
1424  void GenInstrImmediate(
1425      Opcode opcode, int32_t offset26,
1426      CompactBranchType is_compact_branch = CompactBranchType::NO);
1427
1428  void GenInstrJump(Opcode opcode,
1429                     uint32_t address);
1430
1431  // Labels.
1432  void print(Label* L);
1433  void bind_to(Label* L, int pos);
1434  void next(Label* L, bool is_internal);
1435
1436  // One trampoline consists of:
1437  // - space for trampoline slots,
1438  // - space for labels.
1439  //
1440  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
  // Space for trampoline slots precedes space for labels. Each label is of one
1442  // instruction size, so total amount for labels is equal to
1443  // label_count *  kInstrSize.
1444  class Trampoline {
1445   public:
1446    Trampoline() {
1447      start_ = 0;
1448      next_slot_ = 0;
1449      free_slot_count_ = 0;
1450      end_ = 0;
1451    }
1452    Trampoline(int start, int slot_count) {
1453      start_ = start;
1454      next_slot_ = start;
1455      free_slot_count_ = slot_count;
1456      end_ = start + slot_count * kTrampolineSlotsSize;
1457    }
1458    int start() {
1459      return start_;
1460    }
1461    int end() {
1462      return end_;
1463    }
1464    int take_slot() {
1465      int trampoline_slot = kInvalidSlotPos;
1466      if (free_slot_count_ <= 0) {
1467        // We have run out of space on trampolines.
1468        // Make sure we fail in debug mode, so we become aware of each case
1469        // when this happens.
1470        DCHECK(0);
1471        // Internal exception will be caught.
1472      } else {
1473        trampoline_slot = next_slot_;
1474        free_slot_count_--;
1475        next_slot_ += kTrampolineSlotsSize;
1476      }
1477      return trampoline_slot;
1478    }
1479
1480   private:
1481    int start_;
1482    int end_;
1483    int next_slot_;
1484    int free_slot_count_;
1485  };
1486
  // Returns a trampoline slot usable from pc offset pos, or signals an
  // internal exception when none is available.
  int32_t get_trampoline_entry(int32_t pos);
  int unbound_labels_count_;
  // After trampoline is emitted, long branches are used in generated code for
  // the forward branches whose target offsets could be beyond reach of branch
  // instruction. We use this information to trigger different mode of
  // branch instruction generation, where we use jump instructions rather
  // than regular branch instructions.
  bool trampoline_emitted_;
  static const int kTrampolineSlotsSize = 2 * kInstrSize;
  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
  static const int kInvalidSlotPos = -1;

  // Internal reference positions, required for unbound internal reference
  // labels.
  std::set<int64_t> internal_reference_positions_;

  // Tracks whether the most recently emitted instruction was a compact
  // branch (used to avoid filling its forbidden slot).
  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
  bool prev_instr_compact_branch_ = false;

  Trampoline trampoline_;
  bool internal_trampoline_exception_;

  friend class RegExpMacroAssemblerMIPS;
  friend class RelocInfo;
  friend class CodePatcher;
  friend class BlockTrampolinePoolScope;

  AssemblerPositionsRecorder positions_recorder_;
  friend class AssemblerPositionsRecorder;
  friend class EnsureSpace;
1519};
1520
1521
// Helper constructed before emitting code manually: triggers the assembler's
// buffer check so enough space is guaranteed for the upcoming emission.
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBuffer();
  }
};
1528
1529}  // namespace internal
1530}  // namespace v8
1531
#endif  // V8_MIPS_ASSEMBLER_MIPS_H_
1533