1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
// The original source code covered by the above license has been
32// modified significantly by Google Inc.
33// Copyright 2012 the V8 project authors. All rights reserved.
34
35
36#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
37#define V8_MIPS_ASSEMBLER_MIPS_H_
38
39#include <stdio.h>
40
41#include <set>
42
43#include "src/assembler.h"
44#include "src/mips/constants-mips.h"
45
46namespace v8 {
47namespace internal {
48
49// clang-format off
50#define GENERAL_REGISTERS(V)                              \
51  V(zero_reg)  V(at)  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3)  \
52  V(t0)  V(t1)  V(t2)  V(t3)  V(t4)  V(t5)  V(t6)  V(t7)  \
53  V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  V(t8)  V(t9) \
54  V(k0)  V(k1)  V(gp)  V(sp)  V(fp)  V(ra)
55
56#define ALLOCATABLE_GENERAL_REGISTERS(V) \
57  V(v0)  V(v1)  V(a0)  V(a1)  V(a2)  V(a3) \
58  V(t0)  V(t1)  V(t2)  V(t3)  V(t4)  V(t5)  V(t6) V(s7)
59
60#define DOUBLE_REGISTERS(V)                               \
61  V(f0)  V(f1)  V(f2)  V(f3)  V(f4)  V(f5)  V(f6)  V(f7)  \
62  V(f8)  V(f9)  V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
63  V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
64  V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
65
66#define ALLOCATABLE_DOUBLE_REGISTERS(V)                   \
67  V(f0)  V(f2)  V(f4)  V(f6)  V(f8)  V(f10) V(f12) V(f14) \
68  V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
69// clang-format on
70
71// CPU Registers.
72//
73// 1) We would prefer to use an enum, but enum values are assignment-
74// compatible with int, which has caused code-generation bugs.
75//
76// 2) We would prefer to use a class instead of a struct but we don't like
77// the register initialization to depend on the particular initialization
78// order (which appears to be different on OS X, Linux, and Windows for the
79// installed versions of C++ we tried). Using a struct permits C-style
80// "initialization". Also, the Register objects cannot be const as this
81// forces initialization stubs in MSVC, making us dependent on initialization
82// order.
83//
84// 3) By not using an enum, we are possibly preventing the compiler from
85// doing certain constant folds, which may significantly reduce the
86// code generated for some assembly instructions (because they boil down
87// to a few constants). If this is a problem, we could change the code
88// such that we use an enum in optimized mode, and the struct in debug
89// mode. This way we get the compile-time error checking in debug mode
90// and best performance in optimized code.
91
92
93// -----------------------------------------------------------------------------
94// Implementation of Register and FPURegister.
95
96struct Register {
97  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.
98
99  enum Code {
100#define REGISTER_CODE(R) kCode_##R,
101    GENERAL_REGISTERS(REGISTER_CODE)
102#undef REGISTER_CODE
103        kAfterLast,
104    kCode_no_reg = -1
105  };
106
107  static const int kNumRegisters = Code::kAfterLast;
108
109#if defined(V8_TARGET_LITTLE_ENDIAN)
110  static const int kMantissaOffset = 0;
111  static const int kExponentOffset = 4;
112#elif defined(V8_TARGET_BIG_ENDIAN)
113  static const int kMantissaOffset = 4;
114  static const int kExponentOffset = 0;
115#else
116#error Unknown endianness
117#endif
118
119
120  static Register from_code(int code) {
121    DCHECK(code >= 0);
122    DCHECK(code < kNumRegisters);
123    Register r = {code};
124    return r;
125  }
126  const char* ToString();
127  bool IsAllocatable() const;
128  bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
129  bool is(Register reg) const { return reg_code == reg.reg_code; }
130  int code() const {
131    DCHECK(is_valid());
132    return reg_code;
133  }
134  int bit() const {
135    DCHECK(is_valid());
136    return 1 << reg_code;
137  }
138
139  // Unfortunately we can't make this private in a struct.
140  int reg_code;
141};
142
// Register aliases used by the code generator:
//   s7: context register
//   s3: lithium scratch
//   s4: lithium scratch2
#define DECLARE_REGISTER(R) const Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
const Register no_reg = {Register::kCode_no_reg};


// Maps a Register to an integer number (implementation is out of line;
// presumably the same numeric code, but confirm against the .cc file).
int ToNumber(Register reg);

// Maps an integer register number back to a Register.
Register ToRegister(int num);
155
156// Coprocessor register.
157struct DoubleRegister {
158  enum Code {
159#define REGISTER_CODE(R) kCode_##R,
160    DOUBLE_REGISTERS(REGISTER_CODE)
161#undef REGISTER_CODE
162        kAfterLast,
163    kCode_no_reg = -1
164  };
165
166  static const int kMaxNumRegisters = Code::kAfterLast;
167
168  inline static int NumRegisters();
169
170  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
171  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
172  // number of Double regs (64-bit regs, or FPU-reg-pairs).
173
174  const char* ToString();
175  bool IsAllocatable() const;
176  bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
177  bool is(DoubleRegister reg) const { return reg_code == reg.reg_code; }
178  DoubleRegister low() const {
179    // Find low reg of a Double-reg pair, which is the reg itself.
180    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
181    DoubleRegister reg;
182    reg.reg_code = reg_code;
183    DCHECK(reg.is_valid());
184    return reg;
185  }
186  DoubleRegister high() const {
187    // Find high reg of a Doubel-reg pair, which is reg + 1.
188    DCHECK(reg_code % 2 == 0);  // Specified Double reg must be even.
189    DoubleRegister reg;
190    reg.reg_code = reg_code + 1;
191    DCHECK(reg.is_valid());
192    return reg;
193  }
194
195  int code() const {
196    DCHECK(is_valid());
197    return reg_code;
198  }
199  int bit() const {
200    DCHECK(is_valid());
201    return 1 << reg_code;
202  }
203
204  static DoubleRegister from_code(int code) {
205    DoubleRegister r = {code};
206    return r;
207  }
208  void setcode(int f) {
209    reg_code = f;
210    DCHECK(is_valid());
211  }
212  // Unfortunately we can't make this private in a struct.
213  int reg_code;
214};
215
216// A few double registers are reserved: one as a scratch register and one to
217// hold 0.0.
218//  f28: 0.0
219//  f30: scratch register.
220
221// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
222// 32-bit registers, f0 through f31. When used as 'double' they are used
223// in pairs, starting with the even numbered register. So a double operation
224// on f0 really uses f0 and f1.
225// (Modern mips hardware also supports 32 64-bit registers, via setting
// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
227// but it is not in common use. Someday we will want to support this in v8.)
228
229// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
typedef DoubleRegister FPURegister;
typedef DoubleRegister FloatRegister;

// Sentinel meaning "no FPU register".
const DoubleRegister no_freg = {-1};

const DoubleRegister f0 = {0};  // Return value in hard float mode.
const DoubleRegister f1 = {1};
const DoubleRegister f2 = {2};
const DoubleRegister f3 = {3};
const DoubleRegister f4 = {4};
const DoubleRegister f5 = {5};
const DoubleRegister f6 = {6};
const DoubleRegister f7 = {7};
const DoubleRegister f8 = {8};
const DoubleRegister f9 = {9};
const DoubleRegister f10 = {10};
const DoubleRegister f11 = {11};
const DoubleRegister f12 = {12};  // Arg 0 in hard float mode.
const DoubleRegister f13 = {13};
const DoubleRegister f14 = {14};  // Arg 1 in hard float mode.
const DoubleRegister f15 = {15};
const DoubleRegister f16 = {16};
const DoubleRegister f17 = {17};
const DoubleRegister f18 = {18};
const DoubleRegister f19 = {19};
const DoubleRegister f20 = {20};
const DoubleRegister f21 = {21};
const DoubleRegister f22 = {22};
const DoubleRegister f23 = {23};
const DoubleRegister f24 = {24};
const DoubleRegister f25 = {25};
const DoubleRegister f26 = {26};
const DoubleRegister f27 = {27};
const DoubleRegister f28 = {28};
const DoubleRegister f29 = {29};
const DoubleRegister f30 = {30};
const DoubleRegister f31 = {31};
267
268// Register aliases.
269// cp is assumed to be a callee saved register.
270// Defined using #define instead of "static const Register&" because Clang
271// complains otherwise when a compilation unit that includes this header
272// doesn't use the variables.
273#define kRootRegister s6
274#define cp s7
275#define kLithiumScratchReg s3
276#define kLithiumScratchReg2 s4
277#define kLithiumScratchDouble f30
278#define kDoubleRegZero f28
279// Used on mips32r6 for compare operations.
280// We use the last non-callee saved odd register for O32 ABI
281#define kDoubleCompareReg f19
282
283// FPU (coprocessor 1) control registers.
284// Currently only FCSR (#31) is implemented.
285struct FPUControlRegister {
286  bool is_valid() const { return reg_code == kFCSRRegister; }
287  bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
288  int code() const {
289    DCHECK(is_valid());
290    return reg_code;
291  }
292  int bit() const {
293    DCHECK(is_valid());
294    return 1 << reg_code;
295  }
296  void setcode(int f) {
297    reg_code = f;
298    DCHECK(is_valid());
299  }
300  // Unfortunately we can't make this private in a struct.
301  int reg_code;
302};
303
// The invalid sentinel and the (only implemented) FCSR control register.
const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
const FPUControlRegister FCSR = { kFCSRRegister };
306
307
308// -----------------------------------------------------------------------------
309// Machine instruction Operands.
310
311// Class Operand represents a shifter operand in data processing instructions.
class Operand BASE_EMBEDDED {
 public:
  // Immediate operands: the operand carries a 32-bit value plus the
  // relocation mode describing how that value must be patched.
  INLINE(explicit Operand(int32_t immediate,
         RelocInfo::Mode rmode = RelocInfo::NONE32));
  INLINE(explicit Operand(const ExternalReference& f));
  INLINE(explicit Operand(const char* s));
  INLINE(explicit Operand(Object** opp));
  INLINE(explicit Operand(Context** cpp));
  explicit Operand(Handle<Object> handle);
  INLINE(explicit Operand(Smi* value));

  // Register operand.
  INLINE(explicit Operand(Register rm));

  // Return true if this is a register operand.
  INLINE(bool is_reg() const);

  // The immediate value; only meaningful for non-register operands.
  inline int32_t immediate() const {
    DCHECK(!is_reg());
    return imm32_;
  }

  // The register; only meaningful for register operands.
  Register rm() const { return rm_; }

 private:
  Register rm_;
  int32_t imm32_;  // Valid if rm_ == no_reg.
  RelocInfo::Mode rmode_;

  friend class Assembler;
  friend class MacroAssembler;
};
345
346
// On MIPS we have only one addressing mode with base_reg + offset.
348// Class MemOperand represents a memory operand in load and store instructions.
class MemOperand : public Operand {
 public:
  // Immediate value attached to offset.
  enum OffsetAddend {
    offset_minus_one = -1,
    offset_zero = 0
  };

  // Memory operand at [rn + offset].
  explicit MemOperand(Register rn, int32_t offset = 0);
  // Memory operand at [rn + unit * multiplier + offset_addend].
  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
                      OffsetAddend offset_addend = offset_zero);
  int32_t offset() const { return offset_; }

  // True when the offset fits the 16-bit immediate field of a MIPS
  // load/store instruction.
  bool OffsetIsInt16Encodable() const {
    return is_int16(offset_);
  }

 private:
  int32_t offset_;

  friend class Assembler;
};
371
372
373class Assembler : public AssemblerBase {
374 public:
375  // Create an assembler. Instructions and relocation information are emitted
376  // into a buffer, with the instructions starting from the beginning and the
377  // relocation information starting from the end of the buffer. See CodeDesc
378  // for a detailed comment on the layout (globals.h).
379  //
380  // If the provided buffer is NULL, the assembler allocates and grows its own
381  // buffer, and buffer_size determines the initial buffer size. The buffer is
382  // owned by the assembler and deallocated upon destruction of the assembler.
383  //
384  // If the provided buffer is not NULL, the assembler uses the provided buffer
385  // for code generation and assumes its size to be buffer_size. If the buffer
386  // is too small, a fatal error occurs. No deallocation of the buffer is done
387  // upon destruction of the assembler.
388  Assembler(Isolate* isolate, void* buffer, int buffer_size);
389  virtual ~Assembler() { }
390
391  // GetCode emits any pending (non-emitted) code and fills the descriptor
392  // desc. GetCode() is idempotent; it returns the same result if no other
393  // Assembler functions are invoked in between GetCode() calls.
394  void GetCode(CodeDesc* desc);
395
396  // Label operations & relative jumps (PPUM Appendix D).
397  //
398  // Takes a branch opcode (cc) and a label (L) and generates
399  // either a backward branch or a forward branch and links it
400  // to the label fixup chain. Usage:
401  //
402  // Label L;    // unbound label
403  // j(cc, &L);  // forward branch to unbound label
404  // bind(&L);   // bind label to the current pc
405  // j(cc, &L);  // backward branch to bound label
406  // bind(&L);   // illegal: a label may be bound only once
407  //
408  // Note: The same Label can be used for forward and backward branches
409  // but it may be bound only once.
410  void bind(Label* L);  // Binds an unbound label L to current code position.
411
412  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
413
414  // Determines if Label is bound and near enough so that branch instruction
415  // can be used to reach it, instead of jump instruction.
416  bool is_near(Label* L);
417  bool is_near(Label* L, OffsetSize bits);
418  bool is_near_branch(Label* L);
419  inline bool is_near_pre_r6(Label* L) {
420    DCHECK(!IsMipsArchVariant(kMips32r6));
421    return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
422  }
423  inline bool is_near_r6(Label* L) {
424    DCHECK(IsMipsArchVariant(kMips32r6));
425    return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
426  }
427
428  int BranchOffset(Instr instr);
429
430  // Returns the branch offset to the given label from the current code
431  // position. Links the label to the current position if it is still unbound.
432  // Manages the jump elimination optimization if the second parameter is true.
433  int32_t branch_offset_helper(Label* L, OffsetSize bits);
434  inline int32_t branch_offset(Label* L) {
435    return branch_offset_helper(L, OffsetSize::kOffset16);
436  }
437  inline int32_t branch_offset21(Label* L) {
438    return branch_offset_helper(L, OffsetSize::kOffset21);
439  }
440  inline int32_t branch_offset26(Label* L) {
441    return branch_offset_helper(L, OffsetSize::kOffset26);
442  }
443  inline int32_t shifted_branch_offset(Label* L) {
444    return branch_offset(L) >> 2;
445  }
446  inline int32_t shifted_branch_offset21(Label* L) {
447    return branch_offset21(L) >> 2;
448  }
449  inline int32_t shifted_branch_offset26(Label* L) {
450    return branch_offset26(L) >> 2;
451  }
452  uint32_t jump_address(Label* L);
453
454  // Puts a labels target address at the given position.
455  // The high 8 bits are set to zero.
456  void label_at_put(Label* L, int at_offset);
457
458  // Read/Modify the code target address in the branch/call instruction at pc.
459  static Address target_address_at(Address pc);
460  static void set_target_address_at(
461      Isolate* isolate, Address pc, Address target,
462      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
463  // On MIPS there is no Constant Pool so we skip that parameter.
464  INLINE(static Address target_address_at(Address pc, Address constant_pool)) {
465    return target_address_at(pc);
466  }
467  INLINE(static void set_target_address_at(
468      Isolate* isolate, Address pc, Address constant_pool, Address target,
469      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
470    set_target_address_at(isolate, pc, target, icache_flush_mode);
471  }
472  INLINE(static Address target_address_at(Address pc, Code* code)) {
473    Address constant_pool = code ? code->constant_pool() : NULL;
474    return target_address_at(pc, constant_pool);
475  }
476  INLINE(static void set_target_address_at(
477      Isolate* isolate, Address pc, Code* code, Address target,
478      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
479    Address constant_pool = code ? code->constant_pool() : NULL;
480    set_target_address_at(isolate, pc, constant_pool, target,
481                          icache_flush_mode);
482  }
483
484  // Return the code target address at a call site from the return address
485  // of that call in the instruction stream.
486  inline static Address target_address_from_return_address(Address pc);
487
488  static void QuietNaN(HeapObject* nan);
489
490  // This sets the branch destination (which gets loaded at the call address).
491  // This is for calls and branches within generated code.  The serializer
492  // has already deserialized the lui/ori instructions etc.
493  inline static void deserialization_set_special_target_at(
494      Isolate* isolate, Address instruction_payload, Code* code,
495      Address target) {
496    set_target_address_at(
497        isolate,
498        instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
499        target);
500  }
501
502  // This sets the internal reference at the pc.
503  inline static void deserialization_set_target_internal_reference_at(
504      Isolate* isolate, Address pc, Address target,
505      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
506
507  // Size of an instruction.
508  static const int kInstrSize = sizeof(Instr);
509
510  // Difference between address of current opcode and target address offset.
511  static const int kBranchPCOffset = 4;
512
513  // Here we are patching the address in the LUI/ORI instruction pair.
514  // These values are used in the serialization process and must be zero for
515  // MIPS platform, as Code, Embedded Object or External-reference pointers
516  // are split across two consecutive instructions and don't exist separately
517  // in the code, so the serializer should not step forwards in memory after
518  // a target is resolved and written.
519  static const int kSpecialTargetSize = 0;
520
521  // Number of consecutive instructions used to store 32bit constant.
522  // Before jump-optimizations, this constant was used in
523  // RelocInfo::target_address_address() function to tell serializer address of
524  // the instruction that follows LUI/ORI instruction pair. Now, with new jump
525  // optimization, where jump-through-register instruction that usually
526  // follows LUI/ORI pair is substituted with J/JAL, this constant equals
527  // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
528  static const int kInstructionsFor32BitConstant = 3;
529
530  // Distance between the instruction referring to the address of the call
531  // target and the return address.
532  static const int kCallTargetAddressOffset = 4 * kInstrSize;
533
534  // Distance between start of patched debug break slot and the emitted address
535  // to jump to.
536  static const int kPatchDebugBreakSlotAddressOffset = 4 * kInstrSize;
537
538  // Difference between address of current opcode and value read from pc
539  // register.
540  static const int kPcLoadDelta = 4;
541
542  static const int kDebugBreakSlotInstructions = 4;
543  static const int kDebugBreakSlotLength =
544      kDebugBreakSlotInstructions * kInstrSize;
545
546
547  // ---------------------------------------------------------------------------
548  // Code generation.
549
550  // Insert the smallest number of nop instructions
551  // possible to align the pc offset to a multiple
552  // of m. m must be a power of 2 (>= 4).
553  void Align(int m);
554  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a multiple of m. m must be a power of 2 (>= 2).
556  void DataAlign(int m);
557  // Aligns code to something that's optimal for a jump target for the platform.
558  void CodeTargetAlign();
559
560  // Different nop operations are used by the code generator to detect certain
561  // states of the generated code.
562  enum NopMarkerTypes {
563    NON_MARKING_NOP = 0,
564    DEBUG_BREAK_NOP,
565    // IC markers.
566    PROPERTY_ACCESS_INLINED,
567    PROPERTY_ACCESS_INLINED_CONTEXT,
568    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
569    // Helper values.
570    LAST_CODE_MARKER,
571    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
572    // Code aging
573    CODE_AGE_MARKER_NOP = 6,
574    CODE_AGE_SEQUENCE_NOP
575  };
576
577  // Type == 0 is the default non-marking nop. For mips this is a
578  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
579  // marking, to avoid conflict with ssnop and ehb instructions.
580  void nop(unsigned int type = 0) {
581    DCHECK(type < 32);
582    Register nop_rt_reg = (type == 0) ? zero_reg : at;
583    sll(zero_reg, nop_rt_reg, type, true);
584  }
585
586
587  // --------Branch-and-jump-instructions----------
588  // We don't use likely variant of instructions.
589  void b(int16_t offset);
590  inline void b(Label* L) { b(shifted_branch_offset(L)); }
591  void bal(int16_t offset);
592  inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
593  void bc(int32_t offset);
594  inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
595  void balc(int32_t offset);
596  inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
597
598  void beq(Register rs, Register rt, int16_t offset);
599  inline void beq(Register rs, Register rt, Label* L) {
600    beq(rs, rt, shifted_branch_offset(L));
601  }
602  void bgez(Register rs, int16_t offset);
603  void bgezc(Register rt, int16_t offset);
604  inline void bgezc(Register rt, Label* L) {
605    bgezc(rt, shifted_branch_offset(L));
606  }
607  void bgeuc(Register rs, Register rt, int16_t offset);
608  inline void bgeuc(Register rs, Register rt, Label* L) {
609    bgeuc(rs, rt, shifted_branch_offset(L));
610  }
611  void bgec(Register rs, Register rt, int16_t offset);
612  inline void bgec(Register rs, Register rt, Label* L) {
613    bgec(rs, rt, shifted_branch_offset(L));
614  }
615  void bgezal(Register rs, int16_t offset);
616  void bgezalc(Register rt, int16_t offset);
617  inline void bgezalc(Register rt, Label* L) {
618    bgezalc(rt, shifted_branch_offset(L));
619  }
620  void bgezall(Register rs, int16_t offset);
621  inline void bgezall(Register rs, Label* L) {
622    bgezall(rs, branch_offset(L) >> 2);
623  }
624  void bgtz(Register rs, int16_t offset);
625  void bgtzc(Register rt, int16_t offset);
626  inline void bgtzc(Register rt, Label* L) {
627    bgtzc(rt, shifted_branch_offset(L));
628  }
629  void blez(Register rs, int16_t offset);
630  void blezc(Register rt, int16_t offset);
631  inline void blezc(Register rt, Label* L) {
632    blezc(rt, shifted_branch_offset(L));
633  }
634  void bltz(Register rs, int16_t offset);
635  void bltzc(Register rt, int16_t offset);
636  inline void bltzc(Register rt, Label* L) {
637    bltzc(rt, shifted_branch_offset(L));
638  }
639  void bltuc(Register rs, Register rt, int16_t offset);
640  inline void bltuc(Register rs, Register rt, Label* L) {
641    bltuc(rs, rt, shifted_branch_offset(L));
642  }
643  void bltc(Register rs, Register rt, int16_t offset);
644  inline void bltc(Register rs, Register rt, Label* L) {
645    bltc(rs, rt, shifted_branch_offset(L));
646  }
647  void bltzal(Register rs, int16_t offset);
648  void blezalc(Register rt, int16_t offset);
649  inline void blezalc(Register rt, Label* L) {
650    blezalc(rt, shifted_branch_offset(L));
651  }
652  void bltzalc(Register rt, int16_t offset);
653  inline void bltzalc(Register rt, Label* L) {
654    bltzalc(rt, shifted_branch_offset(L));
655  }
656  void bgtzalc(Register rt, int16_t offset);
657  inline void bgtzalc(Register rt, Label* L) {
658    bgtzalc(rt, shifted_branch_offset(L));
659  }
660  void beqzalc(Register rt, int16_t offset);
661  inline void beqzalc(Register rt, Label* L) {
662    beqzalc(rt, shifted_branch_offset(L));
663  }
664  void beqc(Register rs, Register rt, int16_t offset);
665  inline void beqc(Register rs, Register rt, Label* L) {
666    beqc(rs, rt, shifted_branch_offset(L));
667  }
668  void beqzc(Register rs, int32_t offset);
669  inline void beqzc(Register rs, Label* L) {
670    beqzc(rs, shifted_branch_offset21(L));
671  }
672  void bnezalc(Register rt, int16_t offset);
673  inline void bnezalc(Register rt, Label* L) {
674    bnezalc(rt, shifted_branch_offset(L));
675  }
676  void bnec(Register rs, Register rt, int16_t offset);
677  inline void bnec(Register rs, Register rt, Label* L) {
678    bnec(rs, rt, shifted_branch_offset(L));
679  }
680  void bnezc(Register rt, int32_t offset);
681  inline void bnezc(Register rt, Label* L) {
682    bnezc(rt, shifted_branch_offset21(L));
683  }
684  void bne(Register rs, Register rt, int16_t offset);
685  inline void bne(Register rs, Register rt, Label* L) {
686    bne(rs, rt, shifted_branch_offset(L));
687  }
688  void bovc(Register rs, Register rt, int16_t offset);
689  inline void bovc(Register rs, Register rt, Label* L) {
690    bovc(rs, rt, shifted_branch_offset(L));
691  }
692  void bnvc(Register rs, Register rt, int16_t offset);
693  inline void bnvc(Register rs, Register rt, Label* L) {
694    bnvc(rs, rt, shifted_branch_offset(L));
695  }
696
697  // Never use the int16_t b(l)cond version with a branch offset
698  // instead of using the Label* version.
699
700  // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits.
701  void j(int32_t target);
702  void jal(int32_t target);
703  void jalr(Register rs, Register rd = ra);
704  void jr(Register target);
705  void jic(Register rt, int16_t offset);
706  void jialc(Register rt, int16_t offset);
707
708
709  // -------Data-processing-instructions---------
710
711  // Arithmetic.
712  void addu(Register rd, Register rs, Register rt);
713  void subu(Register rd, Register rs, Register rt);
714  void mult(Register rs, Register rt);
715  void multu(Register rs, Register rt);
716  void div(Register rs, Register rt);
717  void divu(Register rs, Register rt);
718  void div(Register rd, Register rs, Register rt);
719  void divu(Register rd, Register rs, Register rt);
720  void mod(Register rd, Register rs, Register rt);
721  void modu(Register rd, Register rs, Register rt);
722  void mul(Register rd, Register rs, Register rt);
723  void muh(Register rd, Register rs, Register rt);
724  void mulu(Register rd, Register rs, Register rt);
725  void muhu(Register rd, Register rs, Register rt);
726
727  void addiu(Register rd, Register rs, int32_t j);
728
729  // Logical.
730  void and_(Register rd, Register rs, Register rt);
731  void or_(Register rd, Register rs, Register rt);
732  void xor_(Register rd, Register rs, Register rt);
733  void nor(Register rd, Register rs, Register rt);
734
735  void andi(Register rd, Register rs, int32_t j);
736  void ori(Register rd, Register rs, int32_t j);
737  void xori(Register rd, Register rs, int32_t j);
738  void lui(Register rd, int32_t j);
739  void aui(Register rs, Register rt, int32_t j);
740
741  // Shifts.
742  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
743  // and may cause problems in normal code. coming_from_nop makes sure this
744  // doesn't happen.
745  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
746  void sllv(Register rd, Register rt, Register rs);
747  void srl(Register rd, Register rt, uint16_t sa);
748  void srlv(Register rd, Register rt, Register rs);
749  void sra(Register rt, Register rd, uint16_t sa);
750  void srav(Register rt, Register rd, Register rs);
751  void rotr(Register rd, Register rt, uint16_t sa);
752  void rotrv(Register rd, Register rt, Register rs);
753
754  // Address computing instructions with shift.
755  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
756
757  // ------------Memory-instructions-------------
758
759  void lb(Register rd, const MemOperand& rs);
760  void lbu(Register rd, const MemOperand& rs);
761  void lh(Register rd, const MemOperand& rs);
762  void lhu(Register rd, const MemOperand& rs);
763  void lw(Register rd, const MemOperand& rs);
764  void lwl(Register rd, const MemOperand& rs);
765  void lwr(Register rd, const MemOperand& rs);
766  void sb(Register rd, const MemOperand& rs);
767  void sh(Register rd, const MemOperand& rs);
768  void sw(Register rd, const MemOperand& rs);
769  void swl(Register rd, const MemOperand& rs);
770  void swr(Register rd, const MemOperand& rs);
771
772
773  // ---------PC-Relative-instructions-----------
774
775  void addiupc(Register rs, int32_t imm19);
776  void lwpc(Register rs, int32_t offset19);
777  void auipc(Register rs, int16_t imm16);
778  void aluipc(Register rs, int16_t imm16);
779
780
781  // ----------------Prefetch--------------------
782
783  void pref(int32_t hint, const MemOperand& rs);
784
785
786  // -------------Misc-instructions--------------
787
788  // Break / Trap instructions.
789  void break_(uint32_t code, bool break_as_stop = false);
790  void stop(const char* msg, uint32_t code = kMaxStopCode);
791  void tge(Register rs, Register rt, uint16_t code);
792  void tgeu(Register rs, Register rt, uint16_t code);
793  void tlt(Register rs, Register rt, uint16_t code);
794  void tltu(Register rs, Register rt, uint16_t code);
795  void teq(Register rs, Register rt, uint16_t code);
796  void tne(Register rs, Register rt, uint16_t code);
797
798  // Move from HI/LO register.
799  void mfhi(Register rd);
800  void mflo(Register rd);
801
802  // Set on less than.
803  void slt(Register rd, Register rs, Register rt);
804  void sltu(Register rd, Register rs, Register rt);
805  void slti(Register rd, Register rs, int32_t j);
806  void sltiu(Register rd, Register rs, int32_t j);
807
808  // Conditional move.
809  void movz(Register rd, Register rs, Register rt);
810  void movn(Register rd, Register rs, Register rt);
811  void movt(Register rd, Register rs, uint16_t cc = 0);
812  void movf(Register rd, Register rs, uint16_t cc = 0);
813
814  void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
815  void sel_s(FPURegister fd, FPURegister fs, FPURegister ft);
816  void sel_d(FPURegister fd, FPURegister fs, FPURegister ft);
817  void seleqz(Register rd, Register rs, Register rt);
818  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs,
819              FPURegister ft);
820  void selnez(Register rd, Register rs, Register rt);
821  void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs,
822              FPURegister ft);
823  void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft);
824  void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft);
825  void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft);
826  void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft);
827
828  void movz_s(FPURegister fd, FPURegister fs, Register rt);
829  void movz_d(FPURegister fd, FPURegister fs, Register rt);
830  void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
831  void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
832  void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0);
833  void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0);
834  void movn_s(FPURegister fd, FPURegister fs, Register rt);
835  void movn_d(FPURegister fd, FPURegister fs, Register rt);
836  // Bit twiddling.
837  void clz(Register rd, Register rs);
838  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
839  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
840  void bitswap(Register rd, Register rt);
841  void align(Register rd, Register rs, Register rt, uint8_t bp);
842
843  // --------Coprocessor-instructions----------------
844
845  // Load, store, and move.
846  void lwc1(FPURegister fd, const MemOperand& src);
847  void ldc1(FPURegister fd, const MemOperand& src);
848
849  void swc1(FPURegister fs, const MemOperand& dst);
850  void sdc1(FPURegister fs, const MemOperand& dst);
851
852  void mtc1(Register rt, FPURegister fs);
853  void mthc1(Register rt, FPURegister fs);
854
855  void mfc1(Register rt, FPURegister fs);
856  void mfhc1(Register rt, FPURegister fs);
857
858  void ctc1(Register rt, FPUControlRegister fs);
859  void cfc1(Register rt, FPUControlRegister fs);
860
861  // Arithmetic.
862  void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
863  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
864  void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
865  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
866  void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
867  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
868  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
869  void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
870  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
871  void abs_s(FPURegister fd, FPURegister fs);
872  void abs_d(FPURegister fd, FPURegister fs);
873  void mov_d(FPURegister fd, FPURegister fs);
874  void mov_s(FPURegister fd, FPURegister fs);
875  void neg_s(FPURegister fd, FPURegister fs);
876  void neg_d(FPURegister fd, FPURegister fs);
877  void sqrt_s(FPURegister fd, FPURegister fs);
878  void sqrt_d(FPURegister fd, FPURegister fs);
879  void rsqrt_s(FPURegister fd, FPURegister fs);
880  void rsqrt_d(FPURegister fd, FPURegister fs);
881  void recip_d(FPURegister fd, FPURegister fs);
882  void recip_s(FPURegister fd, FPURegister fs);
883
884  // Conversion.
885  void cvt_w_s(FPURegister fd, FPURegister fs);
886  void cvt_w_d(FPURegister fd, FPURegister fs);
887  void trunc_w_s(FPURegister fd, FPURegister fs);
888  void trunc_w_d(FPURegister fd, FPURegister fs);
889  void round_w_s(FPURegister fd, FPURegister fs);
890  void round_w_d(FPURegister fd, FPURegister fs);
891  void floor_w_s(FPURegister fd, FPURegister fs);
892  void floor_w_d(FPURegister fd, FPURegister fs);
893  void ceil_w_s(FPURegister fd, FPURegister fs);
894  void ceil_w_d(FPURegister fd, FPURegister fs);
895  void rint_s(FPURegister fd, FPURegister fs);
896  void rint_d(FPURegister fd, FPURegister fs);
897  void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);
898
899  void cvt_l_s(FPURegister fd, FPURegister fs);
900  void cvt_l_d(FPURegister fd, FPURegister fs);
901  void trunc_l_s(FPURegister fd, FPURegister fs);
902  void trunc_l_d(FPURegister fd, FPURegister fs);
903  void round_l_s(FPURegister fd, FPURegister fs);
904  void round_l_d(FPURegister fd, FPURegister fs);
905  void floor_l_s(FPURegister fd, FPURegister fs);
906  void floor_l_d(FPURegister fd, FPURegister fs);
907  void ceil_l_s(FPURegister fd, FPURegister fs);
908  void ceil_l_d(FPURegister fd, FPURegister fs);
909
910  void class_s(FPURegister fd, FPURegister fs);
911  void class_d(FPURegister fd, FPURegister fs);
912
913  void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
914  void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
915  void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
916  void maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
917  void min_s(FPURegister fd, FPURegister fs, FPURegister ft);
918  void min_d(FPURegister fd, FPURegister fs, FPURegister ft);
919  void max_s(FPURegister fd, FPURegister fs, FPURegister ft);
920  void max_d(FPURegister fd, FPURegister fs, FPURegister ft);
921  void mina_s(FPURegister fd, FPURegister fs, FPURegister ft);
922  void mina_d(FPURegister fd, FPURegister fs, FPURegister ft);
923  void maxa_s(FPURegister fd, FPURegister fs, FPURegister ft);
924  void maxa_d(FPURegister fd, FPURegister fs, FPURegister ft);
925
926  void cvt_s_w(FPURegister fd, FPURegister fs);
927  void cvt_s_l(FPURegister fd, FPURegister fs);
928  void cvt_s_d(FPURegister fd, FPURegister fs);
929
930  void cvt_d_w(FPURegister fd, FPURegister fs);
931  void cvt_d_l(FPURegister fd, FPURegister fs);
932  void cvt_d_s(FPURegister fd, FPURegister fs);
933
934  // Conditions and branches for MIPSr6.
935  void cmp(FPUCondition cond, SecondaryField fmt,
936         FPURegister fd, FPURegister ft, FPURegister fs);
937  void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
938  void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
939
  void bc1eqz(int16_t offset, FPURegister ft);
  // Label overload: resolves L to a shifted branch offset and delegates to
  // the offset-based form.
  inline void bc1eqz(Label* L, FPURegister ft) {
    bc1eqz(shifted_branch_offset(L), ft);
  }
  void bc1nez(int16_t offset, FPURegister ft);
  // Label overload, see bc1eqz above.
  inline void bc1nez(Label* L, FPURegister ft) {
    bc1nez(shifted_branch_offset(L), ft);
  }
948
949  // Conditions and branches for non MIPSr6.
950  void c(FPUCondition cond, SecondaryField fmt,
951         FPURegister ft, FPURegister fs, uint16_t cc = 0);
952  void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
953  void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
954
  void bc1f(int16_t offset, uint16_t cc = 0);
  // Label overload: resolves L to a shifted branch offset and delegates to
  // the offset-based form.
  inline void bc1f(Label* L, uint16_t cc = 0) {
    bc1f(shifted_branch_offset(L), cc);
  }
  void bc1t(int16_t offset, uint16_t cc = 0);
  // Label overload, see bc1f above.
  inline void bc1t(Label* L, uint16_t cc = 0) {
    bc1t(shifted_branch_offset(L), cc);
  }
963  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
964
  // Returns the number of bytes of code emitted between the position where
  // |label| was bound and the current pc.
  int SizeOfCodeGeneratedSince(Label* label) {
    return pc_offset() - label->pos();
  }

  // Returns the number of whole instructions emitted since |label| was bound
  // (the byte distance divided by the fixed MIPS instruction size).
  int InstructionsGeneratedSince(Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstrSize;
  }
974
975  // Class for scoping postponing the trampoline pool generation.
976  class BlockTrampolinePoolScope {
977   public:
978    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
979      assem_->StartBlockTrampolinePool();
980    }
981    ~BlockTrampolinePoolScope() {
982      assem_->EndBlockTrampolinePool();
983    }
984
985   private:
986    Assembler* assem_;
987
988    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
989  };
990
991  // Class for postponing the assembly buffer growth. Typically used for
992  // sequences of instructions that must be emitted as a unit, before
993  // buffer growth (and relocation) can occur.
994  // This blocking scope is not nestable.
995  class BlockGrowBufferScope {
996   public:
997    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
998      assem_->StartBlockGrowBuffer();
999    }
1000    ~BlockGrowBufferScope() {
1001      assem_->EndBlockGrowBuffer();
1002    }
1003
1004   private:
1005    Assembler* assem_;
1006
1007    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
1008  };
1009
1010  // Debugging.
1011
1012  // Mark generator continuation.
1013  void RecordGeneratorContinuation();
1014
1015  // Mark address of a debug break slot.
1016  void RecordDebugBreakSlot(RelocInfo::Mode mode);
1017
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information. Only one id may be pending at a time.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());  // Previous id must have been consumed.
    recorded_ast_id_ = ast_id;
  }

  // Returns the pending AST id; an id must previously have been recorded.
  TypeFeedbackId RecordedAstId() {
    DCHECK(!recorded_ast_id_.IsNone());
    return recorded_ast_id_;
  }

  // Drops any pending AST id.
  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
1031
1032  // Record a comment relocation entry that can be used by a disassembler.
1033  // Use --code-comments to enable.
1034  void RecordComment(const char* msg);
1035
1036  // Record a deoptimization reason that can be used by a log or cpu profiler.
1037  // Use --trace-deopt to enable.
1038  void RecordDeoptReason(const int reason, const SourcePosition position);
1039
1040
1041  static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
1042                                       intptr_t pc_delta);
1043
  // Writes a single byte or word of data in the code stream.  Used for
  // inline tables, e.g., jump-tables.
  void db(uint8_t data);
  void dd(uint32_t data);
  void dq(uint64_t data);
  // Pointer-sized datum; on this 32-bit port uintptr_t fits in a word, so
  // this simply forwards to dd().
  void dp(uintptr_t data) { dd(data); }
  // Overload emitting a word that refers to |label|'s position (presumably
  // patched when the label is bound — see the implementation).
  void dd(Label* label);
1051
1052  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
1053
1054  // Postpone the generation of the trampoline pool for the specified number of
1055  // instructions.
1056  void BlockTrampolinePoolFor(int instructions);
1057
  // Check if there is less than kGap bytes available in the buffer.
  // If this is the case, we need to grow the buffer before emitting
  // an instruction or relocation information.
  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }

  // Get the number of bytes available in the buffer: instructions grow up
  // from buffer_ while relocation info grows down from the end, so the free
  // space is the gap between pc_ and the reloc writer's position.
  inline int available_space() const { return reloc_info_writer.pos() - pc_; }

  // Read/patch instructions at an absolute address.
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
    *reinterpret_cast<Instr*>(pc) = instr;
  }
  // Offset-based variants, relative to the start of the assembly buffer.
  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
  void instr_at_put(int pos, Instr instr) {
    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
  }
1075
1076  // Check if an instruction is a branch of some kind.
1077  static bool IsBranch(Instr instr);
1078  static bool IsBc(Instr instr);
1079  static bool IsBzc(Instr instr);
1080  static bool IsBeq(Instr instr);
1081  static bool IsBne(Instr instr);
1082  static bool IsBeqzc(Instr instr);
1083  static bool IsBnezc(Instr instr);
1084  static bool IsBeqc(Instr instr);
1085  static bool IsBnec(Instr instr);
1086
1087  static bool IsJump(Instr instr);
1088  static bool IsJ(Instr instr);
1089  static bool IsLui(Instr instr);
1090  static bool IsOri(Instr instr);
1091
1092  static bool IsJal(Instr instr);
1093  static bool IsJr(Instr instr);
1094  static bool IsJalr(Instr instr);
1095
1096  static bool IsNop(Instr instr, unsigned int type);
1097  static bool IsPop(Instr instr);
1098  static bool IsPush(Instr instr);
1099  static bool IsLwRegFpOffset(Instr instr);
1100  static bool IsSwRegFpOffset(Instr instr);
1101  static bool IsLwRegFpNegOffset(Instr instr);
1102  static bool IsSwRegFpNegOffset(Instr instr);
1103
1104  static Register GetRtReg(Instr instr);
1105  static Register GetRsReg(Instr instr);
1106  static Register GetRdReg(Instr instr);
1107
1108  static uint32_t GetRt(Instr instr);
1109  static uint32_t GetRtField(Instr instr);
1110  static uint32_t GetRs(Instr instr);
1111  static uint32_t GetRsField(Instr instr);
1112  static uint32_t GetRd(Instr instr);
1113  static uint32_t GetRdField(Instr instr);
1114  static uint32_t GetSa(Instr instr);
1115  static uint32_t GetSaField(Instr instr);
1116  static uint32_t GetOpcodeField(Instr instr);
1117  static uint32_t GetFunction(Instr instr);
1118  static uint32_t GetFunctionField(Instr instr);
1119  static uint32_t GetImmediate16(Instr instr);
1120  static uint32_t GetLabelConst(Instr instr);
1121
1122  static int32_t GetBranchOffset(Instr instr);
1123  static bool IsLw(Instr instr);
1124  static int16_t GetLwOffset(Instr instr);
1125  static Instr SetLwOffset(Instr instr, int16_t offset);
1126
1127  static bool IsSw(Instr instr);
1128  static Instr SetSwOffset(Instr instr, int16_t offset);
1129  static bool IsAddImmediate(Instr instr);
1130  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
1131
1132  static bool IsAndImmediate(Instr instr);
1133  static bool IsEmittedConstant(Instr instr);
1134
1135  void CheckTrampolinePool();
1136
  // MIPS has no embedded constant pool, so this hook must never be reached.
  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }

  // True if the instruction just emitted was a compact (forbidden-slot)
  // branch.
  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
1145
1146 protected:
1147  // Relocation for a type-recording IC has the AST id added to it.  This
1148  // member variable is a way to pass the information from the call site to
1149  // the relocation info.
1150  TypeFeedbackId recorded_ast_id_;
1151
1152  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
1153
1154  // Decode branch instruction at pos and return branch target pos.
1155  int target_at(int pos, bool is_internal);
1156
1157  // Patch branch instruction at pos to branch to given branch target pos.
1158  void target_at_put(int pos, int target_pos, bool is_internal);
1159
1160  // Say if we need to relocate with this mode.
1161  bool MustUseReg(RelocInfo::Mode rmode);
1162
1163  // Record reloc info for current pc_.
1164  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1165
  // Block the emission of the trampoline pool before pc_offset.
  // Successive calls only ever move the barrier forward.
  void BlockTrampolinePoolBefore(int pc_offset) {
    if (no_trampoline_pool_before_ < pc_offset)
      no_trampoline_pool_before_ = pc_offset;
  }

  // Raise the trampoline-pool blocking nesting level (see
  // BlockTrampolinePoolScope).
  void StartBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_++;
  }

  // Lower the blocking nesting level; the pool may be emitted again once the
  // count returns to zero.
  void EndBlockTrampolinePool() {
    trampoline_pool_blocked_nesting_--;
  }

  // True while at least one trampoline-pool blocking scope is active.
  bool is_trampoline_pool_blocked() const {
    return trampoline_pool_blocked_nesting_ > 0;
  }

  // True if an internal trampoline exception has been flagged (e.g. running
  // out of trampoline slots).
  bool has_exception() const {
    return internal_trampoline_exception_;
  }

  // Splits the double d into its two 32-bit halves, written to *lo and *hi.
  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);

  // True once a trampoline pool has been emitted; branch generation then
  // switches to the long (jump-based) form.
  bool is_trampoline_emitted() const {
    return trampoline_emitted_;
  }

  // Temporarily block automatic assembly buffer growth.
  // Not nestable: starting while already blocked is a DCHECK failure.
  void StartBlockGrowBuffer() {
    DCHECK(!block_buffer_growth_);
    block_buffer_growth_ = true;
  }

  // Re-enable automatic buffer growth; growth must currently be blocked.
  void EndBlockGrowBuffer() {
    DCHECK(block_buffer_growth_);
    block_buffer_growth_ = false;
  }

  // True while automatic buffer growth is blocked (see BlockGrowBufferScope).
  bool is_buffer_growth_blocked() const {
    return block_buffer_growth_;
  }

  // Cheap trampoline-pool check; extra_instructions accounts for code about
  // to be emitted. Full checks are expensive, hence this quick variant.
  inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
1210
1211 private:
1212  inline static void set_target_internal_reference_encoded_at(Address pc,
1213                                                              Address target);
1214
1215  // Buffer size and constant pool distance are checked together at regular
1216  // intervals of kBufferCheckInterval emitted bytes.
1217  static const int kBufferCheckInterval = 1*KB/2;
1218
1219  // Code generation.
1220  // The relocation writer's position is at least kGap bytes below the end of
1221  // the generated instructions. This is so that multi-instruction sequences do
1222  // not have to check for overflow. The same is true for writes of large
1223  // relocation info entries.
1224  static const int kGap = 32;
1225
1226
1227  // Repeated checking whether the trampoline pool should be emitted is rather
1228  // expensive. By default we only check again once a number of instructions
1229  // has been generated.
1230  static const int kCheckConstIntervalInst = 32;
1231  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
1232
1233  int next_buffer_check_;  // pc offset of next buffer check.
1234
1235  // Emission of the trampoline pool may be blocked in some code sequences.
1236  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
1237  int no_trampoline_pool_before_;  // Block emission before this pc offset.
1238
1239  // Keep track of the last emitted pool to guarantee a maximal distance.
1240  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.
1241
1242  // Automatic growth of the assembly buffer may be blocked for some sequences.
1243  bool block_buffer_growth_;  // Block growth when true.
1244
1245  // Relocation information generation.
1246  // Each relocation is encoded as a variable size value.
1247  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1248  RelocInfoWriter reloc_info_writer;
1249
1250  // The bound position, before this we cannot do instruction elimination.
1251  int last_bound_pos_;
1252
1253  // Readable constants for compact branch handling in emit()
1254  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
1255
1256  // Code emission.
1257  inline void CheckBuffer();
1258  void GrowBuffer();
1259  inline void emit(Instr x,
1260                   CompactBranchType is_compact_branch = CompactBranchType::NO);
1261  inline void emit(uint64_t x);
1262  inline void CheckForEmitInForbiddenSlot();
1263  template <typename T>
1264  inline void EmitHelper(T x);
1265  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
1266
1267  // Instruction generation.
1268  // We have 3 different kind of encoding layout on MIPS.
1269  // However due to many different types of objects encoded in the same fields
1270  // we have quite a few aliases for each mode.
1271  // Using the same structure to refer to Register and FPURegister would spare a
1272  // few aliases, but mixing both does not look clean to me.
1273  // Anyway we could surely implement this differently.
1274
1275  void GenInstrRegister(Opcode opcode,
1276                        Register rs,
1277                        Register rt,
1278                        Register rd,
1279                        uint16_t sa = 0,
1280                        SecondaryField func = NULLSF);
1281
1282  void GenInstrRegister(Opcode opcode,
1283                        Register rs,
1284                        Register rt,
1285                        uint16_t msb,
1286                        uint16_t lsb,
1287                        SecondaryField func);
1288
1289  void GenInstrRegister(Opcode opcode,
1290                        SecondaryField fmt,
1291                        FPURegister ft,
1292                        FPURegister fs,
1293                        FPURegister fd,
1294                        SecondaryField func = NULLSF);
1295
1296  void GenInstrRegister(Opcode opcode,
1297                        FPURegister fr,
1298                        FPURegister ft,
1299                        FPURegister fs,
1300                        FPURegister fd,
1301                        SecondaryField func = NULLSF);
1302
1303  void GenInstrRegister(Opcode opcode,
1304                        SecondaryField fmt,
1305                        Register rt,
1306                        FPURegister fs,
1307                        FPURegister fd,
1308                        SecondaryField func = NULLSF);
1309
1310  void GenInstrRegister(Opcode opcode,
1311                        SecondaryField fmt,
1312                        Register rt,
1313                        FPUControlRegister fs,
1314                        SecondaryField func = NULLSF);
1315
1316  void GenInstrImmediate(
1317      Opcode opcode, Register rs, Register rt, int32_t j,
1318      CompactBranchType is_compact_branch = CompactBranchType::NO);
1319  void GenInstrImmediate(
1320      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
1321      CompactBranchType is_compact_branch = CompactBranchType::NO);
1322  void GenInstrImmediate(
1323      Opcode opcode, Register r1, FPURegister r2, int32_t j,
1324      CompactBranchType is_compact_branch = CompactBranchType::NO);
1325  void GenInstrImmediate(
1326      Opcode opcode, Register rs, int32_t offset21,
1327      CompactBranchType is_compact_branch = CompactBranchType::NO);
1328  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
1329  void GenInstrImmediate(
1330      Opcode opcode, int32_t offset26,
1331      CompactBranchType is_compact_branch = CompactBranchType::NO);
1332
1333
1334  void GenInstrJump(Opcode opcode,
1335                     uint32_t address);
1336
1337  // Helpers.
1338  void LoadRegPlusOffsetToAt(const MemOperand& src);
1339
1340  // Labels.
1341  void print(Label* L);
1342  void bind_to(Label* L, int pos);
1343  void next(Label* L, bool is_internal);
1344
1345  // One trampoline consists of:
1346  // - space for trampoline slots,
1347  // - space for labels.
1348  //
1349  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
1350  // Space for trampoline slots preceeds space for labels. Each label is of one
1351  // instruction size, so total amount for labels is equal to
1352  // label_count *  kInstrSize.
1353  class Trampoline {
1354   public:
1355    Trampoline() {
1356      start_ = 0;
1357      next_slot_ = 0;
1358      free_slot_count_ = 0;
1359      end_ = 0;
1360    }
1361    Trampoline(int start, int slot_count) {
1362      start_ = start;
1363      next_slot_ = start;
1364      free_slot_count_ = slot_count;
1365      end_ = start + slot_count * kTrampolineSlotsSize;
1366    }
1367    int start() {
1368      return start_;
1369    }
1370    int end() {
1371      return end_;
1372    }
1373    int take_slot() {
1374      int trampoline_slot = kInvalidSlotPos;
1375      if (free_slot_count_ <= 0) {
1376        // We have run out of space on trampolines.
1377        // Make sure we fail in debug mode, so we become aware of each case
1378        // when this happens.
1379        DCHECK(0);
1380        // Internal exception will be caught.
1381      } else {
1382        trampoline_slot = next_slot_;
1383        free_slot_count_--;
1384        next_slot_ += kTrampolineSlotsSize;
1385      }
1386      return trampoline_slot;
1387    }
1388
1389   private:
1390    int start_;
1391    int end_;
1392    int next_slot_;
1393    int free_slot_count_;
1394  };
1395
1396  int32_t get_trampoline_entry(int32_t pos);
1397  int unbound_labels_count_;
1398  // If trampoline is emitted, generated code is becoming large. As this is
1399  // already a slow case which can possibly break our code generation for the
1400  // extreme case, we use this information to trigger different mode of
1401  // branch instruction generation, where we use jump instructions rather
1402  // than regular branch instructions.
1403  bool trampoline_emitted_;
1404  static const int kTrampolineSlotsSize = 4 * kInstrSize;
1405  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
1406  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
1407  static const int kInvalidSlotPos = -1;
1408
1409  // Internal reference positions, required for unbounded internal reference
1410  // labels.
1411  std::set<int> internal_reference_positions_;
1412
  // Marks that the instruction just emitted was a compact branch, whose
  // forbidden slot restricts what may be emitted next.
  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
  // Clears the compact-branch tracking state.
  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
  bool prev_instr_compact_branch_ = false;
1416
1417  Trampoline trampoline_;
1418  bool internal_trampoline_exception_;
1419
1420  friend class RegExpMacroAssemblerMIPS;
1421  friend class RelocInfo;
1422  friend class CodePatcher;
1423  friend class BlockTrampolinePoolScope;
1424
1425  PositionsRecorder positions_recorder_;
1426  friend class PositionsRecorder;
1427  friend class EnsureSpace;
1428};
1429
1430
1431class EnsureSpace BASE_EMBEDDED {
1432 public:
1433  explicit EnsureSpace(Assembler* assembler) {
1434    assembler->CheckBuffer();
1435  }
1436};
1437
1438}  // namespace internal
1439}  // namespace v8
1440
#endif  // V8_MIPS_ASSEMBLER_MIPS_H_
1442