assembler-mips.cc revision 69a99ed0b2b2ef69d393c371b03db3a98aaf880e
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
// The original source code covered by the above license has been
32// modified significantly by Google Inc.
33// Copyright 2011 the V8 project authors. All rights reserved.
34
35
36#include "v8.h"
37
38#if defined(V8_TARGET_ARCH_MIPS)
39
40#include "mips/assembler-mips-inl.h"
41#include "serialize.h"
42
43namespace v8 {
44namespace internal {
45
// CpuFeatures static state: bit sets of CPU features usable by the code
// generator. supported_ is filled in once by Probe();
// found_by_runtime_probing_ records the subset found by runtime detection
// (as opposed to being implied by the build configuration).
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;  // Set by Probe(); checked by ASSERTs.
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
51
// Detect which CPU features are available and record them in supported_.
// Must run exactly once, before any code generation that queries features.
void CpuFeatures::Probe() {
  ASSERT(!initialized_);
#ifdef DEBUG
  initialized_ = true;
#endif
  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#if !defined(__mips__)
  // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
  if (FLAG_enable_fpu) {
      supported_ |= 1u << FPU;
  }
#else
  // Real MIPS hardware.
  if (Serializer::enabled()) {
    // A snapshot must not bake in features probed on the build machine;
    // only platform-implied features are safe to record here.
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    return;  // No features if we might serialize.
  }

  if (OS::MipsCpuHasFeature(FPU)) {
    // This implementation also sets the FPU flags if
    // runtime detection of FPU returns true.
    supported_ |= 1u << FPU;
    found_by_runtime_probing_ |= 1u << FPU;
  }
#endif
}
78
79
80int ToNumber(Register reg) {
81  ASSERT(reg.is_valid());
82  const int kNumbers[] = {
83    0,    // zero_reg
84    1,    // at
85    2,    // v0
86    3,    // v1
87    4,    // a0
88    5,    // a1
89    6,    // a2
90    7,    // a3
91    8,    // t0
92    9,    // t1
93    10,   // t2
94    11,   // t3
95    12,   // t4
96    13,   // t5
97    14,   // t6
98    15,   // t7
99    16,   // s0
100    17,   // s1
101    18,   // s2
102    19,   // s3
103    20,   // s4
104    21,   // s5
105    22,   // s6
106    23,   // s7
107    24,   // t8
108    25,   // t9
109    26,   // k0
110    27,   // k1
111    28,   // gp
112    29,   // sp
113    30,   // s8_fp
114    31,   // ra
115  };
116  return kNumbers[reg.code()];
117}
118
119
120Register ToRegister(int num) {
121  ASSERT(num >= 0 && num < kNumRegisters);
122  const Register kRegisters[] = {
123    zero_reg,
124    at,
125    v0, v1,
126    a0, a1, a2, a3,
127    t0, t1, t2, t3, t4, t5, t6, t7,
128    s0, s1, s2, s3, s4, s5, s6, s7,
129    t8, t9,
130    k0, k1,
131    gp,
132    sp,
133    s8_fp,
134    ra
135  };
136  return kRegisters[num];
137}
138
139
// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

// Relocation modes whose targets must be re-resolved when code moves.
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}
152
153
154// Patch the code at the current address with the supplied instructions.
155void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
156  Instr* pc = reinterpret_cast<Instr*>(pc_);
157  Instr* instr = reinterpret_cast<Instr*>(instructions);
158  for (int i = 0; i < instruction_count; i++) {
159    *(pc + i) = *(instr + i);
160  }
161
162  // Indicate that code has changed.
163  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
164}
165
166
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not used on MIPS; aborts if ever reached.
  UNIMPLEMENTED_MIPS();
}
173
174
175// -----------------------------------------------------------------------------
176// Implementation of Operand and MemOperand.
177// See assembler-mips-inl.h for inlined constructors.
178
179Operand::Operand(Handle<Object> handle) {
180  rm_ = no_reg;
181  // Verify all Objects referred by code are NOT in new space.
182  Object* obj = *handle;
183  ASSERT(!HEAP->InNewSpace(obj));
184  if (obj->IsHeapObject()) {
185    imm32_ = reinterpret_cast<intptr_t>(handle.location());
186    rmode_ = RelocInfo::EMBEDDED_OBJECT;
187  } else {
188    // No relocation needed.
189    imm32_ = reinterpret_cast<intptr_t>(obj);
190    rmode_ = RelocInfo::NONE;
191  }
192}
193
194
195MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
196  offset_ = offset;
197}
198
199
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// Sign bit of a 16-bit immediate; used to form negative-offset patterns.
static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
      | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
      | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
      |  (0 & kImm16Mask);
//  lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
      |  (0 & kImm16Mask);

// lw(r, MemOperand(s8_fp, 0)): load relative to the frame pointer.
const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
      |  (0 & kImm16Mask);

// sw(r, MemOperand(s8_fp, 0)): store relative to the frame pointer.
const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
      |  (0 & kImm16Mask);

// As above, but with the offset sign bit set (negative fp-relative offset).
const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
      |  (kNegOffset & kImm16Mask);

const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
      |  (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;


// Spare buffer.
static const int kMinimalBufferSize = 4 * KB;
238
239
// Create an assembler. If |buffer| is NULL, the assembler allocates its own
// buffer of at least kMinimalBufferSize bytes (reusing the isolate's spare
// buffer when available) and owns it; otherwise it emits into the
// caller-provided buffer and does not own it.
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
    : AssemblerBase(arg_isolate),
      positions_recorder_(this),
      emit_debug_code_(FLAG_debug_code) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      // Reuse the isolate's cached spare buffer if one is available.
      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Set up buffer pointers: instructions grow up from the start of the
  // buffer, relocation info is written downward from its end.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = false;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}
290
291
// Release the code buffer if we own it. A minimally-sized buffer is cached
// on the isolate as the spare buffer (if that slot is free) so the next
// Assembler can reuse it instead of reallocating.
Assembler::~Assembler() {
  if (own_buffer_) {
    if (isolate()->assembler_spare_buffer() == NULL &&
        buffer_size_ == kMinimalBufferSize) {
      isolate()->set_assembler_spare_buffer(buffer_);
    } else {
      DeleteArray(buffer_);
    }
  }
}
302
303
// Fill in a code descriptor for the generated code: instructions occupy
// [buffer_, pc_), relocation info occupies [reloc_info_writer.pos(), end).
void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up the code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
312
313
314void Assembler::Align(int m) {
315  ASSERT(m >= 4 && IsPowerOf2(m));
316  while ((pc_offset() & (m - 1)) != 0) {
317    nop();
318  }
319}
320
321
// Align a branch/call target; on MIPS instruction alignment suffices.
void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // single instruction, that I am aware of.
  Align(4);
}
327
328
329Register Assembler::GetRtReg(Instr instr) {
330  Register rt;
331  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
332  return rt;
333}
334
335
336Register Assembler::GetRsReg(Instr instr) {
337  Register rs;
338  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
339  return rs;
340}
341
342
343Register Assembler::GetRdReg(Instr instr) {
344  Register rd;
345  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
346  return rd;
347}
348
349
// Instruction field accessors. GetX() returns the field value shifted down
// to bit 0; GetXField() returns the raw, in-place field bits.
uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return  (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return  instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


// Everything except the 16-bit immediate; zero for an emitted label constant.
uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}
413
414
// Pattern predicates: classify push/pop and fp-relative lw/sw instructions
// by masking out the Rt register (and, where relevant, the offset bits) and
// comparing against the patterns defined above.
bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}
445
446
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is inf loop).
// The instruction 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the
// -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
465
466
// Returns true for any instruction that participates in a label link chain:
// all conditional branch encodings, coprocessor branches, and emitted label
// constants (whole instruction word is just a 16-bit immediate).
bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  uint32_t label_constant = GetLabelConst(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      label_constant == 0;  // Emitted label const in reg-exp engine.
}
486
487
bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump: J, JAL, or register jumps
  // (JALR, or JR with rd == 0) in the SPECIAL opcode space.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate (ori).
  return opcode == ORI;
}
529
530
// Returns true if |instr| is the typed nop sll(zero_reg, zero_reg, type).
// The shift amount encodes the nop "type" so distinct markers can coexist.
bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  ASSERT(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rs = GetRs(instr);
  uint32_t sa = GetSa(instr);

  // nop(type) == sll(zero_reg, zero_reg, type);
  // Technically all these values will be 0 but
  // this makes more sense to the reader.

  bool ret = (opcode == SLL &&
              rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              sa == type);

  return ret;
}
550
551
552int32_t Assembler::GetBranchOffset(Instr instr) {
553  ASSERT(IsBranch(instr));
554  return ((int16_t)(instr & kImm16Mask)) << 2;
555}
556
557
bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


// Returns the (possibly negative) 16-bit offset field of an lw instruction.
int16_t Assembler::GetLwOffset(Instr instr) {
  ASSERT(IsLw(instr));
  return ((instr & kImm16Mask));
}
567
568
569Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
570  ASSERT(IsLw(instr));
571
572  // We actually create a new lw instruction based on the original one.
573  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
574      | (offset & kImm16Mask);
575
576  return temp_instr;
577}
578
579
bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


// Return a copy of the sw instruction with its 16-bit offset replaced.
Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  ASSERT(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


// Return a copy of the addiu instruction with its immediate replaced.
Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  ASSERT(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}
605
606
607int Assembler::target_at(int32_t pos) {
608  Instr instr = instr_at(pos);
609  if ((instr & ~kImm16Mask) == 0) {
610    // Emitted label constant, not part of a branch.
611    if (instr == 0) {
612       return kEndOfChain;
613     } else {
614       int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
615       return (imm18 + pos);
616     }
617  }
618  // Check we have a branch or jump instruction.
619  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
620  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
621  // the compiler uses arithmectic shifts for signed integers.
622  if (IsBranch(instr)) {
623    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
624
625    if (imm18 == kEndOfChain) {
626      // EndOfChain sentinel is returned directly, not relative to pc or pos.
627      return kEndOfChain;
628    } else {
629      return pos + kBranchPCOffset + imm18;
630    }
631  } else if (IsLui(instr)) {
632    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
633    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
634    ASSERT(IsOri(instr_ori));
635    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
636    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
637
638    if (imm == kEndOfJumpChain) {
639      // EndOfChain sentinel is returned directly, not relative to pc or pos.
640      return kEndOfChain;
641    } else {
642      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
643      int32_t delta = instr_address - imm;
644      ASSERT(pos > delta);
645      return pos - delta;
646    }
647  } else {
648    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
649    if (imm28 == kEndOfJumpChain) {
650      // EndOfChain sentinel is returned directly, not relative to pc or pos.
651      return kEndOfChain;
652    } else {
653      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
654      instr_address &= kImm28Mask;
655      int32_t delta = instr_address - imm28;
656      ASSERT(pos > delta);
657      return pos - delta;
658    }
659  }
660}
661
662
663void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
664  Instr instr = instr_at(pos);
665  if ((instr & ~kImm16Mask) == 0) {
666    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
667    // Emitted label constant, not part of a branch.
668    // Make label relative to Code* of generated Code object.
669    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
670    return;
671  }
672
673  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
674  if (IsBranch(instr)) {
675    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
676    ASSERT((imm18 & 3) == 0);
677
678    instr &= ~kImm16Mask;
679    int32_t imm16 = imm18 >> 2;
680    ASSERT(is_int16(imm16));
681
682    instr_at_put(pos, instr | (imm16 & kImm16Mask));
683  } else if (IsLui(instr)) {
684    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
685    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
686    ASSERT(IsOri(instr_ori));
687    uint32_t imm = (uint32_t)buffer_ + target_pos;
688    ASSERT((imm & 3) == 0);
689
690    instr_lui &= ~kImm16Mask;
691    instr_ori &= ~kImm16Mask;
692
693    instr_at_put(pos + 0 * Assembler::kInstrSize,
694                 instr_lui | ((imm & kHiMask) >> kLuiShift));
695    instr_at_put(pos + 1 * Assembler::kInstrSize,
696                 instr_ori | (imm & kImm16Mask));
697  } else {
698    uint32_t imm28 = (uint32_t)buffer_ + target_pos;
699    imm28 &= kImm28Mask;
700    ASSERT((imm28 & 3) == 0);
701
702    instr &= ~kImm26Mask;
703    uint32_t imm26 = imm28 >> 2;
704    ASSERT(is_uint26(imm26));
705
706    instr_at_put(pos, instr | (imm26 & kImm26Mask));
707  }
708}
709
710
// Debug helper: dump a label's state. For a linked label, walks the whole
// link chain on a local copy so *L itself is not consumed.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        // Emitted label constant.
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
733
734
// Bind label L to position |pos|: walk its link chain and patch every
// referring instruction. Branches whose target is out of 16-bit range are
// routed through a trampoline slot. Binding a previously linked label also
// releases the buffer-check budget reserved for it.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        // Too far for a direct branch: bounce through a trampoline slot,
        // allocated lazily and shared by all out-of-range links.
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      ASSERT(IsJ(instr) || IsLui(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
772
773
// Bind label L to the current emission position.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}
778
779
// Advance L to the next entry in its link chain, or mark it unused when the
// chain terminates.
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}
790
// A label is "near" only when it is already bound and within branch range
// (with a few instructions of slack); unbound labels are never near.
bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}
797
// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  // Any mode other than NONE carries relocation info and needs a register.
  return rmode != RelocInfo::NONE;
}
805
806
807void Assembler::GenInstrRegister(Opcode opcode,
808                                 Register rs,
809                                 Register rt,
810                                 Register rd,
811                                 uint16_t sa,
812                                 SecondaryField func) {
813  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
814  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
815      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
816  emit(instr);
817}
818
819
// Remaining register-type emitters. The msb/lsb variant places its two
// 5-bit immediates in the rd and sa fields; the FPU variants require the
// FPU feature to be enabled.
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


// FPU three-operand form: ft, fs -> fd.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


// Mixed GPR/FPU form: general register rt with FPU registers fs, fd.
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


// FPU control register form (e.g. moves to/from FCSR).
void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  ASSERT(fs.is_valid() && rt.is_valid());
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}
872
873
// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
// The 16-bit immediate may be either signed or unsigned, depending on the
// opcode; callers assert accordingly.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


// Variant with a secondary field (e.g. REGIMM branch conditions) in place
// of the rt register.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


// FPU load/store form: base register rs, FPU register ft, 16-bit offset.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


// Emit a J-type instruction (26-bit word address), keeping the trampoline
// pool out of the jump and its delay slot.
void Assembler::GenInstrJump(Opcode opcode,
                              uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
917
918
919// Returns the next free trampoline entry.
920int32_t Assembler::get_trampoline_entry(int32_t pos) {
921  int32_t trampoline_entry = kInvalidSlotPos;
922
923  if (!internal_trampoline_exception_) {
924    if (trampoline_.start() > pos) {
925     trampoline_entry = trampoline_.take_slot();
926    }
927
928    if (kInvalidSlotPos == trampoline_entry) {
929      internal_trampoline_exception_ = true;
930    }
931  }
932  return trampoline_entry;
933}
934
935
936uint32_t Assembler::jump_address(Label* L) {
937  int32_t target_pos;
938
939  if (L->is_bound()) {
940    target_pos = L->pos();
941  } else {
942    if (L->is_linked()) {
943      target_pos = L->pos();  // L's link.
944      L->link_to(pc_offset());
945    } else {
946      L->link_to(pc_offset());
947      return kEndOfJumpChain;
948    }
949  }
950
951  uint32_t imm = (uint32_t)buffer_ + target_pos;
952  ASSERT((imm & 3) == 0);
953
954  return imm;
955}
956
957
// Return the pc-relative byte offset for a branch to label L. An unbound
// label is linked to the current position and kEndOfChain is returned;
// linking a brand-new label also reserves trampoline budget for it.
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  // Offset is relative to the delay-slot instruction (kBranchPCOffset).
  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  ASSERT(is_int16(offset >> 2));

  return offset;
}
983
984
// Store a label "value" at buffer offset |at_offset|. For a bound label the
// stored word is the position relative to the Code object header; otherwise
// the word becomes a link in the label's chain (0 terminates the chain).
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}
1009
1010
//------- Branch and jump instructions --------

// Unconditional branch: pseudo-instruction beq zero_reg, zero_reg, offset.
void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


// Branch-and-link: pseudo-instruction bgezal zero_reg, offset (always taken).
void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}
1022
1023
// Conditional branch emitters. Each blocks the trampoline pool across the
// branch and reserves one more slot (BlockTrampolinePoolFor(1)) so nothing
// is emitted into the branch delay slot. Linking variants record source
// positions before emitting.
void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1080
1081
// Absolute jump within the current 256MB region; target must be word-aligned.
void Assembler::j(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(J, target >> 2);
}


// Jump to the address in register rs. A jr through 'ra' is a function
// return, so positions are recorded for it.
void Assembler::jr(Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (rs.is(ra)) {
    positions_recorder()->WriteRecordedPositions();
  }
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Absolute jump-and-link; target must be word-aligned within 256MB region.
void Assembler::jal(int32_t target) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(JAL, target >> 2);
}


// Jump-and-link through register rs, storing the return address in rd.
void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
1111
1112
1113//-------Data-processing-instructions---------
1114
1115// Arithmetic.
1116
// rd = rs + rt (no overflow trap).
void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


// rd = rs + sign-extended immediate j (no overflow trap).
// Note: the destination is passed in the instruction's rt field.
void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


// rd = rs - rt (no overflow trap).
void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


// rd = low 32 bits of rs * rt (SPECIAL2 three-operand multiply).
void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


// Signed multiply; 64-bit result goes to HI/LO.
void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


// Unsigned multiply; 64-bit result goes to HI/LO.
void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


// Signed divide; quotient to LO, remainder to HI.
void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


// Unsigned divide; quotient to LO, remainder to HI.
void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Logical.

// rd = rs & rt.
void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


// rt = rs & zero-extended immediate j.
void Assembler::andi(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ANDI, rs, rt, j);
}


// rd = rs | rt.
void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


// rt = rs | zero-extended immediate j.
void Assembler::ori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ORI, rs, rt, j);
}


// rd = rs ^ rt.
void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


// rt = rs ^ zero-extended immediate j.
void Assembler::xori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(XORI, rs, rt, j);
}


// rd = ~(rs | rt).
void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}
1192
1193
1194// Shifts.
// Shifts.

// rd = rt << sa (shift left logical by immediate).
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


// rd = rt << rs (shift left logical by register).
void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


// rd = rt >> sa (shift right logical by immediate).
void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


// rd = rt >> rs (shift right logical by register).
void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


// rd = rt >> sa (shift right arithmetic by immediate).
void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


// rd = rt >> rs (shift right arithmetic by register).
void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


// Rotate right by immediate. Hand-encoded: the 1 in the rs field selects
// the rotate variant of SRL (MIPS32R2 encoding).
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  ASSERT(mips32r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


// Rotate right by register. Hand-encoded: the 1 in the sa field selects
// the rotate variant of SRLV (MIPS32R2 encoding).
void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
  ASSERT(mips32r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
     | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}
1251
1252
1253//------------Memory-instructions-------------
1254
1255// Helper for base-reg + offset, when offset is larger than int16.
// Helper for base-reg + offset, when offset is larger than int16.
// Materializes src.rm() + src.offset_ into the scratch register 'at'
// (lui/ori to build the 32-bit offset, then addu of the base register).
// The base register must not itself be 'at'.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));
  lui(at, src.offset_ >> kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
1262
1263
// Load byte (sign-extended) from memory into rd.
void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


// Load byte (zero-extended) from memory into rd.
void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


// Load halfword (sign-extended) from memory into rd.
void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


// Load halfword (zero-extended) from memory into rd.
void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}
1302
1303
1304void Assembler::lw(Register rd, const MemOperand& rs) {
1305  if (is_int16(rs.offset_)) {
1306    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1307  } else {  // Offset > 16 bits, use multiple instructions to load.
1308    LoadRegPlusOffsetToAt(rs);
1309    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
1310  }
1311}
1312
1313
// Load word left (unaligned-access helper).
// NOTE(review): unlike lb/lw etc. there is no wide-offset fallback here;
// presumably callers guarantee a 16-bit offset -- confirm.
void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


// Load word right (unaligned-access helper); see offset note on lwl.
void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


// Store byte from rd to memory.
void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


// Store halfword from rd to memory.
void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


// Store word from rd to memory.
void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


// Store word left (unaligned-access helper); see offset note on lwl.
void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


// Store word right (unaligned-access helper); see offset note on lwl.
void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


// Load upper immediate: rd = j << 16.
// Note: the destination is passed in the instruction's rt field.
void Assembler::lui(Register rd, int32_t j) {
  GenInstrImmediate(LUI, zero_reg, rd, j);
}
1367
1368
1369//-------------Misc-instructions--------------
1370
1371// Break / Trap instructions.
// Break / Trap instructions.

// Emits a break instruction with the given 20-bit code. 'break_as_stop'
// must reflect whether the code lies in the simulator's stop-code range,
// so break and stop codes cannot be confused.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  ASSERT((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  ASSERT((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);  // Code in bits 6-25.
  emit(break_instr);
}


// Emits a stop: on real hardware a plain break, on the simulator a break
// followed by the message pointer (which the simulator reads and skips).
void Assembler::stop(const char* msg, uint32_t code) {
  ASSERT(code > kMaxWatchpointCode);
  ASSERT(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));  // Message pointer as raw word.
#endif
}
1401
1402
// Trap if rs >= rt (signed); 'code' is a 10-bit software field in bits 6-15.
void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs >= rt (unsigned).
void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs < rt (signed).
void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs < rt (unsigned).
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs == rt.
void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Trap if rs != rt.
void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}
1450
1451
1452// Move from HI/LO register.
1453
// rd = HI (move from multiply/divide HI register).
void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


// rd = LO (move from multiply/divide LO register).
void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.

// rd = (rs < rt) ? 1 : 0 (signed compare).
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


// rd = (rs < rt) ? 1 : 0 (unsigned compare).
void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


// rt = (rs < imm) ? 1 : 0 (signed compare with sign-extended immediate).
void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


// rt = (rs < imm) ? 1 : 0 (unsigned compare with sign-extended immediate).
void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}
1483
1484
1485// Conditional move.
// Conditional move.

// rd = rs if rt == 0.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


// rd = rs if rt != 0.
void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


// rd = rs if FPU condition flag 'cc' is true. The cc/true selector is
// packed into the rt field (cc in bits 2-4, tf bit = 1).
void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// rd = rs if FPU condition flag 'cc' is false. The cc/false selector is
// packed into the rt field (cc in bits 2-4, tf bit = 0).
void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Bit twiddling.

// rd = number of leading zeros in rs.
void Assembler::clz(Register rd, Register rs) {
  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}


// Insert bit field: copy 'size' low bits of rs into rt at bit 'pos'.
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(mips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


// Extract bit field: rt = 'size' bits of rs starting at bit 'pos'.
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(mips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
1531
1532
1533//--------Coprocessor-instructions----------------
1534
1535// Load, store, move.
// Load, store, move.

// Load single-precision word into FPU register fd.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


// Load a 64-bit double into the register pair starting at fd.
// NOTE(review): uses fd.code() + 1 for the high word -- presumably fd is
// always an even-numbered register here; confirm against callers.
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // load to two 32-bit loads.
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}


// Store single-precision word from FPU register fd.
void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


// Store a 64-bit double from the register pair starting at fd; see the
// register-pairing note on ldc1.
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // store to two 32-bit stores.
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
}


// Move word from GPR rt to FPU register fs.
void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


// Move word from FPU register fs to GPR rt.
void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


// Move word from GPR rt to FPU control register fs.
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


// Move word from FPU control register fs to GPR rt.
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}
1584
1585
1586// Arithmetic.
1587
// Double-precision FPU arithmetic: fd = fs OP ft (or unary on fs).

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
1626
1627
1628// Conversions.
1629
// FPU conversions: fd = fs converted between single (S), double (D),
// 32-bit word (W) and 64-bit long (L) formats; trunc/round/floor/ceil pick
// the rounding mode. The L-format variants require MIPS32R2.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


// NOTE(review): the remaining *_l_* emitters lack the ASSERT(mips32r2)
// their cvt_l_*/trunc_l_* siblings have -- confirm whether intentional.
void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}
1764
1765
1766// Conditions.
// Conditions.

// FPU compare: sets condition flag 'cc' from comparing fs with ft under
// 'cond' in format 'fmt'. The 3<<4 bits select the C.cond opcode class.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


// Compares src1 against the constant 0.0 (the only supported src2 value).
// Clobbers f14, which is loaded with the zero constant via mtc1/cvt_d_w.
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}
1786
1787
// Branch if FPU condition flag 'cc' is false (bit 16 = 0 selects false).
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Branch if FPU condition flag 'cc' is true (bit 16 = 1 selects true).
void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}
1802
1803
1804// Debugging.
1805void Assembler::RecordJSReturn() {
1806  positions_recorder()->WriteRecordedPositions();
1807  CheckBuffer();
1808  RecordRelocInfo(RelocInfo::JS_RETURN);
1809}
1810
1811
1812void Assembler::RecordDebugBreakSlot() {
1813  positions_recorder()->WriteRecordedPositions();
1814  CheckBuffer();
1815  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
1816}
1817
1818
1819void Assembler::RecordComment(const char* msg) {
1820  if (FLAG_code_comments) {
1821    CheckBuffer();
1822    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1823  }
1824}
1825
1826
1827int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
1828  Instr instr = instr_at(pc);
1829  ASSERT(IsJ(instr) || IsLui(instr));
1830  if (IsLui(instr)) {
1831    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
1832    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
1833    ASSERT(IsOri(instr_ori));
1834    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
1835    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
1836    if (imm == kEndOfJumpChain) {
1837      return 0;  // Number of instructions patched.
1838    }
1839    imm += pc_delta;
1840    ASSERT((imm & 3) == 0);
1841
1842    instr_lui &= ~kImm16Mask;
1843    instr_ori &= ~kImm16Mask;
1844
1845    instr_at_put(pc + 0 * Assembler::kInstrSize,
1846                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
1847    instr_at_put(pc + 1 * Assembler::kInstrSize,
1848                 instr_ori | (imm & kImm16Mask));
1849    return 2;  // Number of instructions patched.
1850  } else {
1851    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
1852    if ((int32_t)imm28 == kEndOfJumpChain) {
1853      return 0;  // Number of instructions patched.
1854    }
1855    imm28 += pc_delta;
1856    imm28 &= kImm28Mask;
1857    ASSERT((imm28 & 3) == 0);
1858
1859    instr &= ~kImm26Mask;
1860    uint32_t imm26 = imm28 >> 2;
1861    ASSERT(is_uint26(imm26));
1862
1863    instr_at_put(pc, instr | (imm26 & kImm26Mask));
1864    return 1;  // Number of instructions patched.
1865  }
1866}
1867
1868
// Grows the code buffer (4KB minimum, doubling up to 1MB, then +1MB steps),
// copies instructions and reloc info into the new buffer, rebases internal
// pointers, and re-relocates internal references by the move delta.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  // Instructions grow from the bottom; reloc info grows down from the top,
  // so it is copied to the end of the new buffer.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }

  ASSERT(!overflow());
}
1915
1916
// Emits a raw byte into the instruction stream.
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


// Emits a raw 32-bit word into the instruction stream.
void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
1929
1930
// Records relocation information for the current pc. External references
// are skipped unless serializing or --debug-code; CODE_TARGET_WITH_ID
// entries carry the recorded AST id instead of 'data'.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
1958
1959
// Blocks trampoline pool emission for the next 'instructions' instructions.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
1963
1964
// Emits the trampoline pool (at most once per Assembler): one lui/ori/jr/nop
// slot per unbound label, jumped over by a leading branch. If emission is
// currently blocked, only reschedules the next check.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump (2 instructions), then we emit trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      // One 4-instruction slot (lui/ori/jr/nop) per unbound label.
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32;
        imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}
2023
2024
2025Address Assembler::target_address_at(Address pc) {
2026  Instr instr1 = instr_at(pc);
2027  Instr instr2 = instr_at(pc + kInstrSize);
2028  // Interpret 2 instructions generated by li: lui/ori
2029  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
2030    // Assemble the 32 bit value.
2031    return reinterpret_cast<Address>(
2032        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
2033  }
2034
2035  // We should never get here, force a bad address if we do.
2036  UNREACHABLE();
2037  return (Address)0x0;
2038}
2039
2040
// Patches the 32-bit address held by the lui/ori pair at 'pc' to 'target',
// preserving the destination register, then flushes the instruction cache.
void Assembler::set_target_address_at(Address pc, Address target) {
  // On MIPS we patch the address into lui/ori instruction pair.

  // First check we have an li (lui/ori pair).
  Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);

  // Check we have indeed the result from a li with MustUseReg true.
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  uint32_t rt_code = GetRtField(instr2);  // Destination register, preserved.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

  // lui rt, high-16.
  // ori rt rt, low-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  CPU::FlushICache(pc, 2 * sizeof(int32_t));
}
2064
2065
2066} }  // namespace v8::internal
2067
2068#endif  // V8_TARGET_ARCH_MIPS
2069