// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "mips/assembler-mips-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= 1u << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

#ifdef __mips__
  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if (defined(__mips_hard_float) && __mips_hard_float != 0)
  answer |= 1u << FPU;
#endif  // defined(__mips_hard_float) && __mips_hard_float != 0
#endif  // def __mips__

  return answer;
}


void CpuFeatures::Probe() {
  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
                                CpuFeaturesImpliedByCompiler());
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    return;
  }

  // If the compiler is allowed to use FPU then we can use FPU too in our
  // code generation.
#if !defined(__mips__)
  // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
  if (FLAG_enable_fpu) {
    supported_ |= 1u << FPU;
  }
#else
  // Probe for additional features not already known to be available.
  if (OS::MipsCpuHasFeature(FPU)) {
    // This implementation also sets the FPU flags if
    // runtime detection of FPU returns true.
    supported_ |= 1u << FPU;
    found_by_runtime_probing_ |= 1u << FPU;
  }
#endif
}


int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction pair, and
  // that is always the case inside code objects.
  return true;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!HEAP->InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka the Pop() operation, or part of a Pop(r)
// operation, as a post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of a Push(r) operation, as a pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
      | (0 & kImm16Mask);

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (0 & kImm16Mask);

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;


// Spare buffer.
static const int kMinimalBufferSize = 4 * KB;


Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
    : AssemblerBase(arg_isolate),
      positions_recorder_(this),
      emit_debug_code_(FLAG_debug_code) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Set up buffer pointers.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for the BlockTrampolinePoolScope buffer.
  next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = false;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


Assembler::~Assembler() {
  if (own_buffer_) {
    if (isolate()->assembler_spare_buffer() == NULL &&
        buffer_size_ == kMinimalBufferSize) {
      isolate()->set_assembler_spare_buffer(buffer_);
    } else {
      DeleteArray(buffer_);
    }
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // There is no known advantage to aligning branch/call targets beyond
  // single-instruction alignment on MIPS.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}

// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value of -1 in the instruction,
// which is an otherwise illegal value (a branch to -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in code
// it is converted to an 18-bit value addressing bytes, hence the -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  uint32_t label_constant = GetLabelConst(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}

bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  ASSERT(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rs = GetRs(instr);
  uint32_t sa = GetSa(instr);

  // nop(type) == sll(zero_reg, zero_reg, type);
  // Technically all these values will be 0 but
  // this makes more sense to the reader.

  bool ret = (opcode == SLL &&
              rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              sa == type);

  return ret;
}


int32_t Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  return ((int16_t)(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  ASSERT(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  ASSERT(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  ASSERT(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  ASSERT(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


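// Returns the target position for the instruction at pos: either the absolute
// position of a bound target, or the link to the previous instruction in the
// label's chain (kEndOfChain terminates the chain).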
int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      ASSERT(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int32_t delta = instr_address - imm28;
      ASSERT(pos > delta);
      return pos - delta;
    }
  }
}


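// Patches the instruction at pos (emitted label constant, branch, jump, or
// lui/ori pair) so that it targets target_pos, preserving all
// non-immediate bits.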
void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    ASSERT((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    ASSERT(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  } else {
    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      ASSERT(IsJ(instr) || IsLui(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}

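// Returns true if a branch emitted at the current pc can reach the bound
// label L with a 16-bit, word-scaled branch offset.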
bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}

// We have to use a temporary register for things that can be relocated even
// if they can be encoded in a MIPS instruction's 16-bit immediate-offset
// field. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return rmode != RelocInfo::NONE;
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  ASSERT(fs.is_valid() && rt.is_valid());
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  ASSERT(CpuFeatures::IsEnabled(FPU));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


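// Returns the absolute address that a jump to label L should use, suitable
// for a lui/ori pair. For an unbound label the label is linked and the
// kEndOfJumpChain sentinel is returned instead.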
uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  ASSERT((imm & 3) == 0);

  return imm;
}


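// Computes the byte offset from the branch at the current pc to label L's
// target (or to the previous link in L's chain when L is not yet bound).
// Unbound labels are linked and kEndOfChain is returned.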
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  ASSERT(is_int16(offset >> 2));

  return offset;
}


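// Emits a label constant at at_offset: the label's position relative to the
// Code object header when bound, otherwise a link in the label's chain.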
void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (rs.is(ra)) {
    positions_recorder()->WriteRecordedPositions();
  }
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j_or_jr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;

  if (in_range) {
    j(target);
  } else {
    jr(t9);
  }
}


void Assembler::jal_or_jalr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;

  if (in_range) {
    jal(target);
  } else {
    jalr(t9);
  }
}


//-------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


//------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));
  lui(at, src.offset_ >> kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


//-------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  ASSERT((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  ASSERT((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


void Assembler::stop(const char* msg, uint32_t code) {
  ASSERT(code > kMaxWatchpointCode);
  ASSERT(code <= kMaxStopCode);
#if defined(V8_HOST_ARCH_MIPS)
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}


// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


//--------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // load to two 32-bit loads.
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // store to two 32-bit stores.
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
}


void Assembler::mtc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


void Assembler::mfc1(Register rt, FPURegister fs) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


void Assembler::ctc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CTC1, rt, fs);
}


void Assembler::cfc1(Register rt, FPUControlRegister fs) {
  GenInstrRegister(COP1, CFC1, rt, fs);
}

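// Splits the IEEE 754 bit pattern of d into its low and high 32-bit words.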
1671void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
1672  uint64_t i;
1673  memcpy(&i, &d, 8);
1674
1675  *lo = i & 0xffffffff;
1676  *hi = i >> 32;
1677}
1678
1679// Arithmetic.
1680
1681void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1682  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1683}
1684
1685
1686void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1687  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1688}
1689
1690
1691void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1692  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1693}
1694
1695
1696void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1697  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1698}
1699
1700
1701void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1702  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
1703}
1704
1705
1706void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1707  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1708}
1709
1710
1711void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1712  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1713}
1714
1715
1716void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1717  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
1718}
1719
1720
1721// Conversions.
1722
1723void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
1724  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
1725}
1726
1727
1728void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
1729  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
1730}
1731
1732
1733void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
1734  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
1735}
1736
1737
1738void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
1739  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
1740}
1741
1742
1743void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
1744  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
1745}
1746
1747
1748void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
1749  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
1750}
1751
1752
1753void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
1754  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
1755}
1756
1757
1758void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
1759  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
1760}
1761
1762
1763void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
1764  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1765}
1766
1767
1768void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1769  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1770}
1771
1772
1773void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
1774  ASSERT(kArchVariant == kMips32r2);
1775  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1776}
1777
1778
1779void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
1780  ASSERT(kArchVariant == kMips32r2);
1781  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
1782}
1783
1784
1785void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
1786  ASSERT(kArchVariant == kMips32r2);
1787  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
1788}
1789
1790
1791void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
1792  ASSERT(kArchVariant == kMips32r2);
1793  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
1794}
1795
1796
1797void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1798  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1799}
1800
1801
1802void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
1803  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
1804}
1805
1806
1807void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
1808  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
1809}
1810
1811
1812void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
1813  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
1814}
1815
1816
1817void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
1818  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
1819}
1820
1821
1822void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
1823  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1824}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions.
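// Usage sketch (assuming the Label-taking bc1t/bc1f overloads declared in
// assembler-mips.h):
//   c(OLT, D, f12, f14);  // Set FP condition flag cc(0) if f12 < f14.
//   bc1t(&target);        // Branch if the flag is set.
//   nop();                // Branch delay slot.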
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


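// Note: fcmp only supports comparison against the constant 0.0, and it
// clobbers f14, which is used as a scratch register to hold the zero value.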
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(CpuFeatures::IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


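// Patch an internal reference (a lui/ori pair or a j/jal instruction) after
// the code buffer has moved by pc_delta bytes. Returns the number of
// instructions patched, or 0 for an end-of-jump-chain marker.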
int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
  Instr instr = instr_at(pc);
  ASSERT(IsJ(instr) || IsLui(instr));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
    return 2;  // Number of instructions patched.
  } else {
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
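  // Growth policy: start at 4 KB, double until 1 MB, then grow linearly in
  // 1 MB steps (e.g. 3 KB -> 4 KB, 256 KB -> 512 KB, 2 MB -> 3 MB).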
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate internal references, since the code has moved by pc_delta bytes.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }

  ASSERT(!overflow());
}


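// db() and dd() emit raw data (a byte / a 32-bit word, respectively) directly
// into the instruction stream, e.g. for inline jump tables.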
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust this range check when new reloc modes are added.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}




void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
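  //
  // Each trampoline slot emitted below has the form:
  //   lui at, hi(target)
  //   ori at, at, lo(target)
  //   jr  at
  //   nop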
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump (2 instructions), then we emit the trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32;
        imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until the associated instructions are emitted and
          // available to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline once, we need to prevent
      // any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels at this point is zero, so we
    // can move the next buffer check to the maximum.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
}


Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Interpret the two instructions generated by li: lui/ori.
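  // For example, lui t9, 0x1234 followed by ori t9, t9, 0x5678 reassembles
  // to the target address 0x12345678.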
  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
    // Assemble the 32 bit value.
    return reinterpret_cast<Address>(
        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
  }

  // We should never get here; force a bad address if we do.
  UNREACHABLE();
  return reinterpret_cast<Address>(0x0);
}


// MIPS and ia32 use opposite encodings for qNaN and sNaN: an ia32 qNaN is a
// MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running from a heap snapshot
// generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
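// For example, the ia32 qNaN pattern 0x7FF8000000000000 has the top mantissa
// bit set, which the (legacy-NaN) MIPS FPU interprets as signaling.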
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(OS::nan_value());
}


// On MIPS, a target address is stored in a lui/ori instruction pair, each of
// which loads 16 bits of the 32-bit address into a register.
// Patching the address must replace both instructions and flush the i-cache.
//
// There is an optimization below which, when the target lies in the same
// 256 MB region as the following jump, converts a jump through a register
// (jalr/jr) into a direct jump (jal/j); see the detailed comment in the body.
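// For example, patching in the target 0x12345678 rewrites the pair to:
//   lui rt, 0x1234
//   ori rt, rt, 0x5678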
void Assembler::set_target_address_at(Address pc, Address target) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check that we have the result of a li macro-instruction (a lui/ori pair).
  Instr instr1 = instr_at(pc);
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  // Must use 2 instructions to ensure patchable code => just use lui and ori.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  // The following code is an optimization for the common case of Call()
  // or Jump(), which loads the target into a register and jumps through it:
  //     li(t9, address); jalr(t9)    (or jr(t9)).
  // If the destination address is in the same 256 MB region as the call, it
  // is faster to do a direct jal (or j) than to jump through a register,
  // since that lets the CPU pipeline prefetch the target address. However,
  // each time the address above is patched, we also have to patch the direct
  // jal/j instruction, and possibly revert to jalr/jr if we now cross a
  // 256 MB boundary. Note that with the jal/j instructions we do not need to
  // load the register, but that code is left in place, since it makes it easy
  // to revert this process. A further optimization could try replacing the
  // li sequence with nops.
  // This optimization can only be applied if the rt code from instr2 is the
  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
  // the MIPS return. Occasionally this lands after an li().
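  //
  // For example, when the target is in range:
  //   lui  t9, hi(target)
  //   ori  t9, t9, lo(target)
  //   jalr t9            <- patched to: jal target
  //   nop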

  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
  bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
  uint32_t target_field = (itarget & kJumpAddrMask) >> kImmFieldShift;
  bool patched_jump = false;

#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
  // This is a workaround for the 24k core E156 bug (it also affects some 34k
  // cores). Since the excluded space is only 64 KB out of 256 MB (0.02 %), we
  // simply apply the workaround for all cores, so we do not have to identify
  // the core.
  if (in_range) {
    // The 24k core E156 bug has some very specific requirements; we only
    // check the simplest condition: whether the address of the delay-slot
    // instruction is in the first or last 32 KB of the 256 MB segment.
    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
    uint32_t ipc_segment_addr = ipc & segment_mask;
    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask) {
      in_range = false;
    }
  }
#endif

  if (IsJalr(instr3)) {
    // Try to convert JALR to JAL.
    if (in_range && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = JAL | target_field;
      patched_jump = true;
    }
  } else if (IsJr(instr3)) {
    // Try to convert JR to J, skipping returns (jr ra).
    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = J | target_field;
      patched_jump = true;
    }
  } else if (IsJal(instr3)) {
    if (in_range) {
      // We are patching an already converted JAL.
      *(p+2) = JAL | target_field;
    } else {
      // Patching JAL, but out of range; revert to JALR.
      // The JALR rs reg is the rt reg specified in the ORI instruction.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    }
    patched_jump = true;
  } else if (IsJ(instr3)) {
    if (in_range) {
      // We are patching an already converted J (jump).
      *(p+2) = J | target_field;
    } else {
      // Patching J, but out of range; revert to JR.
      // The JR 'rs' reg is the 'rt' reg specified in the ORI instruction
      // (instr2).
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      *(p+2) = SPECIAL | rs_field | JR;
    }
    patched_jump = true;
  }

  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}


void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // A jump to a label may follow at pc + 2 * kInstrSize.
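  // If such a jump is present, revert it to a register jump (jalr/jr) so that
  // the target can again be changed by patching the lui/ori pair.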
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p+2) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    // Flush the patched instruction, which lives at pc + 2 * kInstrSize
    // (pc is a byte pointer, so the offset must be expressed in bytes).
    CPU::FlushICache(pc + 2 * kInstrSize, sizeof(Instr));
  }
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS
