// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.


#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "mips/assembler-mips-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

CpuFeatures::CpuFeatures()
    : supported_(0),
      enabled_(0),
      found_by_runtime_probing_(0) {
}

void CpuFeatures::Probe(bool portable) {
  // If the compiler is allowed to use the FPU, then we can use the FPU in
  // our code generation as well.
#if !defined(__mips__)
  // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
  if (FLAG_enable_fpu) {
    supported_ |= 1u << FPU;
  }
#else
  if (portable && Serializer::enabled()) {
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    return;  // No features if we might serialize.
  }

  if (OS::MipsCpuHasFeature(FPU)) {
    // This implementation also sets the FPU flags if
    // runtime detection of FPU returns true.
    supported_ |= 1u << FPU;
    found_by_runtime_probing_ |= 1u << FPU;
  }

  if (!portable) found_by_runtime_probing_ = 0;
#endif
}


int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // s8_fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    s8_fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!HEAP->InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4), aka a Pop() operation, or part of a Pop(r)
// operation, as a post-increment of sp.
const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
      | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4), part of a Push(r) operation as a pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
      | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
      | (0 & kImm16Mask);
// lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
      | (0 & kImm16Mask);

const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
      | (0 & kImm16Mask);

const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
      | (0 & kImm16Mask);

const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
      | (kNegOffset & kImm16Mask);

const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
      | (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
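
// A worked example of these encodings (a sketch, assuming the usual MIPS32
// field layout: ADDIU = 0x24000000, sp = register 29, kRsShift = 21,
// kRtShift = 16, and kPointerSize = 4):
//   kPushInstruction = 0x24000000 | (29 << 21) | (29 << 16) | 0xfffc
//                    = 0x27bdfffc   (addiu sp, sp, -4)
//   kPopInstruction  = 0x24000000 | (29 << 21) | (29 << 16) | 0x0004
//                    = 0x27bd0004   (addiu sp, sp, 4)
// The peephole code below matches candidate instructions against these
// patterns, masking out the Rt field with kRtMask where the register varies.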


// Spare buffer.
static const int kMinimalBufferSize = 4 * KB;


Assembler::Assembler(void* buffer, int buffer_size)
    : AssemblerBase(Isolate::Current()),
      positions_recorder_(this),
      allow_peephole_optimization_(false) {
  // BUG(3245989): disable peephole optimization if crankshaft is enabled.
  allow_peephole_optimization_ = FLAG_peephole_optimization;
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (isolate()->assembler_spare_buffer() != NULL) {
        buffer = isolate()->assembler_spare_buffer();
        isolate()->set_assembler_spare_buffer(NULL);
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use the externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Set up buffer pointers.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
}


Assembler::~Assembler() {
  if (own_buffer_) {
    if (isolate()->assembler_spare_buffer() == NULL &&
        buffer_size_ == kMinimalBufferSize) {
      isolate()->set_assembler_spare_buffer(buffer_);
    } else {
      DeleteArray(buffer_);
    }
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up the code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
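
// Usage sketch (not one of this file's call sites): Align(8) pads the
// instruction stream with nops until pc_offset() is a multiple of 8, e.g.
// a pc_offset() of 20 becomes 24 after one emitted nop.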


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than a
  // single instruction, that I am aware of.
  Align(4);
}


Register Assembler::GetRt(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtMask) >> kRtShift;
  return rt;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value of -1 in the instruction's offset
// field, which is an otherwise illegal value (a branch to -1 is an infinite
// loop). The instruction's 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the -4
// value.

const int kEndOfChain = -4;
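
// To illustrate the conversion (a sketch of the arithmetic used below in
// target_at): an offset field of 0xffff is sign-extended and scaled by 4
// via ((0xffff << 16) >> 14), which yields -4 == kEndOfChain, i.e. the
// in-instruction terminator -1 expressed as a byte offset.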


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = ((instr & kOpcodeMask));
  uint32_t rt_field = ((instr & kRtFieldMask));
  uint32_t rs_field = ((instr & kRsFieldMask));
  uint32_t label_constant = (instr & ~kImm16Mask);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
      label_constant == 0;  // Emitted label constant in the reg-exp engine.
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  ASSERT(type < 32);
  uint32_t opcode = ((instr & kOpcodeMask));
  uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
  uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
  uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

  // nop(type) == sll(zero_reg, zero_reg, type);
  // Technically all these values will be 0 but
  // this makes more sense to the reader.

  bool ret = (opcode == SLL &&
              rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              sa == type);

  return ret;
}


int32_t Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  return ((int16_t)(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  ASSERT(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  ASSERT(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  ASSERT(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  ASSERT(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check that we have a branch instruction.
  ASSERT(IsBranch(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

  if (imm18 == kEndOfChain) {
    // The EndOfChain sentinel is returned directly, not relative to pc or pos.
    return kEndOfChain;
  } else {
    return pos + kBranchPCOffset + imm18;
  }
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make the label relative to the Code* of the generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(IsBranch(instr));
  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
  ASSERT((imm18 & 3) == 0);

  instr &= ~kImm16Mask;
  int32_t imm16 = imm18 >> 2;
  ASSERT(is_int16(imm16));

  instr_at_put(pos, instr | (imm16 & kImm16Mask));
}
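
// A worked example of the patch arithmetic above (a sketch, assuming
// kBranchPCOffset == 4, i.e. branch offsets are relative to the delay-slot
// instruction): binding a branch emitted at pos == 16 to target_pos == 64
// gives imm18 = 64 - (16 + 4) = 44, so imm16 = 44 >> 2 = 11 is written into
// the instruction's 16-bit offset field.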


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    if (dist > kMaxBranchOffset) {
      do {
        int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      } while (dist > kMaxBranchOffset);
    } else if (dist < -kMaxBranchOffset) {
      do {
        int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
        ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      } while (dist < -kMaxBranchOffset);
    }
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // Appendix should not be used anymore.
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // A label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  ASSERT(link > 0 || link == kEndOfChain);
  if (link == kEndOfChain) {
    L->Unuse();
  } else if (link > 0) {
    L->link_to(link);
  }
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return rmode != RelocInfo::NONE;
}
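
// For example (a sketch; the li() pseudo-instruction lives in the
// MacroAssembler layer): loading a relocatable embedded object pointer is
// emitted as a lui/ori pair,
//   lui(at, <hi16 of pointer>);
//   ori(at, at, <lo16 of pointer>);
// so the serializer can later patch the full 32-bit immediate in place, even
// when the current value would happen to fit in 16 bits.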


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  ASSERT(fs.is_valid() && rt.is_valid());
  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with an immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}
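
// Encoding sketch for the J-type helper above: the caller passes a word
// address, so a jump to byte offset 0x100 within the current 256 MB region
// is emitted as GenInstrJump(J, 0x100 >> 2), i.e. the 26-bit address field
// holds 0x40.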


// Returns the next free label entry from the next trampoline pool.
int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
  int trampoline_count = trampolines_.length();
  int32_t label_entry = 0;
  ASSERT(trampoline_count > 0);

  if (next_pool) {
    for (int i = 0; i < trampoline_count; i++) {
      if (trampolines_[i].start() > pos) {
        label_entry = trampolines_[i].take_label();
        break;
      }
    }
  } else {  // Caller needs a label entry from the previous pool.
    for (int i = trampoline_count - 1; i >= 0; i--) {
      if (trampolines_[i].end() < pos) {
        label_entry = trampolines_[i].take_label();
        break;
      }
    }
  }
  return label_entry;
}


// Returns the next free trampoline entry from the next trampoline pool.
int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
  int trampoline_count = trampolines_.length();
  int32_t trampoline_entry = 0;
  ASSERT(trampoline_count > 0);

  if (next_pool) {
    for (int i = 0; i < trampoline_count; i++) {
      if (trampolines_[i].start() > pos) {
        trampoline_entry = trampolines_[i].take_slot();
        break;
      }
    }
  } else {  // Caller needs a trampoline entry from the previous pool.
    for (int i = trampoline_count - 1; i >= 0; i--) {
      if (trampolines_[i].end() < pos) {
        trampoline_entry = trampolines_[i].take_slot();
        break;
      }
    }
  }
  return trampoline_entry;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  int32_t pc_offset_v = pc_offset();

  if (L->is_bound()) {
    target_pos = L->pos();
    int32_t dist = pc_offset_v - target_pos;
    if (dist > kMaxBranchOffset) {
      do {
        int32_t trampoline_pos = get_trampoline_entry(target_pos);
        ASSERT((trampoline_pos - target_pos) > 0);
        ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
        target_at_put(trampoline_pos, target_pos);
        target_pos = trampoline_pos;
        dist = pc_offset_v - target_pos;
      } while (dist > kMaxBranchOffset);
    } else if (dist < -kMaxBranchOffset) {
      do {
        int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
        ASSERT((target_pos - trampoline_pos) > 0);
        ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
        target_at_put(trampoline_pos, target_pos);
        target_pos = trampoline_pos;
        dist = pc_offset_v - target_pos;
      } while (dist < -kMaxBranchOffset);
    }
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t dist = pc_offset_v - target_pos;
      if (dist > kMaxBranchOffset) {
        do {
          int32_t label_pos = get_label_entry(target_pos);
          ASSERT((label_pos - target_pos) < kMaxBranchOffset);
          label_at_put(L, label_pos);
          target_pos = label_pos;
          dist = pc_offset_v - target_pos;
        } while (dist > kMaxBranchOffset);
      } else if (dist < -kMaxBranchOffset) {
        do {
          int32_t label_pos = get_label_entry(target_pos, false);
          ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
          label_at_put(L, label_pos);
          target_pos = label_pos;
          dist = pc_offset_v - target_pos;
        } while (dist < -kMaxBranchOffset);
      }
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  ASSERT(is_int16(offset >> 2));

  return offset;
}
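
// Note on usage (a sketch; the shifted variant is declared in
// assembler-mips.h): branch_offset() returns a byte offset, while the branch
// emitters below take a word offset, so call sites shift right by two first,
// e.g.
//   beq(a0, a1, branch_offset(&L, false) >> 2);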


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (rs.is(ra)) {
    positions_recorder()->WriteRecordedPositions();
  }
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::jal(int32_t target) {
  positions_recorder()->WriteRecordedPositions();
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


//-------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);

  // Eliminate pattern: push(r), pop().
  //   addiu(sp, sp, Operand(-kPointerSize));
  //   sw(src, MemOperand(sp, 0));
  //   addiu(sp, sp, Operand(kPointerSize));
  // All three instructions can be eliminated.
  if (can_peephole_optimize(3) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
      (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
    pc_ -= 3 * kInstrSize;
    if (FLAG_print_peephole_optimization) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }

  // Eliminate pattern: push(ry), pop(rx).
  //   addiu(sp, sp, -kPointerSize);
  //   sw(ry, MemOperand(sp, 0));
  //   lw(rx, MemOperand(sp, 0));
  //   addiu(sp, sp, kPointerSize);
  // All four instructions can be eliminated if ry == rx.
  // If ry != rx, a register copy from ry to rx is inserted
  // after eliminating the push and the pop instructions.
  if (can_peephole_optimize(4)) {
    Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
    Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
    Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);

    if (IsPush(push_instr) &&
        IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
        post_pop_sp_set == kPopInstruction) {
      if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
        // For a consecutive push and pop on different registers,
        // we delete both the push & pop and insert a register move.
        // push ry, pop rx --> mov rx, ry.
        Register reg_pushed, reg_popped;
        reg_pushed = GetRt(push_instr);
        reg_popped = GetRt(pop_instr);
        pc_ -= 4 * kInstrSize;
        // Insert a mov instruction, which is better than a pair of push & pop.
        or_(reg_popped, reg_pushed, zero_reg);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
                 pc_offset());
        }
      } else {
        // For a consecutive push and pop on the same register,
        // both the push and the pop can be deleted.
        pc_ -= 4 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(5)) {
    Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
    Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
    Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
    Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);

    if (IsPush(mem_write_instr) &&
        pre_push_sp_set == kPushInstruction &&
        IsPop(mem_read_instr) &&
        post_pop_sp_set == kPopInstruction) {
      if ((IsLwRegFpOffset(lw_instr) ||
           IsLwRegFpNegOffset(lw_instr))) {
        if ((mem_write_instr & kRtMask) ==
            (mem_read_instr & kRtMask)) {
          // Pattern: push & pop from/to the same register,
          // with an fp+offset lw in between.
          //
          // The following:
          // addiu sp, sp, -4
          // sw rx, [sp, #0]
          // lw rz, [fp, #-24]
          // lw rx, [sp, #0]
          // addiu sp, sp, 4
          //
          // Becomes:
          // if(rx == rz)
          //   delete all
          // else
          //   lw rz, [fp, #-24]

          if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
            pc_ -= 5 * kInstrSize;
          } else {
            pc_ -= 5 * kInstrSize;
            // Reinsert the lw rz.
            emit(lw_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
          }
        } else {
          // Pattern: push & pop from/to different registers
          // with an fp + offset lw in between.
          //
          // The following:
          // addiu sp, sp, -4
          // sw rx, [sp, #0]
          // lw rz, [fp, #-24]
          // lw ry, [sp, #0]
          // addiu sp, sp, 4
          //
          // Becomes:
          // if(ry == rz)
          //   mov ry, rx
          // else if(rx != rz)
          //   lw rz, [fp, #-24]
          //   mov ry, rx
          // else if((ry != rz) || (rx == rz))
          //   mov ry, rx
          //   lw rz, [fp, #-24]

          Register reg_pushed, reg_popped;
          if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
            reg_pushed = GetRt(mem_write_instr);
            reg_popped = GetRt(mem_read_instr);
            pc_ -= 5 * kInstrSize;
            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
          } else if ((mem_write_instr & kRtMask)
                         != (lw_instr & kRtMask)) {
            reg_pushed = GetRt(mem_write_instr);
            reg_popped = GetRt(mem_read_instr);
            pc_ -= 5 * kInstrSize;
            emit(lw_instr);
            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
          } else if (((mem_read_instr & kRtMask)
                          != (lw_instr & kRtMask)) ||
                     ((mem_write_instr & kRtMask)
                          == (lw_instr & kRtMask))) {
            reg_pushed = GetRt(mem_write_instr);
            reg_popped = GetRt(mem_read_instr);
            pc_ -= 5 * kInstrSize;
            or_(reg_popped, reg_pushed, zero_reg);  // Move instruction.
            emit(lw_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
          }
        }
      }
    }
  }
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  ASSERT(mips32r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
  ASSERT(mips32r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


//------------Memory-instructions-------------

// Helper for base-reg + offset, when the offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));
  lui(at, src.offset_ >> kLuiShift);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
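
// For example (a sketch): with base register a0 and offset 0x12345678, the
// helper emits
//   lui  at, 0x1234
//   ori  at, at, 0x5678
//   addu at, at, a0
// after which the memory op is issued as, e.g., lw(rd, MemOperand(at, 0)).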


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }

  if (can_peephole_optimize(2)) {
    Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);

    if ((IsSwRegFpOffset(sw_instr) &&
         IsLwRegFpOffset(lw_instr)) ||
        (IsSwRegFpNegOffset(sw_instr) &&
         IsLwRegFpNegOffset(lw_instr))) {
      if ((lw_instr & kLwSwInstrArgumentMask) ==
          (sw_instr & kLwSwInstrArgumentMask)) {
        // Pattern: lw/sw same fp+offset, same register.
        //
        // The following:
        // sw rx, [fp, #-12]
        // lw rx, [fp, #-12]
        //
        // Becomes:
        // sw rx, [fp, #-12]

        pc_ -= 1 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
        }
      } else if ((lw_instr & kLwSwOffsetMask) ==
                 (sw_instr & kLwSwOffsetMask)) {
        // Pattern: lw/sw same fp+offset, different register.
        //
        // The following:
        // sw rx, [fp, #-12]
        // lw ry, [fp, #-12]
        //
        // Becomes:
        // sw rx, [fp, #-12]
        // mov ry, rx

        Register reg_stored, reg_loaded;
        reg_stored = GetRt(sw_instr);
        reg_loaded = GetRt(lw_instr);
        pc_ -= 1 * kInstrSize;
        // Insert a mov instruction, which is better than lw.
        or_(reg_loaded, reg_stored, zero_reg);  // Move instruction.
        if (FLAG_print_peephole_optimization) {
          PrintF("%x sw/lw (fp + same offset), diff reg\n", pc_offset());
        }
      }
    }
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }

  // Eliminate pattern: pop(), push(r).
  //     addiu sp, sp, Operand(kPointerSize);
  //     addiu sp, sp, Operand(-kPointerSize);
  // ->  sw r, MemOperand(sp, 0);
  if (can_peephole_optimize(3) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) ==
          (kPushRegPattern | (rd.code() << kRtShift)) &&
      instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
      instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
    pc_ -= 3 * kInstrSize;
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
    if (FLAG_print_peephole_optimization) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


//-------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code) {
  ASSERT((code & ~0xfffff) == 0);
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}


// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0003) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0003) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
  // The clz instr requires the same GPR number in the 'rd' and 'rt' fields.
  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // The ins instr has the 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(mips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // The ext instr has the 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(mips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


//--------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for the non-8-byte alignment of HeapNumber: convert the 64-bit
  // load into two 32-bit loads.
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
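
// Expansion sketch (assuming FR=0 mode, where an even/odd FPU register pair
// holds a double): ldc1(f0, MemOperand(a0, 8)) is emitted as
//   lwc1 f0, 8(a0)    // low word
//   lwc1 f1, 12(a0)   // high word (on a little-endian target)
// which tolerates 4-byte-aligned addresses that a real ldc1 would fault on.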
1599
1600
1601void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1602  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1603}
1604
1605
1606void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1607  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1608  // store to two 32-bit stores.
1609  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1610  FPURegister nextfpreg;
1611  nextfpreg.setcode(fd.code() + 1);
1612  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
1613}
1614
1615
1616void Assembler::mtc1(Register rt, FPURegister fs) {
1617  GenInstrRegister(COP1, MTC1, rt, fs, f0);
1618}
1619
1620
1621void Assembler::mfc1(Register rt, FPURegister fs) {
1622  GenInstrRegister(COP1, MFC1, rt, fs, f0);
1623}
1624
1625
1626void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1627  GenInstrRegister(COP1, CTC1, rt, fs);
1628}
1629
1630
1631void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1632  GenInstrRegister(COP1, CFC1, rt, fs);
1633}
1634
1635
1636// Arithmetic.
1637
1638void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1639  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1640}
1641
1642
1643void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1644  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1645}
1646
1647
1648void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1649  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1650}
1651
1652
1653void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1654  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1655}
1656
1657
1658void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1659  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
1660}
1661
1662
1663void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1664  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1665}
1666
1667
1668void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1669  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1670}
1671
1672
1673void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1674  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
1675}
1676
1677
1678// Conversions.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  ASSERT(mips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions.
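// Compare fs and ft in format fmt under |cond| (c.cond.fmt) and store the
// result in FPU condition-code bit cc.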
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


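// Compare src1 against 0.0 (the only supported src2 value) under |cond|.
// Clobbers f14, which is used to materialize the zero operand as a double.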
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  ASSERT(isolate()->cpu_features()->IsSupported(FPU));
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


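// Branch on FPU condition-code bit cc false (bc1f) or true (bc1t). The
// 16-bit offset is counted in instructions, relative to the delay-slot
// instruction.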
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Debugging.
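// Record reloc info so the debugger can later locate (and patch) JS return
// sites and debug-break slots.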
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
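  // Policy: at least 4KB; double the size while below 1MB; beyond that grow
  // linearly by 1MB per step.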
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up the new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // On ia32 and ARM pc-relative addressing is used, which would require a
  // shift by pc_delta here. On MIPS the target address is loaded directly,
  // so nothing needs to be relocated.

  ASSERT(!overflow());
}


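// Emit raw 8-bit / 32-bit data directly into the instruction stream; the
// data is not an instruction and is not relocated.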
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust this code when new reloc modes are added.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    reloc_info_writer.Write(&rinfo);
  }
}


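// Prevent a trampoline pool from being emitted for the next |instructions|
// instructions, e.g. (hypothetical caller) to keep a two-instruction
// address-materializing sequence from being split:
//
//   assm->BlockTrampolinePoolFor(2);
//   assm->lui(at, hi16);      // hi16/lo16 are placeholder immediates
//   assm->ori(at, at, lo16);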
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool(bool force_emit) {
  // Calculate the offset of the next check.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  int dist = pc_offset() - last_trampoline_pool_end_;

  if (dist <= kMaxDistBetweenPools && !force_emit) {
    return;
  }

  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  // First we emit a jump (2 instructions), then we emit the trampoline pool.
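  // Pool layout: kSlotsPerTrampoline branch slots (b + delay-slot nop)
  // followed by kLabelsPerTrampoline raw words used as label slots.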
  { BlockTrampolinePoolScope block_trampoline_pool(this);
    Label after_pool;
    b(&after_pool);
    nop();

    int pool_start = pc_offset();
    for (int i = 0; i < kSlotsPerTrampoline; i++) {
      b(&after_pool);
      nop();
    }
    for (int i = 0; i < kLabelsPerTrampoline; i++) {
      emit(0);
    }
    last_trampoline_pool_end_ = pc_offset() - kInstrSize;
    bind(&after_pool);
    trampolines_.Add(Trampoline(pool_start,
                                kSlotsPerTrampoline,
                                kLabelsPerTrampoline));

    // Since a trampoline pool was just emitted,
    // move the check offset forward by the standard interval.
    next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
  }
  return;
}


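// Returns the target address materialized by the two-instruction li sequence
// at |pc|, which is one of:
//   lui rt, hi16 ; ori rt, rt, lo16    (general 32-bit value)
//   nop          ; addiu/ori/lui rt    (value fits in a single instruction)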
Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Check that we have the 2 instructions generated by li.
  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
                                   (instr2 & kOpcodeMask) == ORI ||
                                   (instr2 & kOpcodeMask) == LUI)));
  // Interpret these 2 instructions.
  if (instr1 == nopInstr) {
    if ((instr2 & kOpcodeMask) == ADDIU) {
      // Sign-extend the 16-bit immediate of addiu.
      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
    } else if ((instr2 & kOpcodeMask) == ORI) {
      return reinterpret_cast<Address>(instr2 & kImm16Mask);
    } else if ((instr2 & kOpcodeMask) == LUI) {
      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
    }
  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
    // 32 bit value.
    return reinterpret_cast<Address>(
        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
  }

  // We should never get here.
  UNREACHABLE();
  return NULL;
}


void Assembler::set_target_address_at(Address pc, Address target) {
  // On MIPS we patch the already-generated load-address sequence in place.

  // First check we have a li.
  Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);

  // Check that this is indeed the result of a li with MustUseReg true.
  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
        ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDIU ||
                                  (instr2 & kOpcodeMask) == ORI ||
                                  (instr2 & kOpcodeMask) == LUI)));
#endif

  uint32_t rt_code = (instr2 & kRtFieldMask);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

  if (is_int16(itarget)) {
    // nop
    // addiu rt, zero_reg, imm16
    *p = nopInstr;
    *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
  } else if (!(itarget & kHiMask)) {
    // nop
    // ori rt, zero_reg, imm16
    *p = nopInstr;
    *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
  } else if (!(itarget & kImm16Mask)) {
    // nop
    // lui rt, (itarget >> kLuiShift)
    *p = nopInstr;
    *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  } else {
    // lui rt, (itarget >> kLuiShift)
    // ori rt, rt, (itarget & kImm16Mask)
    *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
  }

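  // The patched instructions must be flushed from the instruction cache
  // before they can safely be executed.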
  CPU::FlushICache(pc, 2 * sizeof(int32_t));
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS