assembler-mips.cc revision 589d6979ff2ef66fca2d8fa51404c369ca5e9250
1// Copyright (c) 1994-2006 Sun Microsystems Inc.
2// All Rights Reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are
6// met:
7//
8// - Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10//
11// - Redistribution in binary form must reproduce the above copyright
12// notice, this list of conditions and the following disclaimer in the
13// documentation and/or other materials provided with the distribution.
14//
15// - Neither the name of Sun Microsystems or the names of contributors may
16// be used to endorse or promote products derived from this software without
17// specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
// The original source code covered by the above license has been
// modified significantly by Google Inc.
33// Copyright 2011 the V8 project authors. All rights reserved.
34
35
36#include "v8.h"
37
38#if defined(V8_TARGET_ARCH_MIPS)
39
40#include "mips/assembler-mips-inl.h"
41#include "serialize.h"
42
43namespace v8 {
44namespace internal {
45
46#ifdef DEBUG
47bool CpuFeatures::initialized_ = false;
48#endif
49unsigned CpuFeatures::supported_ = 0;
50unsigned CpuFeatures::found_by_runtime_probing_ = 0;
51
52
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
57static uint64_t CpuFeaturesImpliedByCompiler() {
58  uint64_t answer = 0;
59#ifdef CAN_USE_FPU_INSTRUCTIONS
60  answer |= 1u << FPU;
61#endif  // def CAN_USE_FPU_INSTRUCTIONS
62
63#ifdef __mips__
64  // If the compiler is allowed to use FPU then we can use FPU too in our code
65  // generation even when generating snapshots.  This won't work for cross
66  // compilation.
67#if(defined(__mips_hard_float) && __mips_hard_float != 0)
68  answer |= 1u << FPU;
69#endif  // defined(__mips_hard_float) && __mips_hard_float != 0
70#endif  // def __mips__
71
72  return answer;
73}
74
75
76void CpuFeatures::Probe() {
77  ASSERT(!initialized_);
78#ifdef DEBUG
79  initialized_ = true;
80#endif
81
82  // Get the features implied by the OS and the compiler settings. This is the
83  // minimal set of features which is also allowed for generated code in the
84  // snapshot.
85  supported_ |= OS::CpuFeaturesImpliedByPlatform();
86  supported_ |= CpuFeaturesImpliedByCompiler();
87
88  if (Serializer::enabled()) {
89    // No probing for features if we might serialize (generate snapshot).
90    return;
91  }
92
93  // If the compiler is allowed to use fpu then we can use fpu too in our
94  // code generation.
95#if !defined(__mips__)
96  // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
97  if (FLAG_enable_fpu) {
98      supported_ |= 1u << FPU;
99  }
100#else
101  // Probe for additional features not already known to be available.
102  if (OS::MipsCpuHasFeature(FPU)) {
103    // This implementation also sets the FPU flags if
104    // runtime detection of FPU returns true.
105    supported_ |= 1u << FPU;
106    found_by_runtime_probing_ |= 1u << FPU;
107  }
108#endif
109}
110
111
112int ToNumber(Register reg) {
113  ASSERT(reg.is_valid());
114  const int kNumbers[] = {
115    0,    // zero_reg
116    1,    // at
117    2,    // v0
118    3,    // v1
119    4,    // a0
120    5,    // a1
121    6,    // a2
122    7,    // a3
123    8,    // t0
124    9,    // t1
125    10,   // t2
126    11,   // t3
127    12,   // t4
128    13,   // t5
129    14,   // t6
130    15,   // t7
131    16,   // s0
132    17,   // s1
133    18,   // s2
134    19,   // s3
135    20,   // s4
136    21,   // s5
137    22,   // s6
138    23,   // s7
139    24,   // t8
140    25,   // t9
141    26,   // k0
142    27,   // k1
143    28,   // gp
144    29,   // sp
145    30,   // s8_fp
146    31,   // ra
147  };
148  return kNumbers[reg.code()];
149}
150
151
152Register ToRegister(int num) {
153  ASSERT(num >= 0 && num < kNumRegisters);
154  const Register kRegisters[] = {
155    zero_reg,
156    at,
157    v0, v1,
158    a0, a1, a2, a3,
159    t0, t1, t2, t3, t4, t5, t6, t7,
160    s0, s1, s2, s3, s4, s5, s6, s7,
161    t8, t9,
162    k0, k1,
163    gp,
164    sp,
165    s8_fp,
166    ra
167  };
168  return kRegisters[num];
169}
170
171
172// -----------------------------------------------------------------------------
173// Implementation of RelocInfo.
174
175const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
176                                  1 << RelocInfo::INTERNAL_REFERENCE;
177
178
179bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.
  // Being specially coded on MIPS means that it is a lui/ori instruction
  // pair, and that is always the case inside code objects.
183  return true;
184}
185
186
187// Patch the code at the current address with the supplied instructions.
188void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
189  Instr* pc = reinterpret_cast<Instr*>(pc_);
190  Instr* instr = reinterpret_cast<Instr*>(instructions);
191  for (int i = 0; i < instruction_count; i++) {
192    *(pc + i) = *(instr + i);
193  }
194
195  // Indicate that code has changed.
196  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
197}
198
199
200// Patch the code at the current PC with a call to the target address.
201// Additional guard instructions can be added if required.
202void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
203  // Patch the code at the current address with a call to the target.
204  UNIMPLEMENTED_MIPS();
205}
206
207
208// -----------------------------------------------------------------------------
209// Implementation of Operand and MemOperand.
210// See assembler-mips-inl.h for inlined constructors.
211
212Operand::Operand(Handle<Object> handle) {
213  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
215  Object* obj = *handle;
216  ASSERT(!HEAP->InNewSpace(obj));
217  if (obj->IsHeapObject()) {
218    imm32_ = reinterpret_cast<intptr_t>(handle.location());
219    rmode_ = RelocInfo::EMBEDDED_OBJECT;
220  } else {
221    // No relocation needed.
222    imm32_ = reinterpret_cast<intptr_t>(obj);
223    rmode_ = RelocInfo::NONE;
224  }
225}
226
227
228MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
229  offset_ = offset;
230}
231
232
233// -----------------------------------------------------------------------------
234// Specific instructions, constants, and masks.
235
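// kNegOffset has only the sign bit of a 16-bit immediate set; it is used to
// build and recognize patterns whose offset field is negative.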
236static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop(), or part of a Pop(r) operation,
// as a post-increment of sp.
239const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
240      | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
241// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
242const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
243      | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
244// sw(r, MemOperand(sp, 0))
245const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
246      |  (0 & kImm16Mask);
247//  lw(r, MemOperand(sp, 0))
248const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
249      |  (0 & kImm16Mask);
250
251const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
252      |  (0 & kImm16Mask);
253
254const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
255      |  (0 & kImm16Mask);
256
257const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
258      |  (kNegOffset & kImm16Mask);
259
260const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
261      |  (kNegOffset & kImm16Mask);
262// A mask for the Rt register for push, pop, lw, sw instructions.
263const Instr kRtMask = kRtFieldMask;
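// kLwSwInstrTypeMask selects the opcode and base-register (rs) fields of a
// lw/sw instruction; the remaining bits hold the rt register and the offset.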
264const Instr kLwSwInstrTypeMask = 0xffe00000;
265const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
266const Instr kLwSwOffsetMask = kImm16Mask;
267
268
269// Spare buffer.
270static const int kMinimalBufferSize = 4 * KB;
271
272
273Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
274    : AssemblerBase(arg_isolate),
275      positions_recorder_(this),
276      emit_debug_code_(FLAG_debug_code) {
277  if (buffer == NULL) {
278    // Do our own buffer management.
279    if (buffer_size <= kMinimalBufferSize) {
280      buffer_size = kMinimalBufferSize;
281
282      if (isolate()->assembler_spare_buffer() != NULL) {
283        buffer = isolate()->assembler_spare_buffer();
284        isolate()->set_assembler_spare_buffer(NULL);
285      }
286    }
287    if (buffer == NULL) {
288      buffer_ = NewArray<byte>(buffer_size);
289    } else {
290      buffer_ = static_cast<byte*>(buffer);
291    }
292    buffer_size_ = buffer_size;
293    own_buffer_ = true;
294
295  } else {
296    // Use externally provided buffer instead.
297    ASSERT(buffer_size > 0);
298    buffer_ = static_cast<byte*>(buffer);
299    buffer_size_ = buffer_size;
300    own_buffer_ = false;
301  }
302
  // Set up buffer pointers.
304  ASSERT(buffer_ != NULL);
305  pc_ = buffer_;
306  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
307
308  last_trampoline_pool_end_ = 0;
309  no_trampoline_pool_before_ = 0;
310  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for the BlockTrampolinePoolScope buffer.
313  next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
314  internal_trampoline_exception_ = false;
315  last_bound_pos_ = 0;
316
317  trampoline_emitted_ = false;
318  unbound_labels_count_ = 0;
319  block_buffer_growth_ = false;
320
321  ClearRecordedAstId();
322}
323
324
325Assembler::~Assembler() {
326  if (own_buffer_) {
327    if (isolate()->assembler_spare_buffer() == NULL &&
328        buffer_size_ == kMinimalBufferSize) {
329      isolate()->set_assembler_spare_buffer(buffer_);
330    } else {
331      DeleteArray(buffer_);
332    }
333  }
334}
335
336
337void Assembler::GetCode(CodeDesc* desc) {
338  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up the code descriptor.
340  desc->buffer = buffer_;
341  desc->buffer_size = buffer_size_;
342  desc->instr_size = pc_offset();
343  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
344}
345
346
347void Assembler::Align(int m) {
348  ASSERT(m >= 4 && IsPowerOf2(m));
349  while ((pc_offset() & (m - 1)) != 0) {
350    nop();
351  }
352}
353
354
355void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than a
  // single instruction, as far as I am aware.
358  Align(4);
359}
360
361
362Register Assembler::GetRtReg(Instr instr) {
363  Register rt;
364  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
365  return rt;
366}
367
368
369Register Assembler::GetRsReg(Instr instr) {
370  Register rs;
371  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
372  return rs;
373}
374
375
376Register Assembler::GetRdReg(Instr instr) {
377  Register rd;
378  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
379  return rd;
380}
381
382
383uint32_t Assembler::GetRt(Instr instr) {
384  return (instr & kRtFieldMask) >> kRtShift;
385}
386
387
388uint32_t Assembler::GetRtField(Instr instr) {
389  return instr & kRtFieldMask;
390}
391
392
393uint32_t Assembler::GetRs(Instr instr) {
394  return (instr & kRsFieldMask) >> kRsShift;
395}
396
397
398uint32_t Assembler::GetRsField(Instr instr) {
399  return instr & kRsFieldMask;
400}
401
402
403uint32_t Assembler::GetRd(Instr instr) {
404  return  (instr & kRdFieldMask) >> kRdShift;
405}
406
407
408uint32_t Assembler::GetRdField(Instr instr) {
409  return  instr & kRdFieldMask;
410}
411
412
413uint32_t Assembler::GetSa(Instr instr) {
414  return (instr & kSaFieldMask) >> kSaShift;
415}
416
417
418uint32_t Assembler::GetSaField(Instr instr) {
419  return instr & kSaFieldMask;
420}
421
422
423uint32_t Assembler::GetOpcodeField(Instr instr) {
424  return instr & kOpcodeMask;
425}
426
427
428uint32_t Assembler::GetFunction(Instr instr) {
429  return (instr & kFunctionFieldMask) >> kFunctionShift;
430}
431
432
433uint32_t Assembler::GetFunctionField(Instr instr) {
434  return instr & kFunctionFieldMask;
435}
436
437
438uint32_t Assembler::GetImmediate16(Instr instr) {
439  return instr & kImm16Mask;
440}
441
442
443uint32_t Assembler::GetLabelConst(Instr instr) {
444  return instr & ~kImm16Mask;
445}
446
447
448bool Assembler::IsPop(Instr instr) {
449  return (instr & ~kRtMask) == kPopRegPattern;
450}
451
452
453bool Assembler::IsPush(Instr instr) {
454  return (instr & ~kRtMask) == kPushRegPattern;
455}
456
457
458bool Assembler::IsSwRegFpOffset(Instr instr) {
459  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
460}
461
462
463bool Assembler::IsLwRegFpOffset(Instr instr) {
464  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
465}
466
467
468bool Assembler::IsSwRegFpNegOffset(Instr instr) {
469  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
470          kSwRegFpNegOffsetPattern);
471}
472
473
474bool Assembler::IsLwRegFpNegOffset(Instr instr) {
475  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
476          kLwRegFpNegOffsetPattern);
477}
478
479
480// Labels refer to positions in the (to be) generated code.
481// There are bound, linked, and unused labels.
482//
483// Bound labels refer to known positions in the already
484// generated code. pos() is the position the label refers to.
485//
486// Linked labels refer to unknown positions in the code
487// to be generated; pos() is the position of the last
488// instruction using the label.
489
// The link chain is terminated by an offset of -1 in the instruction,
// which is an otherwise illegal value (a branch to -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in the
// code it is converted to an 18-bit value addressing bytes, hence the -4
// value for kEndOfChain.
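// For example, two forward branches to the same unbound label form a chain:
// the offset field of the second branch links back to the first, whose own
// offset is kEndOfChain. bind() then walks the chain via next() and patches
// each instruction with target_at_put().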
494
495const int kEndOfChain = -4;
496// Determines the end of the Jump chain (a subset of the label link chain).
497const int kEndOfJumpChain = 0;
498
499
500bool Assembler::IsBranch(Instr instr) {
501  uint32_t opcode   = GetOpcodeField(instr);
502  uint32_t rt_field = GetRtField(instr);
503  uint32_t rs_field = GetRsField(instr);
504  uint32_t label_constant = GetLabelConst(instr);
505  // Checks if the instruction is a branch.
506  return opcode == BEQ ||
507      opcode == BNE ||
508      opcode == BLEZ ||
509      opcode == BGTZ ||
510      opcode == BEQL ||
511      opcode == BNEL ||
512      opcode == BLEZL ||
513      opcode == BGTZL ||
514      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
515                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
516      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
517      label_constant == 0;  // Emitted label const in reg-exp engine.
518}
519
520
521bool Assembler::IsBeq(Instr instr) {
522  return GetOpcodeField(instr) == BEQ;
523}
524
525
526bool Assembler::IsBne(Instr instr) {
527  return GetOpcodeField(instr) == BNE;
528}
529
530
531bool Assembler::IsJump(Instr instr) {
532  uint32_t opcode   = GetOpcodeField(instr);
533  uint32_t rt_field = GetRtField(instr);
534  uint32_t rd_field = GetRdField(instr);
535  uint32_t function_field = GetFunctionField(instr);
536  // Checks if the instruction is a jump.
537  return opcode == J || opcode == JAL ||
538      (opcode == SPECIAL && rt_field == 0 &&
539      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
540}
541
542
543bool Assembler::IsJ(Instr instr) {
544  uint32_t opcode = GetOpcodeField(instr);
545  // Checks if the instruction is a jump.
546  return opcode == J;
547}
548
549
550bool Assembler::IsJal(Instr instr) {
551  return GetOpcodeField(instr) == JAL;
552}
553
554bool Assembler::IsJr(Instr instr) {
555  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
556}
557
558bool Assembler::IsJalr(Instr instr) {
559  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
560}
561
562
563bool Assembler::IsLui(Instr instr) {
564  uint32_t opcode = GetOpcodeField(instr);
565  // Checks if the instruction is a load upper immediate.
566  return opcode == LUI;
567}
568
569
570bool Assembler::IsOri(Instr instr) {
571  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or-immediate (ori).
573  return opcode == ORI;
574}
575
576
577bool Assembler::IsNop(Instr instr, unsigned int type) {
578  // See Assembler::nop(type).
579  ASSERT(type < 32);
580  uint32_t opcode = GetOpcodeField(instr);
581  uint32_t rt = GetRt(instr);
582  uint32_t rs = GetRs(instr);
583  uint32_t sa = GetSa(instr);
584
585  // nop(type) == sll(zero_reg, zero_reg, type);
586  // Technically all these values will be 0 but
587  // this makes more sense to the reader.
588
589  bool ret = (opcode == SLL &&
590              rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
591              rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
592              sa == type);
593
594  return ret;
595}
596
597
598int32_t Assembler::GetBranchOffset(Instr instr) {
599  ASSERT(IsBranch(instr));
600  return ((int16_t)(instr & kImm16Mask)) << 2;
601}
602
603
604bool Assembler::IsLw(Instr instr) {
605  return ((instr & kOpcodeMask) == LW);
606}
607
608
609int16_t Assembler::GetLwOffset(Instr instr) {
610  ASSERT(IsLw(instr));
611  return ((instr & kImm16Mask));
612}
613
614
615Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
616  ASSERT(IsLw(instr));
617
618  // We actually create a new lw instruction based on the original one.
619  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
620      | (offset & kImm16Mask);
621
622  return temp_instr;
623}
624
625
626bool Assembler::IsSw(Instr instr) {
627  return ((instr & kOpcodeMask) == SW);
628}
629
630
631Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
632  ASSERT(IsSw(instr));
633  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
634}
635
636
637bool Assembler::IsAddImmediate(Instr instr) {
638  return ((instr & kOpcodeMask) == ADDIU);
639}
640
641
642Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
643  ASSERT(IsAddImmediate(instr));
644  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
645}
646
647
648bool Assembler::IsAndImmediate(Instr instr) {
649  return GetOpcodeField(instr) == ANDI;
650}
651
652
653int Assembler::target_at(int32_t pos) {
654  Instr instr = instr_at(pos);
655  if ((instr & ~kImm16Mask) == 0) {
656    // Emitted label constant, not part of a branch.
657    if (instr == 0) {
658       return kEndOfChain;
659     } else {
660       int32_t imm18 =((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
661       return (imm18 + pos);
662     }
663  }
664  // Check we have a branch or jump instruction.
665  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
668  if (IsBranch(instr)) {
669    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
670
671    if (imm18 == kEndOfChain) {
672      // EndOfChain sentinel is returned directly, not relative to pc or pos.
673      return kEndOfChain;
674    } else {
675      return pos + kBranchPCOffset + imm18;
676    }
677  } else if (IsLui(instr)) {
678    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
679    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
680    ASSERT(IsOri(instr_ori));
681    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
682    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
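    // The lui/ori pair holds an absolute address (buffer_ + target position),
    // so the linked position is recovered relative to the buffer start below.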
683
684    if (imm == kEndOfJumpChain) {
685      // EndOfChain sentinel is returned directly, not relative to pc or pos.
686      return kEndOfChain;
687    } else {
688      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
689      int32_t delta = instr_address - imm;
690      ASSERT(pos > delta);
691      return pos - delta;
692    }
693  } else {
694    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
695    if (imm28 == kEndOfJumpChain) {
696      // EndOfChain sentinel is returned directly, not relative to pc or pos.
697      return kEndOfChain;
698    } else {
699      uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
700      instr_address &= kImm28Mask;
701      int32_t delta = instr_address - imm28;
702      ASSERT(pos > delta);
703      return pos - delta;
704    }
705  }
706}
707
708
709void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
710  Instr instr = instr_at(pos);
711  if ((instr & ~kImm16Mask) == 0) {
712    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
713    // Emitted label constant, not part of a branch.
714    // Make label relative to Code* of generated Code object.
715    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
716    return;
717  }
718
719  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
720  if (IsBranch(instr)) {
721    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
722    ASSERT((imm18 & 3) == 0);
723
724    instr &= ~kImm16Mask;
725    int32_t imm16 = imm18 >> 2;
726    ASSERT(is_int16(imm16));
727
728    instr_at_put(pos, instr | (imm16 & kImm16Mask));
729  } else if (IsLui(instr)) {
730    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
731    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
732    ASSERT(IsOri(instr_ori));
733    uint32_t imm = (uint32_t)buffer_ + target_pos;
734    ASSERT((imm & 3) == 0);
735
736    instr_lui &= ~kImm16Mask;
737    instr_ori &= ~kImm16Mask;
738
739    instr_at_put(pos + 0 * Assembler::kInstrSize,
740                 instr_lui | ((imm & kHiMask) >> kLuiShift));
741    instr_at_put(pos + 1 * Assembler::kInstrSize,
742                 instr_ori | (imm & kImm16Mask));
743  } else {
744    uint32_t imm28 = (uint32_t)buffer_ + target_pos;
745    imm28 &= kImm28Mask;
746    ASSERT((imm28 & 3) == 0);
747
748    instr &= ~kImm26Mask;
749    uint32_t imm26 = imm28 >> 2;
750    ASSERT(is_uint26(imm26));
751
752    instr_at_put(pos, instr | (imm26 & kImm26Mask));
753  }
754}
755
756
757void Assembler::print(Label* L) {
758  if (L->is_unused()) {
759    PrintF("unused label\n");
760  } else if (L->is_bound()) {
761    PrintF("bound label to %d\n", L->pos());
762  } else if (L->is_linked()) {
763    Label l = *L;
764    PrintF("unbound label");
765    while (l.is_linked()) {
766      PrintF("@ %d ", l.pos());
767      Instr instr = instr_at(l.pos());
768      if ((instr & ~kImm16Mask) == 0) {
769        PrintF("value\n");
770      } else {
771        PrintF("%d\n", instr);
772      }
773      next(&l);
774    }
775  } else {
776    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
777  }
778}
779
780
781void Assembler::bind_to(Label* L, int pos) {
782  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
783  int32_t trampoline_pos = kInvalidSlotPos;
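  // A still-linked label reserved a trampoline slot when it was first used
  // (see branch_offset()); binding it releases that reservation.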
784  if (L->is_linked() && !trampoline_emitted_) {
785    unbound_labels_count_--;
786    next_buffer_check_ += kTrampolineSlotsSize;
787  }
788
789  while (L->is_linked()) {
790    int32_t fixup_pos = L->pos();
791    int32_t dist = pos - fixup_pos;
792    next(L);  // Call next before overwriting link with target at fixup_pos.
793    Instr instr = instr_at(fixup_pos);
794    if (IsBranch(instr)) {
795      if (dist > kMaxBranchOffset) {
796        if (trampoline_pos == kInvalidSlotPos) {
797          trampoline_pos = get_trampoline_entry(fixup_pos);
798          CHECK(trampoline_pos != kInvalidSlotPos);
799        }
800        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
801        target_at_put(fixup_pos, trampoline_pos);
802        fixup_pos = trampoline_pos;
803        dist = pos - fixup_pos;
804      }
805      target_at_put(fixup_pos, pos);
806    } else {
807      ASSERT(IsJ(instr) || IsLui(instr));
808      target_at_put(fixup_pos, pos);
809    }
810  }
811  L->bind_to(pos);
812
813  // Keep track of the last bound label so we don't eliminate any instructions
814  // before a bound label.
815  if (pos > last_bound_pos_)
816    last_bound_pos_ = pos;
817}
818
819
820void Assembler::bind(Label* L) {
821  ASSERT(!L->is_bound());  // Label can only be bound once.
822  bind_to(L, pc_offset());
823}
824
825
826void Assembler::next(Label* L) {
827  ASSERT(L->is_linked());
828  int link = target_at(L->pos());
829  if (link == kEndOfChain) {
830    L->Unuse();
831  } else {
832    ASSERT(link >= 0);
833    L->link_to(link);
834  }
835}
836
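// Returns true if a branch emitted at the current pc can reach the bound
// label L with a 16-bit offset, leaving a small safety margin.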
837bool Assembler::is_near(Label* L) {
838  if (L->is_bound()) {
839    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
840  }
841  return false;
842}
843
844// We have to use a temporary register for things that can be relocated even
845// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
846// space.  There is no guarantee that the relocated location can be similarly
847// encoded.
848bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
849  return rmode != RelocInfo::NONE;
850}
851
852
853void Assembler::GenInstrRegister(Opcode opcode,
854                                 Register rs,
855                                 Register rt,
856                                 Register rd,
857                                 uint16_t sa,
858                                 SecondaryField func) {
859  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
860  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
861      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
862  emit(instr);
863}
864
865
866void Assembler::GenInstrRegister(Opcode opcode,
867                                 Register rs,
868                                 Register rt,
869                                 uint16_t msb,
870                                 uint16_t lsb,
871                                 SecondaryField func) {
872  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
873  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
874      | (msb << kRdShift) | (lsb << kSaShift) | func;
875  emit(instr);
876}
877
878
879void Assembler::GenInstrRegister(Opcode opcode,
880                                 SecondaryField fmt,
881                                 FPURegister ft,
882                                 FPURegister fs,
883                                 FPURegister fd,
884                                 SecondaryField func) {
885  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
886  ASSERT(CpuFeatures::IsEnabled(FPU));
887  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
888      | (fd.code() << kFdShift) | func;
889  emit(instr);
890}
891
892
893void Assembler::GenInstrRegister(Opcode opcode,
894                                 SecondaryField fmt,
895                                 Register rt,
896                                 FPURegister fs,
897                                 FPURegister fd,
898                                 SecondaryField func) {
899  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
900  ASSERT(CpuFeatures::IsEnabled(FPU));
901  Instr instr = opcode | fmt | (rt.code() << kRtShift)
902      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
903  emit(instr);
904}
905
906
907void Assembler::GenInstrRegister(Opcode opcode,
908                                 SecondaryField fmt,
909                                 Register rt,
910                                 FPUControlRegister fs,
911                                 SecondaryField func) {
912  ASSERT(fs.is_valid() && rt.is_valid());
913  ASSERT(CpuFeatures::IsEnabled(FPU));
914  Instr instr =
915      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
916  emit(instr);
917}
918
919
920// Instructions with immediate value.
921// Registers are in the order of the instruction encoding, from left to right.
922void Assembler::GenInstrImmediate(Opcode opcode,
923                                  Register rs,
924                                  Register rt,
925                                  int32_t j) {
926  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
927  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
928      | (j & kImm16Mask);
929  emit(instr);
930}
931
932
933void Assembler::GenInstrImmediate(Opcode opcode,
934                                  Register rs,
935                                  SecondaryField SF,
936                                  int32_t j) {
937  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
938  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
939  emit(instr);
940}
941
942
943void Assembler::GenInstrImmediate(Opcode opcode,
944                                  Register rs,
945                                  FPURegister ft,
946                                  int32_t j) {
947  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
948  ASSERT(CpuFeatures::IsEnabled(FPU));
949  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
950      | (j & kImm16Mask);
951  emit(instr);
952}
953
954
955void Assembler::GenInstrJump(Opcode opcode,
956                             uint32_t address) {
957  BlockTrampolinePoolScope block_trampoline_pool(this);
958  ASSERT(is_uint26(address));
959  Instr instr = opcode | address;
960  emit(instr);
961  BlockTrampolinePoolFor(1);  // For associated delay slot.
962}
963
964
965// Returns the next free trampoline entry.
966int32_t Assembler::get_trampoline_entry(int32_t pos) {
967  int32_t trampoline_entry = kInvalidSlotPos;
968
969  if (!internal_trampoline_exception_) {
970    if (trampoline_.start() > pos) {
971     trampoline_entry = trampoline_.take_slot();
972    }
973
974    if (kInvalidSlotPos == trampoline_entry) {
975      internal_trampoline_exception_ = true;
976    }
977  }
978  return trampoline_entry;
979}
980
981
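// Returns the absolute address to encode for a jump bound for label L: the
// label's position if bound, the previous link in its chain if linked, or
// kEndOfJumpChain if unused. Unbound labels are linked to this jump.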
982uint32_t Assembler::jump_address(Label* L) {
983  int32_t target_pos;
984
985  if (L->is_bound()) {
986    target_pos = L->pos();
987  } else {
988    if (L->is_linked()) {
989      target_pos = L->pos();  // L's link.
990      L->link_to(pc_offset());
991    } else {
992      L->link_to(pc_offset());
993      return kEndOfJumpChain;
994    }
995  }
996
997  uint32_t imm = (uint32_t)buffer_ + target_pos;
998  ASSERT((imm & 3) == 0);
999
1000  return imm;
1001}
1002
1003
1004int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
1005  int32_t target_pos;
1006
1007  if (L->is_bound()) {
1008    target_pos = L->pos();
1009  } else {
1010    if (L->is_linked()) {
1011      target_pos = L->pos();
1012      L->link_to(pc_offset());
1013    } else {
1014      L->link_to(pc_offset());
1015      if (!trampoline_emitted_) {
1016        unbound_labels_count_++;
1017        next_buffer_check_ -= kTrampolineSlotsSize;
1018      }
1019      return kEndOfChain;
1020    }
1021  }
1022
1023  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
1024  ASSERT((offset & 3) == 0);
1025  ASSERT(is_int16(offset >> 2));
1026
1027  return offset;
1028}
1029
1030
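// Writes the position of (or a link to) label L into the instruction slot at
// at_offset, e.g. for label constants emitted by the regexp engine.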
1031void Assembler::label_at_put(Label* L, int at_offset) {
1032  int target_pos;
1033  if (L->is_bound()) {
1034    target_pos = L->pos();
1035    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
1036  } else {
1037    if (L->is_linked()) {
1038      target_pos = L->pos();  // L's link.
1039      int32_t imm18 = target_pos - at_offset;
1040      ASSERT((imm18 & 3) == 0);
1041      int32_t imm16 = imm18 >> 2;
1042      ASSERT(is_int16(imm16));
1043      instr_at_put(at_offset, (imm16 & kImm16Mask));
1044    } else {
1045      target_pos = kEndOfChain;
1046      instr_at_put(at_offset, 0);
1047      if (!trampoline_emitted_) {
1048        unbound_labels_count_++;
1049        next_buffer_check_ -= kTrampolineSlotsSize;
1050      }
1051    }
1052    L->link_to(at_offset);
1053  }
1054}
1055
1056
1057//------- Branch and jump instructions --------
1058
1059void Assembler::b(int16_t offset) {
1060  beq(zero_reg, zero_reg, offset);
1061}
1062
1063
1064void Assembler::bal(int16_t offset) {
1065  positions_recorder()->WriteRecordedPositions();
1066  bgezal(zero_reg, offset);
1067}
1068
1069
1070void Assembler::beq(Register rs, Register rt, int16_t offset) {
1071  BlockTrampolinePoolScope block_trampoline_pool(this);
1072  GenInstrImmediate(BEQ, rs, rt, offset);
1073  BlockTrampolinePoolFor(1);  // For associated delay slot.
1074}
1075
1076
1077void Assembler::bgez(Register rs, int16_t offset) {
1078  BlockTrampolinePoolScope block_trampoline_pool(this);
1079  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
1080  BlockTrampolinePoolFor(1);  // For associated delay slot.
1081}
1082
1083
1084void Assembler::bgezal(Register rs, int16_t offset) {
1085  BlockTrampolinePoolScope block_trampoline_pool(this);
1086  positions_recorder()->WriteRecordedPositions();
1087  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
1088  BlockTrampolinePoolFor(1);  // For associated delay slot.
1089}
1090
1091
1092void Assembler::bgtz(Register rs, int16_t offset) {
1093  BlockTrampolinePoolScope block_trampoline_pool(this);
1094  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
1095  BlockTrampolinePoolFor(1);  // For associated delay slot.
1096}
1097
1098
1099void Assembler::blez(Register rs, int16_t offset) {
1100  BlockTrampolinePoolScope block_trampoline_pool(this);
1101  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
1102  BlockTrampolinePoolFor(1);  // For associated delay slot.
1103}
1104
1105
1106void Assembler::bltz(Register rs, int16_t offset) {
1107  BlockTrampolinePoolScope block_trampoline_pool(this);
1108  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
1109  BlockTrampolinePoolFor(1);  // For associated delay slot.
1110}
1111
1112
1113void Assembler::bltzal(Register rs, int16_t offset) {
1114  BlockTrampolinePoolScope block_trampoline_pool(this);
1115  positions_recorder()->WriteRecordedPositions();
1116  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
1117  BlockTrampolinePoolFor(1);  // For associated delay slot.
1118}
1119
1120
1121void Assembler::bne(Register rs, Register rt, int16_t offset) {
1122  BlockTrampolinePoolScope block_trampoline_pool(this);
1123  GenInstrImmediate(BNE, rs, rt, offset);
1124  BlockTrampolinePoolFor(1);  // For associated delay slot.
1125}
1126
1127
1128void Assembler::j(int32_t target) {
#ifdef DEBUG
1130  // Get pc of delay slot.
1131  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1132  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1133  ASSERT(in_range && ((target & 3) == 0));
1134#endif
1135  GenInstrJump(J, target >> 2);
1136}
1137
1138
1139void Assembler::jr(Register rs) {
1140  BlockTrampolinePoolScope block_trampoline_pool(this);
1141  if (rs.is(ra)) {
1142    positions_recorder()->WriteRecordedPositions();
1143  }
1144  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
1145  BlockTrampolinePoolFor(1);  // For associated delay slot.
1146}
1147
1148
1149void Assembler::jal(int32_t target) {
1150#ifdef DEBUG
1151  // Get pc of delay slot.
1152  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1153  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1154  ASSERT(in_range && ((target & 3) == 0));
1155#endif
1156  positions_recorder()->WriteRecordedPositions();
1157  GenInstrJump(JAL, target >> 2);
1158}
1159
1160
1161void Assembler::jalr(Register rs, Register rd) {
1162  BlockTrampolinePoolScope block_trampoline_pool(this);
1163  positions_recorder()->WriteRecordedPositions();
1164  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
1165  BlockTrampolinePoolFor(1);  // For associated delay slot.
1166}
1167
1168
1169void Assembler::j_or_jr(int32_t target, Register rs) {
1170  // Get pc of delay slot.
1171  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1172  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1173
1174  if (in_range) {
1175      j(target);
1176  } else {
1177      jr(t9);
1178  }
1179}
1180
1181
1182void Assembler::jal_or_jalr(int32_t target, Register rs) {
1183  // Get pc of delay slot.
1184  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
1185  bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
1186
1187  if (in_range) {
1188      jal(target);
1189  } else {
1190      jalr(t9);
1191  }
1192}
1193
1194
1195//-------Data-processing-instructions---------
1196
1197// Arithmetic.
1198
1199void Assembler::addu(Register rd, Register rs, Register rt) {
1200  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
1201}
1202
1203
1204void Assembler::addiu(Register rd, Register rs, int32_t j) {
1205  GenInstrImmediate(ADDIU, rs, rd, j);
1206}
1207
1208
1209void Assembler::subu(Register rd, Register rs, Register rt) {
1210  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
1211}
1212
1213
1214void Assembler::mul(Register rd, Register rs, Register rt) {
1215  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
1216}
1217
1218
1219void Assembler::mult(Register rs, Register rt) {
1220  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
1221}
1222
1223
1224void Assembler::multu(Register rs, Register rt) {
1225  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
1226}
1227
1228
1229void Assembler::div(Register rs, Register rt) {
1230  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
1231}
1232
1233
1234void Assembler::divu(Register rs, Register rt) {
1235  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
1236}
1237
1238
1239// Logical.
1240
1241void Assembler::and_(Register rd, Register rs, Register rt) {
1242  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
1243}
1244
1245
1246void Assembler::andi(Register rt, Register rs, int32_t j) {
1247  GenInstrImmediate(ANDI, rs, rt, j);
1248}
1249
1250
1251void Assembler::or_(Register rd, Register rs, Register rt) {
1252  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
1253}
1254
1255
1256void Assembler::ori(Register rt, Register rs, int32_t j) {
1257  GenInstrImmediate(ORI, rs, rt, j);
1258}
1259
1260
1261void Assembler::xor_(Register rd, Register rs, Register rt) {
1262  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
1263}
1264
1265
1266void Assembler::xori(Register rt, Register rs, int32_t j) {
1267  GenInstrImmediate(XORI, rs, rt, j);
1268}
1269
1270
1271void Assembler::nor(Register rd, Register rs, Register rt) {
1272  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
1273}
1274
1275
1276// Shifts.
1277void Assembler::sll(Register rd,
1278                    Register rt,
1279                    uint16_t sa,
1280                    bool coming_from_nop) {
1281  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
1282  // generated using the sll instruction. They must be generated using
1283  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
1284  // instructions.
1285  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
1286  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
1287}
1288
1289
1290void Assembler::sllv(Register rd, Register rt, Register rs) {
1291  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
1292}
1293
1294
1295void Assembler::srl(Register rd, Register rt, uint16_t sa) {
1296  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
1297}
1298
1299
1300void Assembler::srlv(Register rd, Register rt, Register rs) {
1301  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
1302}
1303
1304
1305void Assembler::sra(Register rd, Register rt, uint16_t sa) {
1306  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
1307}
1308
1309
1310void Assembler::srav(Register rd, Register rt, Register rs) {
1311  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
1312}
1313
1314
1315void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
1316  // Should be called via MacroAssembler::Ror.
1317  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
1318  ASSERT(mips32r2);
1319  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
1320      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
1321  emit(instr);
1322}
1323
1324
1325void Assembler::rotrv(Register rd, Register rt, Register rs) {
1326  // Should be called via MacroAssembler::Ror.
1327  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
1328  ASSERT(mips32r2);
1329  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
1330     | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
1331  emit(instr);
1332}
1333
1334
1335//------------Memory-instructions-------------
1336
1337// Helper for base-reg + offset, when offset is larger than int16.
1338void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
1339  ASSERT(!src.rm().is(at));
1340  lui(at, src.offset_ >> kLuiShift);
1341  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
1342  addu(at, at, src.rm());  // Add base register.
1343}
1344
1345
1346void Assembler::lb(Register rd, const MemOperand& rs) {
1347  if (is_int16(rs.offset_)) {
1348    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
1349  } else {  // Offset > 16 bits, use multiple instructions to load.
1350    LoadRegPlusOffsetToAt(rs);
1351    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
1352  }
1353}
1354
1355
1356void Assembler::lbu(Register rd, const MemOperand& rs) {
1357  if (is_int16(rs.offset_)) {
1358    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
1359  } else {  // Offset > 16 bits, use multiple instructions to load.
1360    LoadRegPlusOffsetToAt(rs);
1361    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
1362  }
1363}
1364
1365
1366void Assembler::lh(Register rd, const MemOperand& rs) {
1367  if (is_int16(rs.offset_)) {
1368    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
1369  } else {  // Offset > 16 bits, use multiple instructions to load.
1370    LoadRegPlusOffsetToAt(rs);
1371    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
1372  }
1373}
1374
1375
1376void Assembler::lhu(Register rd, const MemOperand& rs) {
1377  if (is_int16(rs.offset_)) {
1378    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
1379  } else {  // Offset > 16 bits, use multiple instructions to load.
1380    LoadRegPlusOffsetToAt(rs);
1381    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
1382  }
1383}
1384
1385
1386void Assembler::lw(Register rd, const MemOperand& rs) {
1387  if (is_int16(rs.offset_)) {
1388    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
1389  } else {  // Offset > 16 bits, use multiple instructions to load.
1390    LoadRegPlusOffsetToAt(rs);
1391    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
1392  }
1393}
1394
1395
1396void Assembler::lwl(Register rd, const MemOperand& rs) {
1397  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
1398}
1399
1400
1401void Assembler::lwr(Register rd, const MemOperand& rs) {
1402  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
1403}
1404
1405
1406void Assembler::sb(Register rd, const MemOperand& rs) {
1407  if (is_int16(rs.offset_)) {
1408    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
1409  } else {  // Offset > 16 bits, use multiple instructions to store.
1410    LoadRegPlusOffsetToAt(rs);
1411    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
1412  }
1413}
1414
1415
1416void Assembler::sh(Register rd, const MemOperand& rs) {
1417  if (is_int16(rs.offset_)) {
1418    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
1419  } else {  // Offset > 16 bits, use multiple instructions to store.
1420    LoadRegPlusOffsetToAt(rs);
1421    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
1422  }
1423}
1424
1425
1426void Assembler::sw(Register rd, const MemOperand& rs) {
1427  if (is_int16(rs.offset_)) {
1428    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
1429  } else {  // Offset > 16 bits, use multiple instructions to store.
1430    LoadRegPlusOffsetToAt(rs);
1431    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
1432  }
1433}
1434
1435
1436void Assembler::swl(Register rd, const MemOperand& rs) {
1437  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
1438}
1439
1440
1441void Assembler::swr(Register rd, const MemOperand& rs) {
1442  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
1443}
1444
1445
1446void Assembler::lui(Register rd, int32_t j) {
1447  GenInstrImmediate(LUI, zero_reg, rd, j);
1448}
1449
1450
1451//-------------Misc-instructions--------------
1452
1453// Break / Trap instructions.
1454void Assembler::break_(uint32_t code, bool break_as_stop) {
1455  ASSERT((code & ~0xfffff) == 0);
1456  // We need to invalidate breaks that could be stops as well because the
1457  // simulator expects a char pointer after the stop instruction.
1458  // See constants-mips.h for explanation.
1459  ASSERT((break_as_stop &&
1460          code <= kMaxStopCode &&
1461          code > kMaxWatchpointCode) ||
1462         (!break_as_stop &&
1463          (code > kMaxStopCode ||
1464           code <= kMaxWatchpointCode)));
1465  Instr break_instr = SPECIAL | BREAK | (code << 6);
1466  emit(break_instr);
1467}
1468
1469
1470void Assembler::stop(const char* msg, uint32_t code) {
1471  ASSERT(code > kMaxWatchpointCode);
1472  ASSERT(code <= kMaxStopCode);
1473#if defined(V8_HOST_ARCH_MIPS)
1474  break_(0x54321);
1475#else  // V8_HOST_ARCH_MIPS
1476  BlockTrampolinePoolFor(2);
1477  // The Simulator will handle the stop instruction and get the message address.
1478  // On MIPS stop() is just a special kind of break_().
1479  break_(code, true);
1480  emit(reinterpret_cast<Instr>(msg));
1481#endif
1482}
1483
1484
1485void Assembler::tge(Register rs, Register rt, uint16_t code) {
1486  ASSERT(is_uint10(code));
1487  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
1488      | rt.code() << kRtShift | code << 6;
1489  emit(instr);
1490}
1491
1492
1493void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
1494  ASSERT(is_uint10(code));
1495  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
1496      | rt.code() << kRtShift | code << 6;
1497  emit(instr);
1498}
1499
1500
1501void Assembler::tlt(Register rs, Register rt, uint16_t code) {
1502  ASSERT(is_uint10(code));
1503  Instr instr =
1504      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1505  emit(instr);
1506}
1507
1508
1509void Assembler::tltu(Register rs, Register rt, uint16_t code) {
1510  ASSERT(is_uint10(code));
1511  Instr instr =
1512      SPECIAL | TLTU | rs.code() << kRsShift
1513      | rt.code() << kRtShift | code << 6;
1514  emit(instr);
1515}
1516
1517
1518void Assembler::teq(Register rs, Register rt, uint16_t code) {
1519  ASSERT(is_uint10(code));
1520  Instr instr =
1521      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1522  emit(instr);
1523}
1524
1525
1526void Assembler::tne(Register rs, Register rt, uint16_t code) {
1527  ASSERT(is_uint10(code));
1528  Instr instr =
1529      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
1530  emit(instr);
1531}
1532
1533
1534// Move from HI/LO register.
1535
1536void Assembler::mfhi(Register rd) {
1537  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
1538}
1539
1540
1541void Assembler::mflo(Register rd) {
1542  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
1543}
1544
1545
1546// Set on less than instructions.
1547void Assembler::slt(Register rd, Register rs, Register rt) {
1548  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
1549}
1550
1551
1552void Assembler::sltu(Register rd, Register rs, Register rt) {
1553  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
1554}
1555
1556
1557void Assembler::slti(Register rt, Register rs, int32_t j) {
1558  GenInstrImmediate(SLTI, rs, rt, j);
1559}
1560
1561
1562void Assembler::sltiu(Register rt, Register rs, int32_t j) {
1563  GenInstrImmediate(SLTIU, rs, rt, j);
1564}
1565
1566
1567// Conditional move.
1568void Assembler::movz(Register rd, Register rs, Register rt) {
1569  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
1570}
1571
1572
1573void Assembler::movn(Register rd, Register rs, Register rt) {
1574  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
1575}
1576
1577
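// For MOVCI the rt field encodes the FPU condition code in its bits 4..2 and
// the true/false bit in bit 0 (1 selects movt, 0 selects movf).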
1578void Assembler::movt(Register rd, Register rs, uint16_t cc) {
1579  Register rt;
1580  rt.code_ = (cc & 0x0007) << 2 | 1;
1581  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1582}
1583
1584
1585void Assembler::movf(Register rd, Register rs, uint16_t cc) {
1586  Register rt;
1587  rt.code_ = (cc & 0x0007) << 2 | 0;
1588  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
1589}
1590
1591
1592// Bit twiddling.
1593void Assembler::clz(Register rd, Register rs) {
1594  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
1595  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
1596}
1597
1598
1599void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1600  // Should be called via MacroAssembler::Ins.
1601  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
1602  ASSERT(mips32r2);
1603  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
1604}
1605
1606
1607void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
1608  // Should be called via MacroAssembler::Ext.
1609  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
1610  ASSERT(mips32r2);
1611  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
1612}
1613
1614
1615//--------Coprocessor-instructions----------------
1616
1617// Load, store, move.
1618void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
1619  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1620}
1621
1622
1623void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
1624  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1625  // load to two 32-bit loads.
1626  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
1627  FPURegister nextfpreg;
1628  nextfpreg.setcode(fd.code() + 1);
1629  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
1630}
1631
1632
1633void Assembler::swc1(FPURegister fd, const MemOperand& src) {
1634  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1635}
1636
1637
1638void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
1639  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
1640  // store to two 32-bit stores.
1641  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
1642  FPURegister nextfpreg;
1643  nextfpreg.setcode(fd.code() + 1);
1644  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
1645}
1646
1647
1648void Assembler::mtc1(Register rt, FPURegister fs) {
1649  GenInstrRegister(COP1, MTC1, rt, fs, f0);
1650}
1651
1652
1653void Assembler::mfc1(Register rt, FPURegister fs) {
1654  GenInstrRegister(COP1, MFC1, rt, fs, f0);
1655}
1656
1657
1658void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1659  GenInstrRegister(COP1, CTC1, rt, fs);
1660}
1661
1662
1663void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1664  GenInstrRegister(COP1, CFC1, rt, fs);
1665}
1666
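// Splits the IEEE 754 bit pattern of d into its low and high 32-bit words.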
1667void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
1668  uint64_t i;
1669  memcpy(&i, &d, 8);
1670
1671  *lo = i & 0xffffffff;
1672  *hi = i >> 32;
1673}
1674
1675// Arithmetic.
1676
1677void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1678  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
1679}
1680
1681
1682void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1683  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
1684}
1685
1686
1687void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1688  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
1689}
1690
1691
1692void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
1693  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
1694}
1695
1696
1697void Assembler::abs_d(FPURegister fd, FPURegister fs) {
1698  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
1699}
1700
1701
1702void Assembler::mov_d(FPURegister fd, FPURegister fs) {
1703  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
1704}
1705
1706
1707void Assembler::neg_d(FPURegister fd, FPURegister fs) {
1708  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
1709}
1710
1711
1712void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
1713  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
1714}
1715
1716
1717// Conversions.
1718
1719void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
1720  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
1721}
1722
1723
1724void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
1725  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
1726}
1727
1728
1729void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
1730  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
1731}
1732
1733
1734void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
1735  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
1736}
1737
1738
1739void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
1740  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
1741}
1742
1743
1744void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
1745  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
1746}
1747
1748
1749void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
1750  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
1751}
1752
1753
1754void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
1755  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
1756}
1757
1758
1759void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
1760  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
1761}
1762
1763
1764void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
1765  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
1766}
1767
1768
1769void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
1770  ASSERT(mips32r2);
1771  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
1772}
1773
1774
1775void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
1776  ASSERT(mips32r2);
1777  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
1778}
1779
1780
1781void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
1782  ASSERT(mips32r2);
1783  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
1784}
1785
1786
1787void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
1788  ASSERT(mips32r2);
1789  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
1790}
1791
1792
1793void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
1794  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
1795}
1796
1797
1798void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
1799  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
1800}
1801
1802
1803void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
1804  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
1805}
1806
1807
1808void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
1809  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
1810}
1811
1812
1813void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
1814  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
1815}
1816
1817
1818void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
1819  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
1820}
1821
1822
1823void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
1824  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
1825}
1826
1827
1828void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
1829  ASSERT(mips32r2);
1830  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
1831}
1832
1833
1834void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
1835  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
1836}
1837
1838
1839void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
1840  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
1841}
1842
1843
1844void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
1845  ASSERT(mips32r2);
1846  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
1847}
1848
1849
1850void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
1851  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
1852}
1853
1854
1855// Conditions.
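// Note (added for clarity, not original text): c() emits a C.cond.fmt compare
// that writes FPU condition-code bit 'cc' (0..7); bc1f/bc1t further below are
// the branches that test that bit.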
1856void Assembler::c(FPUCondition cond, SecondaryField fmt,
1857    FPURegister fs, FPURegister ft, uint16_t cc) {
1858  ASSERT(CpuFeatures::IsEnabled(FPU));
1859  ASSERT(is_uint3(cc));
1860  ASSERT((fmt & ~(31 << kRsShift)) == 0);
1861  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
1862      | cc << 8 | 3 << 4 | cond;
1863  emit(instr);
1864}
1865
1866
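// Note (added for clarity, not original text): fcmp only supports comparing
// against the literal 0.0 (see the ASSERT below). It materializes 0.0 in f14,
// clobbering that register, before emitting the double compare against src1.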
1867void Assembler::fcmp(FPURegister src1, const double src2,
1868      FPUCondition cond) {
1869  ASSERT(CpuFeatures::IsEnabled(FPU));
1870  ASSERT(src2 == 0.0);
1871  mtc1(zero_reg, f14);
1872  cvt_d_w(f14, f14);
1873  c(cond, D, src1, f14, 0);
1874}
1875
1876
1877void Assembler::bc1f(int16_t offset, uint16_t cc) {
1878  ASSERT(CpuFeatures::IsEnabled(FPU));
1879  ASSERT(is_uint3(cc));
1880  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
1881  emit(instr);
1882}
1883
1884
1885void Assembler::bc1t(int16_t offset, uint16_t cc) {
1886  ASSERT(CpuFeatures::IsEnabled(FPU));
1887  ASSERT(is_uint3(cc));
1888  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
1889  emit(instr);
1890}
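// Usage sketch (illustrative only, not original code; the condition, registers
// and offset are hypothetical):
//   c(OLT, D, f12, f14, 0);     // Set FPU cc 0 if f12 < f14 (ordered).
//   bc1t(offset_to_target, 0);  // Branch if cc 0 is set.
//   nop();                      // Branch delay slot.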
1891
1892
1893// Debugging.
1894void Assembler::RecordJSReturn() {
1895  positions_recorder()->WriteRecordedPositions();
1896  CheckBuffer();
1897  RecordRelocInfo(RelocInfo::JS_RETURN);
1898}
1899
1900
1901void Assembler::RecordDebugBreakSlot() {
1902  positions_recorder()->WriteRecordedPositions();
1903  CheckBuffer();
1904  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
1905}
1906
1907
1908void Assembler::RecordComment(const char* msg) {
1909  if (FLAG_code_comments) {
1910    CheckBuffer();
1911    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
1912  }
1913}
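// Note (added for clarity, not original text): RecordComment only emits a
// COMMENT reloc entry when the --code-comments flag is on, e.g.
//   RecordComment("-- inlined write barrier --");
// Only the string pointer is recorded, so the text must stay valid for as long
// as the reloc info is read.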
1914
1915
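// Note (added for clarity, not original text): RelocateInternalReference
// shifts an internal reference by pc_delta when the buffer moves. It patches
// either a lui/ori pair holding a 32-bit address (2 instructions) or the
// 26-bit target field of a j/jal (1 instruction) and returns the number of
// instructions patched; kEndOfJumpChain marks a link with nothing to relocate.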
1916int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
1917  Instr instr = instr_at(pc);
1918  ASSERT(IsJ(instr) || IsLui(instr));
1919  if (IsLui(instr)) {
1920    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
1921    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
1922    ASSERT(IsOri(instr_ori));
1923    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
1924    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
1925    if (imm == kEndOfJumpChain) {
1926      return 0;  // Number of instructions patched.
1927    }
1928    imm += pc_delta;
1929    ASSERT((imm & 3) == 0);
1930
1931    instr_lui &= ~kImm16Mask;
1932    instr_ori &= ~kImm16Mask;
1933
1934    instr_at_put(pc + 0 * Assembler::kInstrSize,
1935                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
1936    instr_at_put(pc + 1 * Assembler::kInstrSize,
1937                 instr_ori | (imm & kImm16Mask));
1938    return 2;  // Number of instructions patched.
1939  } else {
1940    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
1941    if ((int32_t)imm28 == kEndOfJumpChain) {
1942      return 0;  // Number of instructions patched.
1943    }
1944    imm28 += pc_delta;
1945    imm28 &= kImm28Mask;
1946    ASSERT((imm28 & 3) == 0);
1947
1948    instr &= ~kImm26Mask;
1949    uint32_t imm26 = imm28 >> 2;
1950    ASSERT(is_uint26(imm26));
1951
1952    instr_at_put(pc, instr | (imm26 & kImm26Mask));
1953    return 1;  // Number of instructions patched.
1954  }
1955}
1956
1957
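// Note (added for clarity, not original text): the buffer grows to at least
// 4 KB, doubles until it reaches 1 MB and then grows by 1 MB at a time.
// Instructions grow upward from the start of the buffer while reloc info
// grows downward from the end, which is why the two regions are moved with
// separate deltas below.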
1958void Assembler::GrowBuffer() {
1959  if (!own_buffer_) FATAL("external code buffer is too small");
1960
1961  // Compute new buffer size.
1962  CodeDesc desc;  // The new buffer.
1963  if (buffer_size_ < 4*KB) {
1964    desc.buffer_size = 4*KB;
1965  } else if (buffer_size_ < 1*MB) {
1966    desc.buffer_size = 2*buffer_size_;
1967  } else {
1968    desc.buffer_size = buffer_size_ + 1*MB;
1969  }
1970  CHECK_GT(desc.buffer_size, 0);  // No overflow.
1971
1972  // Set up the new buffer.
1973  desc.buffer = NewArray<byte>(desc.buffer_size);
1974
1975  desc.instr_size = pc_offset();
1976  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
1977
1978  // Copy the data.
1979  int pc_delta = desc.buffer - buffer_;
1980  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
1981  memmove(desc.buffer, buffer_, desc.instr_size);
1982  memmove(reloc_info_writer.pos() + rc_delta,
1983          reloc_info_writer.pos(), desc.reloc_size);
1984
1985  // Switch buffers.
1986  DeleteArray(buffer_);
1987  buffer_ = desc.buffer;
1988  buffer_size_ = desc.buffer_size;
1989  pc_ += pc_delta;
1990  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
1991                               reloc_info_writer.last_pc() + pc_delta);
1992
1993  // Relocate runtime entries.
1994  for (RelocIterator it(desc); !it.done(); it.next()) {
1995    RelocInfo::Mode rmode = it.rinfo()->rmode();
1996    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
1997      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
1998      RelocateInternalReference(p, pc_delta);
1999    }
2000  }
2001
2002  ASSERT(!overflow());
2003}
2004
2005
2006void Assembler::db(uint8_t data) {
2007  CheckBuffer();
2008  *reinterpret_cast<uint8_t*>(pc_) = data;
2009  pc_ += sizeof(uint8_t);
2010}
2011
2012
2013void Assembler::dd(uint32_t data) {
2014  CheckBuffer();
2015  *reinterpret_cast<uint32_t*>(pc_) = data;
2016  pc_ += sizeof(uint32_t);
2017}
2018
2019
2020void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2021  RelocInfo rinfo(pc_, rmode, data);  // We do not try to reuse pool constants.
2022  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
2023    // Adjust code for new modes.
2024    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
2025           || RelocInfo::IsJSReturn(rmode)
2026           || RelocInfo::IsComment(rmode)
2027           || RelocInfo::IsPosition(rmode));
2028    // These modes do not need an entry in the constant pool.
2029  }
2030  if (rinfo.rmode() != RelocInfo::NONE) {
2031    // Don't record external references unless the heap will be serialized.
2032    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
2033#ifdef DEBUG
2034      if (!Serializer::enabled()) {
2035        Serializer::TooLateToEnableNow();
2036      }
2037#endif
2038      if (!Serializer::enabled() && !emit_debug_code()) {
2039        return;
2040      }
2041    }
2042    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
2043    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2044      RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
2045      ClearRecordedAstId();
2046      reloc_info_writer.Write(&reloc_info_with_ast_id);
2047    } else {
2048      reloc_info_writer.Write(&rinfo);
2049    }
2050  }
2051}
2052
2053
2054void Assembler::BlockTrampolinePoolFor(int instructions) {
2055  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2056}
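// Usage sketch (illustrative only, not original code): keep a fixed-length
// sequence contiguous by blocking the trampoline pool for its size, e.g.
//   BlockTrampolinePoolFor(2);  // Branch plus its delay slot.
//   b(&target);
//   nop();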
2057
2058
2059void Assembler::CheckTrampolinePool() {
2060  // Some small sequences of instructions must not be broken up by the
2061  // insertion of a trampoline pool; such sequences are protected by setting
2062  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2063  // which are both checked here. Also, recursive calls to CheckTrampolinePool
2064  // are blocked by trampoline_pool_blocked_nesting_.
2065  if ((trampoline_pool_blocked_nesting_ > 0) ||
2066      (pc_offset() < no_trampoline_pool_before_)) {
2067    // Emission is currently blocked; make sure we try again as soon as
2068    // possible.
2069    if (trampoline_pool_blocked_nesting_ > 0) {
2070      next_buffer_check_ = pc_offset() + kInstrSize;
2071    } else {
2072      next_buffer_check_ = no_trampoline_pool_before_;
2073    }
2074    return;
2075  }
2076
2077  ASSERT(!trampoline_emitted_);
2078  ASSERT(unbound_labels_count_ >= 0);
2079  if (unbound_labels_count_ > 0) {
2080    // First we emit a jump (2 instructions), then we emit the trampoline pool.
2081    { BlockTrampolinePoolScope block_trampoline_pool(this);
2082      Label after_pool;
2083      b(&after_pool);
2084      nop();
2085
2086      int pool_start = pc_offset();
2087      for (int i = 0; i < unbound_labels_count_; i++) {
2088        uint32_t imm32;
2089        imm32 = jump_address(&after_pool);
2090        { BlockGrowBufferScope block_buf_growth(this);
2091          // Buffer growth (and relocation) must be blocked for internal
2092          // references until associated instructions are emitted and available
2093          // to be patched.
2094          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2095          lui(at, (imm32 & kHiMask) >> kLuiShift);
2096          ori(at, at, (imm32 & kImm16Mask));
2097        }
2098        jr(at);
2099        nop();
2100      }
2101      bind(&after_pool);
2102      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2103
2104      trampoline_emitted_ = true;
2105      // As we are only going to emit the trampoline once, we need to prevent any
2106      // further emission.
2107      next_buffer_check_ = kMaxInt;
2108    }
2109  } else {
2110    // The number of branches to unbound labels at this point is zero, so we can
2111    // move the next buffer check to its maximum.
2112    next_buffer_check_ = pc_offset() +
2113        kMaxBranchOffset - kTrampolineSlotsSize * 16;
2114  }
2115  return;
2116}
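// Shape of an emitted trampoline pool (a sketch of what the code above
// produces, added for clarity):
//   b      after_pool          ; normal execution skips the pool
//   nop                        ; delay slot
//   ; one slot per unbound label, patched when the label binds:
//   lui    at, hi16(target)
//   ori    at, at, lo16(target)
//   jr     at
//   nop                        ; delay slot
//   after_pool: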
2117
2118
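// Note (added for clarity, not original text): returns the 32-bit address
// encoded by the lui/ori pair at 'pc', i.e. the value that a li
// macro-instruction loaded there.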
2119Address Assembler::target_address_at(Address pc) {
2120  Instr instr1 = instr_at(pc);
2121  Instr instr2 = instr_at(pc + kInstrSize);
2122  // Interpret the 2 instructions generated by li: lui/ori.
2123  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
2124    // Assemble the 32 bit value.
2125    return reinterpret_cast<Address>(
2126        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
2127  }
2128
2129  // We should never get here; force a bad address if we do.
2130  UNREACHABLE();
2131  return (Address)0x0;
2132}
2133
2134
2135// On MIPS, a target address is stored in a lui/ori instruction pair, each
2136// of which loads 16 bits of the 32-bit address into a register.
2137// Patching the address must replace both instructions and flush the i-cache.
2138//
2139// There is an optimization below, which emits a nop when the address
2140// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
2141// and possibly removed.
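// Worked example (illustrative values, added for clarity): for the target
// address 0x12345678 the pair is
//   lui  rt, 0x1234          ; upper 16 bits
//   ori  rt, rt, 0x5678      ; lower 16 bits
// and target_address_at() reassembles (0x1234 << 16) | 0x5678.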
2142void Assembler::set_target_address_at(Address pc, Address target) {
2143  Instr instr2 = instr_at(pc + kInstrSize);
2144  uint32_t rt_code = GetRtField(instr2);
2145  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2146  uint32_t itarget = reinterpret_cast<uint32_t>(target);
2147
2148#ifdef DEBUG
2149  // Check we have the result from a li macro-instruction, using instr pair.
2150  Instr instr1 = instr_at(pc);
2151  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
2152#endif
2153
2154  // Must use 2 instructions to ensure patchable code => just use lui and ori.
2155  // lui rt, upper-16.
2156  // ori rt, rt, lower-16.
2157  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
2158  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
2159
2160  // The following code is an optimization for the common case of Call()
2161  // or Jump(), which loads an address into a register and jumps through it:
2162  //     li(t9, address); jalr(t9)    (or jr(t9)).
2163  // If the destination address is in the same 256 MB page as the call, it
2164  // is faster to do a direct jal, or j, rather than jump through a register,
2165  // since that lets the CPU pipeline prefetch the target address. However, each
2166  // time the address above is patched, we have to patch the direct jal/j
2167  // instruction, as well as possibly revert to jalr/jr if we now cross a
2168  // 256 MB page. Note that with the jal/j instructions, we do not need to
2169  // load the register, but that code is left, since it makes it easy to
2170  // revert this process. A further optimization could try replacing the
2171  // li sequence with nops.
2172  // This optimization can only be applied if the rt-code from instr2 is the
2173  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
2174  // the MIPS return. Occasionally this lands after an li().
2175
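  // Illustration (added for clarity; t9 is the register typically used by
  // Call/Jump, the target is hypothetical): when the target is in the same
  // 256 MB region as the delay-slot pc, the sequence
  //   lui t9, hi16(target); ori t9, t9, lo16(target); jalr t9
  // is rewritten below as
  //   lui t9, hi16(target); ori t9, t9, lo16(target); jal target
  // and reverted to jalr again if a later patch moves the target out of range.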
2176  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2177  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
2178  bool in_range =
2179             ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
2180  uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
2181  bool patched_jump = false;
2182
2183#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
2184  // This is a workaround for the 24k core E156 bug (it affects some 34k cores too).
2185  // Since the excluded space is only 64 KB out of 256 MB (0.02%), we will just
2186  // apply this workaround for all cores so we don't have to identify the core.
2187  if (in_range) {
2188    // The 24k core E156 bug has some very specific requirements; we only check
2189    // the most simple one: if the address of the delay slot instruction is in
2190    // the first or last 32 KB of the 256 MB segment.
2191    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
2192    uint32_t ipc_segment_addr = ipc & segment_mask;
2193    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
2194      in_range = false;
2195  }
2196#endif
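  // Note (added for clarity, not original text): segment_mask in the
  // workaround above is 0x0FFF8000, so ipc_segment_addr is 0 in the first
  // 32 KB of a 256 MB segment and equals segment_mask in the last 32 KB;
  // those are the cases the workaround excludes.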
2197
2198  if (IsJalr(instr3)) {
2199    // Try to convert JALR to JAL.
2200    if (in_range && GetRt(instr2) == GetRs(instr3)) {
2201      *(p+2) = JAL | target_field;
2202      patched_jump = true;
2203    }
2204  } else if (IsJr(instr3)) {
2205    // Try to convert JR to J, skip returns (jr ra).
2206    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
2207    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
2208      *(p+2) = J | target_field;
2209      patched_jump = true;
2210    }
2211  } else if (IsJal(instr3)) {
2212    if (in_range) {
2213      // We are patching an already converted JAL.
2214      *(p+2) = JAL | target_field;
2215    } else {
2216      // Patching JAL, but the target is out of range, so revert to JALR.
2217      // JALR rs reg is the rt reg specified in the ORI instruction.
2218      uint32_t rs_field = GetRt(instr2) << kRsShift;
2219      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
2220      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2221    }
2222    patched_jump = true;
2223  } else if (IsJ(instr3)) {
2224    if (in_range) {
2225      // We are patching an already converted J (jump).
2226      *(p+2) = J | target_field;
2227    } else {
2228      // Trying to patch J, but the target is out of range, so go back to JR.
2229      // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
2230      uint32_t rs_field = GetRt(instr2) << kRsShift;
2231      *(p+2) = SPECIAL | rs_field | JR;
2232    }
2233    patched_jump = true;
2234  }
2235
2236  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
2237}
2238
2239void Assembler::JumpLabelToJumpRegister(Address pc) {
2240  // Address pc points to lui/ori instructions.
2241  // Jump to label may follow at pc + 2 * kInstrSize.
2242  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
2243#ifdef DEBUG
2244  Instr instr1 = instr_at(pc);
2245#endif
2246  Instr instr2 = instr_at(pc + 1 * kInstrSize);
2247  Instr instr3 = instr_at(pc + 2 * kInstrSize);
2248  bool patched = false;
2249
2250  if (IsJal(instr3)) {
2251    ASSERT(GetOpcodeField(instr1) == LUI);
2252    ASSERT(GetOpcodeField(instr2) == ORI);
2253
2254    uint32_t rs_field = GetRt(instr2) << kRsShift;
2255    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
2256    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
2257    patched = true;
2258  } else if (IsJ(instr3)) {
2259    ASSERT(GetOpcodeField(instr1) == LUI);
2260    ASSERT(GetOpcodeField(instr2) == ORI);
2261
2262    uint32_t rs_field = GetRt(instr2) << kRsShift;
2263    *(p+2) = SPECIAL | rs_field | JR;
2264    patched = true;
2265  }
2266
2267  if (patched) {
2268    CPU::FlushICache(pc + 2 * kInstrSize, sizeof(Address));
2269  }
2270}
2271
2272} }  // namespace v8::internal
2273
2274#endif  // V8_TARGET_ARCH_MIPS
2275