// assembler-mips.cc revision 3100271588b61cbc1dc472a3f2f105d2eed8497f
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.


#include "v8.h"
#include "mips/assembler-mips-inl.h"
#include "serialize.h"


namespace v8 {
namespace internal {



const Register no_reg = { -1 };

const Register zero_reg = { 0 };
const Register at = { 1 };
const Register v0 = { 2 };
const Register v1 = { 3 };
const Register a0 = { 4 };
const Register a1 = { 5 };
const Register a2 = { 6 };
const Register a3 = { 7 };
const Register t0 = { 8 };
const Register t1 = { 9 };
const Register t2 = { 10 };
const Register t3 = { 11 };
const Register t4 = { 12 };
const Register t5 = { 13 };
const Register t6 = { 14 };
const Register t7 = { 15 };
const Register s0 = { 16 };
const Register s1 = { 17 };
const Register s2 = { 18 };
const Register s3 = { 19 };
const Register s4 = { 20 };
const Register s5 = { 21 };
const Register s6 = { 22 };
const Register s7 = { 23 };
const Register t8 = { 24 };
const Register t9 = { 25 };
const Register k0 = { 26 };
const Register k1 = { 27 };
const Register gp = { 28 };
const Register sp = { 29 };
const Register s8_fp = { 30 };
const Register ra = { 31 };


const FPURegister no_creg = { -1 };

const FPURegister f0 = { 0 };
const FPURegister f1 = { 1 };
const FPURegister f2 = { 2 };
const FPURegister f3 = { 3 };
const FPURegister f4 = { 4 };
const FPURegister f5 = { 5 };
const FPURegister f6 = { 6 };
const FPURegister f7 = { 7 };
const FPURegister f8 = { 8 };
const FPURegister f9 = { 9 };
const FPURegister f10 = { 10 };
const FPURegister f11 = { 11 };
const FPURegister f12 = { 12 };
const FPURegister f13 = { 13 };
const FPURegister f14 = { 14 };
const FPURegister f15 = { 15 };
const FPURegister f16 = { 16 };
const FPURegister f17 = { 17 };
const FPURegister f18 = { 18 };
const FPURegister f19 = { 19 };
const FPURegister f20 = { 20 };
const FPURegister f21 = { 21 };
const FPURegister f22 = { 22 };
const FPURegister f23 = { 23 };
const FPURegister f24 = { 24 };
const FPURegister f25 = { 25 };
const FPURegister f26 = { 26 };
const FPURegister f27 = { 27 };
const FPURegister f28 = { 28 };
const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };

int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // s8_fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}

Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    s8_fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = 0;

// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}

MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
  offset_ = offset;
}


// -----------------------------------------------------------------------------
// Implementation of Assembler.

static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;

Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Set up buffer pointers.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}


Assembler::~Assembler() {
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}


void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // no overlap
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned).
const int kEndOfChain = -4;
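// While a label is unbound, its uses form a chain: each branch that refers to
// the label keeps, in its 16-bit immediate field, the offset back to the
// previous use, and target_at() follows these offsets until it returns a
// negative position (kEndOfChain), which marks the first use.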

bool Assembler::is_branch(Instr instr) {
  uint32_t opcode   = ((instr & kOpcodeMask));
  uint32_t rt_field = ((instr & kRtFieldMask));
  uint32_t rs_field = ((instr & kRsFieldMask));
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
}


int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  // Check we have a branch instruction.
  ASSERT(is_branch(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  int32_t imm18 = ((instr &
                    static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
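  // For example, an encoded imm16 of 0xFFFF (-1 instruction) sign-extends to
  // an imm18 byte offset of -4.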

  return pos + kBranchPCOffset + imm18;
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(is_branch(instr));
  int32_t imm18 = target_pos - (pos + kBranchPCOffset);
  ASSERT((imm18 & 3) == 0);

  instr &= ~kImm16Mask;
  int32_t imm16 = imm18 >> 2;
  ASSERT(is_int16(imm16));

  instr_at_put(pos, instr | (imm16 & kImm16Mask));
}


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in MIPS's 16-bit immediate-offset instruction field,
// since there is no guarantee that the relocated location can be similarly
// encoded.
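// For example, an external reference that fits in 16 bits when assembled may
// be relocated by the serializer to an address that needs a full 32-bit load.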
bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return Serializer::enabled();
  } else if (rmode == RelocInfo::NONE) {
    return false;
  }
  return true;
}


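// Instructions with register operands.
// R-type encoding: opcode (6 bits) | rs (5) | rt (5) | rd (5) | sa (5) | funct (6).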
void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
      | (fd.code() << 6) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << 6) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
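// I-type encoding: opcode (6 bits) | rs (5) | rt (5) | immediate (16).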
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


// Registers are in the order of the instruction encoding, from left to right.
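// J-type encoding: opcode (6 bits) | target (26). The target is a word index;
// the jump replaces the low 28 bits of the PC with target << 2.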
void Assembler::GenInstrJump(Opcode opcode,
                              uint32_t address) {
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  return offset;
}


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(at_offset);
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  GenInstrImmediate(BEQ, rs, rt, offset);
}


void Assembler::bgez(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
}


void Assembler::bgezal(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
}


void Assembler::bgtz(Register rs, int16_t offset) {
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
}


void Assembler::blez(Register rs, int16_t offset) {
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
}


void Assembler::bltz(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
}


void Assembler::bltzal(Register rs, int16_t offset) {
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  GenInstrImmediate(BNE, rs, rt, offset);
}


void Assembler::j(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
}


void Assembler::jal(int32_t target) {
  ASSERT(is_uint28(target) && ((target & 3) == 0));
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
}


//-------Data-processing-instructions---------

// Arithmetic.

void Assembler::add(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
}


void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addi(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDI, rs, rd, j);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::sub(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


//------------Memory-instructions-------------

void Assembler::lb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


//-------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code) {
  ASSERT((code & ~0xfffff) == 0);
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}


//--------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
}


void Assembler::mtc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
}


void Assembler::mthc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
}


void Assembler::mfc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
}


void Assembler::mfhc1(FPURegister fs, Register rt) {
  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
}


// Conversions.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions.
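// c.cond.fmt compares fs with ft and stores the result in FPU condition flag
// cc; bc1t/bc1f below branch on that flag.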
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister ft, FPURegister fs, uint16_t cc) {
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Debugging.
void Assembler::RecordJSReturn() {
  WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_debug_code) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::RecordPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_position_ = pos;
}


void Assembler::RecordStatementPosition(int pos) {
  if (pos == RelocInfo::kNoPosition) return;
  ASSERT(pos >= 0);
  current_statement_position_ = pos;
}


void Assembler::WriteRecordedPositions() {
  // Write the statement position if it is different from what was written last
  // time.
  if (current_statement_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
    written_statement_position_ = current_statement_position_;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (current_position_ != written_position_ &&
      current_position_ != written_statement_position_) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::POSITION, current_position_);
    written_position_ = current_position_;
  }
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);


  // On ia32 and ARM pc relative addressing is used, and we thus need to apply a
  // shift by pc_delta. But on MIPS the target address is directly loaded, so
  // we do not need to relocate here.

  ASSERT(!overflow());
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !Serializer::enabled() &&
        !FLAG_debug_code) {
      return;
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}


Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Check we have 2 instructions generated by li.
  ASSERT(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
         ((instr1 == nopInstr) && ((instr2 & kOpcodeMask) == ADDI ||
                            (instr2 & kOpcodeMask) == ORI ||
                            (instr2 & kOpcodeMask) == LUI)));
  // Interpret these 2 instructions.
  if (instr1 == nopInstr) {
    if ((instr2 & kOpcodeMask) == ADDI) {
      return reinterpret_cast<Address>(((instr2 & kImm16Mask) << 16) >> 16);
    } else if ((instr2 & kOpcodeMask) == ORI) {
      return reinterpret_cast<Address>(instr2 & kImm16Mask);
    } else if ((instr2 & kOpcodeMask) == LUI) {
      return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
    }
  } else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
    // 32-bit value.
    return reinterpret_cast<Address>(
        (instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
  }

  // We should never get here.
  UNREACHABLE();
  return (Address)0x0;
}


void Assembler::set_target_address_at(Address pc, Address target) {
  // On MIPS we need to patch the generated code to load the new target.

  // First check that we have a li.
  Instr instr2 = instr_at(pc + kInstrSize);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);

  // Check that we indeed have the result of a li with MustUseAt true.
  CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
        ((instr1 == 0) && ((instr2 & kOpcodeMask) == ADDIU ||
                           (instr2 & kOpcodeMask) == ORI ||
                           (instr2 & kOpcodeMask) == LUI)));
#endif


  uint32_t rt_code = (instr2 & kRtFieldMask);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

  if (is_int16(itarget)) {
    // nop
    // addiu rt zero_reg j
    *p = nopInstr;
    *(p+1) = ADDIU | rt_code | (itarget & LOMask);
  } else if (!(itarget & HIMask)) {
    // nop
    // ori rt zero_reg j
    *p = nopInstr;
    *(p+1) = ORI | rt_code | (itarget & LOMask);
  } else if (!(itarget & LOMask)) {
    // nop
    // lui rt (HIMask & itarget)>>16
    *p = nopInstr;
    *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
  } else {
    // lui rt (HIMask & itarget)>>16
    // ori rt rt, (LOMask & itarget)
    *p = LUI | rt_code | ((itarget & HIMask)>>16);
    *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
  }

  CPU::FlushICache(pc, 2 * sizeof(int32_t));
}


} }  // namespace v8::internal
