// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      unresolved_(0),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}


void MacroAssembler::Jump(Register target, Condition cond,
                          Register r1, const Operand& r2) {
  Jump(Operand(target), cond, r1, r2);
}


void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  Jump(Operand(target, rmode), cond, r1, r2);
}


void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
}


void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}


void MacroAssembler::Call(Register target,
                          Condition cond, Register r1, const Operand& r2) {
  Call(Operand(target), cond, r1, r2);
}


void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  Call(Operand(target, rmode), cond, r1, r2);
}


void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
}


void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
                          Condition cond, Register r1, const Operand& r2) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
}


void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
  Jump(Operand(ra), cond, r1, r2);
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
}

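// A conditional version of LoadRoot: the load is branched over (with a nop in
// the branch delay slot) when the condition does not hold. Rough expansion,
// for illustration only, with hypothetical operands:
//
//   LoadRoot(t0, Heap::kUndefinedValueRootIndex, eq, t1, Operand(zero_reg))
//
// emits approximately:
//
//   bne  t1, zero_reg, 2     // Skip the load if t1 != zero_reg.
//   nop                      // Branch delay slot.
//   lw   t0, offset(s4)      // s4 holds the root-list base address.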
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index,
                              Condition cond,
                              Register src1, const Operand& src2) {
  Branch(NegateCondition(cond), 2, src1, src2);
  nop();
  lw(destination, MemOperand(s4, index << kPointerSizeLog2));
}


void MacroAssembler::RecordWrite(Register object, Register offset,
                                 Register scratch) {
  UNIMPLEMENTED_MIPS();
}


// ---------------------------------------------------------------------------
// Instruction macros
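//
// Each macro below follows the same pattern: a register operand maps directly
// to the native instruction, a 16-bit immediate uses the immediate form where
// one exists, and anything else (a large immediate, or a value that needs
// relocation) is first materialized into the at register with li(). A rough
// expansion, for illustration only:
//
//   Addu(t0, t1, Operand(0x12345678))  // Immediate does not fit in 16 bits.
//
// emits approximately:
//
//   lui   at, 0x1234
//   ori   at, at, 0x5678
//   addu  t0, t1, at
//
// This is also why these macros assert that rs is not at: the li() would
// clobber the source operand before it is used.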

void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    add(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      addi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      add(rd, rs, at);
    }
  }
}


void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    addu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      addiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      addu(rd, rs, at);
    }
  }
}


void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mul(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mul(rd, rs, at);
  }
}


void MacroAssembler::Mult(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    mult(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    mult(rs, at);
  }
}


void MacroAssembler::Multu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    multu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    multu(rs, at);
  }
}


void MacroAssembler::Div(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    div(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    div(rs, at);
  }
}


void MacroAssembler::Divu(Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    divu(rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    divu(rs, at);
  }
}


void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    and_(rd, rs, rt.rm());
  } else {
    // andi zero-extends its immediate, so only unsigned 16-bit values fit.
    if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      andi(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      and_(rd, rs, at);
    }
  }
}


void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    or_(rd, rs, rt.rm());
  } else {
    // ori zero-extends its immediate, so only unsigned 16-bit values fit.
    if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      ori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      or_(rd, rs, at);
    }
  }
}


void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    xor_(rd, rs, rt.rm());
  } else {
    // xori zero-extends its immediate, so only unsigned 16-bit values fit.
    if (is_uint16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      xori(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      xor_(rd, rs, at);
    }
  }
}


void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    nor(rd, rs, rt.rm());
  } else {
    // li handles the relocation.
    ASSERT(!rs.is(at));
    li(at, rt);
    nor(rd, rs, at);
  }
}


void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    slt(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      slti(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      slt(rd, rs, at);
    }
  }
}


void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
  if (rt.is_reg()) {
    sltu(rd, rs, rt.rm());
  } else {
    if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
      sltiu(rd, rs, rt.imm32_);
    } else {
      // li handles the relocation.
      ASSERT(!rs.is(at));
      li(at, rt);
      sltu(rd, rs, at);
    }
  }
}


// ---------------------------------------------------------------------------
// Pseudo-instructions

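// Note: despite borrowing the mnemonic of the MIPS conditional-move
// instruction, this macro emits a bitwise NOT: at is filled with ones and
// then xor'ed with rt.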
void MacroAssembler::movn(Register rd, Register rt) {
  addiu(at, zero_reg, -1);  // Fill at with ones.
  xor_(rd, rt, at);
}


// Load a word into a register.
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
  ASSERT(!j.is_reg());

  if (!MustUseAt(j.rmode_) && !gen2instr) {
    // Normal load of an immediate value which does not need relocation info.
    if (is_int16(j.imm32_)) {
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & HIMask)) {
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & LOMask)) {
      lui(rd, (HIMask & j.imm32_) >> 16);
    } else {
      lui(rd, (HIMask & j.imm32_) >> 16);
      ori(rd, rd, (LOMask & j.imm32_));
    }
  } else if (MustUseAt(j.rmode_) || gen2instr) {
    if (MustUseAt(j.rmode_)) {
      RecordRelocInfo(j.rmode_, j.imm32_);
    }
    // We always emit the same number of instructions, since this code may
    // later be patched with a value that needs two instructions to load.
    if (is_int16(j.imm32_)) {
      nop();
      addiu(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & HIMask)) {
      nop();
      ori(rd, zero_reg, j.imm32_);
    } else if (!(j.imm32_ & LOMask)) {
      nop();
      lui(rd, (HIMask & j.imm32_) >> 16);
    } else {
      lui(rd, (HIMask & j.imm32_) >> 16);
      ori(rd, rd, (LOMask & j.imm32_));
    }
  }
}
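// Rough expansions of li, for illustration only:
//
//   li(t0, Operand(42))          ->  addiu t0, zero_reg, 42
//   li(t0, Operand(0x12340000))  ->  lui   t0, 0x1234
//   li(t0, Operand(0x12345678))  ->  lui   t0, 0x1234
//                                    ori   t0, t0, 0x5678
//
// With gen2instr (or a relocation mode), the short forms are padded with a
// leading nop so the sequence is always two instructions long and can later
// be patched to hold an arbitrary 32-bit value.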


// Exception-generating instructions and debugging support.
void MacroAssembler::stop(const char* msg) {
  // TO_UPGRADE: Just a break for now. Maybe we could upgrade it.
  // We use the 0x54321 value so it is easy to find when reading memory.
  break_(0x54321);
}


void MacroAssembler::MultiPush(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}


void MacroAssembler::MultiPushReversed(RegList regs) {
  int16_t NumSaved = 0;
  int16_t NumToPush = NumberOfBitsSet(regs);

  addiu(sp, sp, -4 * NumToPush);
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      sw(ToRegister(i), MemOperand(sp, 4 * (NumToPush - ++NumSaved)));
    }
  }
}


void MacroAssembler::MultiPop(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}


void MacroAssembler::MultiPopReversed(RegList regs) {
  int16_t NumSaved = 0;

  for (int16_t i = 0; i < kNumRegisters; i++) {
    if ((regs & (1 << i)) != 0) {
      lw(ToRegister(i), MemOperand(sp, 4 * (NumSaved++)));
    }
  }
  addiu(sp, sp, 4 * NumSaved);
}
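// Worked example, for illustration: with regs = a1.bit() | a2.bit() | t0.bit()
// (assuming the usual bit() encoding of a RegList), MultiPush grows the stack
// by 12 bytes and stores the highest-numbered register at the lowest address:
//
//   [sp + 8]  a1
//   [sp + 4]  a2
//   [sp + 0]  t0
//
// MultiPop walks the registers in the opposite order and reads the same
// layout back, so MultiPush/MultiPop (and likewise the Reversed pair) are
// exact inverses for the same register list.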


// Emulated conditional branches do not emit a nop in the branch delay slot.

// Trashes the at register if no scratch register is provided.
void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
                            const Operand& rt, Register scratch) {
  Register r2 = no_reg;
  if (rt.is_reg()) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    // We don't want any other register but scratch clobbered.
    ASSERT(!scratch.is(rs));
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      b(offset);
      break;
    case eq:
      beq(rs, r2, offset);
      break;
    case ne:
      bne(rs, r2, offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      bne(scratch, zero_reg, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      beq(scratch, zero_reg, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      bne(scratch, zero_reg, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      beq(scratch, zero_reg, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      bne(scratch, zero_reg, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      beq(scratch, zero_reg, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      bne(scratch, zero_reg, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      beq(scratch, zero_reg, offset);
      break;

    default:
      UNREACHABLE();
  }
}
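// Rough expansion, for illustration: Branch(greater, offset, a0, Operand(a1))
// emits
//
//   slt  scratch, a1, a0            // scratch = 1 iff a0 > a1 (signed).
//   bne  scratch, zero_reg, offset  // Taken iff the comparison held.
//
// As noted above, no delay-slot nop is emitted; the caller must fill the
// branch delay slot itself.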


void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
                            const Operand& rt, Register scratch) {
  Register r2 = no_reg;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  // We use shifted_branch_offset() inline in the branch-instruction arguments
  // so that the offset is computed immediately before the branch instruction
  // is generated, as required.
  switch (cond) {
    case cc_always:
      b(shifted_branch_offset(L, false));
      break;
    case eq:
      beq(rs, r2, shifted_branch_offset(L, false));
      break;
    case ne:
      bne(rs, r2, shifted_branch_offset(L, false));
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case less:
      slt(scratch, rs, r2);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case less_equal:
      slt(scratch, r2, rs);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case Uless:
      sltu(scratch, rs, r2);
      bne(scratch, zero_reg, shifted_branch_offset(L, false));
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      beq(scratch, zero_reg, shifted_branch_offset(L, false));
      break;

    default:
      UNREACHABLE();
  }
}


// Trashes the at register if no scratch register is provided.
// We need to use bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead, but we would miss
// overflow cases, so we keep slt and add an intermediate third instruction.
void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
                                   const Operand& rt, Register scratch) {
  Register r2 = no_reg;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(offset);
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      bal(offset);
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      bal(offset);
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, offset);
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, offset);
      break;

    default:
      UNREACHABLE();
  }
}
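// The addiu(scratch, scratch, -1) in the cases above maps the slt result
// {0, 1} onto {-1, 0}: bgezal then branches-and-links exactly when slt
// produced 1 (scratch == 0), and bltzal exactly when slt produced 0
// (scratch == -1). This lets the link forms be used even though bgezal and
// bltzal compare a single register against zero rather than two registers.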


void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
                                   const Operand& rt, Register scratch) {
  Register r2 = no_reg;
  if (rt.is_reg()) {
    r2 = rt.rm_;
  } else if (cond != cc_always) {
    r2 = scratch;
    li(r2, rt);
  }

  switch (cond) {
    case cc_always:
      bal(shifted_branch_offset(L, false));
      break;
    case eq:
      bne(rs, r2, 2);
      nop();
      bal(shifted_branch_offset(L, false));
      break;
    case ne:
      beq(rs, r2, 2);
      nop();
      bal(shifted_branch_offset(L, false));
      break;

    // Signed comparison.
    case greater:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case greater_equal:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;
    case less:
      slt(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case less_equal:
      slt(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;

    // Unsigned comparison.
    case Ugreater:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case Ugreater_equal:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;
    case Uless:
      sltu(scratch, rs, r2);
      addiu(scratch, scratch, -1);
      bgezal(scratch, shifted_branch_offset(L, false));
      break;
    case Uless_equal:
      sltu(scratch, r2, rs);
      addiu(scratch, scratch, -1);
      bltzal(scratch, shifted_branch_offset(L, false));
      break;

    default:
      UNREACHABLE();
  }
}

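// Jump and Call dispatch on the operand kind: a register target uses jr/jalr,
// an absolute immediate without relocation info can use the one-instruction
// j/jal (whose 26-bit index only reaches within the current 256 MB region),
// and a relocatable target is first loaded into at so the jr/jalr sequence
// remains patchable. Conditional forms branch over the jump using the
// negated condition.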
void MacroAssembler::Jump(const Operand& target,
                          Condition cond, Register rs, const Operand& rt) {
  if (target.is_reg()) {
    if (cond == cc_always) {
      jr(target.rm());
    } else {
      Branch(NegateCondition(cond), 2, rs, rt);
      nop();
      jr(target.rm());
    }
  } else {  // !target.is_reg()
    if (!MustUseAt(target.rmode_)) {
      if (cond == cc_always) {
        j(target.imm32_);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        j(target.imm32_);  // Will generate only one instruction.
      }
    } else {  // MustUseAt(target)
      li(at, target);  // Load the jump target, not the condition operand.
      if (cond == cc_always) {
        jr(at);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        jr(at);  // Will generate only one instruction.
      }
    }
  }
}


void MacroAssembler::Call(const Operand& target,
                          Condition cond, Register rs, const Operand& rt) {
  if (target.is_reg()) {
    if (cond == cc_always) {
      jalr(target.rm());
    } else {
      Branch(NegateCondition(cond), 2, rs, rt);
      nop();
      jalr(target.rm());
    }
  } else {  // !target.is_reg()
    if (!MustUseAt(target.rmode_)) {
      if (cond == cc_always) {
        jal(target.imm32_);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        jal(target.imm32_);  // Will generate only one instruction.
      }
    } else {  // MustUseAt(target)
      li(at, target);  // Load the call target, not the condition operand.
      if (cond == cc_always) {
        jalr(at);
      } else {
        Branch(NegateCondition(cond), 2, rs, rt);
        nop();
        jalr(at);  // Will generate only one instruction.
      }
    }
  }
}


void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Drop(int count, Condition cond) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Call(Label* target) {
  UNIMPLEMENTED_MIPS();
}


// ---------------------------------------------------------------------------
// Exception handling

void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::PopTryHandler() {
  UNIMPLEMENTED_MIPS();
}


// ---------------------------------------------------------------------------
// Activation frames

void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
                              Register r1, const Operand& r2) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::StubReturn(int argc) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
                                     int num_arguments,
                                     int result_size) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) {
  UNIMPLEMENTED_MIPS();
}


Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
                                            bool* resolved) {
  UNIMPLEMENTED_MIPS();
  return Handle<Code>(reinterpret_cast<Code*>(NULL));  // UNIMPLEMENTED RETURN
}


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                   InvokeJSFlags flags) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Assert(Condition cc, const char* msg,
                            Register rs, Operand rt) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Check(Condition cc, const char* msg,
                           Register rs, Operand rt) {
  UNIMPLEMENTED_MIPS();
}


void MacroAssembler::Abort(const char* msg) {
  UNIMPLEMENTED_MIPS();
}

} }  // namespace v8::internal