1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include <limits.h>  // For LONG_MIN, LONG_MAX.
29
30#include "v8.h"
31
32#if V8_TARGET_ARCH_MIPS
33
34#include "bootstrapper.h"
35#include "codegen.h"
36#include "cpu-profiler.h"
37#include "debug.h"
38#include "isolate-inl.h"
39#include "runtime.h"
40
41namespace v8 {
42namespace internal {
43
44MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45    : Assembler(arg_isolate, buffer, size),
46      generating_stub_(false),
47      has_frame_(false) {
48  if (isolate() != NULL) {
49    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
50                                  isolate());
51  }
52}
53
54
55void MacroAssembler::Load(Register dst,
56                          const MemOperand& src,
57                          Representation r) {
58  ASSERT(!r.IsDouble());
59  if (r.IsInteger8()) {
60    lb(dst, src);
61  } else if (r.IsUInteger8()) {
62    lbu(dst, src);
63  } else if (r.IsInteger16()) {
64    lh(dst, src);
65  } else if (r.IsUInteger16()) {
66    lhu(dst, src);
67  } else {
68    lw(dst, src);
69  }
70}
71
72
73void MacroAssembler::Store(Register src,
74                           const MemOperand& dst,
75                           Representation r) {
76  ASSERT(!r.IsDouble());
77  if (r.IsInteger8() || r.IsUInteger8()) {
78    sb(src, dst);
79  } else if (r.IsInteger16() || r.IsUInteger16()) {
80    sh(src, dst);
81  } else {
82    sw(src, dst);
83  }
84}
85
86
87void MacroAssembler::LoadRoot(Register destination,
88                              Heap::RootListIndex index) {
89  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
90}
91
92
93void MacroAssembler::LoadRoot(Register destination,
94                              Heap::RootListIndex index,
95                              Condition cond,
96                              Register src1, const Operand& src2) {
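  // Branch over the load below (delay slot included) when the condition does
  // not hold.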
97  Branch(2, NegateCondition(cond), src1, src2);
98  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
99}
100
101
102void MacroAssembler::StoreRoot(Register source,
103                               Heap::RootListIndex index) {
104  sw(source, MemOperand(s6, index << kPointerSizeLog2));
105}
106
107
108void MacroAssembler::StoreRoot(Register source,
109                               Heap::RootListIndex index,
110                               Condition cond,
111                               Register src1, const Operand& src2) {
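  // Branch over the store below (delay slot included) when the condition does
  // not hold.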
112  Branch(2, NegateCondition(cond), src1, src2);
113  sw(source, MemOperand(s6, index << kPointerSizeLog2));
114}
115
116
117// Push and pop all registers that can hold pointers.
118void MacroAssembler::PushSafepointRegisters() {
119  // Safepoints expect a block of kNumSafepointRegisters values on the
120  // stack, so adjust the stack for unsaved registers.
121  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
122  ASSERT(num_unsaved >= 0);
123  if (num_unsaved > 0) {
124    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
125  }
126  MultiPush(kSafepointSavedRegisters);
127}
128
129
130void MacroAssembler::PopSafepointRegisters() {
131  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
132  MultiPop(kSafepointSavedRegisters);
133  if (num_unsaved > 0) {
134    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
135  }
136}
137
138
139void MacroAssembler::PushSafepointRegistersAndDoubles() {
140  PushSafepointRegisters();
141  Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
142  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
143    FPURegister reg = FPURegister::FromAllocationIndex(i);
144    sdc1(reg, MemOperand(sp, i * kDoubleSize));
145  }
146}
147
148
149void MacroAssembler::PopSafepointRegistersAndDoubles() {
150  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
151    FPURegister reg = FPURegister::FromAllocationIndex(i);
152    ldc1(reg, MemOperand(sp, i * kDoubleSize));
153  }
154  Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
155  PopSafepointRegisters();
156}
157
158
159void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
160                                                             Register dst) {
161  sw(src, SafepointRegistersAndDoublesSlot(dst));
162}
163
164
165void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
166  sw(src, SafepointRegisterSlot(dst));
167}
168
169
170void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
171  lw(dst, SafepointRegisterSlot(src));
172}
173
174
175int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
176  // The registers are pushed starting with the highest encoding,
177  // which means that lowest encodings are closest to the stack pointer.
178  return kSafepointRegisterStackIndexMap[reg_code];
179}
180
181
182MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
183  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
184}
185
186
187MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
188  UNIMPLEMENTED_MIPS();
189  // General purpose registers are pushed last on the stack.
190  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
191  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
192  return MemOperand(sp, doubles_size + register_offset);
193}
194
195
196void MacroAssembler::InNewSpace(Register object,
197                                Register scratch,
198                                Condition cc,
199                                Label* branch) {
200  ASSERT(cc == eq || cc == ne);
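  // New space is an aligned region, so masking the address and comparing the
  // result against new_space_start tells whether the object is in new space.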
201  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
202  Branch(branch, cc, scratch,
203         Operand(ExternalReference::new_space_start(isolate())));
204}
205
206
207void MacroAssembler::RecordWriteField(
208    Register object,
209    int offset,
210    Register value,
211    Register dst,
212    RAStatus ra_status,
213    SaveFPRegsMode save_fp,
214    RememberedSetAction remembered_set_action,
215    SmiCheck smi_check) {
216  ASSERT(!AreAliased(value, dst, t8, object));
217  // First, check if a write barrier is even needed. The tests below
218  // catch stores of Smis.
219  Label done;
220
221  // Skip barrier if writing a smi.
222  if (smi_check == INLINE_SMI_CHECK) {
223    JumpIfSmi(value, &done);
224  }
225
  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
228  ASSERT(IsAligned(offset, kPointerSize));
229
230  Addu(dst, object, Operand(offset - kHeapObjectTag));
231  if (emit_debug_code()) {
232    Label ok;
233    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
234    Branch(&ok, eq, t8, Operand(zero_reg));
235    stop("Unaligned cell in write barrier");
236    bind(&ok);
237  }
238
239  RecordWrite(object,
240              dst,
241              value,
242              ra_status,
243              save_fp,
244              remembered_set_action,
245              OMIT_SMI_CHECK);
246
247  bind(&done);
248
249  // Clobber clobbered input registers when running with the debug-code flag
250  // turned on to provoke errors.
251  if (emit_debug_code()) {
252    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
253    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
254  }
255}
256
257
258// Will clobber 4 registers: object, address, scratch, ip.  The
259// register 'object' contains a heap object pointer.  The heap object
260// tag is shifted away.
261void MacroAssembler::RecordWrite(Register object,
262                                 Register address,
263                                 Register value,
264                                 RAStatus ra_status,
265                                 SaveFPRegsMode fp_mode,
266                                 RememberedSetAction remembered_set_action,
267                                 SmiCheck smi_check) {
268  ASSERT(!AreAliased(object, address, value, t8));
269  ASSERT(!AreAliased(object, address, value, t9));
270
271  if (emit_debug_code()) {
272    lw(at, MemOperand(address));
273    Assert(
274        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
275  }
276
277  // Count number of write barriers in generated code.
278  isolate()->counters()->write_barriers_static()->Increment();
279  // TODO(mstarzinger): Dynamic counter missing.
280
281  // First, check if a write barrier is even needed. The tests below
282  // catch stores of smis and stores into the young generation.
283  Label done;
284
285  if (smi_check == INLINE_SMI_CHECK) {
286    ASSERT_EQ(0, kSmiTag);
287    JumpIfSmi(value, &done);
288  }
289
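  // The barrier can be skipped if the value's page is not marked as having
  // interesting pointers to it, or if the object's page is not marked as
  // having interesting pointers from it.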
290  CheckPageFlag(value,
291                value,  // Used as scratch.
292                MemoryChunk::kPointersToHereAreInterestingMask,
293                eq,
294                &done);
295  CheckPageFlag(object,
296                value,  // Used as scratch.
297                MemoryChunk::kPointersFromHereAreInterestingMask,
298                eq,
299                &done);
300
301  // Record the actual write.
302  if (ra_status == kRAHasNotBeenSaved) {
303    push(ra);
304  }
305  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
306  CallStub(&stub);
307  if (ra_status == kRAHasNotBeenSaved) {
308    pop(ra);
309  }
310
311  bind(&done);
312
313  // Clobber clobbered registers when running with the debug-code flag
314  // turned on to provoke errors.
315  if (emit_debug_code()) {
316    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
317    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
318  }
319}
320
321
322void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
323                                         Register address,
324                                         Register scratch,
325                                         SaveFPRegsMode fp_mode,
326                                         RememberedSetFinalAction and_then) {
327  Label done;
328  if (emit_debug_code()) {
329    Label ok;
330    JumpIfNotInNewSpace(object, scratch, &ok);
331    stop("Remembered set pointer is in new space");
332    bind(&ok);
333  }
334  // Load store buffer top.
335  ExternalReference store_buffer =
336      ExternalReference::store_buffer_top(isolate());
337  li(t8, Operand(store_buffer));
338  lw(scratch, MemOperand(t8));
339  // Store pointer to buffer and increment buffer top.
340  sw(address, MemOperand(scratch));
341  Addu(scratch, scratch, kPointerSize);
342  // Write back new top of buffer.
343  sw(scratch, MemOperand(t8));
344  // Call stub on end of buffer.
345  // Check for end of buffer.
346  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
347  if (and_then == kFallThroughAtEnd) {
348    Branch(&done, eq, t8, Operand(zero_reg));
349  } else {
350    ASSERT(and_then == kReturnAtEnd);
351    Ret(eq, t8, Operand(zero_reg));
352  }
353  push(ra);
354  StoreBufferOverflowStub store_buffer_overflow =
355      StoreBufferOverflowStub(fp_mode);
356  CallStub(&store_buffer_overflow);
357  pop(ra);
358  bind(&done);
359  if (and_then == kReturnAtEnd) {
360    Ret();
361  }
362}
363
364
365// -----------------------------------------------------------------------------
366// Allocation support.
367
368
369void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
370                                            Register scratch,
371                                            Label* miss) {
372  Label same_contexts;
373
374  ASSERT(!holder_reg.is(scratch));
375  ASSERT(!holder_reg.is(at));
376  ASSERT(!scratch.is(at));
377
378  // Load current lexical context from the stack frame.
379  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
380  // In debug mode, make sure the lexical context is set.
381#ifdef DEBUG
382  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
383      scratch, Operand(zero_reg));
384#endif
385
386  // Load the native context of the current context.
387  int offset =
388      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
389  lw(scratch, FieldMemOperand(scratch, offset));
390  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
391
392  // Check the context is a native context.
393  if (emit_debug_code()) {
394    push(holder_reg);  // Temporarily save holder on the stack.
395    // Read the first word and compare to the native_context_map.
396    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
397    LoadRoot(at, Heap::kNativeContextMapRootIndex);
398    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
399          holder_reg, Operand(at));
400    pop(holder_reg);  // Restore holder.
401  }
402
403  // Check if both contexts are the same.
404  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
405  Branch(&same_contexts, eq, scratch, Operand(at));
406
407  // Check the context is a native context.
408  if (emit_debug_code()) {
409    push(holder_reg);  // Temporarily save holder on the stack.
410    mov(holder_reg, at);  // Move at to its holding place.
411    LoadRoot(at, Heap::kNullValueRootIndex);
412    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
413          holder_reg, Operand(at));
414
415    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
416    LoadRoot(at, Heap::kNativeContextMapRootIndex);
417    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
418          holder_reg, Operand(at));
    // Restoring 'at' is not needed; 'at' is reloaded below.
420    pop(holder_reg);  // Restore holder.
421    // Restore at to holder's context.
422    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
423  }
424
425  // Check that the security token in the calling global object is
426  // compatible with the security token in the receiving global
427  // object.
428  int token_offset = Context::kHeaderSize +
429                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
430
431  lw(scratch, FieldMemOperand(scratch, token_offset));
432  lw(at, FieldMemOperand(at, token_offset));
433  Branch(miss, ne, scratch, Operand(at));
434
435  bind(&same_contexts);
436}
437
438
439void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
440  // First of all we assign the hash seed to scratch.
441  LoadRoot(scratch, Heap::kHashSeedRootIndex);
442  SmiUntag(scratch);
443
444  // Xor original key with a seed.
445  xor_(reg0, reg0, scratch);
446
447  // Compute the hash code from the untagged key.  This must be kept in sync
448  // with ComputeIntegerHash in utils.h.
449  //
450  // hash = ~hash + (hash << 15);
451  nor(scratch, reg0, zero_reg);
452  sll(at, reg0, 15);
453  addu(reg0, scratch, at);
454
455  // hash = hash ^ (hash >> 12);
456  srl(at, reg0, 12);
457  xor_(reg0, reg0, at);
458
459  // hash = hash + (hash << 2);
460  sll(at, reg0, 2);
461  addu(reg0, reg0, at);
462
463  // hash = hash ^ (hash >> 4);
464  srl(at, reg0, 4);
465  xor_(reg0, reg0, at);
466
467  // hash = hash * 2057;
468  sll(scratch, reg0, 11);
469  sll(at, reg0, 3);
470  addu(reg0, reg0, at);
471  addu(reg0, reg0, scratch);
472
473  // hash = hash ^ (hash >> 16);
474  srl(at, reg0, 16);
475  xor_(reg0, reg0, at);
476}
477
478
479void MacroAssembler::LoadFromNumberDictionary(Label* miss,
480                                              Register elements,
481                                              Register key,
482                                              Register result,
483                                              Register reg0,
484                                              Register reg1,
485                                              Register reg2) {
486  // Register use:
487  //
488  // elements - holds the slow-case elements of the receiver on entry.
489  //            Unchanged unless 'result' is the same register.
490  //
491  // key      - holds the smi key on entry.
492  //            Unchanged unless 'result' is the same register.
  //
495  // result   - holds the result on exit if the load succeeded.
496  //            Allowed to be the same as 'key' or 'result'.
497  //            Unchanged on bailout so 'key' or 'result' can be used
498  //            in further computation.
499  //
500  // Scratch registers:
501  //
502  // reg0 - holds the untagged key on entry and holds the hash once computed.
503  //
504  // reg1 - Used to hold the capacity mask of the dictionary.
505  //
506  // reg2 - Used for the index into the dictionary.
507  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
508  Label done;
509
510  GetNumberHash(reg0, reg1);
511
512  // Compute the capacity mask.
513  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
514  sra(reg1, reg1, kSmiTagSize);
515  Subu(reg1, reg1, Operand(1));
516
517  // Generate an unrolled loop that performs a few probes before giving up.
518  for (int i = 0; i < kNumberDictionaryProbes; i++) {
519    // Use reg2 for index calculations and keep the hash intact in reg0.
520    mov(reg2, reg0);
521    // Compute the masked index: (hash + i + i * i) & mask.
522    if (i > 0) {
523      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
524    }
525    and_(reg2, reg2, reg1);
526
527    // Scale the index by multiplying by the element size.
528    ASSERT(SeededNumberDictionary::kEntrySize == 3);
529    sll(at, reg2, 1);  // 2x.
530    addu(reg2, reg2, at);  // reg2 = reg2 * 3.
531
532    // Check if the key is identical to the name.
533    sll(at, reg2, kPointerSizeLog2);
534    addu(reg2, elements, at);
535
536    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
537    if (i != kNumberDictionaryProbes - 1) {
538      Branch(&done, eq, key, Operand(at));
539    } else {
540      Branch(miss, ne, key, Operand(at));
541    }
542  }
543
544  bind(&done);
545  // Check that the value is a normal property.
546  // reg2: elements + (index * kPointerSize).
547  const int kDetailsOffset =
548      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
549  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
550  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
551  Branch(miss, ne, at, Operand(zero_reg));
552
553  // Get the value at the masked, scaled index and return.
554  const int kValueOffset =
555      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
556  lw(result, FieldMemOperand(reg2, kValueOffset));
557}
558
559
560// ---------------------------------------------------------------------------
561// Instruction macros.
562
563void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
564  if (rt.is_reg()) {
565    addu(rd, rs, rt.rm());
566  } else {
567    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
568      addiu(rd, rs, rt.imm32_);
569    } else {
570      // li handles the relocation.
571      ASSERT(!rs.is(at));
572      li(at, rt);
573      addu(rd, rs, at);
574    }
575  }
576}
577
578
579void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
580  if (rt.is_reg()) {
581    subu(rd, rs, rt.rm());
582  } else {
583    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
584      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
585    } else {
586      // li handles the relocation.
587      ASSERT(!rs.is(at));
588      li(at, rt);
589      subu(rd, rs, at);
590    }
591  }
592}
593
594
595void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
596  if (rt.is_reg()) {
597    if (kArchVariant == kLoongson) {
598      mult(rs, rt.rm());
599      mflo(rd);
600    } else {
601      mul(rd, rs, rt.rm());
602    }
603  } else {
604    // li handles the relocation.
605    ASSERT(!rs.is(at));
606    li(at, rt);
607    if (kArchVariant == kLoongson) {
608      mult(rs, at);
609      mflo(rd);
610    } else {
611      mul(rd, rs, at);
612    }
613  }
614}
615
616
617void MacroAssembler::Mult(Register rs, const Operand& rt) {
618  if (rt.is_reg()) {
619    mult(rs, rt.rm());
620  } else {
621    // li handles the relocation.
622    ASSERT(!rs.is(at));
623    li(at, rt);
624    mult(rs, at);
625  }
626}
627
628
629void MacroAssembler::Multu(Register rs, const Operand& rt) {
630  if (rt.is_reg()) {
631    multu(rs, rt.rm());
632  } else {
633    // li handles the relocation.
634    ASSERT(!rs.is(at));
635    li(at, rt);
636    multu(rs, at);
637  }
638}
639
640
641void MacroAssembler::Div(Register rs, const Operand& rt) {
642  if (rt.is_reg()) {
643    div(rs, rt.rm());
644  } else {
645    // li handles the relocation.
646    ASSERT(!rs.is(at));
647    li(at, rt);
648    div(rs, at);
649  }
650}
651
652
653void MacroAssembler::Divu(Register rs, const Operand& rt) {
654  if (rt.is_reg()) {
655    divu(rs, rt.rm());
656  } else {
657    // li handles the relocation.
658    ASSERT(!rs.is(at));
659    li(at, rt);
660    divu(rs, at);
661  }
662}
663
664
665void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
666  if (rt.is_reg()) {
667    and_(rd, rs, rt.rm());
668  } else {
669    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
670      andi(rd, rs, rt.imm32_);
671    } else {
672      // li handles the relocation.
673      ASSERT(!rs.is(at));
674      li(at, rt);
675      and_(rd, rs, at);
676    }
677  }
678}
679
680
681void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
682  if (rt.is_reg()) {
683    or_(rd, rs, rt.rm());
684  } else {
685    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
686      ori(rd, rs, rt.imm32_);
687    } else {
688      // li handles the relocation.
689      ASSERT(!rs.is(at));
690      li(at, rt);
691      or_(rd, rs, at);
692    }
693  }
694}
695
696
697void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
698  if (rt.is_reg()) {
699    xor_(rd, rs, rt.rm());
700  } else {
701    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
702      xori(rd, rs, rt.imm32_);
703    } else {
704      // li handles the relocation.
705      ASSERT(!rs.is(at));
706      li(at, rt);
707      xor_(rd, rs, at);
708    }
709  }
710}
711
712
713void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
714  if (rt.is_reg()) {
715    nor(rd, rs, rt.rm());
716  } else {
717    // li handles the relocation.
718    ASSERT(!rs.is(at));
719    li(at, rt);
720    nor(rd, rs, at);
721  }
722}
723
724
725void MacroAssembler::Neg(Register rs, const Operand& rt) {
726  ASSERT(rt.is_reg());
727  ASSERT(!at.is(rs));
728  ASSERT(!at.is(rt.rm()));
729  li(at, -1);
730  xor_(rs, rt.rm(), at);
731}
732
733
734void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
735  if (rt.is_reg()) {
736    slt(rd, rs, rt.rm());
737  } else {
738    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
739      slti(rd, rs, rt.imm32_);
740    } else {
741      // li handles the relocation.
742      ASSERT(!rs.is(at));
743      li(at, rt);
744      slt(rd, rs, at);
745    }
746  }
747}
748
749
750void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
751  if (rt.is_reg()) {
752    sltu(rd, rs, rt.rm());
753  } else {
754    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
755      sltiu(rd, rs, rt.imm32_);
756    } else {
757      // li handles the relocation.
758      ASSERT(!rs.is(at));
759      li(at, rt);
760      sltu(rd, rs, at);
761    }
762  }
763}
764
765
766void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
767  if (kArchVariant == kMips32r2) {
768    if (rt.is_reg()) {
769      rotrv(rd, rs, rt.rm());
770    } else {
771      rotr(rd, rs, rt.imm32_);
772    }
773  } else {
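    // Pre-R2 fallback: there is no rotr/rotrv, so synthesize the rotate from
    // two shifts and an or.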
774    if (rt.is_reg()) {
775      subu(at, zero_reg, rt.rm());
776      sllv(at, rs, at);
777      srlv(rd, rs, rt.rm());
778      or_(rd, rd, at);
779    } else {
780      if (rt.imm32_ == 0) {
781        srl(rd, rs, 0);
782      } else {
783        srl(at, rs, rt.imm32_);
784        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
785        or_(rd, rd, at);
786      }
787    }
788  }
789}
790
791
792//------------Pseudo-instructions-------------
793
794void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
795  AllowDeferredHandleDereference smi_check;
796  if (value->IsSmi()) {
797    li(dst, Operand(value), mode);
798  } else {
799    ASSERT(value->IsHeapObject());
800    if (isolate()->heap()->InNewSpace(*value)) {
801      Handle<Cell> cell = isolate()->factory()->NewCell(value);
802      li(dst, Operand(cell));
803      lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
804    } else {
805      li(dst, Operand(value));
806    }
807  }
808}
809
810
811void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
812  ASSERT(!j.is_reg());
813  BlockTrampolinePoolScope block_trampoline_pool(this);
814  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
815    // Normal load of an immediate value which does not need Relocation Info.
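    // Pick the shortest sequence: addiu for 16-bit signed values, ori when
    // only the low half-word is set, lui when only the high half-word is set,
    // and lui + ori otherwise.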
816    if (is_int16(j.imm32_)) {
817      addiu(rd, zero_reg, j.imm32_);
818    } else if (!(j.imm32_ & kHiMask)) {
819      ori(rd, zero_reg, j.imm32_);
820    } else if (!(j.imm32_ & kImm16Mask)) {
821      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
822    } else {
823      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
824      ori(rd, rd, (j.imm32_ & kImm16Mask));
825    }
826  } else {
827    if (MustUseReg(j.rmode_)) {
828      RecordRelocInfo(j.rmode_, j.imm32_);
829    }
    // Always emit the same number of instructions, since this code may later
    // be patched to load a different value, which can require two instructions.
832    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
833    ori(rd, rd, (j.imm32_ & kImm16Mask));
834  }
835}
836
837
838void MacroAssembler::MultiPush(RegList regs) {
839  int16_t num_to_push = NumberOfBitsSet(regs);
840  int16_t stack_offset = num_to_push * kPointerSize;
841
842  Subu(sp, sp, Operand(stack_offset));
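  // Registers are stored in descending order of their codes, so the register
  // with the lowest code ends up closest to the stack pointer.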
843  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
844    if ((regs & (1 << i)) != 0) {
845      stack_offset -= kPointerSize;
846      sw(ToRegister(i), MemOperand(sp, stack_offset));
847    }
848  }
849}
850
851
852void MacroAssembler::MultiPushReversed(RegList regs) {
853  int16_t num_to_push = NumberOfBitsSet(regs);
854  int16_t stack_offset = num_to_push * kPointerSize;
855
856  Subu(sp, sp, Operand(stack_offset));
857  for (int16_t i = 0; i < kNumRegisters; i++) {
858    if ((regs & (1 << i)) != 0) {
859      stack_offset -= kPointerSize;
860      sw(ToRegister(i), MemOperand(sp, stack_offset));
861    }
862  }
863}
864
865
866void MacroAssembler::MultiPop(RegList regs) {
867  int16_t stack_offset = 0;
868
869  for (int16_t i = 0; i < kNumRegisters; i++) {
870    if ((regs & (1 << i)) != 0) {
871      lw(ToRegister(i), MemOperand(sp, stack_offset));
872      stack_offset += kPointerSize;
873    }
874  }
875  addiu(sp, sp, stack_offset);
876}
877
878
879void MacroAssembler::MultiPopReversed(RegList regs) {
880  int16_t stack_offset = 0;
881
882  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
883    if ((regs & (1 << i)) != 0) {
884      lw(ToRegister(i), MemOperand(sp, stack_offset));
885      stack_offset += kPointerSize;
886    }
887  }
888  addiu(sp, sp, stack_offset);
889}
890
891
892void MacroAssembler::MultiPushFPU(RegList regs) {
893  int16_t num_to_push = NumberOfBitsSet(regs);
894  int16_t stack_offset = num_to_push * kDoubleSize;
895
896  Subu(sp, sp, Operand(stack_offset));
897  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
898    if ((regs & (1 << i)) != 0) {
899      stack_offset -= kDoubleSize;
900      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
901    }
902  }
903}
904
905
906void MacroAssembler::MultiPushReversedFPU(RegList regs) {
907  int16_t num_to_push = NumberOfBitsSet(regs);
908  int16_t stack_offset = num_to_push * kDoubleSize;
909
910  Subu(sp, sp, Operand(stack_offset));
911  for (int16_t i = 0; i < kNumRegisters; i++) {
912    if ((regs & (1 << i)) != 0) {
913      stack_offset -= kDoubleSize;
914      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
915    }
916  }
917}
918
919
920void MacroAssembler::MultiPopFPU(RegList regs) {
921  int16_t stack_offset = 0;
922
923  for (int16_t i = 0; i < kNumRegisters; i++) {
924    if ((regs & (1 << i)) != 0) {
925      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
926      stack_offset += kDoubleSize;
927    }
928  }
929  addiu(sp, sp, stack_offset);
930}
931
932
933void MacroAssembler::MultiPopReversedFPU(RegList regs) {
934  int16_t stack_offset = 0;
935
936  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
937    if ((regs & (1 << i)) != 0) {
938      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
939      stack_offset += kDoubleSize;
940    }
941  }
942  addiu(sp, sp, stack_offset);
943}
944
945
946void MacroAssembler::FlushICache(Register address, unsigned instructions) {
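  // The C call below clobbers the caller-saved registers and ra, so preserve
  // them around the call.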
947  RegList saved_regs = kJSCallerSaved | ra.bit();
948  MultiPush(saved_regs);
949  AllowExternalCallThatCantCauseGC scope(this);
950
951  // Save to a0 in case address == t0.
952  Move(a0, address);
953  PrepareCallCFunction(2, t0);
954
955  li(a1, instructions * kInstrSize);
956  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
957  MultiPop(saved_regs);
958}
959
960
961void MacroAssembler::Ext(Register rt,
962                         Register rs,
963                         uint16_t pos,
964                         uint16_t size) {
965  ASSERT(pos < 32);
966  ASSERT(pos + size < 33);
967
968  if (kArchVariant == kMips32r2) {
969    ext_(rt, rs, pos, size);
970  } else {
971    // Move rs to rt and shift it left then right to get the
972    // desired bitfield on the right side and zeroes on the left.
973    int shift_left = 32 - (pos + size);
974    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
975
976    int shift_right = 32 - size;
977    if (shift_right > 0) {
978      srl(rt, rt, shift_right);
979    }
980  }
981}
982
983
984void MacroAssembler::Ins(Register rt,
985                         Register rs,
986                         uint16_t pos,
987                         uint16_t size) {
988  ASSERT(pos < 32);
989  ASSERT(pos + size <= 32);
990  ASSERT(size != 0);
991
992  if (kArchVariant == kMips32r2) {
993    ins_(rt, rs, pos, size);
994  } else {
995    ASSERT(!rt.is(t8) && !rs.is(t8));
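    // Fallback: build a mask of 'size' ones, extract the field from rs into
    // t8, clear the corresponding field in rt and merge the two.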
996    Subu(at, zero_reg, Operand(1));
997    srl(at, at, 32 - size);
998    and_(t8, rs, at);
999    sll(t8, t8, pos);
1000    sll(at, at, pos);
1001    nor(at, at, zero_reg);
1002    and_(at, rt, at);
1003    or_(rt, t8, at);
1004  }
1005}
1006
1007
1008void MacroAssembler::Cvt_d_uw(FPURegister fd,
1009                              FPURegister fs,
1010                              FPURegister scratch) {
1011  // Move the data from fs to t8.
1012  mfc1(t8, fs);
1013  Cvt_d_uw(fd, t8, scratch);
1014}
1015
1016
1017void MacroAssembler::Cvt_d_uw(FPURegister fd,
1018                              Register rs,
1019                              FPURegister scratch) {
1020  // Convert rs to a FP value in fd (and fd + 1).
1021  // We do this by converting rs minus the MSB to avoid sign conversion,
1022  // then adding 2^31 to the result (if needed).
1023
1024  ASSERT(!fd.is(scratch));
1025  ASSERT(!rs.is(t9));
1026  ASSERT(!rs.is(at));
1027
1028  // Save rs's MSB to t9.
1029  Ext(t9, rs, 31, 1);
1030  // Remove rs's MSB.
1031  Ext(at, rs, 0, 31);
1032  // Move the result to fd.
1033  mtc1(at, fd);
1034
1035  // Convert fd to a real FP value.
1036  cvt_d_w(fd, fd);
1037
1038  Label conversion_done;
1039
1040  // If rs's MSB was 0, it's done.
1041  // Otherwise we need to add that to the FP register.
1042  Branch(&conversion_done, eq, t9, Operand(zero_reg));
1043
  // Load 2^31 into scratch as its double representation.
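  // 0x41E00000 00000000 is the IEEE-754 double encoding of 2^31.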
1045  li(at, 0x41E00000);
1046  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1047  mtc1(zero_reg, scratch);
1048  // Add it to fd.
1049  add_d(fd, fd, scratch);
1050
1051  bind(&conversion_done);
1052}
1053
1054
1055void MacroAssembler::Trunc_uw_d(FPURegister fd,
1056                                FPURegister fs,
1057                                FPURegister scratch) {
1058  Trunc_uw_d(fs, t8, scratch);
1059  mtc1(t8, fd);
1060}
1061
1062
1063void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1064  if (kArchVariant == kLoongson && fd.is(fs)) {
1065    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1066    trunc_w_d(fd, fs);
1067    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1068  } else {
1069    trunc_w_d(fd, fs);
1070  }
1071}
1072
1073
1074void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1075  if (kArchVariant == kLoongson && fd.is(fs)) {
1076    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1077    round_w_d(fd, fs);
1078    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1079  } else {
1080    round_w_d(fd, fs);
1081  }
1082}
1083
1084
1085void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1086  if (kArchVariant == kLoongson && fd.is(fs)) {
1087    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1088    floor_w_d(fd, fs);
1089    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1090  } else {
1091    floor_w_d(fd, fs);
1092  }
1093}
1094
1095
1096void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1097  if (kArchVariant == kLoongson && fd.is(fs)) {
1098    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1099    ceil_w_d(fd, fs);
1100    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1101  } else {
1102    ceil_w_d(fd, fs);
1103  }
1104}
1105
1106
1107void MacroAssembler::Trunc_uw_d(FPURegister fd,
1108                                Register rs,
1109                                FPURegister scratch) {
1110  ASSERT(!fd.is(scratch));
1111  ASSERT(!rs.is(at));
1112
  // Load 2^31 into scratch as its double representation.
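  // 0x41E00000 00000000 is the IEEE-754 double encoding of 2^31.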
1114  li(at, 0x41E00000);
1115  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1116  mtc1(zero_reg, scratch);
1117  // Test if scratch > fd.
1118  // If fd < 2^31 we can convert it normally.
1119  Label simple_convert;
1120  BranchF(&simple_convert, NULL, lt, fd, scratch);
1121
1122  // First we subtract 2^31 from fd, then trunc it to rs
1123  // and add 2^31 to rs.
1124  sub_d(scratch, fd, scratch);
1125  trunc_w_d(scratch, scratch);
1126  mfc1(rs, scratch);
1127  Or(rs, rs, 1 << 31);
1128
1129  Label done;
1130  Branch(&done);
1131  // Simple conversion.
1132  bind(&simple_convert);
1133  trunc_w_d(scratch, fd);
1134  mfc1(rs, scratch);
1135
1136  bind(&done);
1137}
1138
1139
1140void MacroAssembler::BranchF(Label* target,
1141                             Label* nan,
1142                             Condition cc,
1143                             FPURegister cmp1,
1144                             FPURegister cmp2,
1145                             BranchDelaySlot bd) {
1146  BlockTrampolinePoolScope block_trampoline_pool(this);
1147  if (cc == al) {
1148    Branch(bd, target);
1149    return;
1150  }
1151
1152  ASSERT(nan || target);
1153  // Check for unordered (NaN) cases.
1154  if (nan) {
1155    c(UN, D, cmp1, cmp2);
1156    bc1t(nan);
1157  }
1158
1159  if (target) {
1160    // Here NaN cases were either handled by this function or are assumed to
1161    // have been handled by the caller.
1162    // Unsigned conditions are treated as their signed counterpart.
1163    switch (cc) {
1164      case lt:
1165        c(OLT, D, cmp1, cmp2);
1166        bc1t(target);
1167        break;
1168      case gt:
1169        c(ULE, D, cmp1, cmp2);
1170        bc1f(target);
1171        break;
1172      case ge:
1173        c(ULT, D, cmp1, cmp2);
1174        bc1f(target);
1175        break;
1176      case le:
1177        c(OLE, D, cmp1, cmp2);
1178        bc1t(target);
1179        break;
1180      case eq:
1181        c(EQ, D, cmp1, cmp2);
1182        bc1t(target);
1183        break;
1184      case ueq:
1185        c(UEQ, D, cmp1, cmp2);
1186        bc1t(target);
1187        break;
1188      case ne:
1189        c(EQ, D, cmp1, cmp2);
1190        bc1f(target);
1191        break;
1192      case nue:
1193        c(UEQ, D, cmp1, cmp2);
1194        bc1f(target);
1195        break;
1196      default:
1197        CHECK(0);
1198    };
1199  }
1200
1201  if (bd == PROTECT) {
1202    nop();
1203  }
1204}
1205
1206
1207void MacroAssembler::Move(FPURegister dst, double imm) {
1208  static const DoubleRepresentation minus_zero(-0.0);
1209  static const DoubleRepresentation zero(0.0);
1210  DoubleRepresentation value(imm);
1211  // Handle special values first.
1212  bool force_load = dst.is(kDoubleRegZero);
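  // When the destination is kDoubleRegZero itself, it cannot simply be copied
  // from kDoubleRegZero, so force a full load.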
1213  if (value.bits == zero.bits && !force_load) {
1214    mov_d(dst, kDoubleRegZero);
1215  } else if (value.bits == minus_zero.bits && !force_load) {
1216    neg_d(dst, kDoubleRegZero);
1217  } else {
1218    uint32_t lo, hi;
1219    DoubleAsTwoUInt32(imm, &lo, &hi);
    // Move the low part of the double into the lower register of the FPU
    // register pair.
1222    if (lo != 0) {
1223      li(at, Operand(lo));
1224      mtc1(at, dst);
1225    } else {
1226      mtc1(zero_reg, dst);
1227    }
    // Move the high part of the double into the higher register of the FPU
    // register pair.
1230    if (hi != 0) {
1231      li(at, Operand(hi));
1232      mtc1(at, dst.high());
1233    } else {
1234      mtc1(zero_reg, dst.high());
1235    }
1236  }
1237}
1238
1239
1240void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1241  if (kArchVariant == kLoongson) {
1242    Label done;
1243    Branch(&done, ne, rt, Operand(zero_reg));
1244    mov(rd, rs);
1245    bind(&done);
1246  } else {
1247    movz(rd, rs, rt);
1248  }
1249}
1250
1251
1252void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1253  if (kArchVariant == kLoongson) {
1254    Label done;
1255    Branch(&done, eq, rt, Operand(zero_reg));
1256    mov(rd, rs);
1257    bind(&done);
1258  } else {
1259    movn(rd, rs, rt);
1260  }
1261}
1262
1263
1264void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1265  if (kArchVariant == kLoongson) {
    // Tests an FP condition code and then conditionally moves rs to rd.
1267    // We do not currently use any FPU cc bit other than bit 0.
1268    ASSERT(cc == 0);
1269    ASSERT(!(rs.is(t8) || rd.is(t8)));
1270    Label done;
1271    Register scratch = t8;
    // For testing purposes we need to fetch the contents of the FCSR register
    // and then test its cc (floating-point condition code) bit (for cc = 0, it
    // is the 24th bit of the FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
1278    nop();
1279    srl(scratch, scratch, 16);
1280    andi(scratch, scratch, 0x0080);
1281    Branch(&done, eq, scratch, Operand(zero_reg));
1282    mov(rd, rs);
1283    bind(&done);
1284  } else {
1285    movt(rd, rs, cc);
1286  }
1287}
1288
1289
1290void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1291  if (kArchVariant == kLoongson) {
    // Tests an FP condition code and then conditionally moves rs to rd.
1293    // We do not currently use any FPU cc bit other than bit 0.
1294    ASSERT(cc == 0);
1295    ASSERT(!(rs.is(t8) || rd.is(t8)));
1296    Label done;
1297    Register scratch = t8;
    // For testing purposes we need to fetch the contents of the FCSR register
    // and then test its cc (floating-point condition code) bit (for cc = 0, it
    // is the 24th bit of the FCSR).
    cfc1(scratch, FCSR);
    // For the MIPS I, II and III architectures, the contents of scratch are
    // UNPREDICTABLE for the instruction immediately following CFC1.
1304    nop();
1305    srl(scratch, scratch, 16);
1306    andi(scratch, scratch, 0x0080);
1307    Branch(&done, ne, scratch, Operand(zero_reg));
1308    mov(rd, rs);
1309    bind(&done);
1310  } else {
1311    movf(rd, rs, cc);
1312  }
1313}
1314
1315
1316void MacroAssembler::Clz(Register rd, Register rs) {
1317  if (kArchVariant == kLoongson) {
1318    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1319    Register mask = t8;
1320    Register scratch = t9;
1321    Label loop, end;
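    // Scan from the most significant bit with a one-bit mask, counting leading
    // zero bits until the first set bit is found.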
1322    mov(at, rs);
1323    mov(rd, zero_reg);
1324    lui(mask, 0x8000);
1325    bind(&loop);
1326    and_(scratch, at, mask);
1327    Branch(&end, ne, scratch, Operand(zero_reg));
1328    addiu(rd, rd, 1);
1329    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1330    srl(mask, mask, 1);
1331    bind(&end);
1332  } else {
1333    clz(rd, rs);
1334  }
1335}
1336
1337
1338void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1339                                     Register result,
1340                                     DoubleRegister double_input,
1341                                     Register scratch,
1342                                     DoubleRegister double_scratch,
1343                                     Register except_flag,
1344                                     CheckForInexactConversion check_inexact) {
1345  ASSERT(!result.is(scratch));
1346  ASSERT(!double_input.is(double_scratch));
1347  ASSERT(!except_flag.is(scratch));
1348
1349  Label done;
1350
1351  // Clear the except flag (0 = no exception)
1352  mov(except_flag, zero_reg);
1353
1354  // Test for values that can be exactly represented as a signed 32-bit integer.
1355  cvt_w_d(double_scratch, double_input);
1356  mfc1(result, double_scratch);
1357  cvt_d_w(double_scratch, double_scratch);
1358  BranchF(&done, NULL, eq, double_input, double_scratch);
1359
1360  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
1361
1362  if (check_inexact == kDontCheckForInexactConversion) {
1363    // Ignore inexact exceptions.
1364    except_mask &= ~kFCSRInexactFlagMask;
1365  }
1366
1367  // Save FCSR.
1368  cfc1(scratch, FCSR);
1369  // Disable FPU exceptions.
1370  ctc1(zero_reg, FCSR);
1371
1372  // Do operation based on rounding mode.
1373  switch (rounding_mode) {
1374    case kRoundToNearest:
1375      Round_w_d(double_scratch, double_input);
1376      break;
1377    case kRoundToZero:
1378      Trunc_w_d(double_scratch, double_input);
1379      break;
1380    case kRoundToPlusInf:
1381      Ceil_w_d(double_scratch, double_input);
1382      break;
1383    case kRoundToMinusInf:
1384      Floor_w_d(double_scratch, double_input);
1385      break;
1386  }  // End of switch-statement.
1387
1388  // Retrieve FCSR.
1389  cfc1(except_flag, FCSR);
1390  // Restore FCSR.
1391  ctc1(scratch, FCSR);
1392  // Move the converted value into the result register.
1393  mfc1(result, double_scratch);
1394
1395  // Check for fpu exceptions.
1396  And(except_flag, except_flag, Operand(except_mask));
1397
1398  bind(&done);
1399}
1400
1401
1402void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1403                                                DoubleRegister double_input,
1404                                                Label* done) {
1405  DoubleRegister single_scratch = kLithiumScratchDouble.low();
1406  Register scratch = at;
1407  Register scratch2 = t9;
1408
1409  // Clear cumulative exception flags and save the FCSR.
1410  cfc1(scratch2, FCSR);
1411  ctc1(zero_reg, FCSR);
1412  // Try a conversion to a signed integer.
1413  trunc_w_d(single_scratch, double_input);
1414  mfc1(result, single_scratch);
1415  // Retrieve and restore the FCSR.
1416  cfc1(scratch, FCSR);
1417  ctc1(scratch2, FCSR);
1418  // Check for overflow and NaNs.
1419  And(scratch,
1420      scratch,
1421      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1422  // If we had no exceptions we are done.
1423  Branch(done, eq, scratch, Operand(zero_reg));
1424}
1425
1426
1427void MacroAssembler::TruncateDoubleToI(Register result,
1428                                       DoubleRegister double_input) {
1429  Label done;
1430
1431  TryInlineTruncateDoubleToI(result, double_input, &done);
1432
1433  // If we fell through then inline version didn't succeed - call stub instead.
1434  push(ra);
1435  Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
1436  sdc1(double_input, MemOperand(sp, 0));
1437
1438  DoubleToIStub stub(sp, result, 0, true, true);
1439  CallStub(&stub);
1440
1441  Addu(sp, sp, Operand(kDoubleSize));
1442  pop(ra);
1443
1444  bind(&done);
1445}
1446
1447
1448void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1449  Label done;
1450  DoubleRegister double_scratch = f12;
1451  ASSERT(!result.is(object));
1452
1453  ldc1(double_scratch,
1454       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1455  TryInlineTruncateDoubleToI(result, double_scratch, &done);
1456
1457  // If we fell through then inline version didn't succeed - call stub instead.
1458  push(ra);
1459  DoubleToIStub stub(object,
1460                     result,
1461                     HeapNumber::kValueOffset - kHeapObjectTag,
1462                     true,
1463                     true);
1464  CallStub(&stub);
1465  pop(ra);
1466
1467  bind(&done);
1468}
1469
1470
1471void MacroAssembler::TruncateNumberToI(Register object,
1472                                       Register result,
1473                                       Register heap_number_map,
1474                                       Register scratch,
1475                                       Label* not_number) {
1476  Label done;
1477  ASSERT(!result.is(object));
1478
1479  UntagAndJumpIfSmi(result, object, &done);
1480  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1481  TruncateHeapNumberToI(result, object);
1482
1483  bind(&done);
1484}
1485
1486
1487void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1488                                         Register src,
1489                                         int num_least_bits) {
1490  Ext(dst, src, kSmiTagSize, num_least_bits);
1491}
1492
1493
1494void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1495                                           Register src,
1496                                           int num_least_bits) {
1497  And(dst, src, Operand((1 << num_least_bits) - 1));
1498}
1499
1500
// Emulated conditional branches do not emit a nop in the branch delay slot.
1502//
1503// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1504#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
1505    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
1506    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1507
1508
1509void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1510  BranchShort(offset, bdslot);
1511}
1512
1513
1514void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1515                            const Operand& rt,
1516                            BranchDelaySlot bdslot) {
1517  BranchShort(offset, cond, rs, rt, bdslot);
1518}
1519
1520
1521void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1522  if (L->is_bound()) {
1523    if (is_near(L)) {
1524      BranchShort(L, bdslot);
1525    } else {
1526      Jr(L, bdslot);
1527    }
1528  } else {
1529    if (is_trampoline_emitted()) {
1530      Jr(L, bdslot);
1531    } else {
1532      BranchShort(L, bdslot);
1533    }
1534  }
1535}
1536
1537
1538void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1539                            const Operand& rt,
1540                            BranchDelaySlot bdslot) {
1541  if (L->is_bound()) {
1542    if (is_near(L)) {
1543      BranchShort(L, cond, rs, rt, bdslot);
1544    } else {
1545      Label skip;
1546      Condition neg_cond = NegateCondition(cond);
1547      BranchShort(&skip, neg_cond, rs, rt);
1548      Jr(L, bdslot);
1549      bind(&skip);
1550    }
1551  } else {
1552    if (is_trampoline_emitted()) {
1553      Label skip;
1554      Condition neg_cond = NegateCondition(cond);
1555      BranchShort(&skip, neg_cond, rs, rt);
1556      Jr(L, bdslot);
1557      bind(&skip);
1558    } else {
1559      BranchShort(L, cond, rs, rt, bdslot);
1560    }
1561  }
1562}
1563
1564
1565void MacroAssembler::Branch(Label* L,
1566                            Condition cond,
1567                            Register rs,
1568                            Heap::RootListIndex index,
1569                            BranchDelaySlot bdslot) {
1570  LoadRoot(at, index);
1571  Branch(L, cond, rs, Operand(at), bdslot);
1572}
1573
1574
1575void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1576  b(offset);
1577
1578  // Emit a nop in the branch delay slot if required.
1579  if (bdslot == PROTECT)
1580    nop();
1581}
1582
1583
1584void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1585                                 const Operand& rt,
1586                                 BranchDelaySlot bdslot) {
1587  BRANCH_ARGS_CHECK(cond, rs, rt);
1588  ASSERT(!rs.is(zero_reg));
1589  Register r2 = no_reg;
1590  Register scratch = at;
1591
1592  if (rt.is_reg()) {
1593    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1594    // rt.
1595    BlockTrampolinePoolScope block_trampoline_pool(this);
1596    r2 = rt.rm_;
1597    switch (cond) {
1598      case cc_always:
1599        b(offset);
1600        break;
1601      case eq:
1602        beq(rs, r2, offset);
1603        break;
1604      case ne:
1605        bne(rs, r2, offset);
1606        break;
1607      // Signed comparison.
1608      case greater:
1609        if (r2.is(zero_reg)) {
1610          bgtz(rs, offset);
1611        } else {
1612          slt(scratch, r2, rs);
1613          bne(scratch, zero_reg, offset);
1614        }
1615        break;
1616      case greater_equal:
1617        if (r2.is(zero_reg)) {
1618          bgez(rs, offset);
1619        } else {
1620          slt(scratch, rs, r2);
1621          beq(scratch, zero_reg, offset);
1622        }
1623        break;
1624      case less:
1625        if (r2.is(zero_reg)) {
1626          bltz(rs, offset);
1627        } else {
1628          slt(scratch, rs, r2);
1629          bne(scratch, zero_reg, offset);
1630        }
1631        break;
1632      case less_equal:
1633        if (r2.is(zero_reg)) {
1634          blez(rs, offset);
1635        } else {
1636          slt(scratch, r2, rs);
1637          beq(scratch, zero_reg, offset);
1638        }
1639        break;
1640      // Unsigned comparison.
1641      case Ugreater:
1642        if (r2.is(zero_reg)) {
1643          bgtz(rs, offset);
1644        } else {
1645          sltu(scratch, r2, rs);
1646          bne(scratch, zero_reg, offset);
1647        }
1648        break;
1649      case Ugreater_equal:
1650        if (r2.is(zero_reg)) {
1651          bgez(rs, offset);
1652        } else {
1653          sltu(scratch, rs, r2);
1654          beq(scratch, zero_reg, offset);
1655        }
1656        break;
1657      case Uless:
1658        if (r2.is(zero_reg)) {
1659          // No code needs to be emitted.
1660          return;
1661        } else {
1662          sltu(scratch, rs, r2);
1663          bne(scratch, zero_reg, offset);
1664        }
1665        break;
1666      case Uless_equal:
1667        if (r2.is(zero_reg)) {
1668          b(offset);
1669        } else {
1670          sltu(scratch, r2, rs);
1671          beq(scratch, zero_reg, offset);
1672        }
1673        break;
1674      default:
1675        UNREACHABLE();
1676    }
1677  } else {
    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
    // target.
1681    BlockTrampolinePoolScope block_trampoline_pool(this);
1682    switch (cond) {
1683      case cc_always:
1684        b(offset);
1685        break;
1686      case eq:
1687        // We don't want any other register but scratch clobbered.
1688        ASSERT(!scratch.is(rs));
1689        r2 = scratch;
1690        li(r2, rt);
1691        beq(rs, r2, offset);
1692        break;
1693      case ne:
1694        // We don't want any other register but scratch clobbered.
1695        ASSERT(!scratch.is(rs));
1696        r2 = scratch;
1697        li(r2, rt);
1698        bne(rs, r2, offset);
1699        break;
1700      // Signed comparison.
1701      case greater:
1702        if (rt.imm32_ == 0) {
1703          bgtz(rs, offset);
1704        } else {
1705          r2 = scratch;
1706          li(r2, rt);
1707          slt(scratch, r2, rs);
1708          bne(scratch, zero_reg, offset);
1709        }
1710        break;
1711      case greater_equal:
1712        if (rt.imm32_ == 0) {
1713          bgez(rs, offset);
1714        } else if (is_int16(rt.imm32_)) {
1715          slti(scratch, rs, rt.imm32_);
1716          beq(scratch, zero_reg, offset);
1717        } else {
1718          r2 = scratch;
1719          li(r2, rt);
1720          slt(scratch, rs, r2);
1721          beq(scratch, zero_reg, offset);
1722        }
1723        break;
1724      case less:
1725        if (rt.imm32_ == 0) {
1726          bltz(rs, offset);
1727        } else if (is_int16(rt.imm32_)) {
1728          slti(scratch, rs, rt.imm32_);
1729          bne(scratch, zero_reg, offset);
1730        } else {
1731          r2 = scratch;
1732          li(r2, rt);
1733          slt(scratch, rs, r2);
1734          bne(scratch, zero_reg, offset);
1735        }
1736        break;
1737      case less_equal:
1738        if (rt.imm32_ == 0) {
1739          blez(rs, offset);
1740        } else {
1741          r2 = scratch;
1742          li(r2, rt);
1743          slt(scratch, r2, rs);
1744          beq(scratch, zero_reg, offset);
        }
        break;
1747      // Unsigned comparison.
1748      case Ugreater:
1749        if (rt.imm32_ == 0) {
1750          bgtz(rs, offset);
1751        } else {
1752          r2 = scratch;
1753          li(r2, rt);
1754          sltu(scratch, r2, rs);
1755          bne(scratch, zero_reg, offset);
1756        }
1757        break;
1758      case Ugreater_equal:
1759        if (rt.imm32_ == 0) {
1760          bgez(rs, offset);
1761        } else if (is_int16(rt.imm32_)) {
1762          sltiu(scratch, rs, rt.imm32_);
1763          beq(scratch, zero_reg, offset);
1764        } else {
1765          r2 = scratch;
1766          li(r2, rt);
1767          sltu(scratch, rs, r2);
1768          beq(scratch, zero_reg, offset);
1769        }
1770        break;
1771      case Uless:
1772        if (rt.imm32_ == 0) {
1773          // No code needs to be emitted.
1774          return;
1775        } else if (is_int16(rt.imm32_)) {
1776          sltiu(scratch, rs, rt.imm32_);
1777          bne(scratch, zero_reg, offset);
1778        } else {
1779          r2 = scratch;
1780          li(r2, rt);
1781          sltu(scratch, rs, r2);
1782          bne(scratch, zero_reg, offset);
1783        }
1784        break;
1785      case Uless_equal:
1786        if (rt.imm32_ == 0) {
1787          b(offset);
1788        } else {
1789          r2 = scratch;
1790          li(r2, rt);
1791          sltu(scratch, r2, rs);
1792          beq(scratch, zero_reg, offset);
1793        }
1794        break;
1795      default:
1796        UNREACHABLE();
1797    }
1798  }
1799  // Emit a nop in the branch delay slot if required.
1800  if (bdslot == PROTECT)
1801    nop();
1802}
1803
1804
1805void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
  // shifted_branch_offset must be used as the branch instruction's argument so
  // that it is evaluated immediately before the branch is generated, as needed.
1808
1809  b(shifted_branch_offset(L, false));
1810
1811  // Emit a nop in the branch delay slot if required.
1812  if (bdslot == PROTECT)
1813    nop();
1814}
1815
1816
1817void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1818                                 const Operand& rt,
1819                                 BranchDelaySlot bdslot) {
1820  BRANCH_ARGS_CHECK(cond, rs, rt);
1821
1822  int32_t offset = 0;
1823  Register r2 = no_reg;
1824  Register scratch = at;
1825  if (rt.is_reg()) {
1826    BlockTrampolinePoolScope block_trampoline_pool(this);
1827    r2 = rt.rm_;
1828    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
1830    // target.
1831    switch (cond) {
1832      case cc_always:
1833        offset = shifted_branch_offset(L, false);
1834        b(offset);
1835        break;
1836      case eq:
1837        offset = shifted_branch_offset(L, false);
1838        beq(rs, r2, offset);
1839        break;
1840      case ne:
1841        offset = shifted_branch_offset(L, false);
1842        bne(rs, r2, offset);
1843        break;
1844      // Signed comparison.
1845      case greater:
1846        if (r2.is(zero_reg)) {
1847          offset = shifted_branch_offset(L, false);
1848          bgtz(rs, offset);
1849        } else {
1850          slt(scratch, r2, rs);
1851          offset = shifted_branch_offset(L, false);
1852          bne(scratch, zero_reg, offset);
1853        }
1854        break;
1855      case greater_equal:
1856        if (r2.is(zero_reg)) {
1857          offset = shifted_branch_offset(L, false);
1858          bgez(rs, offset);
1859        } else {
1860          slt(scratch, rs, r2);
1861          offset = shifted_branch_offset(L, false);
1862          beq(scratch, zero_reg, offset);
1863        }
1864        break;
1865      case less:
1866        if (r2.is(zero_reg)) {
1867          offset = shifted_branch_offset(L, false);
1868          bltz(rs, offset);
1869        } else {
1870          slt(scratch, rs, r2);
1871          offset = shifted_branch_offset(L, false);
1872          bne(scratch, zero_reg, offset);
1873        }
1874        break;
1875      case less_equal:
1876        if (r2.is(zero_reg)) {
1877          offset = shifted_branch_offset(L, false);
1878          blez(rs, offset);
1879        } else {
1880          slt(scratch, r2, rs);
1881          offset = shifted_branch_offset(L, false);
1882          beq(scratch, zero_reg, offset);
1883        }
1884        break;
1885      // Unsigned comparison.
1886      case Ugreater:
1887        if (r2.is(zero_reg)) {
1888          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
1890        } else {
1891          sltu(scratch, r2, rs);
1892          offset = shifted_branch_offset(L, false);
1893          bne(scratch, zero_reg, offset);
1894        }
1895        break;
1896      case Ugreater_equal:
1897        if (r2.is(zero_reg)) {
1898          offset = shifted_branch_offset(L, false);
          b(offset);
1900        } else {
1901          sltu(scratch, rs, r2);
1902          offset = shifted_branch_offset(L, false);
1903          beq(scratch, zero_reg, offset);
1904        }
1905        break;
1906      case Uless:
1907        if (r2.is(zero_reg)) {
1908          // No code needs to be emitted.
1909          return;
1910        } else {
1911          sltu(scratch, rs, r2);
1912          offset = shifted_branch_offset(L, false);
1913          bne(scratch, zero_reg, offset);
1914        }
1915        break;
1916      case Uless_equal:
1917        if (r2.is(zero_reg)) {
1918          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
1920        } else {
1921          sltu(scratch, r2, rs);
1922          offset = shifted_branch_offset(L, false);
1923          beq(scratch, zero_reg, offset);
1924        }
1925        break;
1926      default:
1927        UNREACHABLE();
1928    }
1929  } else {
1930    // Be careful to always use shifted_branch_offset only just before the
    // branch instruction, as the location will be remembered for patching the
1932    // target.
1933    BlockTrampolinePoolScope block_trampoline_pool(this);
1934    switch (cond) {
1935      case cc_always:
1936        offset = shifted_branch_offset(L, false);
1937        b(offset);
1938        break;
1939      case eq:
1940        ASSERT(!scratch.is(rs));
1941        r2 = scratch;
1942        li(r2, rt);
1943        offset = shifted_branch_offset(L, false);
1944        beq(rs, r2, offset);
1945        break;
1946      case ne:
1947        ASSERT(!scratch.is(rs));
1948        r2 = scratch;
1949        li(r2, rt);
1950        offset = shifted_branch_offset(L, false);
1951        bne(rs, r2, offset);
1952        break;
1953      // Signed comparison.
1954      case greater:
1955        if (rt.imm32_ == 0) {
1956          offset = shifted_branch_offset(L, false);
1957          bgtz(rs, offset);
1958        } else {
1959          ASSERT(!scratch.is(rs));
1960          r2 = scratch;
1961          li(r2, rt);
1962          slt(scratch, r2, rs);
1963          offset = shifted_branch_offset(L, false);
1964          bne(scratch, zero_reg, offset);
1965        }
1966        break;
1967      case greater_equal:
1968        if (rt.imm32_ == 0) {
1969          offset = shifted_branch_offset(L, false);
1970          bgez(rs, offset);
1971        } else if (is_int16(rt.imm32_)) {
1972          slti(scratch, rs, rt.imm32_);
1973          offset = shifted_branch_offset(L, false);
1974          beq(scratch, zero_reg, offset);
1975        } else {
1976          ASSERT(!scratch.is(rs));
1977          r2 = scratch;
1978          li(r2, rt);
1979          slt(scratch, rs, r2);
1980          offset = shifted_branch_offset(L, false);
1981          beq(scratch, zero_reg, offset);
1982        }
1983        break;
1984      case less:
1985        if (rt.imm32_ == 0) {
1986          offset = shifted_branch_offset(L, false);
1987          bltz(rs, offset);
1988        } else if (is_int16(rt.imm32_)) {
1989          slti(scratch, rs, rt.imm32_);
1990          offset = shifted_branch_offset(L, false);
1991          bne(scratch, zero_reg, offset);
1992        } else {
1993          ASSERT(!scratch.is(rs));
1994          r2 = scratch;
1995          li(r2, rt);
1996          slt(scratch, rs, r2);
1997          offset = shifted_branch_offset(L, false);
1998          bne(scratch, zero_reg, offset);
1999        }
2000        break;
2001      case less_equal:
2002        if (rt.imm32_ == 0) {
2003          offset = shifted_branch_offset(L, false);
2004          blez(rs, offset);
2005        } else {
2006          ASSERT(!scratch.is(rs));
2007          r2 = scratch;
2008          li(r2, rt);
2009          slt(scratch, r2, rs);
2010          offset = shifted_branch_offset(L, false);
2011          beq(scratch, zero_reg, offset);
2012        }
2013        break;
2014      // Unsigned comparison.
2015      case Ugreater:
2016        if (rt.imm32_ == 0) {
2017          offset = shifted_branch_offset(L, false);
          bne(rs, zero_reg, offset);
2019        } else {
2020          ASSERT(!scratch.is(rs));
2021          r2 = scratch;
2022          li(r2, rt);
2023          sltu(scratch, r2, rs);
2024          offset = shifted_branch_offset(L, false);
2025          bne(scratch, zero_reg, offset);
2026        }
2027        break;
2028      case Ugreater_equal:
2029        if (rt.imm32_ == 0) {
2030          offset = shifted_branch_offset(L, false);
          b(offset);
2032        } else if (is_int16(rt.imm32_)) {
2033          sltiu(scratch, rs, rt.imm32_);
2034          offset = shifted_branch_offset(L, false);
2035          beq(scratch, zero_reg, offset);
2036        } else {
2037          ASSERT(!scratch.is(rs));
2038          r2 = scratch;
2039          li(r2, rt);
2040          sltu(scratch, rs, r2);
2041          offset = shifted_branch_offset(L, false);
2042          beq(scratch, zero_reg, offset);
2043        }
2044        break;
      case Uless:
2046        if (rt.imm32_ == 0) {
2047          // No code needs to be emitted.
2048          return;
2049        } else if (is_int16(rt.imm32_)) {
2050          sltiu(scratch, rs, rt.imm32_);
2051          offset = shifted_branch_offset(L, false);
2052          bne(scratch, zero_reg, offset);
2053        } else {
2054          ASSERT(!scratch.is(rs));
2055          r2 = scratch;
2056          li(r2, rt);
2057          sltu(scratch, rs, r2);
2058          offset = shifted_branch_offset(L, false);
2059          bne(scratch, zero_reg, offset);
2060        }
2061        break;
2062      case Uless_equal:
2063        if (rt.imm32_ == 0) {
2064          offset = shifted_branch_offset(L, false);
          beq(rs, zero_reg, offset);
2066        } else {
2067          ASSERT(!scratch.is(rs));
2068          r2 = scratch;
2069          li(r2, rt);
2070          sltu(scratch, r2, rs);
2071          offset = shifted_branch_offset(L, false);
2072          beq(scratch, zero_reg, offset);
2073        }
2074        break;
2075      default:
2076        UNREACHABLE();
2077    }
2078  }
  // Check that the offset actually fits in an int16_t.
2080  ASSERT(is_int16(offset));
2081  // Emit a nop in the branch delay slot if required.
2082  if (bdslot == PROTECT)
2083    nop();
2084}
2085
2086
2087void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2088  BranchAndLinkShort(offset, bdslot);
2089}
2090
2091
2092void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2093                                   const Operand& rt,
2094                                   BranchDelaySlot bdslot) {
2095  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2096}
2097
2098
2099void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2100  if (L->is_bound()) {
2101    if (is_near(L)) {
2102      BranchAndLinkShort(L, bdslot);
2103    } else {
2104      Jalr(L, bdslot);
2105    }
2106  } else {
2107    if (is_trampoline_emitted()) {
2108      Jalr(L, bdslot);
2109    } else {
2110      BranchAndLinkShort(L, bdslot);
2111    }
2112  }
2113}
2114
2115
2116void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2117                                   const Operand& rt,
2118                                   BranchDelaySlot bdslot) {
2119  if (L->is_bound()) {
2120    if (is_near(L)) {
2121      BranchAndLinkShort(L, cond, rs, rt, bdslot);
2122    } else {
2123      Label skip;
2124      Condition neg_cond = NegateCondition(cond);
2125      BranchShort(&skip, neg_cond, rs, rt);
2126      Jalr(L, bdslot);
2127      bind(&skip);
2128    }
2129  } else {
2130    if (is_trampoline_emitted()) {
2131      Label skip;
2132      Condition neg_cond = NegateCondition(cond);
2133      BranchShort(&skip, neg_cond, rs, rt);
2134      Jalr(L, bdslot);
2135      bind(&skip);
2136    } else {
2137      BranchAndLinkShort(L, cond, rs, rt, bdslot);
2138    }
2139  }
2140}
2141
2142
2143// We need to use a bgezal or bltzal, but they can't be used directly with the
2144// slt instructions. We could use sub or add instead but we would miss overflow
2145// cases, so we keep slt and add an intermediate third instruction.
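// For example, for cond == greater the conditional variants below emit:
//   slt(scratch, r2, rs)           // scratch = 1 if rs > r2, 0 otherwise.
//   addiu(scratch, scratch, -1)    // scratch = 0 when taken, -1 otherwise.
//   bgezal(scratch, offset)        // Branch and link when scratch >= 0.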
2146void MacroAssembler::BranchAndLinkShort(int16_t offset,
2147                                        BranchDelaySlot bdslot) {
2148  bal(offset);
2149
2150  // Emit a nop in the branch delay slot if required.
2151  if (bdslot == PROTECT)
2152    nop();
2153}
2154
2155
2156void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2157                                        Register rs, const Operand& rt,
2158                                        BranchDelaySlot bdslot) {
2159  BRANCH_ARGS_CHECK(cond, rs, rt);
2160  Register r2 = no_reg;
2161  Register scratch = at;
2162
2163  if (rt.is_reg()) {
2164    r2 = rt.rm_;
2165  } else if (cond != cc_always) {
2166    r2 = scratch;
2167    li(r2, rt);
2168  }
2169
2170  {
2171    BlockTrampolinePoolScope block_trampoline_pool(this);
2172    switch (cond) {
2173      case cc_always:
2174        bal(offset);
2175        break;
2176      case eq:
2177        bne(rs, r2, 2);
2178        nop();
2179        bal(offset);
2180        break;
2181      case ne:
2182        beq(rs, r2, 2);
2183        nop();
2184        bal(offset);
2185        break;
2186
2187      // Signed comparison.
2188      case greater:
2189        slt(scratch, r2, rs);
2190        addiu(scratch, scratch, -1);
2191        bgezal(scratch, offset);
2192        break;
2193      case greater_equal:
2194        slt(scratch, rs, r2);
2195        addiu(scratch, scratch, -1);
2196        bltzal(scratch, offset);
2197        break;
2198      case less:
2199        slt(scratch, rs, r2);
2200        addiu(scratch, scratch, -1);
2201        bgezal(scratch, offset);
2202        break;
2203      case less_equal:
2204        slt(scratch, r2, rs);
2205        addiu(scratch, scratch, -1);
2206        bltzal(scratch, offset);
2207        break;
2208
2209      // Unsigned comparison.
2210      case Ugreater:
2211        sltu(scratch, r2, rs);
2212        addiu(scratch, scratch, -1);
2213        bgezal(scratch, offset);
2214        break;
2215      case Ugreater_equal:
2216        sltu(scratch, rs, r2);
2217        addiu(scratch, scratch, -1);
2218        bltzal(scratch, offset);
2219        break;
2220      case Uless:
2221        sltu(scratch, rs, r2);
2222        addiu(scratch, scratch, -1);
2223        bgezal(scratch, offset);
2224        break;
2225      case Uless_equal:
2226        sltu(scratch, r2, rs);
2227        addiu(scratch, scratch, -1);
2228        bltzal(scratch, offset);
2229        break;
2230
2231      default:
2232        UNREACHABLE();
2233    }
2234  }
2235  // Emit a nop in the branch delay slot if required.
2236  if (bdslot == PROTECT)
2237    nop();
2238}
2239
2240
2241void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2242  bal(shifted_branch_offset(L, false));
2243
2244  // Emit a nop in the branch delay slot if required.
2245  if (bdslot == PROTECT)
2246    nop();
2247}
2248
2249
2250void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2251                                        const Operand& rt,
2252                                        BranchDelaySlot bdslot) {
2253  BRANCH_ARGS_CHECK(cond, rs, rt);
2254
2255  int32_t offset = 0;
2256  Register r2 = no_reg;
2257  Register scratch = at;
2258  if (rt.is_reg()) {
2259    r2 = rt.rm_;
2260  } else if (cond != cc_always) {
2261    r2 = scratch;
2262    li(r2, rt);
2263  }
2264
2265  {
2266    BlockTrampolinePoolScope block_trampoline_pool(this);
2267    switch (cond) {
2268      case cc_always:
2269        offset = shifted_branch_offset(L, false);
2270        bal(offset);
2271        break;
2272      case eq:
2273        bne(rs, r2, 2);
2274        nop();
2275        offset = shifted_branch_offset(L, false);
2276        bal(offset);
2277        break;
2278      case ne:
2279        beq(rs, r2, 2);
2280        nop();
2281        offset = shifted_branch_offset(L, false);
2282        bal(offset);
2283        break;
2284
2285      // Signed comparison.
2286      case greater:
2287        slt(scratch, r2, rs);
2288        addiu(scratch, scratch, -1);
2289        offset = shifted_branch_offset(L, false);
2290        bgezal(scratch, offset);
2291        break;
2292      case greater_equal:
2293        slt(scratch, rs, r2);
2294        addiu(scratch, scratch, -1);
2295        offset = shifted_branch_offset(L, false);
2296        bltzal(scratch, offset);
2297        break;
2298      case less:
2299        slt(scratch, rs, r2);
2300        addiu(scratch, scratch, -1);
2301        offset = shifted_branch_offset(L, false);
2302        bgezal(scratch, offset);
2303        break;
2304      case less_equal:
2305        slt(scratch, r2, rs);
2306        addiu(scratch, scratch, -1);
2307        offset = shifted_branch_offset(L, false);
2308        bltzal(scratch, offset);
2309        break;
2310
2311      // Unsigned comparison.
2312      case Ugreater:
2313        sltu(scratch, r2, rs);
2314        addiu(scratch, scratch, -1);
2315        offset = shifted_branch_offset(L, false);
2316        bgezal(scratch, offset);
2317        break;
2318      case Ugreater_equal:
2319        sltu(scratch, rs, r2);
2320        addiu(scratch, scratch, -1);
2321        offset = shifted_branch_offset(L, false);
2322        bltzal(scratch, offset);
2323        break;
2324      case Uless:
2325        sltu(scratch, rs, r2);
2326        addiu(scratch, scratch, -1);
2327        offset = shifted_branch_offset(L, false);
2328        bgezal(scratch, offset);
2329        break;
2330      case Uless_equal:
2331        sltu(scratch, r2, rs);
2332        addiu(scratch, scratch, -1);
2333        offset = shifted_branch_offset(L, false);
2334        bltzal(scratch, offset);
2335        break;
2336
2337      default:
2338        UNREACHABLE();
2339    }
2340  }
  // Check that the offset actually fits in an int16_t.
2342  ASSERT(is_int16(offset));
2343
2344  // Emit a nop in the branch delay slot if required.
2345  if (bdslot == PROTECT)
2346    nop();
2347}
2348
2349
2350void MacroAssembler::Jump(Register target,
2351                          Condition cond,
2352                          Register rs,
2353                          const Operand& rt,
2354                          BranchDelaySlot bd) {
2355  BlockTrampolinePoolScope block_trampoline_pool(this);
2356  if (cond == cc_always) {
2357    jr(target);
2358  } else {
2359    BRANCH_ARGS_CHECK(cond, rs, rt);
2360    Branch(2, NegateCondition(cond), rs, rt);
2361    jr(target);
2362  }
2363  // Emit a nop in the branch delay slot if required.
2364  if (bd == PROTECT)
2365    nop();
2366}
2367
2368
2369void MacroAssembler::Jump(intptr_t target,
2370                          RelocInfo::Mode rmode,
2371                          Condition cond,
2372                          Register rs,
2373                          const Operand& rt,
2374                          BranchDelaySlot bd) {
2375  Label skip;
2376  if (cond != cc_always) {
2377    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2378  }
2379  // The first instruction of 'li' may be placed in the delay slot.
  // This is not an issue; t9 is expected to be clobbered anyway.
2381  li(t9, Operand(target, rmode));
2382  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2383  bind(&skip);
2384}
2385
2386
2387void MacroAssembler::Jump(Address target,
2388                          RelocInfo::Mode rmode,
2389                          Condition cond,
2390                          Register rs,
2391                          const Operand& rt,
2392                          BranchDelaySlot bd) {
2393  ASSERT(!RelocInfo::IsCodeTarget(rmode));
2394  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2395}
2396
2397
2398void MacroAssembler::Jump(Handle<Code> code,
2399                          RelocInfo::Mode rmode,
2400                          Condition cond,
2401                          Register rs,
2402                          const Operand& rt,
2403                          BranchDelaySlot bd) {
2404  ASSERT(RelocInfo::IsCodeTarget(rmode));
2405  AllowDeferredHandleDereference embedding_raw_address;
2406  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2407}
2408
2409
2410int MacroAssembler::CallSize(Register target,
2411                             Condition cond,
2412                             Register rs,
2413                             const Operand& rt,
2414                             BranchDelaySlot bd) {
2415  int size = 0;
2416
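  // cc_always needs only the jalr. A conditional call also emits the
  // negated-condition branch that skips the jalr and the nop in that branch's
  // delay slot (see Call() below), hence three instructions.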
2417  if (cond == cc_always) {
2418    size += 1;
2419  } else {
2420    size += 3;
2421  }
2422
2423  if (bd == PROTECT)
2424    size += 1;
2425
2426  return size * kInstrSize;
2427}
2428
2429
// Note: To call gcc-compiled C code on MIPS, you must call through t9.
2431void MacroAssembler::Call(Register target,
2432                          Condition cond,
2433                          Register rs,
2434                          const Operand& rt,
2435                          BranchDelaySlot bd) {
2436  BlockTrampolinePoolScope block_trampoline_pool(this);
2437  Label start;
2438  bind(&start);
2439  if (cond == cc_always) {
2440    jalr(target);
2441  } else {
2442    BRANCH_ARGS_CHECK(cond, rs, rt);
2443    Branch(2, NegateCondition(cond), rs, rt);
2444    jalr(target);
2445  }
2446  // Emit a nop in the branch delay slot if required.
2447  if (bd == PROTECT)
2448    nop();
2449
2450  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2451            SizeOfCodeGeneratedSince(&start));
2452}
2453
2454
2455int MacroAssembler::CallSize(Address target,
2456                             RelocInfo::Mode rmode,
2457                             Condition cond,
2458                             Register rs,
2459                             const Operand& rt,
2460                             BranchDelaySlot bd) {
2461  int size = CallSize(t9, cond, rs, rt, bd);
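  // Call(Address ...) below loads the target with li(t9, ..., CONSTANT_SIZE),
  // which always emits a fixed two-instruction lui/ori pair, hence the two
  // extra instructions.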
2462  return size + 2 * kInstrSize;
2463}
2464
2465
2466void MacroAssembler::Call(Address target,
2467                          RelocInfo::Mode rmode,
2468                          Condition cond,
2469                          Register rs,
2470                          const Operand& rt,
2471                          BranchDelaySlot bd) {
2472  BlockTrampolinePoolScope block_trampoline_pool(this);
2473  Label start;
2474  bind(&start);
2475  int32_t target_int = reinterpret_cast<int32_t>(target);
2476  // Must record previous source positions before the
2477  // li() generates a new code target.
2478  positions_recorder()->WriteRecordedPositions();
2479  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2480  Call(t9, cond, rs, rt, bd);
2481  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2482            SizeOfCodeGeneratedSince(&start));
2483}
2484
2485
2486int MacroAssembler::CallSize(Handle<Code> code,
2487                             RelocInfo::Mode rmode,
2488                             TypeFeedbackId ast_id,
2489                             Condition cond,
2490                             Register rs,
2491                             const Operand& rt,
2492                             BranchDelaySlot bd) {
2493  AllowDeferredHandleDereference using_raw_address;
2494  return CallSize(reinterpret_cast<Address>(code.location()),
2495      rmode, cond, rs, rt, bd);
2496}
2497
2498
2499void MacroAssembler::Call(Handle<Code> code,
2500                          RelocInfo::Mode rmode,
2501                          TypeFeedbackId ast_id,
2502                          Condition cond,
2503                          Register rs,
2504                          const Operand& rt,
2505                          BranchDelaySlot bd) {
2506  BlockTrampolinePoolScope block_trampoline_pool(this);
2507  Label start;
2508  bind(&start);
2509  ASSERT(RelocInfo::IsCodeTarget(rmode));
2510  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2511    SetRecordedAstId(ast_id);
2512    rmode = RelocInfo::CODE_TARGET_WITH_ID;
2513  }
2514  AllowDeferredHandleDereference embedding_raw_address;
2515  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2516  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2517            SizeOfCodeGeneratedSince(&start));
2518}
2519
2520
2521void MacroAssembler::Ret(Condition cond,
2522                         Register rs,
2523                         const Operand& rt,
2524                         BranchDelaySlot bd) {
2525  Jump(ra, cond, rs, rt, bd);
2526}
2527
2528
2529void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2530  BlockTrampolinePoolScope block_trampoline_pool(this);
2531
2532  uint32_t imm28;
2533  imm28 = jump_address(L);
2534  imm28 &= kImm28Mask;
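  // The j instruction encodes only a 26-bit instruction index (the low 28 bits
  // of the byte address), so the jump must stay within the current 256 MB
  // aligned region.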
2535  { BlockGrowBufferScope block_buf_growth(this);
2536    // Buffer growth (and relocation) must be blocked for internal references
2537    // until associated instructions are emitted and available to be patched.
2538    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2539    j(imm28);
2540  }
2541  // Emit a nop in the branch delay slot if required.
2542  if (bdslot == PROTECT)
2543    nop();
2544}
2545
2546
2547void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2548  BlockTrampolinePoolScope block_trampoline_pool(this);
2549
2550  uint32_t imm32;
2551  imm32 = jump_address(L);
2552  { BlockGrowBufferScope block_buf_growth(this);
2553    // Buffer growth (and relocation) must be blocked for internal references
2554    // until associated instructions are emitted and available to be patched.
2555    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2556    lui(at, (imm32 & kHiMask) >> kLuiShift);
2557    ori(at, at, (imm32 & kImm16Mask));
2558  }
2559  jr(at);
2560
2561  // Emit a nop in the branch delay slot if required.
2562  if (bdslot == PROTECT)
2563    nop();
2564}
2565
2566
2567void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2568  BlockTrampolinePoolScope block_trampoline_pool(this);
2569
2570  uint32_t imm32;
2571  imm32 = jump_address(L);
2572  { BlockGrowBufferScope block_buf_growth(this);
2573    // Buffer growth (and relocation) must be blocked for internal references
2574    // until associated instructions are emitted and available to be patched.
2575    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2576    lui(at, (imm32 & kHiMask) >> kLuiShift);
2577    ori(at, at, (imm32 & kImm16Mask));
2578  }
2579  jalr(at);
2580
2581  // Emit a nop in the branch delay slot if required.
2582  if (bdslot == PROTECT)
2583    nop();
2584}
2585
2586
2587void MacroAssembler::DropAndRet(int drop) {
2588  Ret(USE_DELAY_SLOT);
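  // The addiu below lands in the branch delay slot of the return jump above,
  // so the stack is adjusted before control returns to the caller.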
2589  addiu(sp, sp, drop * kPointerSize);
2590}
2591
2592void MacroAssembler::DropAndRet(int drop,
2593                                Condition cond,
2594                                Register r1,
2595                                const Operand& r2) {
2596  // Both Drop and Ret need to be conditional.
2597  Label skip;
2598  if (cond != cc_always) {
2599    Branch(&skip, NegateCondition(cond), r1, r2);
2600  }
2601
2602  Drop(drop);
2603  Ret();
2604
2605  if (cond != cc_always) {
2606    bind(&skip);
2607  }
2608}
2609
2610
2611void MacroAssembler::Drop(int count,
2612                          Condition cond,
2613                          Register reg,
2614                          const Operand& op) {
2615  if (count <= 0) {
2616    return;
2617  }
2618
2619  Label skip;
2620
2621  if (cond != al) {
    Branch(&skip, NegateCondition(cond), reg, op);
2623  }
2624
2625  addiu(sp, sp, count * kPointerSize);
2626
2627  if (cond != al) {
2628    bind(&skip);
2629  }
2630}
2631
2632
2634void MacroAssembler::Swap(Register reg1,
2635                          Register reg2,
2636                          Register scratch) {
2637  if (scratch.is(no_reg)) {
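    // No scratch register available: fall back to the classic three-XOR swap.
    // Note that reg1 and reg2 must be distinct; XOR-swapping a register with
    // itself would clear it.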
2638    Xor(reg1, reg1, Operand(reg2));
2639    Xor(reg2, reg2, Operand(reg1));
2640    Xor(reg1, reg1, Operand(reg2));
2641  } else {
2642    mov(scratch, reg1);
2643    mov(reg1, reg2);
2644    mov(reg2, scratch);
2645  }
2646}
2647
2648
2649void MacroAssembler::Call(Label* target) {
2650  BranchAndLink(target);
2651}
2652
2653
2654void MacroAssembler::Push(Handle<Object> handle) {
2655  li(at, Operand(handle));
2656  push(at);
2657}
2658
2659
2660#ifdef ENABLE_DEBUGGER_SUPPORT
2661
2662void MacroAssembler::DebugBreak() {
2663  PrepareCEntryArgs(0);
2664  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2665  CEntryStub ces(1);
2666  ASSERT(AllowThisStubCall(&ces));
2667  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
2668}
2669
2670#endif  // ENABLE_DEBUGGER_SUPPORT
2671
2672
2673// ---------------------------------------------------------------------------
2674// Exception handling.
2675
2676void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2677                                    int handler_index) {
2678  // Adjust this code if not the case.
2679  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2680  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2681  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2682  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2683  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2684  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2685
2686  // For the JSEntry handler, we must preserve a0-a3 and s0.
2687  // t1-t3 are available. We will build up the handler from the bottom by
2688  // pushing on the stack.
2689  // Set up the code object (t1) and the state (t2) for pushing.
2690  unsigned state =
2691      StackHandler::IndexField::encode(handler_index) |
2692      StackHandler::KindField::encode(kind);
2693  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2694  li(t2, Operand(state));
2695
2696  // Push the frame pointer, context, state, and code object.
2697  if (kind == StackHandler::JS_ENTRY) {
2698    ASSERT_EQ(Smi::FromInt(0), 0);
2699    // The second zero_reg indicates no context.
2700    // The first zero_reg is the NULL frame pointer.
2701    // The operands are reversed to match the order of MultiPush/Pop.
2702    Push(zero_reg, zero_reg, t2, t1);
2703  } else {
2704    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2705  }
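  // Together with the handler link pushed below, this produces the layout
  // asserted above: next, code, state, context, fp (from the new sp upwards).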
2706
2707  // Link the current handler as the next handler.
2708  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2709  lw(t1, MemOperand(t2));
2710  push(t1);
2711  // Set this new handler as the current one.
2712  sw(sp, MemOperand(t2));
2713}
2714
2715
2716void MacroAssembler::PopTryHandler() {
2717  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2718  pop(a1);
2719  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2720  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2721  sw(a1, MemOperand(at));
2722}
2723
2724
2725void MacroAssembler::JumpToHandlerEntry() {
2726  // Compute the handler entry address and jump to it.  The handler table is
2727  // a fixed array of (smi-tagged) code offsets.
2728  // v0 = exception, a1 = code object, a2 = state.
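  // In effect the target is the start of the code object's instructions plus
  // the (untagged) offset stored in the handler table at the handler index.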
2729  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
2730  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2731  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
2732  sll(a2, a2, kPointerSizeLog2);
2733  Addu(a2, a3, a2);
2734  lw(a2, MemOperand(a2));  // Smi-tagged offset.
2735  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
2736  sra(t9, a2, kSmiTagSize);
2737  Addu(t9, t9, a1);
2738  Jump(t9);  // Jump.
2739}
2740
2741
2742void MacroAssembler::Throw(Register value) {
2743  // Adjust this code if not the case.
2744  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2745  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2746  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2747  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2748  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2749  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2750
2751  // The exception is expected in v0.
2752  Move(v0, value);
2753
2754  // Drop the stack pointer to the top of the top handler.
2755  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2756                                   isolate())));
2757  lw(sp, MemOperand(a3));
2758
2759  // Restore the next handler.
2760  pop(a2);
2761  sw(a2, MemOperand(a3));
2762
2763  // Get the code object (a1) and state (a2).  Restore the context and frame
2764  // pointer.
2765  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2766
2767  // If the handler is a JS frame, restore the context to the frame.
2768  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2769  // or cp.
2770  Label done;
2771  Branch(&done, eq, cp, Operand(zero_reg));
2772  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2773  bind(&done);
2774
2775  JumpToHandlerEntry();
2776}
2777
2778
2779void MacroAssembler::ThrowUncatchable(Register value) {
2780  // Adjust this code if not the case.
2781  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2782  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2783  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2784  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2785  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2786  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2787
2788  // The exception is expected in v0.
2789  if (!value.is(v0)) {
2790    mov(v0, value);
2791  }
2792  // Drop the stack pointer to the top of the top stack handler.
2793  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2794  lw(sp, MemOperand(a3));
2795
2796  // Unwind the handlers until the ENTRY handler is found.
2797  Label fetch_next, check_kind;
2798  jmp(&check_kind);
2799  bind(&fetch_next);
2800  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2801
2802  bind(&check_kind);
2803  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2804  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2805  And(a2, a2, Operand(StackHandler::KindField::kMask));
2806  Branch(&fetch_next, ne, a2, Operand(zero_reg));
2807
2808  // Set the top handler address to next handler past the top ENTRY handler.
2809  pop(a2);
2810  sw(a2, MemOperand(a3));
2811
2812  // Get the code object (a1) and state (a2).  Clear the context and frame
2813  // pointer (0 was saved in the handler).
2814  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2815
2816  JumpToHandlerEntry();
2817}
2818
2819
2820void MacroAssembler::Allocate(int object_size,
2821                              Register result,
2822                              Register scratch1,
2823                              Register scratch2,
2824                              Label* gc_required,
2825                              AllocationFlags flags) {
2826  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2827  if (!FLAG_inline_new) {
2828    if (emit_debug_code()) {
2829      // Trash the registers to simulate an allocation failure.
2830      li(result, 0x7091);
2831      li(scratch1, 0x7191);
2832      li(scratch2, 0x7291);
2833    }
2834    jmp(gc_required);
2835    return;
2836  }
2837
2838  ASSERT(!result.is(scratch1));
2839  ASSERT(!result.is(scratch2));
2840  ASSERT(!scratch1.is(scratch2));
2841  ASSERT(!scratch1.is(t9));
2842  ASSERT(!scratch2.is(t9));
2843  ASSERT(!result.is(t9));
2844
2845  // Make object size into bytes.
2846  if ((flags & SIZE_IN_WORDS) != 0) {
2847    object_size *= kPointerSize;
2848  }
2849  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2850
2851  // Check relative positions of allocation top and limit addresses.
2852  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm, so no such checks are needed.
2854  ExternalReference allocation_top =
2855      AllocationUtils::GetAllocationTopReference(isolate(), flags);
2856  ExternalReference allocation_limit =
2857      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2858
2859  intptr_t top   =
2860      reinterpret_cast<intptr_t>(allocation_top.address());
2861  intptr_t limit =
2862      reinterpret_cast<intptr_t>(allocation_limit.address());
2863  ASSERT((limit - top) == kPointerSize);
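  // The allocation limit word immediately follows the allocation top word, so
  // a single base register (topaddr below) can reach both with offsets 0 and
  // kPointerSize.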
2864
2865  // Set up allocation top address and object size registers.
2866  Register topaddr = scratch1;
2867  li(topaddr, Operand(allocation_top));
2868
2869  // This code stores a temporary value in t9.
2870  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2871    // Load allocation top into result and allocation limit into t9.
2872    lw(result, MemOperand(topaddr));
2873    lw(t9, MemOperand(topaddr, kPointerSize));
2874  } else {
2875    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below, so this use of t9 does not cause a difference in
      // register contents between debug and release mode.
2879      lw(t9, MemOperand(topaddr));
2880      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2881    }
2882    // Load allocation limit into t9. Result already contains allocation top.
2883    lw(t9, MemOperand(topaddr, limit - top));
2884  }
2885
2886  if ((flags & DOUBLE_ALIGNMENT) != 0) {
2887    // Align the next allocation. Storing the filler map without checking top is
2888    // safe in new-space because the limit of the heap is aligned there.
2889    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2890    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2891    And(scratch2, result, Operand(kDoubleAlignmentMask));
2892    Label aligned;
2893    Branch(&aligned, eq, scratch2, Operand(zero_reg));
2894    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2895      Branch(gc_required, Ugreater_equal, result, Operand(t9));
2896    }
2897    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2898    sw(scratch2, MemOperand(result));
2899    Addu(result, result, Operand(kDoubleSize / 2));
2900    bind(&aligned);
2901  }
2902
2903  // Calculate new top and bail out if new space is exhausted. Use result
2904  // to calculate the new top.
2905  Addu(scratch2, result, Operand(object_size));
2906  Branch(gc_required, Ugreater, scratch2, Operand(t9));
2907  sw(scratch2, MemOperand(topaddr));
2908
2909  // Tag object if requested.
2910  if ((flags & TAG_OBJECT) != 0) {
2911    Addu(result, result, Operand(kHeapObjectTag));
2912  }
2913}
2914
2915
2916void MacroAssembler::Allocate(Register object_size,
2917                              Register result,
2918                              Register scratch1,
2919                              Register scratch2,
2920                              Label* gc_required,
2921                              AllocationFlags flags) {
2922  if (!FLAG_inline_new) {
2923    if (emit_debug_code()) {
2924      // Trash the registers to simulate an allocation failure.
2925      li(result, 0x7091);
2926      li(scratch1, 0x7191);
2927      li(scratch2, 0x7291);
2928    }
2929    jmp(gc_required);
2930    return;
2931  }
2932
2933  ASSERT(!result.is(scratch1));
2934  ASSERT(!result.is(scratch2));
2935  ASSERT(!scratch1.is(scratch2));
2936  ASSERT(!object_size.is(t9));
2937  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2938
2939  // Check relative positions of allocation top and limit addresses.
2940  // ARM adds additional checks to make sure the ldm instruction can be
  // used. On MIPS we don't have ldm, so no such checks are needed.
2942  ExternalReference allocation_top =
2943      AllocationUtils::GetAllocationTopReference(isolate(), flags);
2944  ExternalReference allocation_limit =
2945      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2946  intptr_t top   =
2947      reinterpret_cast<intptr_t>(allocation_top.address());
2948  intptr_t limit =
2949      reinterpret_cast<intptr_t>(allocation_limit.address());
2950  ASSERT((limit - top) == kPointerSize);
2951
2952  // Set up allocation top address and object size registers.
2953  Register topaddr = scratch1;
2954  li(topaddr, Operand(allocation_top));
2955
2956  // This code stores a temporary value in t9.
2957  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2958    // Load allocation top into result and allocation limit into t9.
2959    lw(result, MemOperand(topaddr));
2960    lw(t9, MemOperand(topaddr, kPointerSize));
2961  } else {
2962    if (emit_debug_code()) {
      // Assert that result actually contains top on entry. t9 is used
      // immediately below, so this use of t9 does not cause a difference in
      // register contents between debug and release mode.
2966      lw(t9, MemOperand(topaddr));
2967      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2968    }
2969    // Load allocation limit into t9. Result already contains allocation top.
2970    lw(t9, MemOperand(topaddr, limit - top));
2971  }
2972
2973  if ((flags & DOUBLE_ALIGNMENT) != 0) {
2974    // Align the next allocation. Storing the filler map without checking top is
2975    // safe in new-space because the limit of the heap is aligned there.
2976    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2977    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2978    And(scratch2, result, Operand(kDoubleAlignmentMask));
2979    Label aligned;
2980    Branch(&aligned, eq, scratch2, Operand(zero_reg));
2981    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2982      Branch(gc_required, Ugreater_equal, result, Operand(t9));
2983    }
2984    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2985    sw(scratch2, MemOperand(result));
2986    Addu(result, result, Operand(kDoubleSize / 2));
2987    bind(&aligned);
2988  }
2989
2990  // Calculate new top and bail out if new space is exhausted. Use result
2991  // to calculate the new top. Object size may be in words so a shift is
2992  // required to get the number of bytes.
2993  if ((flags & SIZE_IN_WORDS) != 0) {
2994    sll(scratch2, object_size, kPointerSizeLog2);
2995    Addu(scratch2, result, scratch2);
2996  } else {
2997    Addu(scratch2, result, Operand(object_size));
2998  }
2999  Branch(gc_required, Ugreater, scratch2, Operand(t9));
3000
3001  // Update allocation top. result temporarily holds the new top.
3002  if (emit_debug_code()) {
3003    And(t9, scratch2, Operand(kObjectAlignmentMask));
3004    Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3005  }
3006  sw(scratch2, MemOperand(topaddr));
3007
3008  // Tag object if requested.
3009  if ((flags & TAG_OBJECT) != 0) {
3010    Addu(result, result, Operand(kHeapObjectTag));
3011  }
3012}
3013
3014
3015void MacroAssembler::UndoAllocationInNewSpace(Register object,
3016                                              Register scratch) {
3017  ExternalReference new_space_allocation_top =
3018      ExternalReference::new_space_allocation_top_address(isolate());
3019
3020  // Make sure the object has no tag before resetting top.
3021  And(object, object, Operand(~kHeapObjectTagMask));
3022#ifdef DEBUG
3023  // Check that the object un-allocated is below the current top.
3024  li(scratch, Operand(new_space_allocation_top));
3025  lw(scratch, MemOperand(scratch));
3026  Check(less, kUndoAllocationOfNonAllocatedMemory,
3027      object, Operand(scratch));
3028#endif
3029  // Write the address of the object to un-allocate as the current top.
3030  li(scratch, Operand(new_space_allocation_top));
3031  sw(object, MemOperand(scratch));
3032}
3033
3034
3035void MacroAssembler::AllocateTwoByteString(Register result,
3036                                           Register length,
3037                                           Register scratch1,
3038                                           Register scratch2,
3039                                           Register scratch3,
3040                                           Label* gc_required) {
  // Calculate the number of bytes needed for the characters in the string
  // while observing object alignment.
3043  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3044  sll(scratch1, length, 1);  // Length in bytes, not chars.
3045  addiu(scratch1, scratch1,
3046       kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3047  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
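  // That is, scratch1 = (2 * length + SeqTwoByteString::kHeaderSize +
  // kObjectAlignmentMask) & ~kObjectAlignmentMask: the string size in bytes,
  // rounded up to the object alignment.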
3048
3049  // Allocate two-byte string in new space.
3050  Allocate(scratch1,
3051           result,
3052           scratch2,
3053           scratch3,
3054           gc_required,
3055           TAG_OBJECT);
3056
3057  // Set the map, length and hash field.
3058  InitializeNewString(result,
3059                      length,
3060                      Heap::kStringMapRootIndex,
3061                      scratch1,
3062                      scratch2);
3063}
3064
3065
3066void MacroAssembler::AllocateAsciiString(Register result,
3067                                         Register length,
3068                                         Register scratch1,
3069                                         Register scratch2,
3070                                         Register scratch3,
3071                                         Label* gc_required) {
3072  // Calculate the number of bytes needed for the characters in the string
3073  // while observing object alignment.
3074  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3075  ASSERT(kCharSize == 1);
3076  addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3077  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3078
3079  // Allocate ASCII string in new space.
3080  Allocate(scratch1,
3081           result,
3082           scratch2,
3083           scratch3,
3084           gc_required,
3085           TAG_OBJECT);
3086
3087  // Set the map, length and hash field.
3088  InitializeNewString(result,
3089                      length,
3090                      Heap::kAsciiStringMapRootIndex,
3091                      scratch1,
3092                      scratch2);
3093}
3094
3095
3096void MacroAssembler::AllocateTwoByteConsString(Register result,
3097                                               Register length,
3098                                               Register scratch1,
3099                                               Register scratch2,
3100                                               Label* gc_required) {
3101  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3102           TAG_OBJECT);
3103  InitializeNewString(result,
3104                      length,
3105                      Heap::kConsStringMapRootIndex,
3106                      scratch1,
3107                      scratch2);
3108}
3109
3110
3111void MacroAssembler::AllocateAsciiConsString(Register result,
3112                                             Register length,
3113                                             Register scratch1,
3114                                             Register scratch2,
3115                                             Label* gc_required) {
3116  Label allocate_new_space, install_map;
3117  AllocationFlags flags = TAG_OBJECT;
3118
3119  ExternalReference high_promotion_mode = ExternalReference::
3120      new_space_high_promotion_mode_active_address(isolate());
3121  li(scratch1, Operand(high_promotion_mode));
3122  lw(scratch1, MemOperand(scratch1, 0));
3123  Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
3124
3125  Allocate(ConsString::kSize,
3126           result,
3127           scratch1,
3128           scratch2,
3129           gc_required,
3130           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3131
3132  jmp(&install_map);
3133
3134  bind(&allocate_new_space);
3135  Allocate(ConsString::kSize,
3136           result,
3137           scratch1,
3138           scratch2,
3139           gc_required,
3140           flags);
3141
3142  bind(&install_map);
3143
3144  InitializeNewString(result,
3145                      length,
3146                      Heap::kConsAsciiStringMapRootIndex,
3147                      scratch1,
3148                      scratch2);
3149}
3150
3151
3152void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3153                                                 Register length,
3154                                                 Register scratch1,
3155                                                 Register scratch2,
3156                                                 Label* gc_required) {
3157  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3158           TAG_OBJECT);
3159
3160  InitializeNewString(result,
3161                      length,
3162                      Heap::kSlicedStringMapRootIndex,
3163                      scratch1,
3164                      scratch2);
3165}
3166
3167
3168void MacroAssembler::AllocateAsciiSlicedString(Register result,
3169                                               Register length,
3170                                               Register scratch1,
3171                                               Register scratch2,
3172                                               Label* gc_required) {
3173  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3174           TAG_OBJECT);
3175
3176  InitializeNewString(result,
3177                      length,
3178                      Heap::kSlicedAsciiStringMapRootIndex,
3179                      scratch1,
3180                      scratch2);
3181}
3182
3183
3184void MacroAssembler::JumpIfNotUniqueName(Register reg,
3185                                         Label* not_unique_name) {
3186  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
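  // reg holds an instance type. Internalized strings have both the
  // not-a-string and not-internalized bits clear (see the assert above);
  // any other unique name must be a Symbol.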
3187  Label succeed;
3188  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3189  Branch(&succeed, eq, at, Operand(zero_reg));
3190  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3191
3192  bind(&succeed);
3193}
3194
3195
3196// Allocates a heap number or jumps to the label if the young space is full and
3197// a scavenge is needed.
3198void MacroAssembler::AllocateHeapNumber(Register result,
3199                                        Register scratch1,
3200                                        Register scratch2,
3201                                        Register heap_number_map,
3202                                        Label* need_gc,
3203                                        TaggingMode tagging_mode) {
3204  // Allocate an object in the heap for the heap number and tag it as a heap
3205  // object.
3206  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3207           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3208
3209  // Store heap number map in the allocated object.
3210  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3211  if (tagging_mode == TAG_RESULT) {
3212    sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3213  } else {
3214    sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3215  }
3216}
3217
3218
3219void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3220                                                 FPURegister value,
3221                                                 Register scratch1,
3222                                                 Register scratch2,
3223                                                 Label* gc_required) {
3224  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3225  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3226  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3227}
3228
3229
3230// Copies a fixed number of fields of heap objects from src to dst.
3231void MacroAssembler::CopyFields(Register dst,
3232                                Register src,
3233                                RegList temps,
3234                                int field_count) {
3235  ASSERT((temps & dst.bit()) == 0);
3236  ASSERT((temps & src.bit()) == 0);
3237  // Primitive implementation using only one temporary register.
3238
3239  Register tmp = no_reg;
3240  // Find a temp register in temps list.
3241  for (int i = 0; i < kNumRegisters; i++) {
3242    if ((temps & (1 << i)) != 0) {
3243      tmp.code_ = i;
3244      break;
3245    }
3246  }
3247  ASSERT(!tmp.is(no_reg));
3248
3249  for (int i = 0; i < field_count; i++) {
3250    lw(tmp, FieldMemOperand(src, i * kPointerSize));
3251    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3252  }
3253}
3254
3255
3256void MacroAssembler::CopyBytes(Register src,
3257                               Register dst,
3258                               Register length,
3259                               Register scratch) {
3260  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3261
3262  // Align src before copying in word size chunks.
3263  Branch(&byte_loop, le, length, Operand(kPointerSize));
3264  bind(&align_loop_1);
3265  And(scratch, src, kPointerSize - 1);
3266  Branch(&word_loop, eq, scratch, Operand(zero_reg));
3267  lbu(scratch, MemOperand(src));
3268  Addu(src, src, 1);
3269  sb(scratch, MemOperand(dst));
3270  Addu(dst, dst, 1);
3271  Subu(length, length, Operand(1));
3272  Branch(&align_loop_1, ne, length, Operand(zero_reg));
3273
3274  // Copy bytes in word size chunks.
3275  bind(&word_loop);
3276  if (emit_debug_code()) {
3277    And(scratch, src, kPointerSize - 1);
3278    Assert(eq, kExpectingAlignmentForCopyBytes,
3279        scratch, Operand(zero_reg));
3280  }
3281  Branch(&byte_loop, lt, length, Operand(kPointerSize));
3282  lw(scratch, MemOperand(src));
3283  Addu(src, src, kPointerSize);
3284
3285  // TODO(kalmard) check if this can be optimized to use sw in most cases.
3286  // Can't use unaligned access - copy byte by byte.
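  // The loaded word is written out least-significant byte first, which
  // preserves byte order on a little-endian target.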
3287  sb(scratch, MemOperand(dst, 0));
3288  srl(scratch, scratch, 8);
3289  sb(scratch, MemOperand(dst, 1));
3290  srl(scratch, scratch, 8);
3291  sb(scratch, MemOperand(dst, 2));
3292  srl(scratch, scratch, 8);
3293  sb(scratch, MemOperand(dst, 3));
3294  Addu(dst, dst, 4);
3295
3296  Subu(length, length, Operand(kPointerSize));
3297  Branch(&word_loop);
3298
3299  // Copy the last bytes if any left.
3300  bind(&byte_loop);
3301  Branch(&done, eq, length, Operand(zero_reg));
3302  bind(&byte_loop_1);
3303  lbu(scratch, MemOperand(src));
3304  Addu(src, src, 1);
3305  sb(scratch, MemOperand(dst));
3306  Addu(dst, dst, 1);
3307  Subu(length, length, Operand(1));
3308  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3309  bind(&done);
3310}
3311
3312
3313void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3314                                                Register end_offset,
3315                                                Register filler) {
3316  Label loop, entry;
3317  Branch(&entry);
3318  bind(&loop);
3319  sw(filler, MemOperand(start_offset));
3320  Addu(start_offset, start_offset, kPointerSize);
3321  bind(&entry);
3322  Branch(&loop, lt, start_offset, Operand(end_offset));
3323}
3324
3325
3326void MacroAssembler::CheckFastElements(Register map,
3327                                       Register scratch,
3328                                       Label* fail) {
3329  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3330  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3331  STATIC_ASSERT(FAST_ELEMENTS == 2);
3332  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
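  // Given the kind ordering asserted above, a single unsigned comparison
  // against the largest fast (holey) elements value rejects every non-fast
  // elements kind.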
3333  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3334  Branch(fail, hi, scratch,
3335         Operand(Map::kMaximumBitField2FastHoleyElementValue));
3336}
3337
3338
3339void MacroAssembler::CheckFastObjectElements(Register map,
3340                                             Register scratch,
3341                                             Label* fail) {
3342  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3343  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3344  STATIC_ASSERT(FAST_ELEMENTS == 2);
3345  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3346  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3347  Branch(fail, ls, scratch,
3348         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3349  Branch(fail, hi, scratch,
3350         Operand(Map::kMaximumBitField2FastHoleyElementValue));
3351}
3352
3353
3354void MacroAssembler::CheckFastSmiElements(Register map,
3355                                          Register scratch,
3356                                          Label* fail) {
3357  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3358  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3359  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3360  Branch(fail, hi, scratch,
3361         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3362}
3363
3364
3365void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3366                                                 Register key_reg,
3367                                                 Register elements_reg,
3368                                                 Register scratch1,
3369                                                 Register scratch2,
3370                                                 Register scratch3,
3371                                                 Label* fail,
3372                                                 int elements_offset) {
3373  Label smi_value, maybe_nan, have_double_value, is_nan, done;
3374  Register mantissa_reg = scratch2;
3375  Register exponent_reg = scratch3;
3376
3377  // Handle smi values specially.
3378  JumpIfSmi(value_reg, &smi_value);
3379
  // Ensure that the object is a heap number.
3381  CheckMap(value_reg,
3382           scratch1,
3383           Heap::kHeapNumberMapRootIndex,
3384           fail,
3385           DONT_DO_SMI_CHECK);
3386
  // Check for NaN or Infinity: both have an upper word greater than or
  // equal (signed) to 0x7ff00000; the maybe_nan path below tells them apart.
3389  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3390  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3391  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3392
3393  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3394
3395  bind(&have_double_value);
3396  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
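  // key_reg holds a Smi (value << kSmiTagSize), so shifting left by
  // kDoubleSizeLog2 - kSmiTagSize turns it directly into key * kDoubleSize,
  // the byte offset of the element.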
3397  Addu(scratch1, scratch1, elements_reg);
3398  sw(mantissa_reg, FieldMemOperand(
3399     scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3400  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3401      sizeof(kHoleNanLower32);
3402  sw(exponent_reg, FieldMemOperand(scratch1, offset));
3403  jmp(&done);
3404
3405  bind(&maybe_nan);
  // Could be NaN or Infinity. If the fraction is not zero, it's NaN;
  // otherwise it's Infinity, and the non-NaN code path applies.
3408  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3409  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3410  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3411  bind(&is_nan);
3412  // Load canonical NaN for storing into the double array.
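  // FixedDoubleArray reserves one particular NaN bit pattern (the "hole"
  // NaN) to mark missing elements, so every other NaN must be stored in this
  // canonical form to avoid being misread as the hole later.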
3413  uint64_t nan_int64 = BitCast<uint64_t>(
3414      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3415  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3416  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3417  jmp(&have_double_value);
3418
3419  bind(&smi_value);
3420  Addu(scratch1, elements_reg,
3421      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3422              elements_offset));
3423  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3424  Addu(scratch1, scratch1, scratch2);
  // scratch1 is now the effective address of the double element.
3426
3427  Register untagged_value = elements_reg;
3428  SmiUntag(untagged_value, value_reg);
3429  mtc1(untagged_value, f2);
3430  cvt_d_w(f0, f2);
3431  sdc1(f0, MemOperand(scratch1, 0));
3432  bind(&done);
3433}
3434
3435
3436void MacroAssembler::CompareMapAndBranch(Register obj,
3437                                         Register scratch,
3438                                         Handle<Map> map,
3439                                         Label* early_success,
3440                                         Condition cond,
3441                                         Label* branch_to) {
3442  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3443  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3444}
3445
3446
3447void MacroAssembler::CompareMapAndBranch(Register obj_map,
3448                                         Handle<Map> map,
3449                                         Label* early_success,
3450                                         Condition cond,
3451                                         Label* branch_to) {
3452  Branch(branch_to, cond, obj_map, Operand(map));
3453}
3454
3455
3456void MacroAssembler::CheckMap(Register obj,
3457                              Register scratch,
3458                              Handle<Map> map,
3459                              Label* fail,
3460                              SmiCheckType smi_check_type) {
3461  if (smi_check_type == DO_SMI_CHECK) {
3462    JumpIfSmi(obj, fail);
3463  }
3464  Label success;
3465  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
3466  bind(&success);
3467}
3468
3469
3470void MacroAssembler::DispatchMap(Register obj,
3471                                 Register scratch,
3472                                 Handle<Map> map,
3473                                 Handle<Code> success,
3474                                 SmiCheckType smi_check_type) {
3475  Label fail;
3476  if (smi_check_type == DO_SMI_CHECK) {
3477    JumpIfSmi(obj, &fail);
3478  }
3479  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3480  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3481  bind(&fail);
3482}
3483
3484
3485void MacroAssembler::CheckMap(Register obj,
3486                              Register scratch,
3487                              Heap::RootListIndex index,
3488                              Label* fail,
3489                              SmiCheckType smi_check_type) {
3490  if (smi_check_type == DO_SMI_CHECK) {
3491    JumpIfSmi(obj, fail);
3492  }
3493  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3494  LoadRoot(at, index);
3495  Branch(fail, ne, scratch, Operand(at));
3496}
3497
3498
3499void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3500  if (IsMipsSoftFloatABI) {
3501    Move(dst, v0, v1);
3502  } else {
3503    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
3504  }
3505}
3506
3507
3508void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3509  if (!IsMipsSoftFloatABI) {
3510    Move(f12, dreg);
3511  } else {
3512    Move(a0, a1, dreg);
3513  }
3514}
3515
3516
3517void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3518                                             DoubleRegister dreg2) {
3519  if (!IsMipsSoftFloatABI) {
3520    if (dreg2.is(f12)) {
3521      ASSERT(!dreg1.is(f14));
3522      Move(f14, dreg2);
3523      Move(f12, dreg1);
3524    } else {
3525      Move(f12, dreg1);
3526      Move(f14, dreg2);
3527    }
3528  } else {
3529    Move(a0, a1, dreg1);
3530    Move(a2, a3, dreg2);
3531  }
3532}
3533
3534
3535void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3536                                             Register reg) {
3537  if (!IsMipsSoftFloatABI) {
3538    Move(f12, dreg);
3539    Move(a2, reg);
3540  } else {
3541    Move(a2, reg);
3542    Move(a0, a1, dreg);
3543  }
3544}
3545
3546
3547void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3548  // This macro takes the dst register to make the code more readable
3549  // at the call sites. However, the dst register has to be t1 to
3550  // follow the calling convention which requires the call type to be
3551  // in t1.
3552  ASSERT(dst.is(t1));
3553  if (call_kind == CALL_AS_FUNCTION) {
3554    li(dst, Operand(Smi::FromInt(1)));
3555  } else {
3556    li(dst, Operand(Smi::FromInt(0)));
3557  }
3558}
3559
3560
3561// -----------------------------------------------------------------------------
3562// JavaScript invokes.
3563
3564void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3565                                    const ParameterCount& actual,
3566                                    Handle<Code> code_constant,
3567                                    Register code_reg,
3568                                    Label* done,
3569                                    bool* definitely_mismatches,
3570                                    InvokeFlag flag,
3571                                    const CallWrapper& call_wrapper,
3572                                    CallKind call_kind) {
3573  bool definitely_matches = false;
3574  *definitely_mismatches = false;
3575  Label regular_invoke;
3576
3577  // Check whether the expected and actual arguments count match. If not,
3578  // setup registers according to contract with ArgumentsAdaptorTrampoline:
3579  //  a0: actual arguments count
3580  //  a1: function (passed through to callee)
3581  //  a2: expected arguments count
3582  //  a3: callee code entry
3583
3584  // The code below is made a lot easier because the calling code already sets
3585  // up actual and expected registers according to the contract if values are
3586  // passed in registers.
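  // Three cases are handled below: both counts are immediates (compared at
  // code generation time), only the actual count is an immediate (compared
  // against the expected count register), or both counts are in registers.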
3587  ASSERT(actual.is_immediate() || actual.reg().is(a0));
3588  ASSERT(expected.is_immediate() || expected.reg().is(a2));
3589  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3590
3591  if (expected.is_immediate()) {
3592    ASSERT(actual.is_immediate());
3593    if (expected.immediate() == actual.immediate()) {
3594      definitely_matches = true;
3595    } else {
3596      li(a0, Operand(actual.immediate()));
3597      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3598      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip the adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
3603        definitely_matches = true;
3604      } else {
3605        *definitely_mismatches = true;
3606        li(a2, Operand(expected.immediate()));
3607      }
3608    }
3609  } else if (actual.is_immediate()) {
3610    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3611    li(a0, Operand(actual.immediate()));
3612  } else {
3613    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3614  }
3615
3616  if (!definitely_matches) {
3617    if (!code_constant.is_null()) {
3618      li(a3, Operand(code_constant));
3619      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3620    }
3621
3622    Handle<Code> adaptor =
3623        isolate()->builtins()->ArgumentsAdaptorTrampoline();
3624    if (flag == CALL_FUNCTION) {
3625      call_wrapper.BeforeCall(CallSize(adaptor));
3626      SetCallKind(t1, call_kind);
3627      Call(adaptor);
3628      call_wrapper.AfterCall();
3629      if (!*definitely_mismatches) {
3630        Branch(done);
3631      }
3632    } else {
3633      SetCallKind(t1, call_kind);
3634      Jump(adaptor, RelocInfo::CODE_TARGET);
3635    }
3636    bind(&regular_invoke);
3637  }
3638}
3639
3640
3641void MacroAssembler::InvokeCode(Register code,
3642                                const ParameterCount& expected,
3643                                const ParameterCount& actual,
3644                                InvokeFlag flag,
3645                                const CallWrapper& call_wrapper,
3646                                CallKind call_kind) {
3647  // You can't call a function without a valid frame.
3648  ASSERT(flag == JUMP_FUNCTION || has_frame());
3649
3650  Label done;
3651
3652  bool definitely_mismatches = false;
3653  InvokePrologue(expected, actual, Handle<Code>::null(), code,
3654                 &done, &definitely_mismatches, flag,
3655                 call_wrapper, call_kind);
3656  if (!definitely_mismatches) {
3657    if (flag == CALL_FUNCTION) {
3658      call_wrapper.BeforeCall(CallSize(code));
3659      SetCallKind(t1, call_kind);
3660      Call(code);
3661      call_wrapper.AfterCall();
3662    } else {
3663      ASSERT(flag == JUMP_FUNCTION);
3664      SetCallKind(t1, call_kind);
3665      Jump(code);
3666    }
    // Continue here if InvokePrologue handled the invocation itself
    // (through the arguments adaptor) because of mismatched parameter
    // counts.
3669    bind(&done);
3670  }
3671}
3672
3673
3674void MacroAssembler::InvokeCode(Handle<Code> code,
3675                                const ParameterCount& expected,
3676                                const ParameterCount& actual,
3677                                RelocInfo::Mode rmode,
3678                                InvokeFlag flag,
3679                                CallKind call_kind) {
3680  // You can't call a function without a valid frame.
3681  ASSERT(flag == JUMP_FUNCTION || has_frame());
3682
3683  Label done;
3684
3685  bool definitely_mismatches = false;
3686  InvokePrologue(expected, actual, code, no_reg,
3687                 &done, &definitely_mismatches, flag,
3688                 NullCallWrapper(), call_kind);
3689  if (!definitely_mismatches) {
3690    if (flag == CALL_FUNCTION) {
3691      SetCallKind(t1, call_kind);
3692      Call(code, rmode);
3693    } else {
3694      SetCallKind(t1, call_kind);
3695      Jump(code, rmode);
3696    }
    // Continue here if InvokePrologue handled the invocation itself
    // (through the arguments adaptor) because of mismatched parameter
    // counts.
3699    bind(&done);
3700  }
3701}
3702
3703
3704void MacroAssembler::InvokeFunction(Register function,
3705                                    const ParameterCount& actual,
3706                                    InvokeFlag flag,
3707                                    const CallWrapper& call_wrapper,
3708                                    CallKind call_kind) {
3709  // You can't call a function without a valid frame.
3710  ASSERT(flag == JUMP_FUNCTION || has_frame());
3711
3712  // Contract with called JS functions requires that function is passed in a1.
3713  ASSERT(function.is(a1));
3714  Register expected_reg = a2;
3715  Register code_reg = a3;
3716
3717  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3718  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3719  lw(expected_reg,
3720      FieldMemOperand(code_reg,
3721                      SharedFunctionInfo::kFormalParameterCountOffset));
3722  sra(expected_reg, expected_reg, kSmiTagSize);
3723  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3724
3725  ParameterCount expected(expected_reg);
3726  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3727}
3728
3729
3730void MacroAssembler::InvokeFunction(Register function,
3731                                    const ParameterCount& expected,
3732                                    const ParameterCount& actual,
3733                                    InvokeFlag flag,
3734                                    const CallWrapper& call_wrapper,
3735                                    CallKind call_kind) {
3736  // You can't call a function without a valid frame.
3737  ASSERT(flag == JUMP_FUNCTION || has_frame());
3738
3739  // Contract with called JS functions requires that function is passed in a1.
3740  ASSERT(function.is(a1));
3741
3742  // Get the function and setup the context.
3743  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3744
3745  // We call indirectly through the code field in the function to
3746  // allow recompilation to take effect without changing any of the
3747  // call sites.
3748  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3749  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
3750}
3751
3752
3753void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3754                                    const ParameterCount& expected,
3755                                    const ParameterCount& actual,
3756                                    InvokeFlag flag,
3757                                    const CallWrapper& call_wrapper,
3758                                    CallKind call_kind) {
3759  li(a1, function);
3760  InvokeFunction(a1, expected, actual, flag, call_wrapper, call_kind);
3761}
3762
3763
3764void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3765                                          Register map,
3766                                          Register scratch,
3767                                          Label* fail) {
3768  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3769  IsInstanceJSObjectType(map, scratch, fail);
3770}
3771
3772
3773void MacroAssembler::IsInstanceJSObjectType(Register map,
3774                                            Register scratch,
3775                                            Label* fail) {
3776  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3777  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3778  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3779}
3780
3781
3782void MacroAssembler::IsObjectJSStringType(Register object,
3783                                          Register scratch,
3784                                          Label* fail) {
3785  ASSERT(kNotStringTag != 0);
3786
3787  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3788  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3789  And(scratch, scratch, Operand(kIsNotStringMask));
3790  Branch(fail, ne, scratch, Operand(zero_reg));
3791}
3792
3793
3794void MacroAssembler::IsObjectNameType(Register object,
3795                                      Register scratch,
3796                                      Label* fail) {
3797  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3798  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3799  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3800}
3801
3802
3803// ---------------------------------------------------------------------------
3804// Support functions.
3805
3806
3807void MacroAssembler::TryGetFunctionPrototype(Register function,
3808                                             Register result,
3809                                             Register scratch,
3810                                             Label* miss,
3811                                             bool miss_on_bound_function) {
3812  // Check that the receiver isn't a smi.
3813  JumpIfSmi(function, miss);
3814
3815  // Check that the function really is a function.  Load map into result reg.
3816  GetObjectType(function, result, scratch);
3817  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3818
3819  if (miss_on_bound_function) {
3820    lw(scratch,
3821       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3822    lw(scratch,
3823       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3824    And(scratch, scratch,
3825        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3826    Branch(miss, ne, scratch, Operand(zero_reg));
3827  }
3828
3829  // Make sure that the function has an instance prototype.
3830  Label non_instance;
3831  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3832  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3833  Branch(&non_instance, ne, scratch, Operand(zero_reg));
3834
3835  // Get the prototype or initial map from the function.
3836  lw(result,
3837     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3838
3839  // If the prototype or initial map is the hole, don't return it and
3840  // simply miss the cache instead. This will allow us to allocate a
3841  // prototype object on-demand in the runtime system.
3842  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3843  Branch(miss, eq, result, Operand(t8));
3844
3845  // If the function does not have an initial map, we're done.
3846  Label done;
3847  GetObjectType(result, scratch, scratch);
3848  Branch(&done, ne, scratch, Operand(MAP_TYPE));
3849
3850  // Get the prototype from the initial map.
3851  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3852  jmp(&done);
3853
3854  // Non-instance prototype: Fetch prototype from constructor field
3855  // in initial map.
3856  bind(&non_instance);
3857  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3858
3859  // All done.
3860  bind(&done);
3861}
3862
3863
3864void MacroAssembler::GetObjectType(Register object,
3865                                   Register map,
3866                                   Register type_reg) {
3867  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3868  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3869}
3870
3871
3872// -----------------------------------------------------------------------------
3873// Runtime calls.
3874
3875void MacroAssembler::CallStub(CodeStub* stub,
3876                              TypeFeedbackId ast_id,
3877                              Condition cond,
3878                              Register r1,
3879                              const Operand& r2,
3880                              BranchDelaySlot bd) {
3881  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
3882  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
3883       cond, r1, r2, bd);
3884}
3885
3886
3887void MacroAssembler::TailCallStub(CodeStub* stub) {
3888  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
3889}
3890
3891
3892static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3893  return ref0.address() - ref1.address();
3894}
3895
3896
3897void MacroAssembler::CallApiFunctionAndReturn(
3898    ExternalReference function,
3899    Address function_address,
3900    ExternalReference thunk_ref,
3901    Register thunk_last_arg,
3902    int stack_space,
3903    MemOperand return_value_operand,
3904    MemOperand* context_restore_operand) {
3905  ExternalReference next_address =
3906      ExternalReference::handle_scope_next_address(isolate());
3907  const int kNextOffset = 0;
3908  const int kLimitOffset = AddressOffset(
3909      ExternalReference::handle_scope_limit_address(isolate()),
3910      next_address);
3911  const int kLevelOffset = AddressOffset(
3912      ExternalReference::handle_scope_level_address(isolate()),
3913      next_address);
3914
3915  // Allocate HandleScope in callee-save registers.
3916  li(s3, Operand(next_address));
3917  lw(s0, MemOperand(s3, kNextOffset));
3918  lw(s1, MemOperand(s3, kLimitOffset));
3919  lw(s2, MemOperand(s3, kLevelOffset));
3920  Addu(s2, s2, Operand(1));
3921  sw(s2, MemOperand(s3, kLevelOffset));
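  // s0 now holds the previous handle scope "next" pointer, s1 the previous
  // limit and s2 the incremented level. Being callee-saved, they survive the
  // native call below and are used afterwards to restore and check the
  // handle scope.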
3922
3923  if (FLAG_log_timer_events) {
3924    FrameScope frame(this, StackFrame::MANUAL);
3925    PushSafepointRegisters();
3926    PrepareCallCFunction(1, a0);
3927    li(a0, Operand(ExternalReference::isolate_address(isolate())));
3928    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
3929    PopSafepointRegisters();
3930  }
3931
3932  Label profiler_disabled;
3933  Label end_profiler_check;
3934  bool* is_profiling_flag =
3935      isolate()->cpu_profiler()->is_profiling_address();
3936  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
3937  li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
3938  lb(t9, MemOperand(t9, 0));
3939  beq(t9, zero_reg, &profiler_disabled);
3940
3941  // Third parameter is the address of the actual getter function.
3942  li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
3943  li(t9, Operand(thunk_ref));
3944  jmp(&end_profiler_check);
3945
3946  bind(&profiler_disabled);
3947  li(t9, Operand(function));
3948
3949  bind(&end_profiler_check);
3950
3951  // Native call returns to the DirectCEntry stub which redirects to the
3952  // return address pushed on stack (could have moved after GC).
3953  // DirectCEntry stub itself is generated early and never moves.
3954  DirectCEntryStub stub;
3955  stub.GenerateCall(this, t9);
3956
3957  if (FLAG_log_timer_events) {
3958    FrameScope frame(this, StackFrame::MANUAL);
3959    PushSafepointRegisters();
3960    PrepareCallCFunction(1, a0);
3961    li(a0, Operand(ExternalReference::isolate_address(isolate())));
3962    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
3963    PopSafepointRegisters();
3964  }
3965
3966  Label promote_scheduled_exception;
3967  Label exception_handled;
3968  Label delete_allocated_handles;
3969  Label leave_exit_frame;
3970  Label return_value_loaded;
3971
3972  // Load value from ReturnValue.
3973  lw(v0, return_value_operand);
3974  bind(&return_value_loaded);
3975
3976  // No more valid handles (the result handle was the last one). Restore
3977  // previous handle scope.
3978  sw(s0, MemOperand(s3, kNextOffset));
3979  if (emit_debug_code()) {
3980    lw(a1, MemOperand(s3, kLevelOffset));
3981    Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
3982  }
3983  Subu(s2, s2, Operand(1));
3984  sw(s2, MemOperand(s3, kLevelOffset));
3985  lw(at, MemOperand(s3, kLimitOffset));
3986  Branch(&delete_allocated_handles, ne, s1, Operand(at));
3987
3988  // Check if the function scheduled an exception.
3989  bind(&leave_exit_frame);
3990  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3991  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3992  lw(t1, MemOperand(at));
3993  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3994  bind(&exception_handled);
3995
3996  bool restore_context = context_restore_operand != NULL;
3997  if (restore_context) {
3998    lw(cp, *context_restore_operand);
3999  }
4000  li(s0, Operand(stack_space));
4001  LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
4002
4003  bind(&promote_scheduled_exception);
4004  {
4005    FrameScope frame(this, StackFrame::INTERNAL);
4006    CallExternalReference(
4007        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4008        0);
4009  }
4010  jmp(&exception_handled);
4011
4012  // HandleScope limit has changed. Delete allocated extensions.
4013  bind(&delete_allocated_handles);
4014  sw(s1, MemOperand(s3, kLimitOffset));
4015  mov(s0, v0);
4016  mov(a0, v0);
4017  PrepareCallCFunction(1, s1);
4018  li(a0, Operand(ExternalReference::isolate_address(isolate())));
4019  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4020      1);
4021  mov(v0, s0);
4022  jmp(&leave_exit_frame);
4023}
4024
4025
4026bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4027  return has_frame_ || !stub->SometimesSetsUpAFrame();
4028}
4029
4030
4031void MacroAssembler::IllegalOperation(int num_arguments) {
4032  if (num_arguments > 0) {
4033    addiu(sp, sp, num_arguments * kPointerSize);
4034  }
4035  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4036}
4037
4038
4039void MacroAssembler::IndexFromHash(Register hash,
4040                                   Register index) {
  // If the hash field contains an array index, pick it out. The assert
  // checks that the constants for the maximum number of digits of an array
  // index cached in the hash field and the number of bits reserved for it
  // do not conflict.
4045  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
4046         (1 << String::kArrayIndexValueBits));
4047  // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
4048  // the low kHashShift bits.
4049  STATIC_ASSERT(kSmiTag == 0);
4050  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4051  sll(index, hash, kSmiTagSize);
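  // Illustrative example: if the array-index field of the hash holds 7, Ext
  // leaves 7 in 'hash' and the sll produces the Smi 14 (7 << kSmiTagSize)
  // in 'index'.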
4052}
4053
4054
4055void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4056                                               FPURegister result,
4057                                               Register scratch1,
4058                                               Register scratch2,
4059                                               Register heap_number_map,
4060                                               Label* not_number,
4061                                               ObjectToDoubleFlags flags) {
4062  Label done;
4063  if ((flags & OBJECT_NOT_SMI) == 0) {
4064    Label not_smi;
4065    JumpIfNotSmi(object, &not_smi);
4066    // Remove smi tag and convert to double.
4067    sra(scratch1, object, kSmiTagSize);
4068    mtc1(scratch1, result);
4069    cvt_d_w(result, result);
4070    Branch(&done);
4071    bind(&not_smi);
4072  }
4073  // Check for heap number and load double value from it.
4074  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4075  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4076
4077  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4078    // If exponent is all ones the number is either a NaN or +/-Infinity.
4079    Register exponent = scratch1;
4080    Register mask_reg = scratch2;
4081    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4082    li(mask_reg, HeapNumber::kExponentMask);
4083
4084    And(exponent, exponent, mask_reg);
4085    Branch(not_number, eq, exponent, Operand(mask_reg));
4086  }
4087  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4088  bind(&done);
4089}
4090
4091
4092void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4093                                            FPURegister value,
4094                                            Register scratch1) {
4095  sra(scratch1, smi, kSmiTagSize);
4096  mtc1(scratch1, value);
4097  cvt_d_w(value, value);
4098}
4099
4100
4101void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4102                                             Register left,
4103                                             Register right,
4104                                             Register overflow_dst,
4105                                             Register scratch) {
4106  ASSERT(!dst.is(overflow_dst));
4107  ASSERT(!dst.is(scratch));
4108  ASSERT(!overflow_dst.is(scratch));
4109  ASSERT(!overflow_dst.is(left));
4110  ASSERT(!overflow_dst.is(right));
4111
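  // All three branches below compute the same thing: signed addition
  // overflows exactly when both operands have the same sign and the result
  // has the opposite sign, i.e. when (dst ^ left) & (dst ^ right) has its
  // sign bit set. Callers typically test for overflow by branching on
  // overflow_dst being negative.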
4112  if (left.is(right) && dst.is(left)) {
4113    ASSERT(!dst.is(t9));
4114    ASSERT(!scratch.is(t9));
4115    ASSERT(!left.is(t9));
4116    ASSERT(!right.is(t9));
4117    ASSERT(!overflow_dst.is(t9));
4118    mov(t9, right);
4119    right = t9;
4120  }
4121
4122  if (dst.is(left)) {
4123    mov(scratch, left);  // Preserve left.
4124    addu(dst, left, right);  // Left is overwritten.
4125    xor_(scratch, dst, scratch);  // Original left.
4126    xor_(overflow_dst, dst, right);
4127    and_(overflow_dst, overflow_dst, scratch);
4128  } else if (dst.is(right)) {
4129    mov(scratch, right);  // Preserve right.
4130    addu(dst, left, right);  // Right is overwritten.
4131    xor_(scratch, dst, scratch);  // Original right.
4132    xor_(overflow_dst, dst, left);
4133    and_(overflow_dst, overflow_dst, scratch);
4134  } else {
4135    addu(dst, left, right);
4136    xor_(overflow_dst, dst, left);
4137    xor_(scratch, dst, right);
4138    and_(overflow_dst, scratch, overflow_dst);
4139  }
4140}
4141
4142
4143void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4144                                             Register left,
4145                                             Register right,
4146                                             Register overflow_dst,
4147                                             Register scratch) {
4148  ASSERT(!dst.is(overflow_dst));
4149  ASSERT(!dst.is(scratch));
4150  ASSERT(!overflow_dst.is(scratch));
4151  ASSERT(!overflow_dst.is(left));
4152  ASSERT(!overflow_dst.is(right));
4153  ASSERT(!scratch.is(left));
4154  ASSERT(!scratch.is(right));
4155
4156  // This happens with some crankshaft code. Since Subu works fine if
4157  // left == right, let's not make that restriction here.
4158  if (left.is(right)) {
4159    mov(dst, zero_reg);
4160    mov(overflow_dst, zero_reg);
4161    return;
4162  }
4163
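  // Each branch below sets the sign bit of overflow_dst exactly when the
  // subtraction overflowed: left - right overflows when left and right have
  // different signs and the result's sign differs from left's, i.e. when
  // (dst ^ left) & (left ^ right) is negative.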
4164  if (dst.is(left)) {
4165    mov(scratch, left);  // Preserve left.
4166    subu(dst, left, right);  // Left is overwritten.
4167    xor_(overflow_dst, dst, scratch);  // scratch is original left.
4168    xor_(scratch, scratch, right);  // scratch is original left.
4169    and_(overflow_dst, scratch, overflow_dst);
4170  } else if (dst.is(right)) {
4171    mov(scratch, right);  // Preserve right.
4172    subu(dst, left, right);  // Right is overwritten.
4173    xor_(overflow_dst, dst, left);
4174    xor_(scratch, left, scratch);  // Original right.
4175    and_(overflow_dst, scratch, overflow_dst);
4176  } else {
4177    subu(dst, left, right);
4178    xor_(overflow_dst, dst, left);
4179    xor_(scratch, left, right);
4180    and_(overflow_dst, scratch, overflow_dst);
4181  }
4182}
4183
4184
4185void MacroAssembler::CallRuntime(const Runtime::Function* f,
4186                                 int num_arguments,
4187                                 SaveFPRegsMode save_doubles) {
4188  // All parameters are on the stack. v0 has the return value after call.
4189
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
4193  if (f->nargs >= 0 && f->nargs != num_arguments) {
4194    IllegalOperation(num_arguments);
4195    return;
4196  }
4197
4198  // TODO(1236192): Most runtime routines don't need the number of
4199  // arguments passed in because it is constant. At some point we
4200  // should remove this need and make the runtime routine entry code
4201  // smarter.
4202  PrepareCEntryArgs(num_arguments);
4203  PrepareCEntryFunction(ExternalReference(f, isolate()));
4204  CEntryStub stub(1, save_doubles);
4205  CallStub(&stub);
4206}
4207
4208
4209void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4210                                           int num_arguments,
4211                                           BranchDelaySlot bd) {
4212  PrepareCEntryArgs(num_arguments);
4213  PrepareCEntryFunction(ext);
4214
4215  CEntryStub stub(1);
4216  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4217}
4218
4219
4220void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4221                                               int num_arguments,
4222                                               int result_size) {
4223  // TODO(1236192): Most runtime routines don't need the number of
4224  // arguments passed in because it is constant. At some point we
4225  // should remove this need and make the runtime routine entry code
4226  // smarter.
4227  PrepareCEntryArgs(num_arguments);
4228  JumpToExternalReference(ext);
4229}
4230
4231
4232void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4233                                     int num_arguments,
4234                                     int result_size) {
4235  TailCallExternalReference(ExternalReference(fid, isolate()),
4236                            num_arguments,
4237                            result_size);
4238}
4239
4240
4241void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4242                                             BranchDelaySlot bd) {
4243  PrepareCEntryFunction(builtin);
4244  CEntryStub stub(1);
4245  Jump(stub.GetCode(isolate()),
4246       RelocInfo::CODE_TARGET,
4247       al,
4248       zero_reg,
4249       Operand(zero_reg),
4250       bd);
4251}
4252
4253
4254void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4255                                   InvokeFlag flag,
4256                                   const CallWrapper& call_wrapper) {
4257  // You can't call a builtin without a valid frame.
4258  ASSERT(flag == JUMP_FUNCTION || has_frame());
4259
4260  GetBuiltinEntry(t9, id);
4261  if (flag == CALL_FUNCTION) {
4262    call_wrapper.BeforeCall(CallSize(t9));
4263    SetCallKind(t1, CALL_AS_METHOD);
4264    Call(t9);
4265    call_wrapper.AfterCall();
4266  } else {
4267    ASSERT(flag == JUMP_FUNCTION);
4268    SetCallKind(t1, CALL_AS_METHOD);
4269    Jump(t9);
4270  }
4271}
4272
4273
4274void MacroAssembler::GetBuiltinFunction(Register target,
4275                                        Builtins::JavaScript id) {
4276  // Load the builtins object into target register.
4277  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4278  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4279  // Load the JavaScript builtin function from the builtins object.
4280  lw(target, FieldMemOperand(target,
4281                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4282}
4283
4284
4285void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4286  ASSERT(!target.is(a1));
4287  GetBuiltinFunction(a1, id);
4288  // Load the code entry point from the builtins object.
4289  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4290}
4291
4292
4293void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4294                                Register scratch1, Register scratch2) {
4295  if (FLAG_native_code_counters && counter->Enabled()) {
4296    li(scratch1, Operand(value));
4297    li(scratch2, Operand(ExternalReference(counter)));
4298    sw(scratch1, MemOperand(scratch2));
4299  }
4300}
4301
4302
4303void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4304                                      Register scratch1, Register scratch2) {
4305  ASSERT(value > 0);
4306  if (FLAG_native_code_counters && counter->Enabled()) {
4307    li(scratch2, Operand(ExternalReference(counter)));
4308    lw(scratch1, MemOperand(scratch2));
4309    Addu(scratch1, scratch1, Operand(value));
4310    sw(scratch1, MemOperand(scratch2));
4311  }
4312}
4313
4314
4315void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4316                                      Register scratch1, Register scratch2) {
4317  ASSERT(value > 0);
4318  if (FLAG_native_code_counters && counter->Enabled()) {
4319    li(scratch2, Operand(ExternalReference(counter)));
4320    lw(scratch1, MemOperand(scratch2));
4321    Subu(scratch1, scratch1, Operand(value));
4322    sw(scratch1, MemOperand(scratch2));
4323  }
4324}
4325
4326
4327// -----------------------------------------------------------------------------
4328// Debugging.
4329
4330void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4331                            Register rs, Operand rt) {
4332  if (emit_debug_code())
4333    Check(cc, reason, rs, rt);
4334}
4335
4336
4337void MacroAssembler::AssertFastElements(Register elements) {
4338  if (emit_debug_code()) {
4339    ASSERT(!elements.is(at));
4340    Label ok;
4341    push(elements);
4342    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4343    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4344    Branch(&ok, eq, elements, Operand(at));
4345    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4346    Branch(&ok, eq, elements, Operand(at));
4347    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4348    Branch(&ok, eq, elements, Operand(at));
4349    Abort(kJSObjectWithFastElementsMapHasSlowElements);
4350    bind(&ok);
4351    pop(elements);
4352  }
4353}
4354
4355
4356void MacroAssembler::Check(Condition cc, BailoutReason reason,
4357                           Register rs, Operand rt) {
4358  Label L;
4359  Branch(&L, cc, rs, rt);
4360  Abort(reason);
4361  // Will not return here.
4362  bind(&L);
4363}
4364
4365
4366void MacroAssembler::Abort(BailoutReason reason) {
4367  Label abort_start;
4368  bind(&abort_start);
  // We want to pass the msg string like a smi to avoid GC problems;
  // however, msg is not guaranteed to be properly aligned. Instead, we pass
  // an aligned pointer that is a proper v8 smi, and also pass the alignment
  // difference from the real pointer as a smi.
4374  const char* msg = GetBailoutReason(reason);
4375  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4376  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4377  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
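  // With kSmiTag == 0 and kSmiTagMask == 1, p0 is just msg rounded down to
  // an even address (and therefore a valid Smi bit pattern), while p1 - p0
  // is 0 or 1, so the runtime can add the two pieces back together to
  // recover the original string pointer.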
4378#ifdef DEBUG
4379  if (msg != NULL) {
4380    RecordComment("Abort message: ");
4381    RecordComment(msg);
4382  }
4383
4384  if (FLAG_trap_on_abort) {
4385    stop(msg);
4386    return;
4387  }
4388#endif
4389
4390  li(a0, Operand(p0));
4391  push(a0);
4392  li(a0, Operand(Smi::FromInt(p1 - p0)));
4393  push(a0);
4394  // Disable stub call restrictions to always allow calls to abort.
4395  if (!has_frame_) {
4396    // We don't actually want to generate a pile of code for this, so just
4397    // claim there is a stack frame, without generating one.
4398    FrameScope scope(this, StackFrame::NONE);
4399    CallRuntime(Runtime::kAbort, 2);
4400  } else {
4401    CallRuntime(Runtime::kAbort, 2);
4402  }
4403  // Will not return here.
4404  if (is_trampoline_pool_blocked()) {
4405    // If the calling code cares about the exact number of
4406    // instructions generated, we insert padding here to keep the size
4407    // of the Abort macro constant.
4408    // Currently in debug mode with debug_code enabled the number of
4409    // generated instructions is 14, so we use this as a maximum value.
4410    static const int kExpectedAbortInstructions = 14;
4411    int abort_instructions = InstructionsGeneratedSince(&abort_start);
4412    ASSERT(abort_instructions <= kExpectedAbortInstructions);
4413    while (abort_instructions++ < kExpectedAbortInstructions) {
4414      nop();
4415    }
4416  }
4417}
4418
4419
4420void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4421  if (context_chain_length > 0) {
4422    // Move up the chain of contexts to the context containing the slot.
4423    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4424    for (int i = 1; i < context_chain_length; i++) {
4425      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4426    }
4427  } else {
    // Slot is in the current function context. Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
4431    Move(dst, cp);
4432  }
4433}
4434
4435
4436void MacroAssembler::LoadTransitionedArrayMapConditional(
4437    ElementsKind expected_kind,
4438    ElementsKind transitioned_kind,
4439    Register map_in_out,
4440    Register scratch,
4441    Label* no_map_match) {
4442  // Load the global or builtins object from the current context.
4443  lw(scratch,
4444     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4445  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4446
4447  // Check that the function's map is the same as the expected cached map.
4448  lw(scratch,
4449     MemOperand(scratch,
4450                Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
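  // The JS_ARRAY_MAPS_INDEX slot holds a FixedArray of maps indexed by
  // ElementsKind, so the cached map for a kind lives at
  // kind * kPointerSize + FixedArrayBase::kHeaderSize within that array.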
4451  size_t offset = expected_kind * kPointerSize +
4452      FixedArrayBase::kHeaderSize;
4453  lw(at, FieldMemOperand(scratch, offset));
4454  Branch(no_map_match, ne, map_in_out, Operand(at));
4455
4456  // Use the transitioned cached map.
4457  offset = transitioned_kind * kPointerSize +
4458      FixedArrayBase::kHeaderSize;
4459  lw(map_in_out, FieldMemOperand(scratch, offset));
4460}
4461
4462
4463void MacroAssembler::LoadInitialArrayMap(
4464    Register function_in, Register scratch,
4465    Register map_out, bool can_have_holes) {
4466  ASSERT(!function_in.is(map_out));
4467  Label done;
4468  lw(map_out, FieldMemOperand(function_in,
4469                              JSFunction::kPrototypeOrInitialMapOffset));
4470  if (!FLAG_smi_only_arrays) {
4471    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4472    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4473                                        kind,
4474                                        map_out,
4475                                        scratch,
4476                                        &done);
4477  } else if (can_have_holes) {
4478    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4479                                        FAST_HOLEY_SMI_ELEMENTS,
4480                                        map_out,
4481                                        scratch,
4482                                        &done);
4483  }
4484  bind(&done);
4485}
4486
4487
4488void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4489  // Load the global or builtins object from the current context.
4490  lw(function,
4491     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4492  // Load the native context from the global or builtins object.
4493  lw(function, FieldMemOperand(function,
4494                               GlobalObject::kNativeContextOffset));
4495  // Load the function from the native context.
4496  lw(function, MemOperand(function, Context::SlotOffset(index)));
4497}
4498
4499
4500void MacroAssembler::LoadArrayFunction(Register function) {
4501  // Load the global or builtins object from the current context.
4502  lw(function,
4503     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4504  // Load the global context from the global or builtins object.
4505  lw(function,
4506     FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
  // Load the array function from the global context.
4508  lw(function,
4509     MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
4510}
4511
4512
4513void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4514                                                  Register map,
4515                                                  Register scratch) {
4516  // Load the initial map. The global functions all have initial maps.
4517  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4518  if (emit_debug_code()) {
4519    Label ok, fail;
4520    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4521    Branch(&ok);
4522    bind(&fail);
4523    Abort(kGlobalFunctionsMustHaveInitialMap);
4524    bind(&ok);
4525  }
4526}
4527
4528
4529void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
4530  if (frame_mode == BUILD_STUB_FRAME) {
4531    Push(ra, fp, cp);
4532    Push(Smi::FromInt(StackFrame::STUB));
4533    // Adjust FP to point to saved FP.
4534    Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4535  } else {
    PredictableCodeSizeScope predictable_code_size_scope(
        this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
4538    // The following three instructions must remain together and unmodified
4539    // for code aging to work properly.
4540    if (isolate()->IsCodePreAgingActive()) {
4541      // Pre-age the code.
4542      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4543      nop(Assembler::CODE_AGE_MARKER_NOP);
4544      // Load the stub address to t9 and call it,
4545      // GetCodeAgeAndParity() extracts the stub address from this instruction.
4546      li(t9,
4547         Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
4548         CONSTANT_SIZE);
4549      nop();  // Prevent jalr to jal optimization.
4550      jalr(t9, a0);
4551      nop();  // Branch delay slot nop.
4552      nop();  // Pad the empty space.
4553    } else {
4554      Push(ra, fp, cp, a1);
4555      nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4556      // Adjust fp to point to caller's fp.
4557      Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4558    }
4559  }
4560}
4561
4562
4563void MacroAssembler::EnterFrame(StackFrame::Type type) {
4564  addiu(sp, sp, -5 * kPointerSize);
4565  li(t8, Operand(Smi::FromInt(type)));
4566  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4567  sw(ra, MemOperand(sp, 4 * kPointerSize));
4568  sw(fp, MemOperand(sp, 3 * kPointerSize));
4569  sw(cp, MemOperand(sp, 2 * kPointerSize));
4570  sw(t8, MemOperand(sp, 1 * kPointerSize));
4571  sw(t9, MemOperand(sp, 0 * kPointerSize));
4572  // Adjust FP to point to saved FP.
4573  Addu(fp, sp,
4574       Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4575}
4576
4577
4578void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4579  mov(sp, fp);
4580  lw(fp, MemOperand(sp, 0 * kPointerSize));
4581  lw(ra, MemOperand(sp, 1 * kPointerSize));
4582  addiu(sp, sp, 2 * kPointerSize);
4583}
4584
4585
4586void MacroAssembler::EnterExitFrame(bool save_doubles,
4587                                    int stack_space) {
4588  // Set up the frame structure on the stack.
4589  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4590  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4591  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4592
4593  // This is how the stack will look:
4594  // fp + 2 (==kCallerSPDisplacement) - old stack's end
4595  // [fp + 1 (==kCallerPCOffset)] - saved old ra
4596  // [fp + 0 (==kCallerFPOffset)] - saved old fp
4597  // [fp - 1 (==kSPOffset)] - sp of the called function
4598  // [fp - 2 (==kCodeOffset)] - CodeObject
4599  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4600  //   new stack (will contain saved ra)
4601
4602  // Save registers.
4603  addiu(sp, sp, -4 * kPointerSize);
4604  sw(ra, MemOperand(sp, 3 * kPointerSize));
4605  sw(fp, MemOperand(sp, 2 * kPointerSize));
4606  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
4607
4608  if (emit_debug_code()) {
4609    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4610  }
4611
4612  // Accessed from ExitFrame::code_slot.
4613  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4614  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4615
4616  // Save the frame pointer and the context in top.
4617  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4618  sw(fp, MemOperand(t8));
4619  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4620  sw(cp, MemOperand(t8));
4621
4622  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4623  if (save_doubles) {
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4625    ASSERT(kDoubleSize == frame_alignment);
4626    if (frame_alignment > 0) {
4627      ASSERT(IsPowerOf2(frame_alignment));
4628      And(sp, sp, Operand(-frame_alignment));  // Align stack.
4629    }
4630    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4631    Subu(sp, sp, Operand(space));
4632    // Remember: we only need to save every 2nd double FPU value.
4633    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4634      FPURegister reg = FPURegister::from_code(i);
4635      sdc1(reg, MemOperand(sp, i * kDoubleSize));
4636    }
4637  }
4638
  // Reserve space for the return address, the stack space and an optional
  // slot (used by the DirectCEntryStub to hold the return value if a struct
  // is returned), and align the frame in preparation for calling the
  // runtime function.
4642  ASSERT(stack_space >= 0);
4643  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4644  if (frame_alignment > 0) {
4645    ASSERT(IsPowerOf2(frame_alignment));
4646    And(sp, sp, Operand(-frame_alignment));  // Align stack.
4647  }
4648
4649  // Set the exit frame sp value to point just before the return address
4650  // location.
4651  addiu(at, sp, kPointerSize);
4652  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4653}
4654
4655
4656void MacroAssembler::LeaveExitFrame(bool save_doubles,
4657                                    Register argument_count,
4658                                    bool restore_context,
4659                                    bool do_return) {
4660  // Optionally restore all double registers.
4661  if (save_doubles) {
4662    // Remember: we only need to restore every 2nd double FPU value.
4663    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4664    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4665      FPURegister reg = FPURegister::from_code(i);
4666      ldc1(reg, MemOperand(t8, i  * kDoubleSize + kPointerSize));
4667    }
4668  }
4669
4670  // Clear top frame.
4671  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4672  sw(zero_reg, MemOperand(t8));
4673
4674  // Restore current context from top and clear it in debug mode.
4675  if (restore_context) {
4676    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4677    lw(cp, MemOperand(t8));
4678  }
4679#ifdef DEBUG
4680  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4681  sw(a3, MemOperand(t8));
4682#endif
4683
4684  // Pop the arguments, restore registers, and return.
4685  mov(sp, fp);  // Respect ABI stack constraint.
4686  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4687  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4688
4689  if (argument_count.is_valid()) {
4690    sll(t8, argument_count, kPointerSizeLog2);
4691    addu(sp, sp, t8);
4692  }
4693
4694  if (do_return) {
4695    Ret(USE_DELAY_SLOT);
4696    // If returning, the instruction in the delay slot will be the addiu below.
4697  }
4698  addiu(sp, sp, 8);
4699}
4700
4701
4702void MacroAssembler::InitializeNewString(Register string,
4703                                         Register length,
4704                                         Heap::RootListIndex map_index,
4705                                         Register scratch1,
4706                                         Register scratch2) {
4707  sll(scratch1, length, kSmiTagSize);
4708  LoadRoot(scratch2, map_index);
4709  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4710  li(scratch1, Operand(String::kEmptyHashField));
4711  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4712  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4713}
4714
4715
4716int MacroAssembler::ActivationFrameAlignment() {
4717#if V8_HOST_ARCH_MIPS
4718  // Running on the real platform. Use the alignment as mandated by the local
4719  // environment.
4720  // Note: This will break if we ever start generating snapshots on one Mips
4721  // platform for another Mips platform with a different alignment.
4722  return OS::ActivationFrameAlignment();
4723#else  // V8_HOST_ARCH_MIPS
4724  // If we are using the simulator then we should always align to the expected
4725  // alignment. As the simulator is used to generate snapshots we do not know
4726  // if the target platform will need alignment, so this is controlled from a
4727  // flag.
4728  return FLAG_sim_stack_alignment;
4729#endif  // V8_HOST_ARCH_MIPS
4730}
4731
4732
4733void MacroAssembler::AssertStackIsAligned() {
4734  if (emit_debug_code()) {
    const int frame_alignment = ActivationFrameAlignment();
    const int frame_alignment_mask = frame_alignment - 1;

    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      ASSERT(IsPowerOf2(frame_alignment));
      andi(at, sp, frame_alignment_mask);
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
      // Don't use Check here, as it will call Runtime_Abort and re-enter here.
      stop("Unexpected stack alignment");
      bind(&alignment_as_expected);
    }
  }
4748}
4749
4750
4751void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4752    Register reg,
4753    Register scratch,
4754    Label* not_power_of_two_or_zero) {
4755  Subu(scratch, reg, Operand(1));
4756  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4757         scratch, Operand(zero_reg));
4758  and_(at, scratch, reg);  // In the delay slot.
4759  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4760}
4761
4762
4763void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4764  ASSERT(!reg.is(overflow));
4765  mov(overflow, reg);  // Save original value.
4766  SmiTag(reg);
4767  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
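  // Illustrative example: tagging 0x40000000 yields 0x80000000; the xor of
  // the two is 0xC0000000, whose sign bit flags the overflow. Values that
  // fit in 31 bits leave the sign bit of 'overflow' clear.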
4768}
4769
4770
4771void MacroAssembler::SmiTagCheckOverflow(Register dst,
4772                                         Register src,
4773                                         Register overflow) {
4774  if (dst.is(src)) {
4775    // Fall back to slower case.
4776    SmiTagCheckOverflow(dst, overflow);
4777  } else {
4778    ASSERT(!dst.is(src));
4779    ASSERT(!dst.is(overflow));
4780    ASSERT(!src.is(overflow));
4781    SmiTag(dst, src);
4782    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
4783  }
4784}
4785
4786
4787void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4788                                       Register src,
4789                                       Label* smi_case) {
4790  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4791  SmiUntag(dst, src);
4792}
4793
4794
4795void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4796                                          Register src,
4797                                          Label* non_smi_case) {
4798  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4799  SmiUntag(dst, src);
4800}
4801
4802void MacroAssembler::JumpIfSmi(Register value,
4803                               Label* smi_label,
4804                               Register scratch,
4805                               BranchDelaySlot bd) {
4806  ASSERT_EQ(0, kSmiTag);
4807  andi(scratch, value, kSmiTagMask);
4808  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4809}
4810
4811void MacroAssembler::JumpIfNotSmi(Register value,
4812                                  Label* not_smi_label,
4813                                  Register scratch,
4814                                  BranchDelaySlot bd) {
4815  ASSERT_EQ(0, kSmiTag);
4816  andi(scratch, value, kSmiTagMask);
4817  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4818}
4819
4820
4821void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4822                                      Register reg2,
4823                                      Label* on_not_both_smi) {
4824  STATIC_ASSERT(kSmiTag == 0);
4825  ASSERT_EQ(1, kSmiTagMask);
4826  or_(at, reg1, reg2);
4827  JumpIfNotSmi(at, on_not_both_smi);
4828}
4829
4830
4831void MacroAssembler::JumpIfEitherSmi(Register reg1,
4832                                     Register reg2,
4833                                     Label* on_either_smi) {
4834  STATIC_ASSERT(kSmiTag == 0);
4835  ASSERT_EQ(1, kSmiTagMask);
  // If either input is a smi (tag bit 0), the AND of the two also has tag
  // bit 0 and therefore looks like a smi.
4837  and_(at, reg1, reg2);
4838  JumpIfSmi(at, on_either_smi);
4839}
4840
4841
4842void MacroAssembler::AssertNotSmi(Register object) {
4843  if (emit_debug_code()) {
4844    STATIC_ASSERT(kSmiTag == 0);
4845    andi(at, object, kSmiTagMask);
4846    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4847  }
4848}
4849
4850
4851void MacroAssembler::AssertSmi(Register object) {
4852  if (emit_debug_code()) {
4853    STATIC_ASSERT(kSmiTag == 0);
4854    andi(at, object, kSmiTagMask);
4855    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4856  }
4857}
4858
4859
4860void MacroAssembler::AssertString(Register object) {
4861  if (emit_debug_code()) {
4862    STATIC_ASSERT(kSmiTag == 0);
4863    SmiTst(object, t0);
4864    Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
4865    push(object);
4866    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4867    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4868    Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
4869    pop(object);
4870  }
4871}
4872
4873
4874void MacroAssembler::AssertName(Register object) {
4875  if (emit_debug_code()) {
4876    STATIC_ASSERT(kSmiTag == 0);
4877    SmiTst(object, t0);
4878    Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
4879    push(object);
4880    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4881    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4882    Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
4883    pop(object);
4884  }
4885}
4886
4887
4888void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4889  if (emit_debug_code()) {
4890    ASSERT(!reg.is(at));
4891    LoadRoot(at, index);
4892    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4893  }
4894}
4895
4896
4897void MacroAssembler::JumpIfNotHeapNumber(Register object,
4898                                         Register heap_number_map,
4899                                         Register scratch,
4900                                         Label* on_not_heap_number) {
4901  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4902  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4903  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4904}
4905
4906
4907void MacroAssembler::LookupNumberStringCache(Register object,
4908                                             Register result,
4909                                             Register scratch1,
4910                                             Register scratch2,
4911                                             Register scratch3,
4912                                             Label* not_found) {
  // Register usage: 'result' is used as a temporary until the cache hit is
  // loaded at the end.
4914  Register number_string_cache = result;
4915  Register mask = scratch3;
4916
4917  // Load the number string cache.
4918  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4919
4920  // Make the hash mask from the length of the number string cache. It
4921  // contains two elements (number and string) for each cache entry.
4922  lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4923  // Divide length by two (length is a smi).
4924  sra(mask, mask, kSmiTagSize + 1);
4925  Addu(mask, mask, -1);  // Make mask.
4926
4927  // Calculate the entry in the number string cache. The hash value in the
4928  // number string cache for smis is just the smi value, and the hash for
4929  // doubles is the xor of the upper and lower words. See
4930  // Heap::GetNumberStringCache.
4931  Label is_smi;
4932  Label load_result_from_cache;
4933  JumpIfSmi(object, &is_smi);
4934  CheckMap(object,
4935           scratch1,
4936           Heap::kHeapNumberMapRootIndex,
4937           not_found,
4938           DONT_DO_SMI_CHECK);
4939
4940  STATIC_ASSERT(8 == kDoubleSize);
4941  Addu(scratch1,
4942       object,
4943       Operand(HeapNumber::kValueOffset - kHeapObjectTag));
4944  lw(scratch2, MemOperand(scratch1, kPointerSize));
4945  lw(scratch1, MemOperand(scratch1, 0));
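  // scratch1 and scratch2 now hold the two 32-bit halves of the double;
  // xor-ing them and applying the mask yields the cache index.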
4946  Xor(scratch1, scratch1, Operand(scratch2));
4947  And(scratch1, scratch1, Operand(mask));
4948
4949  // Calculate address of entry in string cache: each entry consists
4950  // of two pointer sized fields.
4951  sll(scratch1, scratch1, kPointerSizeLog2 + 1);
4952  Addu(scratch1, number_string_cache, scratch1);
4953
4954  Register probe = mask;
4955  lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
4956  JumpIfSmi(probe, not_found);
4957  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
4958  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
4959  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
4960  Branch(not_found);
4961
4962  bind(&is_smi);
4963  Register scratch = scratch1;
4964  sra(scratch, object, 1);   // Shift away the tag.
4965  And(scratch, mask, Operand(scratch));
4966
4967  // Calculate address of entry in string cache: each entry consists
4968  // of two pointer sized fields.
4969  sll(scratch, scratch, kPointerSizeLog2 + 1);
4970  Addu(scratch, number_string_cache, scratch);
4971
4972  // Check if the entry is the smi we are looking for.
4973  lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
4974  Branch(not_found, ne, object, Operand(probe));
4975
4976  // Get the result from the cache.
4977  bind(&load_result_from_cache);
4978  lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
4979
4980  IncrementCounter(isolate()->counters()->number_to_string_native(),
4981                   1,
4982                   scratch1,
4983                   scratch2);
4984}
4985
4986
4987void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4988    Register first,
4989    Register second,
4990    Register scratch1,
4991    Register scratch2,
4992    Label* failure) {
4993  // Test that both first and second are sequential ASCII strings.
4994  // Assume that they are non-smis.
4995  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4996  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4997  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4998  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4999
5000  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
5001                                               scratch2,
5002                                               scratch1,
5003                                               scratch2,
5004                                               failure);
5005}
5006
5007
5008void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
5009                                                         Register second,
5010                                                         Register scratch1,
5011                                                         Register scratch2,
5012                                                         Label* failure) {
5013  // Check that neither is a smi.
5014  STATIC_ASSERT(kSmiTag == 0);
5015  And(scratch1, first, Operand(second));
5016  JumpIfSmi(scratch1, failure);
5017  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
5018                                             second,
5019                                             scratch1,
5020                                             scratch2,
5021                                             failure);
5022}
5023
5024
5025void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
5026    Register first,
5027    Register second,
5028    Register scratch1,
5029    Register scratch2,
5030    Label* failure) {
5031  const int kFlatAsciiStringMask =
5032      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5033  const int kFlatAsciiStringTag =
5034      kStringTag | kOneByteStringTag | kSeqStringTag;
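  // For a sequential one-byte string the masked instance-type bits equal
  // kFlatAsciiStringTag exactly: the not-a-string bit is clear, the
  // encoding bit marks one-byte data and the representation bits mark it
  // sequential.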
5035  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
5036  andi(scratch1, first, kFlatAsciiStringMask);
5037  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
5038  andi(scratch2, second, kFlatAsciiStringMask);
5039  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
5040}
5041
5042
5043void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
5044                                                            Register scratch,
5045                                                            Label* failure) {
5046  const int kFlatAsciiStringMask =
5047      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5048  const int kFlatAsciiStringTag =
5049      kStringTag | kOneByteStringTag | kSeqStringTag;
5050  And(scratch, type, Operand(kFlatAsciiStringMask));
5051  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
5052}
5053
5054
5055static const int kRegisterPassedArguments = 4;
5056
5057int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5058                                              int num_double_arguments) {
5059  int stack_passed_words = 0;
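  // Each double argument occupies two argument words, so fold doubles into
  // the general-purpose argument count before checking the register limit.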
5060  num_reg_arguments += 2 * num_double_arguments;
5061
5062  // Up to four simple arguments are passed in registers a0..a3.
5063  if (num_reg_arguments > kRegisterPassedArguments) {
5064    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5065  }
5066  stack_passed_words += kCArgSlotCount;
5067  return stack_passed_words;
5068}
5069
5070
5071void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5072                                               Register index,
5073                                               Register value,
5074                                               Register scratch,
5075                                               uint32_t encoding_mask) {
  SmiTst(string, at);
5078  ThrowIf(eq, kNonObject, at, Operand(zero_reg));
5079
5080  lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
5081  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5082
5083  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5084  li(scratch, Operand(encoding_mask));
5085  ThrowIf(ne, kUnexpectedStringType, at, Operand(scratch));
5086
  // The index is assumed to come in untagged. Tag it to compare against the
  // string length without using a temporary register; it is untagged again
  // at the end of this function.
5090  Label index_tag_ok, index_tag_bad;
5091  TrySmiTag(index, scratch, &index_tag_bad);
5092  Branch(&index_tag_ok);
5093  bind(&index_tag_bad);
5094  Throw(kIndexIsTooLarge);
5095  bind(&index_tag_ok);
5096
5097  lw(at, FieldMemOperand(string, String::kLengthOffset));
5098  ThrowIf(ge, kIndexIsTooLarge, index, Operand(at));
5099
5100  ASSERT(Smi::FromInt(0) == 0);
5101  ThrowIf(lt, kIndexIsNegative, index, Operand(zero_reg));
5102
5103  SmiUntag(index, index);
5104}
5105
5106
5107void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5108                                          int num_double_arguments,
5109                                          Register scratch) {
5110  int frame_alignment = ActivationFrameAlignment();
5111
5112  // Up to four simple arguments are passed in registers a0..a3.
5113  // Those four arguments must have reserved argument slots on the stack for
5114  // mips, even though those argument slots are not normally used.
5115  // Remaining arguments are pushed on the stack, above (higher address than)
5116  // the argument slots.
5117  int stack_passed_arguments = CalculateStackPassedWords(
5118      num_reg_arguments, num_double_arguments);
5119  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for the stack-passed argument
    // words plus the original value of sp.
5122    mov(scratch, sp);
5123    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5124    ASSERT(IsPowerOf2(frame_alignment));
5125    And(sp, sp, Operand(-frame_alignment));
5126    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
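    // The saved sp is reloaded from this slot in CallCFunctionHelper after
    // the call returns.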
5127  } else {
5128    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5129  }
5130}
5131
5132
5133void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5134                                          Register scratch) {
5135  PrepareCallCFunction(num_reg_arguments, 0, scratch);
5136}
5137
5138
5139void MacroAssembler::CallCFunction(ExternalReference function,
5140                                   int num_reg_arguments,
5141                                   int num_double_arguments) {
5142  li(t8, Operand(function));
5143  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5144}
5145
5146
5147void MacroAssembler::CallCFunction(Register function,
5148                                   int num_reg_arguments,
5149                                   int num_double_arguments) {
5150  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5151}
5152
5153
5154void MacroAssembler::CallCFunction(ExternalReference function,
5155                                   int num_arguments) {
5156  CallCFunction(function, num_arguments, 0);
5157}
5158
5159
5160void MacroAssembler::CallCFunction(Register function,
5161                                   int num_arguments) {
5162  CallCFunction(function, num_arguments, 0);
5163}
5164
5165
5166void MacroAssembler::CallCFunctionHelper(Register function,
5167                                         int num_reg_arguments,
5168                                         int num_double_arguments) {
5169  ASSERT(has_frame());
5170  // Make sure that the stack is aligned before calling a C function unless
5171  // running in the simulator. The simulator has its own alignment check which
5172  // provides more information.
  // The argument slots are presumed to have been set up by
5174  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5175
5176#if V8_HOST_ARCH_MIPS
5177  if (emit_debug_code()) {
5178    int frame_alignment = OS::ActivationFrameAlignment();
5179    int frame_alignment_mask = frame_alignment - 1;
5180    if (frame_alignment > kPointerSize) {
5181      ASSERT(IsPowerOf2(frame_alignment));
5182      Label alignment_as_expected;
5183      And(at, sp, Operand(frame_alignment_mask));
5184      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5185      // Don't use Check here, as it will call Runtime_Abort possibly
5186      // re-entering here.
5187      stop("Unexpected alignment in CallCFunction");
5188      bind(&alignment_as_expected);
5189    }
5190  }
5191#endif  // V8_HOST_ARCH_MIPS
5192
5193  // Just call directly. The function called cannot cause a GC, or
5194  // allow preemption, so the return address in the link register
5195  // stays correct.
5196
5197  if (!function.is(t9)) {
5198    mov(t9, function);
5199    function = t9;
5200  }
5201
5202  Call(function);
5203
5204  int stack_passed_arguments = CalculateStackPassedWords(
5205      num_reg_arguments, num_double_arguments);
5206
5207  if (OS::ActivationFrameAlignment() > kPointerSize) {
5208    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5209  } else {
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5211  }
5212}
5213
5214
5215#undef BRANCH_ARGS_CHECK
5216
5217
5218void MacroAssembler::PatchRelocatedValue(Register li_location,
5219                                         Register scratch,
5220                                         Register new_value) {
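  // li() materializes a 32-bit constant as a lui (upper 16 bits) followed
  // by an ori (lower 16 bits). Patch both halves in place, then flush the
  // instruction cache so the patched pair is picked up.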
5221  lw(scratch, MemOperand(li_location));
5222  // At this point scratch is a lui(at, ...) instruction.
5223  if (emit_debug_code()) {
5224    And(scratch, scratch, kOpcodeMask);
5225    Check(eq, kTheInstructionToPatchShouldBeALui,
5226        scratch, Operand(LUI));
5227    lw(scratch, MemOperand(li_location));
5228  }
5229  srl(t9, new_value, kImm16Bits);
5230  Ins(scratch, t9, 0, kImm16Bits);
5231  sw(scratch, MemOperand(li_location));
5232
5233  lw(scratch, MemOperand(li_location, kInstrSize));
5234  // scratch is now ori(at, ...).
5235  if (emit_debug_code()) {
5236    And(scratch, scratch, kOpcodeMask);
5237    Check(eq, kTheInstructionToPatchShouldBeAnOri,
5238        scratch, Operand(ORI));
5239    lw(scratch, MemOperand(li_location, kInstrSize));
5240  }
5241  Ins(scratch, new_value, 0, kImm16Bits);
5242  sw(scratch, MemOperand(li_location, kInstrSize));
5243
5244  // Update the I-cache so the new lui and ori can be executed.
5245  FlushICache(li_location, 2);
5246}
5247
5248void MacroAssembler::GetRelocatedValue(Register li_location,
5249                                       Register value,
5250                                       Register scratch) {
5251  lw(value, MemOperand(li_location));
5252  if (emit_debug_code()) {
5253    And(value, value, kOpcodeMask);
5254    Check(eq, kTheInstructionShouldBeALui,
5255        value, Operand(LUI));
5256    lw(value, MemOperand(li_location));
5257  }
5258
5259  // value now holds a lui instruction. Extract the immediate.
5260  sll(value, value, kImm16Bits);
5261
5262  lw(scratch, MemOperand(li_location, kInstrSize));
5263  if (emit_debug_code()) {
5264    And(scratch, scratch, kOpcodeMask);
5265    Check(eq, kTheInstructionShouldBeAnOri,
5266        scratch, Operand(ORI));
5267    lw(scratch, MemOperand(li_location, kInstrSize));
5268  }
5269  // "scratch" now holds an ori instruction. Extract the immediate.
5270  andi(scratch, scratch, kImm16Mask);
5271
5272  // Merge the results.
5273  or_(value, value, scratch);
5274}
5275
5276
5277void MacroAssembler::CheckPageFlag(
5278    Register object,
5279    Register scratch,
5280    int mask,
5281    Condition cc,
5282    Label* condition_met) {
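  // Masking off the page-alignment bits of the object address yields the
  // start of its MemoryChunk, whose flags word is then tested against the
  // given mask.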
5283  And(scratch, object, Operand(~Page::kPageAlignmentMask));
5284  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5285  And(scratch, scratch, Operand(mask));
5286  Branch(condition_met, cc, scratch, Operand(zero_reg));
5287}
5288
5289
5290void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5291                                        Register scratch,
5292                                        Label* if_deprecated) {
5293  if (map->CanBeDeprecated()) {
5294    li(scratch, Operand(map));
5295    lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5296    And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
5297    Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5298  }
5299}
5300
5301
5302void MacroAssembler::JumpIfBlack(Register object,
5303                                 Register scratch0,
5304                                 Register scratch1,
5305                                 Label* on_black) {
5306  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
5307  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5308}
5309
5310
5311void MacroAssembler::HasColor(Register object,
5312                              Register bitmap_scratch,
5313                              Register mask_scratch,
5314                              Label* has_color,
5315                              int first_bit,
5316                              int second_bit) {
5317  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5318  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5319
5320  GetMarkBits(object, bitmap_scratch, mask_scratch);
5321
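  // The object's color is encoded in two consecutive mark bits (first_bit,
  // second_bit). The second bit may fall into the next bitmap cell, which
  // the word_boundary path below handles.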
5322  Label other_color, word_boundary;
5323  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5324  And(t8, t9, Operand(mask_scratch));
5325  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5326  // Shift left 1 by adding.
5327  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5328  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5329  And(t8, t9, Operand(mask_scratch));
5330  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5331  jmp(&other_color);
5332
5333  bind(&word_boundary);
5334  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5335  And(t9, t9, Operand(1));
5336  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5337  bind(&other_color);
5338}
5339
5340
5341// Detect some, but not all, common pointer-free objects.  This is used by the
5342// incremental write barrier which doesn't care about oddballs (they are always
5343// marked black immediately so this code is not hit).
5344void MacroAssembler::JumpIfDataObject(Register value,
5345                                      Register scratch,
5346                                      Label* not_data_object) {
5347  ASSERT(!AreAliased(value, scratch, t8, no_reg));
5348  Label is_data_object;
5349  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5350  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5351  Branch(&is_data_object, eq, t8, Operand(scratch));
5352  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5353  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5354  // If it's a string and it's not a cons string then it's an object containing
5355  // no GC pointers.
5356  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5357  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5358  Branch(not_data_object, ne, t8, Operand(zero_reg));
5359  bind(&is_data_object);
5360}
5361
5362
5363void MacroAssembler::GetMarkBits(Register addr_reg,
5364                                 Register bitmap_reg,
5365                                 Register mask_reg) {
5366  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
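  // One mark bit per pointer-sized word: compute the page start, the bit
  // index within a bitmap cell (mask_reg) and the cell index (t8), then
  // turn the cell index into a byte offset and the bit index into a
  // single-bit mask.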
5367  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5368  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5369  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5370  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5371  sll(t8, t8, kPointerSizeLog2);
5372  Addu(bitmap_reg, bitmap_reg, t8);
5373  li(t8, Operand(1));
5374  sllv(mask_reg, t8, mask_reg);
5375}
5376
5377
5378void MacroAssembler::EnsureNotWhite(
5379    Register value,
5380    Register bitmap_scratch,
5381    Register mask_scratch,
5382    Register load_scratch,
5383    Label* value_is_white_and_not_data) {
5384  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5385  GetMarkBits(value, bitmap_scratch, mask_scratch);
5386
5387  // If the value is black or grey we don't need to do anything.
5388  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5389  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5390  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5391  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5392
5393  Label done;
5394
5395  // Since both black and grey have a 1 in the first position and white does
5396  // not have a 1 there we only need to check one bit.
5397  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5398  And(t8, mask_scratch, load_scratch);
5399  Branch(&done, ne, t8, Operand(zero_reg));
5400
5401  if (emit_debug_code()) {
5402    // Check for impossible bit pattern.
5403    Label ok;
5404    // sll may overflow, making the check conservative.
5405    sll(t8, mask_scratch, 1);
5406    And(t8, load_scratch, t8);
5407    Branch(&ok, eq, t8, Operand(zero_reg));
5408    stop("Impossible marking bit pattern");
5409    bind(&ok);
5410  }
5411
5412  // Value is white.  We check whether it is data that doesn't need scanning.
5413  // Currently only checks for HeapNumber and non-cons strings.
5414  Register map = load_scratch;  // Holds map while checking type.
5415  Register length = load_scratch;  // Holds length of object after testing type.
5416  Label is_data_object;
5417
5418  // Check for heap-number
5419  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5420  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5421  {
5422    Label skip;
5423    Branch(&skip, ne, t8, Operand(map));
5424    li(length, HeapNumber::kSize);
5425    Branch(&is_data_object);
5426    bind(&skip);
5427  }
5428
5429  // Check for strings.
5430  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5431  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5432  // If it's a string and it's not a cons string then it's an object containing
5433  // no GC pointers.
5434  Register instance_type = load_scratch;
5435  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5436  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5437  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5438  // It's a non-indirect (non-cons and non-slice) string.
5439  // If it's external, the length is just ExternalString::kSize.
5440  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5441  // External strings are the only ones with the kExternalStringTag bit
5442  // set.
5443  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5444  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5445  And(t8, instance_type, Operand(kExternalStringTag));
5446  {
5447    Label skip;
5448    Branch(&skip, eq, t8, Operand(zero_reg));
5449    li(length, ExternalString::kSize);
5450    Branch(&is_data_object);
5451    bind(&skip);
5452  }
5453
5454  // Sequential string, either ASCII or UC16.
5455  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5456  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5457  // getting the length multiplied by 2.
5458  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5459  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5460  lw(t9, FieldMemOperand(value, String::kLengthOffset));
5461  And(t8, instance_type, Operand(kStringEncodingMask));
5462  {
5463    Label skip;
5464    Branch(&skip, eq, t8, Operand(zero_reg));
5465    srl(t9, t9, 1);
5466    bind(&skip);
5467  }
5468  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5469  And(length, length, Operand(~kObjectAlignmentMask));
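  // 'length' now holds the string's object size in bytes, rounded up to
  // the allocation alignment.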
5470
5471  bind(&is_data_object);
5472  // Value is a data object, and it is white.  Mark it black.  Since we know
5473  // that the object is white we can make it black by flipping one bit.
5474  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5475  Or(t8, t8, Operand(mask_scratch));
5476  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5477
5478  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5479  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5480  Addu(t8, t8, Operand(length));
5481  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5482
5483  bind(&done);
5484}
5485
5486
5487void MacroAssembler::Throw(BailoutReason reason) {
5488  Label throw_start;
5489  bind(&throw_start);
5490#ifdef DEBUG
5491  const char* msg = GetBailoutReason(reason);
5492  if (msg != NULL) {
5493    RecordComment("Throw message: ");
5494    RecordComment(msg);
5495  }
5496#endif
5497
5498  li(a0, Operand(Smi::FromInt(reason)));
5499  push(a0);
5500  // Disable stub call restrictions to always allow calls to throw.
5501  if (!has_frame_) {
5502    // We don't actually want to generate a pile of code for this, so just
5503    // claim there is a stack frame, without generating one.
5504    FrameScope scope(this, StackFrame::NONE);
5505    CallRuntime(Runtime::kThrowMessage, 1);
5506  } else {
5507    CallRuntime(Runtime::kThrowMessage, 1);
5508  }
5509  // will not return here
5510  if (is_trampoline_pool_blocked()) {
    // If the calling code cares about the exact number of instructions
    // generated, we insert padding here to keep the size of the ThrowMessage
    // macro constant.
5514    // Currently in debug mode with debug_code enabled the number of
5515    // generated instructions is 14, so we use this as a maximum value.
5516    static const int kExpectedThrowMessageInstructions = 14;
5517    int throw_instructions = InstructionsGeneratedSince(&throw_start);
5518    ASSERT(throw_instructions <= kExpectedThrowMessageInstructions);
5519    while (throw_instructions++ < kExpectedThrowMessageInstructions) {
5520      nop();
5521    }
5522  }
5523}
5524
5525
5526void MacroAssembler::ThrowIf(Condition cc,
5527                             BailoutReason reason,
5528                             Register rs,
5529                             Operand rt) {
5530  Label L;
5531  Branch(&L, NegateCondition(cc), rs, rt);
5532  Throw(reason);
5533  // will not return here
5534  bind(&L);
5535}
5536
5537
5538void MacroAssembler::LoadInstanceDescriptors(Register map,
5539                                             Register descriptors) {
5540  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5541}
5542
5543
5544void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5545  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5546  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5547}
5548
5549
5550void MacroAssembler::EnumLength(Register dst, Register map) {
5551  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5552  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5553  And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5554}
5555
5556
5557void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = t2;
5559  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5560  Label next, start;
5561  mov(a2, a0);
5562
5563  // Check if the enum length field is properly initialized, indicating that
5564  // there is an enum cache.
5565  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5566
5567  EnumLength(a3, a1);
5568  Branch(
5569      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5570
5571  jmp(&start);
5572
5573  bind(&next);
5574  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5575
5576  // For all objects but the receiver, check that the cache is empty.
5577  EnumLength(a3, a1);
5578  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5579
5580  bind(&start);
5581
  // Check that there are no elements. Register a2 contains the current JS
  // object we've reached through the prototype chain.
5584  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5585  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
5586
5587  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5588  Branch(&next, ne, a2, Operand(null_value));
5589}
5590
5591
5592void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5593  ASSERT(!output_reg.is(input_reg));
5594  Label done;
5595  li(output_reg, Operand(255));
5596  // Normal branch: nop in delay slot.
5597  Branch(&done, gt, input_reg, Operand(output_reg));
5598  // Use delay slot in this branch.
5599  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5600  mov(output_reg, zero_reg);  // In delay slot.
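  // The delay-slot mov above executes whether or not the branch is taken;
  // when it is not taken, the zero is immediately overwritten below.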
5601  mov(output_reg, input_reg);  // Value is in range 0..255.
5602  bind(&done);
5603}
5604
5605
5606void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5607                                        DoubleRegister input_reg,
5608                                        DoubleRegister temp_double_reg) {
5609  Label above_zero;
5610  Label done;
5611  Label in_bounds;
5612
5613  Move(temp_double_reg, 0.0);
5614  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5615
  // Double value is zero, negative or NaN; return 0.
5617  mov(result_reg, zero_reg);
5618  Branch(&done);
5619
5620  // Double value is >= 255, return 255.
5621  bind(&above_zero);
5622  Move(temp_double_reg, 255.0);
5623  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5624  li(result_reg, Operand(255));
5625  Branch(&done);
5626
5627  // In 0-255 range, round and truncate.
5628  bind(&in_bounds);
5629  cvt_w_d(temp_double_reg, input_reg);
5630  mfc1(result_reg, temp_double_reg);
5631  bind(&done);
5632}
5633
5634
5635void MacroAssembler::TestJSArrayForAllocationMemento(
5636    Register receiver_reg,
5637    Register scratch_reg,
5638    Label* no_memento_found,
5639    Condition cond,
5640    Label* allocation_memento_present) {
5641  ExternalReference new_space_start =
5642      ExternalReference::new_space_start(isolate());
5643  ExternalReference new_space_allocation_top =
5644      ExternalReference::new_space_allocation_top_address(isolate());
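  // A memento, if present, is allocated directly after the JSArray. Only
  // new-space objects can have one, so bail out unless the address just
  // past the would-be memento lies within the new-space allocation region.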
5645  Addu(scratch_reg, receiver_reg,
5646       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5647  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5648  li(at, Operand(new_space_allocation_top));
5649  lw(at, MemOperand(at));
5650  Branch(no_memento_found, gt, scratch_reg, Operand(at));
5651  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5652  if (allocation_memento_present) {
5653    Branch(allocation_memento_present, cond, scratch_reg,
5654           Operand(isolate()->factory()->allocation_memento_map()));
5655  }
5656}
5657
5658
5659Register GetRegisterThatIsNotOneOf(Register reg1,
5660                                   Register reg2,
5661                                   Register reg3,
5662                                   Register reg4,
5663                                   Register reg5,
5664                                   Register reg6) {
5665  RegList regs = 0;
5666  if (reg1.is_valid()) regs |= reg1.bit();
5667  if (reg2.is_valid()) regs |= reg2.bit();
5668  if (reg3.is_valid()) regs |= reg3.bit();
5669  if (reg4.is_valid()) regs |= reg4.bit();
5670  if (reg5.is_valid()) regs |= reg5.bit();
5671  if (reg6.is_valid()) regs |= reg6.bit();
5672
5673  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5674    Register candidate = Register::FromAllocationIndex(i);
5675    if (regs & candidate.bit()) continue;
5676    return candidate;
5677  }
5678  UNREACHABLE();
5679  return no_reg;
5680}
5681
5682
5683void MacroAssembler::JumpIfDictionaryInPrototypeChain(
5684    Register object,
5685    Register scratch0,
5686    Register scratch1,
5687    Label* found) {
5688  ASSERT(!scratch1.is(scratch0));
5689  Factory* factory = isolate()->factory();
5690  Register current = scratch0;
5691  Label loop_again;
5692
  // 'current' walks the prototype chain, starting at the object itself.
5694  Move(current, object);
5695
5696  // Loop based on the map going up the prototype chain.
5697  bind(&loop_again);
5698  lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
5699  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5700  Ext(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
5701  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5702  lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
5703  Branch(&loop_again, ne, current, Operand(factory->null_value()));
5704}
5705
5706
5707bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5708  if (r1.is(r2)) return true;
5709  if (r1.is(r3)) return true;
5710  if (r1.is(r4)) return true;
5711  if (r2.is(r3)) return true;
5712  if (r2.is(r4)) return true;
5713  if (r3.is(r4)) return true;
5714  return false;
5715}
5716
5717
5718CodePatcher::CodePatcher(byte* address, int instructions)
5719    : address_(address),
5720      size_(instructions * Assembler::kInstrSize),
5721      masm_(NULL, address, size_ + Assembler::kGap) {
5722  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate
  // size bytes of instructions without failing with buffer size constraints.
5725  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5726}
5727
5728
5729CodePatcher::~CodePatcher() {
5730  // Indicate that code has changed.
5731  CPU::FlushICache(address_, size_);
5732
5733  // Check that the code was patched as expected.
5734  ASSERT(masm_.pc_ == address_ + size_);
5735  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5736}
5737
5738
5739void CodePatcher::Emit(Instr instr) {
5740  masm()->emit(instr);
5741}
5742
5743
5744void CodePatcher::Emit(Address addr) {
5745  masm()->emit(reinterpret_cast<Instr>(addr));
5746}
5747
5748
5749void CodePatcher::ChangeBranchCondition(Condition cond) {
5750  Instr instr = Assembler::instr_at(masm_.pc_);
5751  ASSERT(Assembler::IsBranch(instr));
5752  uint32_t opcode = Assembler::GetOpcodeField(instr);
5753  // Currently only the 'eq' and 'ne' cond values are supported and the simple
5754  // branch instructions (with opcode being the branch type).
5755  // There are some special cases (see Assembler::IsBranch()) so extending this
5756  // would be tricky.
  ASSERT(opcode == BEQ ||
         opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
         opcode == BEQL ||
         opcode == BNEL ||
         opcode == BLEZL ||
         opcode == BGTZL);
5765  opcode = (cond == eq) ? BEQ : BNE;
5766  instr = (instr & ~kOpcodeMask) | opcode;
5767  masm_.emit(instr);
5768}
5769
5770
5771} }  // namespace v8::internal
5772
5773#endif  // V8_TARGET_ARCH_MIPS
5774