1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include <limits.h>  // For LONG_MIN, LONG_MAX.
29
30#include "v8.h"
31
32#if defined(V8_TARGET_ARCH_MIPS)
33
34#include "bootstrapper.h"
35#include "codegen.h"
36#include "debug.h"
37#include "runtime.h"
38
39namespace v8 {
40namespace internal {
41
42MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
43    : Assembler(arg_isolate, buffer, size),
44      generating_stub_(false),
45      allow_stub_calls_(true),
46      has_frame_(false) {
47  if (isolate() != NULL) {
48    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
49                                  isolate());
50  }
51}
52
53
54void MacroAssembler::LoadRoot(Register destination,
55                              Heap::RootListIndex index) {
56  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
57}
58
59
60void MacroAssembler::LoadRoot(Register destination,
61                              Heap::RootListIndex index,
62                              Condition cond,
63                              Register src1, const Operand& src2) {
64  Branch(2, NegateCondition(cond), src1, src2);
65  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
66}
67
68
69void MacroAssembler::StoreRoot(Register source,
70                               Heap::RootListIndex index) {
71  sw(source, MemOperand(s6, index << kPointerSizeLog2));
72}
73
74
75void MacroAssembler::StoreRoot(Register source,
76                               Heap::RootListIndex index,
77                               Condition cond,
78                               Register src1, const Operand& src2) {
79  Branch(2, NegateCondition(cond), src1, src2);
80  sw(source, MemOperand(s6, index << kPointerSizeLog2));
81}
82
83
84void MacroAssembler::LoadHeapObject(Register result,
85                                    Handle<HeapObject> object) {
86  if (isolate()->heap()->InNewSpace(*object)) {
87    Handle<JSGlobalPropertyCell> cell =
88        isolate()->factory()->NewJSGlobalPropertyCell(object);
89    li(result, Operand(cell));
90    lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
91  } else {
92    li(result, Operand(object));
93  }
94}
95
96
97// Push and pop all registers that can hold pointers.
98void MacroAssembler::PushSafepointRegisters() {
99  // Safepoints expect a block of kNumSafepointRegisters values on the
100  // stack, so adjust the stack for unsaved registers.
101  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
102  ASSERT(num_unsaved >= 0);
103  if (num_unsaved > 0) {
104    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
105  }
106  MultiPush(kSafepointSavedRegisters);
107}
108
109
110void MacroAssembler::PopSafepointRegisters() {
111  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
112  MultiPop(kSafepointSavedRegisters);
113  if (num_unsaved > 0) {
114    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
115  }
116}
117
118
119void MacroAssembler::PushSafepointRegistersAndDoubles() {
120  PushSafepointRegisters();
121  Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
122  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
123    FPURegister reg = FPURegister::FromAllocationIndex(i);
124    sdc1(reg, MemOperand(sp, i * kDoubleSize));
125  }
126}
127
128
129void MacroAssembler::PopSafepointRegistersAndDoubles() {
130  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
131    FPURegister reg = FPURegister::FromAllocationIndex(i);
132    ldc1(reg, MemOperand(sp, i * kDoubleSize));
133  }
134  Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
135  PopSafepointRegisters();
136}
137
138
139void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
140                                                             Register dst) {
141  sw(src, SafepointRegistersAndDoublesSlot(dst));
142}
143
144
145void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
146  sw(src, SafepointRegisterSlot(dst));
147}
148
149
150void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
151  lw(dst, SafepointRegisterSlot(src));
152}
153
154
155int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
156  // The registers are pushed starting with the highest encoding,
157  // which means that lowest encodings are closest to the stack pointer.
158  return kSafepointRegisterStackIndexMap[reg_code];
159}
160
161
162MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
163  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
164}
165
166
167MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
168  UNIMPLEMENTED_MIPS();
169  // General purpose registers are pushed last on the stack.
170  int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
171  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
172  return MemOperand(sp, doubles_size + register_offset);
173}
174
175
176void MacroAssembler::InNewSpace(Register object,
177                                Register scratch,
178                                Condition cc,
179                                Label* branch) {
180  ASSERT(cc == eq || cc == ne);
181  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
182  Branch(branch, cc, scratch,
183         Operand(ExternalReference::new_space_start(isolate())));
184}
185
186
187void MacroAssembler::RecordWriteField(
188    Register object,
189    int offset,
190    Register value,
191    Register dst,
192    RAStatus ra_status,
193    SaveFPRegsMode save_fp,
194    RememberedSetAction remembered_set_action,
195    SmiCheck smi_check) {
196  ASSERT(!AreAliased(value, dst, t8, object));
197  // First, check if a write barrier is even needed. The tests below
198  // catch stores of Smis.
199  Label done;
200
201  // Skip barrier if writing a smi.
202  if (smi_check == INLINE_SMI_CHECK) {
203    JumpIfSmi(value, &done);
204  }
205
206  // Although the object register is tagged, the offset is relative to the start
207  // of the object, so so offset must be a multiple of kPointerSize.
208  ASSERT(IsAligned(offset, kPointerSize));
209
210  Addu(dst, object, Operand(offset - kHeapObjectTag));
211  if (emit_debug_code()) {
212    Label ok;
213    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
214    Branch(&ok, eq, t8, Operand(zero_reg));
215    stop("Unaligned cell in write barrier");
216    bind(&ok);
217  }
218
219  RecordWrite(object,
220              dst,
221              value,
222              ra_status,
223              save_fp,
224              remembered_set_action,
225              OMIT_SMI_CHECK);
226
227  bind(&done);
228
229  // Clobber clobbered input registers when running with the debug-code flag
230  // turned on to provoke errors.
231  if (emit_debug_code()) {
232    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
233    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
234  }
235}
236
237
238// Will clobber 4 registers: object, address, scratch, ip.  The
239// register 'object' contains a heap object pointer.  The heap object
240// tag is shifted away.
241void MacroAssembler::RecordWrite(Register object,
242                                 Register address,
243                                 Register value,
244                                 RAStatus ra_status,
245                                 SaveFPRegsMode fp_mode,
246                                 RememberedSetAction remembered_set_action,
247                                 SmiCheck smi_check) {
248  ASSERT(!AreAliased(object, address, value, t8));
249  ASSERT(!AreAliased(object, address, value, t9));
250  // The compiled code assumes that record write doesn't change the
251  // context register, so we check that none of the clobbered
252  // registers are cp.
253  ASSERT(!address.is(cp) && !value.is(cp));
254
255  if (emit_debug_code()) {
256    lw(at, MemOperand(address));
257    Assert(
258        eq, "Wrong address or value passed to RecordWrite", at, Operand(value));
259  }
260
261  Label done;
262
263  if (smi_check == INLINE_SMI_CHECK) {
264    ASSERT_EQ(0, kSmiTag);
265    JumpIfSmi(value, &done);
266  }
267
268  CheckPageFlag(value,
269                value,  // Used as scratch.
270                MemoryChunk::kPointersToHereAreInterestingMask,
271                eq,
272                &done);
273  CheckPageFlag(object,
274                value,  // Used as scratch.
275                MemoryChunk::kPointersFromHereAreInterestingMask,
276                eq,
277                &done);
278
279  // Record the actual write.
280  if (ra_status == kRAHasNotBeenSaved) {
281    push(ra);
282  }
283  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
284  CallStub(&stub);
285  if (ra_status == kRAHasNotBeenSaved) {
286    pop(ra);
287  }
288
289  bind(&done);
290
291  // Clobber clobbered registers when running with the debug-code flag
292  // turned on to provoke errors.
293  if (emit_debug_code()) {
294    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
295    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
296  }
297}
298
299
300void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
301                                         Register address,
302                                         Register scratch,
303                                         SaveFPRegsMode fp_mode,
304                                         RememberedSetFinalAction and_then) {
305  Label done;
306  if (emit_debug_code()) {
307    Label ok;
308    JumpIfNotInNewSpace(object, scratch, &ok);
309    stop("Remembered set pointer is in new space");
310    bind(&ok);
311  }
312  // Load store buffer top.
313  ExternalReference store_buffer =
314      ExternalReference::store_buffer_top(isolate());
315  li(t8, Operand(store_buffer));
316  lw(scratch, MemOperand(t8));
317  // Store pointer to buffer and increment buffer top.
318  sw(address, MemOperand(scratch));
319  Addu(scratch, scratch, kPointerSize);
320  // Write back new top of buffer.
321  sw(scratch, MemOperand(t8));
322  // Call stub on end of buffer.
323  // Check for end of buffer.
324  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
325  if (and_then == kFallThroughAtEnd) {
326    Branch(&done, eq, t8, Operand(zero_reg));
327  } else {
328    ASSERT(and_then == kReturnAtEnd);
329    Ret(eq, t8, Operand(zero_reg));
330  }
331  push(ra);
332  StoreBufferOverflowStub store_buffer_overflow =
333      StoreBufferOverflowStub(fp_mode);
334  CallStub(&store_buffer_overflow);
335  pop(ra);
336  bind(&done);
337  if (and_then == kReturnAtEnd) {
338    Ret();
339  }
340}
341
342
343// -----------------------------------------------------------------------------
344// Allocation support.
345
346
347void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
348                                            Register scratch,
349                                            Label* miss) {
350  Label same_contexts;
351
352  ASSERT(!holder_reg.is(scratch));
353  ASSERT(!holder_reg.is(at));
354  ASSERT(!scratch.is(at));
355
356  // Load current lexical context from the stack frame.
357  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
358  // In debug mode, make sure the lexical context is set.
359#ifdef DEBUG
360  Check(ne, "we should not have an empty lexical context",
361      scratch, Operand(zero_reg));
362#endif
363
364  // Load the global context of the current context.
365  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
366  lw(scratch, FieldMemOperand(scratch, offset));
367  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
368
369  // Check the context is a global context.
370  if (emit_debug_code()) {
371    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
372    push(holder_reg);  // Temporarily save holder on the stack.
373    // Read the first word and compare to the global_context_map.
374    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
375    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
376    Check(eq, "JSGlobalObject::global_context should be a global context.",
377          holder_reg, Operand(at));
378    pop(holder_reg);  // Restore holder.
379  }
380
381  // Check if both contexts are the same.
382  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
383  Branch(&same_contexts, eq, scratch, Operand(at));
384
385  // Check the context is a global context.
386  if (emit_debug_code()) {
387    // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
388    push(holder_reg);  // Temporarily save holder on the stack.
389    mov(holder_reg, at);  // Move at to its holding place.
390    LoadRoot(at, Heap::kNullValueRootIndex);
391    Check(ne, "JSGlobalProxy::context() should not be null.",
392          holder_reg, Operand(at));
393
394    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
395    LoadRoot(at, Heap::kGlobalContextMapRootIndex);
396    Check(eq, "JSGlobalObject::global_context should be a global context.",
397          holder_reg, Operand(at));
398    // Restore at is not needed. at is reloaded below.
399    pop(holder_reg);  // Restore holder.
400    // Restore at to holder's context.
401    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
402  }
403
404  // Check that the security token in the calling global object is
405  // compatible with the security token in the receiving global
406  // object.
407  int token_offset = Context::kHeaderSize +
408                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
409
410  lw(scratch, FieldMemOperand(scratch, token_offset));
411  lw(at, FieldMemOperand(at, token_offset));
412  Branch(miss, ne, scratch, Operand(at));
413
414  bind(&same_contexts);
415}
416
417
418void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
419  // First of all we assign the hash seed to scratch.
420  LoadRoot(scratch, Heap::kHashSeedRootIndex);
421  SmiUntag(scratch);
422
423  // Xor original key with a seed.
424  xor_(reg0, reg0, scratch);
425
426  // Compute the hash code from the untagged key.  This must be kept in sync
427  // with ComputeIntegerHash in utils.h.
428  //
429  // hash = ~hash + (hash << 15);
430  nor(scratch, reg0, zero_reg);
431  sll(at, reg0, 15);
432  addu(reg0, scratch, at);
433
434  // hash = hash ^ (hash >> 12);
435  srl(at, reg0, 12);
436  xor_(reg0, reg0, at);
437
438  // hash = hash + (hash << 2);
439  sll(at, reg0, 2);
440  addu(reg0, reg0, at);
441
442  // hash = hash ^ (hash >> 4);
443  srl(at, reg0, 4);
444  xor_(reg0, reg0, at);
445
446  // hash = hash * 2057;
447  sll(scratch, reg0, 11);
448  sll(at, reg0, 3);
449  addu(reg0, reg0, at);
450  addu(reg0, reg0, scratch);
451
452  // hash = hash ^ (hash >> 16);
453  srl(at, reg0, 16);
454  xor_(reg0, reg0, at);
455}
456
457
458void MacroAssembler::LoadFromNumberDictionary(Label* miss,
459                                              Register elements,
460                                              Register key,
461                                              Register result,
462                                              Register reg0,
463                                              Register reg1,
464                                              Register reg2) {
465  // Register use:
466  //
467  // elements - holds the slow-case elements of the receiver on entry.
468  //            Unchanged unless 'result' is the same register.
469  //
470  // key      - holds the smi key on entry.
471  //            Unchanged unless 'result' is the same register.
472  //
473  //
474  // result   - holds the result on exit if the load succeeded.
475  //            Allowed to be the same as 'key' or 'result'.
476  //            Unchanged on bailout so 'key' or 'result' can be used
477  //            in further computation.
478  //
479  // Scratch registers:
480  //
481  // reg0 - holds the untagged key on entry and holds the hash once computed.
482  //
483  // reg1 - Used to hold the capacity mask of the dictionary.
484  //
485  // reg2 - Used for the index into the dictionary.
486  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
487  Label done;
488
489  GetNumberHash(reg0, reg1);
490
491  // Compute the capacity mask.
492  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
493  sra(reg1, reg1, kSmiTagSize);
494  Subu(reg1, reg1, Operand(1));
495
496  // Generate an unrolled loop that performs a few probes before giving up.
497  static const int kProbes = 4;
498  for (int i = 0; i < kProbes; i++) {
499    // Use reg2 for index calculations and keep the hash intact in reg0.
500    mov(reg2, reg0);
501    // Compute the masked index: (hash + i + i * i) & mask.
502    if (i > 0) {
503      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
504    }
505    and_(reg2, reg2, reg1);
506
507    // Scale the index by multiplying by the element size.
508    ASSERT(SeededNumberDictionary::kEntrySize == 3);
509    sll(at, reg2, 1);  // 2x.
510    addu(reg2, reg2, at);  // reg2 = reg2 * 3.
511
512    // Check if the key is identical to the name.
513    sll(at, reg2, kPointerSizeLog2);
514    addu(reg2, elements, at);
515
516    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
517    if (i != kProbes - 1) {
518      Branch(&done, eq, key, Operand(at));
519    } else {
520      Branch(miss, ne, key, Operand(at));
521    }
522  }
523
524  bind(&done);
525  // Check that the value is a normal property.
526  // reg2: elements + (index * kPointerSize).
527  const int kDetailsOffset =
528      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
529  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
530  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
531  Branch(miss, ne, at, Operand(zero_reg));
532
533  // Get the value at the masked, scaled index and return.
534  const int kValueOffset =
535      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
536  lw(result, FieldMemOperand(reg2, kValueOffset));
537}
538
539
540// ---------------------------------------------------------------------------
541// Instruction macros.
542
543void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
544  if (rt.is_reg()) {
545    addu(rd, rs, rt.rm());
546  } else {
547    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
548      addiu(rd, rs, rt.imm32_);
549    } else {
550      // li handles the relocation.
551      ASSERT(!rs.is(at));
552      li(at, rt);
553      addu(rd, rs, at);
554    }
555  }
556}
557
558
559void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
560  if (rt.is_reg()) {
561    subu(rd, rs, rt.rm());
562  } else {
563    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
564      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
565    } else {
566      // li handles the relocation.
567      ASSERT(!rs.is(at));
568      li(at, rt);
569      subu(rd, rs, at);
570    }
571  }
572}
573
574
575void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
576  if (rt.is_reg()) {
577    if (kArchVariant == kLoongson) {
578      mult(rs, rt.rm());
579      mflo(rd);
580    } else {
581      mul(rd, rs, rt.rm());
582    }
583  } else {
584    // li handles the relocation.
585    ASSERT(!rs.is(at));
586    li(at, rt);
587    if (kArchVariant == kLoongson) {
588      mult(rs, at);
589      mflo(rd);
590    } else {
591      mul(rd, rs, at);
592    }
593  }
594}
595
596
597void MacroAssembler::Mult(Register rs, const Operand& rt) {
598  if (rt.is_reg()) {
599    mult(rs, rt.rm());
600  } else {
601    // li handles the relocation.
602    ASSERT(!rs.is(at));
603    li(at, rt);
604    mult(rs, at);
605  }
606}
607
608
609void MacroAssembler::Multu(Register rs, const Operand& rt) {
610  if (rt.is_reg()) {
611    multu(rs, rt.rm());
612  } else {
613    // li handles the relocation.
614    ASSERT(!rs.is(at));
615    li(at, rt);
616    multu(rs, at);
617  }
618}
619
620
621void MacroAssembler::Div(Register rs, const Operand& rt) {
622  if (rt.is_reg()) {
623    div(rs, rt.rm());
624  } else {
625    // li handles the relocation.
626    ASSERT(!rs.is(at));
627    li(at, rt);
628    div(rs, at);
629  }
630}
631
632
633void MacroAssembler::Divu(Register rs, const Operand& rt) {
634  if (rt.is_reg()) {
635    divu(rs, rt.rm());
636  } else {
637    // li handles the relocation.
638    ASSERT(!rs.is(at));
639    li(at, rt);
640    divu(rs, at);
641  }
642}
643
644
645void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
646  if (rt.is_reg()) {
647    and_(rd, rs, rt.rm());
648  } else {
649    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
650      andi(rd, rs, rt.imm32_);
651    } else {
652      // li handles the relocation.
653      ASSERT(!rs.is(at));
654      li(at, rt);
655      and_(rd, rs, at);
656    }
657  }
658}
659
660
661void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
662  if (rt.is_reg()) {
663    or_(rd, rs, rt.rm());
664  } else {
665    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
666      ori(rd, rs, rt.imm32_);
667    } else {
668      // li handles the relocation.
669      ASSERT(!rs.is(at));
670      li(at, rt);
671      or_(rd, rs, at);
672    }
673  }
674}
675
676
677void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
678  if (rt.is_reg()) {
679    xor_(rd, rs, rt.rm());
680  } else {
681    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
682      xori(rd, rs, rt.imm32_);
683    } else {
684      // li handles the relocation.
685      ASSERT(!rs.is(at));
686      li(at, rt);
687      xor_(rd, rs, at);
688    }
689  }
690}
691
692
693void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
694  if (rt.is_reg()) {
695    nor(rd, rs, rt.rm());
696  } else {
697    // li handles the relocation.
698    ASSERT(!rs.is(at));
699    li(at, rt);
700    nor(rd, rs, at);
701  }
702}
703
704
705void MacroAssembler::Neg(Register rs, const Operand& rt) {
706  ASSERT(rt.is_reg());
707  ASSERT(!at.is(rs));
708  ASSERT(!at.is(rt.rm()));
709  li(at, -1);
710  xor_(rs, rt.rm(), at);
711}
712
713
714void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
715  if (rt.is_reg()) {
716    slt(rd, rs, rt.rm());
717  } else {
718    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
719      slti(rd, rs, rt.imm32_);
720    } else {
721      // li handles the relocation.
722      ASSERT(!rs.is(at));
723      li(at, rt);
724      slt(rd, rs, at);
725    }
726  }
727}
728
729
730void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
731  if (rt.is_reg()) {
732    sltu(rd, rs, rt.rm());
733  } else {
734    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
735      sltiu(rd, rs, rt.imm32_);
736    } else {
737      // li handles the relocation.
738      ASSERT(!rs.is(at));
739      li(at, rt);
740      sltu(rd, rs, at);
741    }
742  }
743}
744
745
746void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
747  if (kArchVariant == kMips32r2) {
748    if (rt.is_reg()) {
749      rotrv(rd, rs, rt.rm());
750    } else {
751      rotr(rd, rs, rt.imm32_);
752    }
753  } else {
754    if (rt.is_reg()) {
755      subu(at, zero_reg, rt.rm());
756      sllv(at, rs, at);
757      srlv(rd, rs, rt.rm());
758      or_(rd, rd, at);
759    } else {
760      if (rt.imm32_ == 0) {
761        srl(rd, rs, 0);
762      } else {
763        srl(at, rs, rt.imm32_);
764        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
765        or_(rd, rd, at);
766      }
767    }
768  }
769}
770
771//------------Pseudo-instructions-------------
772
773void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
774  ASSERT(!j.is_reg());
775  BlockTrampolinePoolScope block_trampoline_pool(this);
776  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
777    // Normal load of an immediate value which does not need Relocation Info.
778    if (is_int16(j.imm32_)) {
779      addiu(rd, zero_reg, j.imm32_);
780    } else if (!(j.imm32_ & kHiMask)) {
781      ori(rd, zero_reg, j.imm32_);
782    } else if (!(j.imm32_ & kImm16Mask)) {
783      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
784    } else {
785      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
786      ori(rd, rd, (j.imm32_ & kImm16Mask));
787    }
788  } else {
789    if (MustUseReg(j.rmode_)) {
790      RecordRelocInfo(j.rmode_, j.imm32_);
791    }
792    // We always need the same number of instructions as we may need to patch
793    // this code to load another value which may need 2 instructions to load.
794    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
795    ori(rd, rd, (j.imm32_ & kImm16Mask));
796  }
797}
798
799
800void MacroAssembler::MultiPush(RegList regs) {
801  int16_t num_to_push = NumberOfBitsSet(regs);
802  int16_t stack_offset = num_to_push * kPointerSize;
803
804  Subu(sp, sp, Operand(stack_offset));
805  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
806    if ((regs & (1 << i)) != 0) {
807      stack_offset -= kPointerSize;
808      sw(ToRegister(i), MemOperand(sp, stack_offset));
809    }
810  }
811}
812
813
814void MacroAssembler::MultiPushReversed(RegList regs) {
815  int16_t num_to_push = NumberOfBitsSet(regs);
816  int16_t stack_offset = num_to_push * kPointerSize;
817
818  Subu(sp, sp, Operand(stack_offset));
819  for (int16_t i = 0; i < kNumRegisters; i++) {
820    if ((regs & (1 << i)) != 0) {
821      stack_offset -= kPointerSize;
822      sw(ToRegister(i), MemOperand(sp, stack_offset));
823    }
824  }
825}
826
827
828void MacroAssembler::MultiPop(RegList regs) {
829  int16_t stack_offset = 0;
830
831  for (int16_t i = 0; i < kNumRegisters; i++) {
832    if ((regs & (1 << i)) != 0) {
833      lw(ToRegister(i), MemOperand(sp, stack_offset));
834      stack_offset += kPointerSize;
835    }
836  }
837  addiu(sp, sp, stack_offset);
838}
839
840
841void MacroAssembler::MultiPopReversed(RegList regs) {
842  int16_t stack_offset = 0;
843
844  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
845    if ((regs & (1 << i)) != 0) {
846      lw(ToRegister(i), MemOperand(sp, stack_offset));
847      stack_offset += kPointerSize;
848    }
849  }
850  addiu(sp, sp, stack_offset);
851}
852
853
854void MacroAssembler::MultiPushFPU(RegList regs) {
855  CpuFeatures::Scope scope(FPU);
856  int16_t num_to_push = NumberOfBitsSet(regs);
857  int16_t stack_offset = num_to_push * kDoubleSize;
858
859  Subu(sp, sp, Operand(stack_offset));
860  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
861    if ((regs & (1 << i)) != 0) {
862      stack_offset -= kDoubleSize;
863      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
864    }
865  }
866}
867
868
869void MacroAssembler::MultiPushReversedFPU(RegList regs) {
870  CpuFeatures::Scope scope(FPU);
871  int16_t num_to_push = NumberOfBitsSet(regs);
872  int16_t stack_offset = num_to_push * kDoubleSize;
873
874  Subu(sp, sp, Operand(stack_offset));
875  for (int16_t i = 0; i < kNumRegisters; i++) {
876    if ((regs & (1 << i)) != 0) {
877      stack_offset -= kDoubleSize;
878      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
879    }
880  }
881}
882
883
884void MacroAssembler::MultiPopFPU(RegList regs) {
885  CpuFeatures::Scope scope(FPU);
886  int16_t stack_offset = 0;
887
888  for (int16_t i = 0; i < kNumRegisters; i++) {
889    if ((regs & (1 << i)) != 0) {
890      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
891      stack_offset += kDoubleSize;
892    }
893  }
894  addiu(sp, sp, stack_offset);
895}
896
897
898void MacroAssembler::MultiPopReversedFPU(RegList regs) {
899  CpuFeatures::Scope scope(FPU);
900  int16_t stack_offset = 0;
901
902  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
903    if ((regs & (1 << i)) != 0) {
904      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
905      stack_offset += kDoubleSize;
906    }
907  }
908  addiu(sp, sp, stack_offset);
909}
910
911
912void MacroAssembler::FlushICache(Register address, unsigned instructions) {
913  RegList saved_regs = kJSCallerSaved | ra.bit();
914  MultiPush(saved_regs);
915  AllowExternalCallThatCantCauseGC scope(this);
916
917  // Save to a0 in case address == t0.
918  Move(a0, address);
919  PrepareCallCFunction(2, t0);
920
921  li(a1, instructions * kInstrSize);
922  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
923  MultiPop(saved_regs);
924}
925
926
927void MacroAssembler::Ext(Register rt,
928                         Register rs,
929                         uint16_t pos,
930                         uint16_t size) {
931  ASSERT(pos < 32);
932  ASSERT(pos + size < 33);
933
934  if (kArchVariant == kMips32r2) {
935    ext_(rt, rs, pos, size);
936  } else {
937    // Move rs to rt and shift it left then right to get the
938    // desired bitfield on the right side and zeroes on the left.
939    int shift_left = 32 - (pos + size);
940    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
941
942    int shift_right = 32 - size;
943    if (shift_right > 0) {
944      srl(rt, rt, shift_right);
945    }
946  }
947}
948
949
950void MacroAssembler::Ins(Register rt,
951                         Register rs,
952                         uint16_t pos,
953                         uint16_t size) {
954  ASSERT(pos < 32);
955  ASSERT(pos + size <= 32);
956  ASSERT(size != 0);
957
958  if (kArchVariant == kMips32r2) {
959    ins_(rt, rs, pos, size);
960  } else {
961    ASSERT(!rt.is(t8) && !rs.is(t8));
962    Subu(at, zero_reg, Operand(1));
963    srl(at, at, 32 - size);
964    and_(t8, rs, at);
965    sll(t8, t8, pos);
966    sll(at, at, pos);
967    nor(at, at, zero_reg);
968    and_(at, rt, at);
969    or_(rt, t8, at);
970  }
971}
972
973
974void MacroAssembler::Cvt_d_uw(FPURegister fd,
975                              FPURegister fs,
976                              FPURegister scratch) {
977  // Move the data from fs to t8.
978  mfc1(t8, fs);
979  Cvt_d_uw(fd, t8, scratch);
980}
981
982
983void MacroAssembler::Cvt_d_uw(FPURegister fd,
984                              Register rs,
985                              FPURegister scratch) {
986  // Convert rs to a FP value in fd (and fd + 1).
987  // We do this by converting rs minus the MSB to avoid sign conversion,
988  // then adding 2^31 to the result (if needed).
989
990  ASSERT(!fd.is(scratch));
991  ASSERT(!rs.is(t9));
992  ASSERT(!rs.is(at));
993
994  // Save rs's MSB to t9.
995  Ext(t9, rs, 31, 1);
996  // Remove rs's MSB.
997  Ext(at, rs, 0, 31);
998  // Move the result to fd.
999  mtc1(at, fd);
1000
1001  // Convert fd to a real FP value.
1002  cvt_d_w(fd, fd);
1003
1004  Label conversion_done;
1005
1006  // If rs's MSB was 0, it's done.
1007  // Otherwise we need to add that to the FP register.
1008  Branch(&conversion_done, eq, t9, Operand(zero_reg));
1009
1010  // Load 2^31 into f20 as its float representation.
1011  li(at, 0x41E00000);
1012  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1013  mtc1(zero_reg, scratch);
1014  // Add it to fd.
1015  add_d(fd, fd, scratch);
1016
1017  bind(&conversion_done);
1018}
1019
1020
1021void MacroAssembler::Trunc_uw_d(FPURegister fd,
1022                                FPURegister fs,
1023                                FPURegister scratch) {
1024  Trunc_uw_d(fs, t8, scratch);
1025  mtc1(t8, fd);
1026}
1027
1028void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1029  if (kArchVariant == kLoongson && fd.is(fs)) {
1030    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1031    trunc_w_d(fd, fs);
1032    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1033  } else {
1034    trunc_w_d(fd, fs);
1035  }
1036}
1037
1038void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1039  if (kArchVariant == kLoongson && fd.is(fs)) {
1040    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1041    round_w_d(fd, fs);
1042    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1043  } else {
1044    round_w_d(fd, fs);
1045  }
1046}
1047
1048
1049void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1050  if (kArchVariant == kLoongson && fd.is(fs)) {
1051    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1052    floor_w_d(fd, fs);
1053    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1054  } else {
1055    floor_w_d(fd, fs);
1056  }
1057}
1058
1059
1060void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1061  if (kArchVariant == kLoongson && fd.is(fs)) {
1062    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1063    ceil_w_d(fd, fs);
1064    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1065  } else {
1066    ceil_w_d(fd, fs);
1067  }
1068}
1069
1070
1071void MacroAssembler::Trunc_uw_d(FPURegister fd,
1072                                Register rs,
1073                                FPURegister scratch) {
1074  ASSERT(!fd.is(scratch));
1075  ASSERT(!rs.is(at));
1076
1077  // Load 2^31 into scratch as its float representation.
1078  li(at, 0x41E00000);
1079  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1080  mtc1(zero_reg, scratch);
1081  // Test if scratch > fd.
1082  // If fd < 2^31 we can convert it normally.
1083  Label simple_convert;
1084  BranchF(&simple_convert, NULL, lt, fd, scratch);
1085
1086  // First we subtract 2^31 from fd, then trunc it to rs
1087  // and add 2^31 to rs.
1088  sub_d(scratch, fd, scratch);
1089  trunc_w_d(scratch, scratch);
1090  mfc1(rs, scratch);
1091  Or(rs, rs, 1 << 31);
1092
1093  Label done;
1094  Branch(&done);
1095  // Simple conversion.
1096  bind(&simple_convert);
1097  trunc_w_d(scratch, fd);
1098  mfc1(rs, scratch);
1099
1100  bind(&done);
1101}
1102
1103
1104void MacroAssembler::BranchF(Label* target,
1105                             Label* nan,
1106                             Condition cc,
1107                             FPURegister cmp1,
1108                             FPURegister cmp2,
1109                             BranchDelaySlot bd) {
1110  if (cc == al) {
1111    Branch(bd, target);
1112    return;
1113  }
1114
1115  ASSERT(nan || target);
1116  // Check for unordered (NaN) cases.
1117  if (nan) {
1118    c(UN, D, cmp1, cmp2);
1119    bc1t(nan);
1120  }
1121
1122  if (target) {
1123    // Here NaN cases were either handled by this function or are assumed to
1124    // have been handled by the caller.
1125    // Unsigned conditions are treated as their signed counterpart.
1126    switch (cc) {
1127      case Uless:
1128      case less:
1129        c(OLT, D, cmp1, cmp2);
1130        bc1t(target);
1131        break;
1132      case Ugreater:
1133      case greater:
1134        c(ULE, D, cmp1, cmp2);
1135        bc1f(target);
1136        break;
1137      case Ugreater_equal:
1138      case greater_equal:
1139        c(ULT, D, cmp1, cmp2);
1140        bc1f(target);
1141        break;
1142      case Uless_equal:
1143      case less_equal:
1144        c(OLE, D, cmp1, cmp2);
1145        bc1t(target);
1146        break;
1147      case eq:
1148        c(EQ, D, cmp1, cmp2);
1149        bc1t(target);
1150        break;
1151      case ne:
1152        c(EQ, D, cmp1, cmp2);
1153        bc1f(target);
1154        break;
1155      default:
1156        CHECK(0);
1157    };
1158  }
1159
1160  if (bd == PROTECT) {
1161    nop();
1162  }
1163}
1164
1165
1166void MacroAssembler::Move(FPURegister dst, double imm) {
1167  ASSERT(CpuFeatures::IsEnabled(FPU));
1168  static const DoubleRepresentation minus_zero(-0.0);
1169  static const DoubleRepresentation zero(0.0);
1170  DoubleRepresentation value(imm);
1171  // Handle special values first.
1172  bool force_load = dst.is(kDoubleRegZero);
1173  if (value.bits == zero.bits && !force_load) {
1174    mov_d(dst, kDoubleRegZero);
1175  } else if (value.bits == minus_zero.bits && !force_load) {
1176    neg_d(dst, kDoubleRegZero);
1177  } else {
1178    uint32_t lo, hi;
1179    DoubleAsTwoUInt32(imm, &lo, &hi);
1180    // Move the low part of the double into the lower of the corresponding FPU
1181    // register of FPU register pair.
1182    if (lo != 0) {
1183      li(at, Operand(lo));
1184      mtc1(at, dst);
1185    } else {
1186      mtc1(zero_reg, dst);
1187    }
1188    // Move the high part of the double into the higher of the corresponding FPU
1189    // register of FPU register pair.
1190    if (hi != 0) {
1191      li(at, Operand(hi));
1192      mtc1(at, dst.high());
1193    } else {
1194      mtc1(zero_reg, dst.high());
1195    }
1196  }
1197}
1198
1199
1200void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1201  if (kArchVariant == kLoongson) {
1202    Label done;
1203    Branch(&done, ne, rt, Operand(zero_reg));
1204    mov(rd, rs);
1205    bind(&done);
1206  } else {
1207    movz(rd, rs, rt);
1208  }
1209}
1210
1211
1212void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1213  if (kArchVariant == kLoongson) {
1214    Label done;
1215    Branch(&done, eq, rt, Operand(zero_reg));
1216    mov(rd, rs);
1217    bind(&done);
1218  } else {
1219    movn(rd, rs, rt);
1220  }
1221}
1222
1223
1224void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1225  if (kArchVariant == kLoongson) {
1226    // Tests an FP condition code and then conditionally move rs to rd.
1227    // We do not currently use any FPU cc bit other than bit 0.
1228    ASSERT(cc == 0);
1229    ASSERT(!(rs.is(t8) || rd.is(t8)));
1230    Label done;
1231    Register scratch = t8;
1232    // For testing purposes we need to fetch content of the FCSR register and
1233    // than test its cc (floating point condition code) bit (for cc = 0, it is
1234    // 24. bit of the FCSR).
1235    cfc1(scratch, FCSR);
1236    // For the MIPS I, II and III architectures, the contents of scratch is
1237    // UNPREDICTABLE for the instruction immediately following CFC1.
1238    nop();
1239    srl(scratch, scratch, 16);
1240    andi(scratch, scratch, 0x0080);
1241    Branch(&done, eq, scratch, Operand(zero_reg));
1242    mov(rd, rs);
1243    bind(&done);
1244  } else {
1245    movt(rd, rs, cc);
1246  }
1247}
1248
1249
1250void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1251  if (kArchVariant == kLoongson) {
1252    // Tests an FP condition code and then conditionally move rs to rd.
1253    // We do not currently use any FPU cc bit other than bit 0.
1254    ASSERT(cc == 0);
1255    ASSERT(!(rs.is(t8) || rd.is(t8)));
1256    Label done;
1257    Register scratch = t8;
1258    // For testing purposes we need to fetch content of the FCSR register and
1259    // than test its cc (floating point condition code) bit (for cc = 0, it is
1260    // 24. bit of the FCSR).
1261    cfc1(scratch, FCSR);
1262    // For the MIPS I, II and III architectures, the contents of scratch is
1263    // UNPREDICTABLE for the instruction immediately following CFC1.
1264    nop();
1265    srl(scratch, scratch, 16);
1266    andi(scratch, scratch, 0x0080);
1267    Branch(&done, ne, scratch, Operand(zero_reg));
1268    mov(rd, rs);
1269    bind(&done);
1270  } else {
1271    movf(rd, rs, cc);
1272  }
1273}
1274
1275
1276void MacroAssembler::Clz(Register rd, Register rs) {
1277  if (kArchVariant == kLoongson) {
1278    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1279    Register mask = t8;
1280    Register scratch = t9;
1281    Label loop, end;
1282    mov(at, rs);
1283    mov(rd, zero_reg);
1284    lui(mask, 0x8000);
1285    bind(&loop);
1286    and_(scratch, at, mask);
1287    Branch(&end, ne, scratch, Operand(zero_reg));
1288    addiu(rd, rd, 1);
1289    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1290    srl(mask, mask, 1);
1291    bind(&end);
1292  } else {
1293    clz(rd, rs);
1294  }
1295}
1296
1297
1298// Tries to get a signed int32 out of a double precision floating point heap
1299// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
1300// 32bits signed integer range.
1301// This method implementation differs from the ARM version for performance
1302// reasons.
1303void MacroAssembler::ConvertToInt32(Register source,
1304                                    Register dest,
1305                                    Register scratch,
1306                                    Register scratch2,
1307                                    FPURegister double_scratch,
1308                                    Label *not_int32) {
1309  Label right_exponent, done;
1310  // Get exponent word (ENDIAN issues).
1311  lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
1312  // Get exponent alone in scratch2.
1313  And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
1314  // Load dest with zero.  We use this either for the final shift or
1315  // for the answer.
1316  mov(dest, zero_reg);
1317  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
1318  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
1319  // the exponent that we are fastest at and also the highest exponent we can
1320  // handle here.
1321  const uint32_t non_smi_exponent =
1322      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
1323  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
1324  Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
1325  // If the exponent is higher than that then go to not_int32 case.  This
1326  // catches numbers that don't fit in a signed int32, infinities and NaNs.
1327  Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
1328
1329  // We know the exponent is smaller than 30 (biased).  If it is less than
1330  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
1331  // it rounds to zero.
1332  const uint32_t zero_exponent =
1333      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
1334  Subu(scratch2, scratch2, Operand(zero_exponent));
1335  // Dest already has a Smi zero.
1336  Branch(&done, lt, scratch2, Operand(zero_reg));
1337  if (!CpuFeatures::IsSupported(FPU)) {
1338    // We have a shifted exponent between 0 and 30 in scratch2.
1339    srl(dest, scratch2, HeapNumber::kExponentShift);
1340    // We now have the exponent in dest.  Subtract from 30 to get
1341    // how much to shift down.
1342    li(at, Operand(30));
1343    subu(dest, at, dest);
1344  }
1345  bind(&right_exponent);
1346  if (CpuFeatures::IsSupported(FPU)) {
1347    CpuFeatures::Scope scope(FPU);
1348    // MIPS FPU instructions implementing double precision to integer
1349    // conversion using round to zero. Since the FP value was qualified
1350    // above, the resulting integer should be a legal int32.
1351    // The original 'Exponent' word is still in scratch.
1352    lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1353    mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
1354    trunc_w_d(double_scratch, double_scratch);
1355    mfc1(dest, double_scratch);
1356  } else {
1357    // On entry, dest has final downshift, scratch has original sign/exp/mant.
1358    // Save sign bit in top bit of dest.
1359    And(scratch2, scratch, Operand(0x80000000));
1360    Or(dest, dest, Operand(scratch2));
1361    // Put back the implicit 1, just above mantissa field.
1362    Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
1363
1364    // Shift up the mantissa bits to take up the space the exponent used to
1365    // take. We just orred in the implicit bit so that took care of one and
1366    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
1367    // distance. But we want to clear the sign-bit so shift one more bit
1368    // left, then shift right one bit.
1369    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
1370    sll(scratch, scratch, shift_distance + 1);
1371    srl(scratch, scratch, 1);
1372
1373    // Get the second half of the double. For some exponents we don't
1374    // actually need this because the bits get shifted out again, but
1375    // it's probably slower to test than just to do it.
1376    lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
1377    // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
1378    // The width of the field here is the same as the shift amount above.
1379    const int field_width = shift_distance;
1380    Ext(scratch2, scratch2, 32-shift_distance, field_width);
1381    Ins(scratch, scratch2, 0, field_width);
1382    // Move down according to the exponent.
1383    srlv(scratch, scratch, dest);
1384    // Prepare the negative version of our integer.
1385    subu(scratch2, zero_reg, scratch);
1386    // Trick to check sign bit (msb) held in dest, count leading zero.
1387    // 0 indicates negative, save negative version with conditional move.
1388    Clz(dest, dest);
1389    Movz(scratch, scratch2, dest);
1390    mov(dest, scratch);
1391  }
1392  bind(&done);
1393}
1394
1395
1396void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1397                                     FPURegister result,
1398                                     DoubleRegister double_input,
1399                                     Register scratch1,
1400                                     Register except_flag,
1401                                     CheckForInexactConversion check_inexact) {
1402  ASSERT(CpuFeatures::IsSupported(FPU));
1403  CpuFeatures::Scope scope(FPU);
1404
1405  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
1406
1407  if (check_inexact == kDontCheckForInexactConversion) {
1408    // Ingore inexact exceptions.
1409    except_mask &= ~kFCSRInexactFlagMask;
1410  }
1411
1412  // Save FCSR.
1413  cfc1(scratch1, FCSR);
1414  // Disable FPU exceptions.
1415  ctc1(zero_reg, FCSR);
1416
1417  // Do operation based on rounding mode.
1418  switch (rounding_mode) {
1419    case kRoundToNearest:
1420      Round_w_d(result, double_input);
1421      break;
1422    case kRoundToZero:
1423      Trunc_w_d(result, double_input);
1424      break;
1425    case kRoundToPlusInf:
1426      Ceil_w_d(result, double_input);
1427      break;
1428    case kRoundToMinusInf:
1429      Floor_w_d(result, double_input);
1430      break;
1431  }  // End of switch-statement.
1432
1433  // Retrieve FCSR.
1434  cfc1(except_flag, FCSR);
1435  // Restore FCSR.
1436  ctc1(scratch1, FCSR);
1437
1438  // Check for fpu exceptions.
1439  And(except_flag, except_flag, Operand(except_mask));
1440}
1441
1442
1443void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
1444                                                 Register input_high,
1445                                                 Register input_low,
1446                                                 Register scratch) {
1447  Label done, normal_exponent, restore_sign;
1448  // Extract the biased exponent in result.
1449  Ext(result,
1450      input_high,
1451      HeapNumber::kExponentShift,
1452      HeapNumber::kExponentBits);
1453
1454  // Check for Infinity and NaNs, which should return 0.
1455  Subu(scratch, result, HeapNumber::kExponentMask);
1456  Movz(result, zero_reg, scratch);
1457  Branch(&done, eq, scratch, Operand(zero_reg));
1458
1459  // Express exponent as delta to (number of mantissa bits + 31).
1460  Subu(result,
1461       result,
1462       Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
1463
1464  // If the delta is strictly positive, all bits would be shifted away,
1465  // which means that we can return 0.
1466  Branch(&normal_exponent, le, result, Operand(zero_reg));
1467  mov(result, zero_reg);
1468  Branch(&done);
1469
1470  bind(&normal_exponent);
1471  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
1472  // Calculate shift.
1473  Addu(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits));
1474
1475  // Save the sign.
1476  Register sign = result;
1477  result = no_reg;
1478  And(sign, input_high, Operand(HeapNumber::kSignMask));
1479
1480  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
1481  // to check for this specific case.
1482  Label high_shift_needed, high_shift_done;
1483  Branch(&high_shift_needed, lt, scratch, Operand(32));
1484  mov(input_high, zero_reg);
1485  Branch(&high_shift_done);
1486  bind(&high_shift_needed);
1487
1488  // Set the implicit 1 before the mantissa part in input_high.
1489  Or(input_high,
1490     input_high,
1491     Operand(1 << HeapNumber::kMantissaBitsInTopWord));
1492  // Shift the mantissa bits to the correct position.
1493  // We don't need to clear non-mantissa bits as they will be shifted away.
1494  // If they weren't, it would mean that the answer is in the 32bit range.
1495  sllv(input_high, input_high, scratch);
1496
1497  bind(&high_shift_done);
1498
1499  // Replace the shifted bits with bits from the lower mantissa word.
1500  Label pos_shift, shift_done;
1501  li(at, 32);
1502  subu(scratch, at, scratch);
1503  Branch(&pos_shift, ge, scratch, Operand(zero_reg));
1504
1505  // Negate scratch.
1506  Subu(scratch, zero_reg, scratch);
1507  sllv(input_low, input_low, scratch);
1508  Branch(&shift_done);
1509
1510  bind(&pos_shift);
1511  srlv(input_low, input_low, scratch);
1512
1513  bind(&shift_done);
1514  Or(input_high, input_high, Operand(input_low));
1515  // Restore sign if necessary.
1516  mov(scratch, sign);
1517  result = sign;
1518  sign = no_reg;
1519  Subu(result, zero_reg, input_high);
1520  Movz(result, input_high, scratch);
1521  bind(&done);
1522}
1523
1524
1525void MacroAssembler::EmitECMATruncate(Register result,
1526                                      FPURegister double_input,
1527                                      FPURegister single_scratch,
1528                                      Register scratch,
1529                                      Register scratch2,
1530                                      Register scratch3) {
1531  CpuFeatures::Scope scope(FPU);
1532  ASSERT(!scratch2.is(result));
1533  ASSERT(!scratch3.is(result));
1534  ASSERT(!scratch3.is(scratch2));
1535  ASSERT(!scratch.is(result) &&
1536         !scratch.is(scratch2) &&
1537         !scratch.is(scratch3));
1538  ASSERT(!single_scratch.is(double_input));
1539
1540  Label done;
1541  Label manual;
1542
1543  // Clear cumulative exception flags and save the FCSR.
1544  cfc1(scratch2, FCSR);
1545  ctc1(zero_reg, FCSR);
1546  // Try a conversion to a signed integer.
1547  trunc_w_d(single_scratch, double_input);
1548  mfc1(result, single_scratch);
1549  // Retrieve and restore the FCSR.
1550  cfc1(scratch, FCSR);
1551  ctc1(scratch2, FCSR);
1552  // Check for overflow and NaNs.
1553  And(scratch,
1554      scratch,
1555      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1556  // If we had no exceptions we are done.
1557  Branch(&done, eq, scratch, Operand(zero_reg));
1558
1559  // Load the double value and perform a manual truncation.
1560  Register input_high = scratch2;
1561  Register input_low = scratch3;
1562  Move(input_low, input_high, double_input);
1563  EmitOutOfInt32RangeTruncate(result,
1564                              input_high,
1565                              input_low,
1566                              scratch);
1567  bind(&done);
1568}
1569
1570
1571void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1572                                         Register src,
1573                                         int num_least_bits) {
1574  Ext(dst, src, kSmiTagSize, num_least_bits);
1575}
1576
1577
1578void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1579                                           Register src,
1580                                           int num_least_bits) {
1581  And(dst, src, Operand((1 << num_least_bits) - 1));
1582}
1583
1584
1585// Emulated condtional branches do not emit a nop in the branch delay slot.
1586//
1587// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1588#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
1589    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
1590    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1591
1592
1593void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1594  BranchShort(offset, bdslot);
1595}
1596
1597
1598void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1599                            const Operand& rt,
1600                            BranchDelaySlot bdslot) {
1601  BranchShort(offset, cond, rs, rt, bdslot);
1602}
1603
1604
1605void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1606  if (L->is_bound()) {
1607    if (is_near(L)) {
1608      BranchShort(L, bdslot);
1609    } else {
1610      Jr(L, bdslot);
1611    }
1612  } else {
1613    if (is_trampoline_emitted()) {
1614      Jr(L, bdslot);
1615    } else {
1616      BranchShort(L, bdslot);
1617    }
1618  }
1619}
1620
1621
1622void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1623                            const Operand& rt,
1624                            BranchDelaySlot bdslot) {
1625  if (L->is_bound()) {
1626    if (is_near(L)) {
1627      BranchShort(L, cond, rs, rt, bdslot);
1628    } else {
1629      Label skip;
1630      Condition neg_cond = NegateCondition(cond);
1631      BranchShort(&skip, neg_cond, rs, rt);
1632      Jr(L, bdslot);
1633      bind(&skip);
1634    }
1635  } else {
1636    if (is_trampoline_emitted()) {
1637      Label skip;
1638      Condition neg_cond = NegateCondition(cond);
1639      BranchShort(&skip, neg_cond, rs, rt);
1640      Jr(L, bdslot);
1641      bind(&skip);
1642    } else {
1643      BranchShort(L, cond, rs, rt, bdslot);
1644    }
1645  }
1646}
1647
1648
1649void MacroAssembler::Branch(Label* L,
1650                            Condition cond,
1651                            Register rs,
1652                            Heap::RootListIndex index,
1653                            BranchDelaySlot bdslot) {
1654  LoadRoot(at, index);
1655  Branch(L, cond, rs, Operand(at), bdslot);
1656}
1657
1658
1659void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1660  b(offset);
1661
1662  // Emit a nop in the branch delay slot if required.
1663  if (bdslot == PROTECT)
1664    nop();
1665}
1666
1667
1668void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1669                                 const Operand& rt,
1670                                 BranchDelaySlot bdslot) {
1671  BRANCH_ARGS_CHECK(cond, rs, rt);
1672  ASSERT(!rs.is(zero_reg));
1673  Register r2 = no_reg;
1674  Register scratch = at;
1675
1676  if (rt.is_reg()) {
1677    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1678    // rt.
1679    r2 = rt.rm_;
1680    switch (cond) {
1681      case cc_always:
1682        b(offset);
1683        break;
1684      case eq:
1685        beq(rs, r2, offset);
1686        break;
1687      case ne:
1688        bne(rs, r2, offset);
1689        break;
1690      // Signed comparison.
1691      case greater:
1692        if (r2.is(zero_reg)) {
1693          bgtz(rs, offset);
1694        } else {
1695          slt(scratch, r2, rs);
1696          bne(scratch, zero_reg, offset);
1697        }
1698        break;
1699      case greater_equal:
1700        if (r2.is(zero_reg)) {
1701          bgez(rs, offset);
1702        } else {
1703          slt(scratch, rs, r2);
1704          beq(scratch, zero_reg, offset);
1705        }
1706        break;
1707      case less:
1708        if (r2.is(zero_reg)) {
1709          bltz(rs, offset);
1710        } else {
1711          slt(scratch, rs, r2);
1712          bne(scratch, zero_reg, offset);
1713        }
1714        break;
1715      case less_equal:
1716        if (r2.is(zero_reg)) {
1717          blez(rs, offset);
1718        } else {
1719          slt(scratch, r2, rs);
1720          beq(scratch, zero_reg, offset);
1721        }
1722        break;
1723      // Unsigned comparison.
1724      case Ugreater:
1725        if (r2.is(zero_reg)) {
1726          bgtz(rs, offset);
1727        } else {
1728          sltu(scratch, r2, rs);
1729          bne(scratch, zero_reg, offset);
1730        }
1731        break;
1732      case Ugreater_equal:
1733        if (r2.is(zero_reg)) {
1734          bgez(rs, offset);
1735        } else {
1736          sltu(scratch, rs, r2);
1737          beq(scratch, zero_reg, offset);
1738        }
1739        break;
1740      case Uless:
1741        if (r2.is(zero_reg)) {
1742          // No code needs to be emitted.
1743          return;
1744        } else {
1745          sltu(scratch, rs, r2);
1746          bne(scratch, zero_reg, offset);
1747        }
1748        break;
1749      case Uless_equal:
1750        if (r2.is(zero_reg)) {
1751          b(offset);
1752        } else {
1753          sltu(scratch, r2, rs);
1754          beq(scratch, zero_reg, offset);
1755        }
1756        break;
1757      default:
1758        UNREACHABLE();
1759    }
1760  } else {
1761    // Be careful to always use shifted_branch_offset only just before the
1762    // branch instruction, as the location will be remember for patching the
1763    // target.
1764    switch (cond) {
1765      case cc_always:
1766        b(offset);
1767        break;
1768      case eq:
1769        // We don't want any other register but scratch clobbered.
1770        ASSERT(!scratch.is(rs));
1771        r2 = scratch;
1772        li(r2, rt);
1773        beq(rs, r2, offset);
1774        break;
1775      case ne:
1776        // We don't want any other register but scratch clobbered.
1777        ASSERT(!scratch.is(rs));
1778        r2 = scratch;
1779        li(r2, rt);
1780        bne(rs, r2, offset);
1781        break;
1782      // Signed comparison.
1783      case greater:
1784        if (rt.imm32_ == 0) {
1785          bgtz(rs, offset);
1786        } else {
1787          r2 = scratch;
1788          li(r2, rt);
1789          slt(scratch, r2, rs);
1790          bne(scratch, zero_reg, offset);
1791        }
1792        break;
1793      case greater_equal:
1794        if (rt.imm32_ == 0) {
1795          bgez(rs, offset);
1796        } else if (is_int16(rt.imm32_)) {
1797          slti(scratch, rs, rt.imm32_);
1798          beq(scratch, zero_reg, offset);
1799        } else {
1800          r2 = scratch;
1801          li(r2, rt);
1802          slt(scratch, rs, r2);
1803          beq(scratch, zero_reg, offset);
1804        }
1805        break;
1806      case less:
1807        if (rt.imm32_ == 0) {
1808          bltz(rs, offset);
1809        } else if (is_int16(rt.imm32_)) {
1810          slti(scratch, rs, rt.imm32_);
1811          bne(scratch, zero_reg, offset);
1812        } else {
1813          r2 = scratch;
1814          li(r2, rt);
1815          slt(scratch, rs, r2);
1816          bne(scratch, zero_reg, offset);
1817        }
1818        break;
1819      case less_equal:
1820        if (rt.imm32_ == 0) {
1821          blez(rs, offset);
1822        } else {
1823          r2 = scratch;
1824          li(r2, rt);
1825          slt(scratch, r2, rs);
1826          beq(scratch, zero_reg, offset);
1827       }
1828       break;
1829      // Unsigned comparison.
1830      case Ugreater:
1831        if (rt.imm32_ == 0) {
1832          bgtz(rs, offset);
1833        } else {
1834          r2 = scratch;
1835          li(r2, rt);
1836          sltu(scratch, r2, rs);
1837          bne(scratch, zero_reg, offset);
1838        }
1839        break;
1840      case Ugreater_equal:
1841        if (rt.imm32_ == 0) {
1842          bgez(rs, offset);
1843        } else if (is_int16(rt.imm32_)) {
1844          sltiu(scratch, rs, rt.imm32_);
1845          beq(scratch, zero_reg, offset);
1846        } else {
1847          r2 = scratch;
1848          li(r2, rt);
1849          sltu(scratch, rs, r2);
1850          beq(scratch, zero_reg, offset);
1851        }
1852        break;
1853      case Uless:
1854        if (rt.imm32_ == 0) {
1855          // No code needs to be emitted.
1856          return;
1857        } else if (is_int16(rt.imm32_)) {
1858          sltiu(scratch, rs, rt.imm32_);
1859          bne(scratch, zero_reg, offset);
1860        } else {
1861          r2 = scratch;
1862          li(r2, rt);
1863          sltu(scratch, rs, r2);
1864          bne(scratch, zero_reg, offset);
1865        }
1866        break;
1867      case Uless_equal:
1868        if (rt.imm32_ == 0) {
1869          b(offset);
1870        } else {
1871          r2 = scratch;
1872          li(r2, rt);
1873          sltu(scratch, r2, rs);
1874          beq(scratch, zero_reg, offset);
1875        }
1876        break;
1877      default:
1878        UNREACHABLE();
1879    }
1880  }
1881  // Emit a nop in the branch delay slot if required.
1882  if (bdslot == PROTECT)
1883    nop();
1884}
1885
1886
1887void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1888  // We use branch_offset as an argument for the branch instructions to be sure
1889  // it is called just before generating the branch instruction, as needed.
1890
1891  b(shifted_branch_offset(L, false));
1892
1893  // Emit a nop in the branch delay slot if required.
1894  if (bdslot == PROTECT)
1895    nop();
1896}
1897
1898
1899void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1900                                 const Operand& rt,
1901                                 BranchDelaySlot bdslot) {
1902  BRANCH_ARGS_CHECK(cond, rs, rt);
1903
1904  int32_t offset;
1905  Register r2 = no_reg;
1906  Register scratch = at;
1907  if (rt.is_reg()) {
1908    r2 = rt.rm_;
1909    // Be careful to always use shifted_branch_offset only just before the
1910    // branch instruction, as the location will be remember for patching the
1911    // target.
1912    switch (cond) {
1913      case cc_always:
1914        offset = shifted_branch_offset(L, false);
1915        b(offset);
1916        break;
1917      case eq:
1918        offset = shifted_branch_offset(L, false);
1919        beq(rs, r2, offset);
1920        break;
1921      case ne:
1922        offset = shifted_branch_offset(L, false);
1923        bne(rs, r2, offset);
1924        break;
1925      // Signed comparison.
1926      case greater:
1927        if (r2.is(zero_reg)) {
1928          offset = shifted_branch_offset(L, false);
1929          bgtz(rs, offset);
1930        } else {
1931          slt(scratch, r2, rs);
1932          offset = shifted_branch_offset(L, false);
1933          bne(scratch, zero_reg, offset);
1934        }
1935        break;
1936      case greater_equal:
1937        if (r2.is(zero_reg)) {
1938          offset = shifted_branch_offset(L, false);
1939          bgez(rs, offset);
1940        } else {
1941          slt(scratch, rs, r2);
1942          offset = shifted_branch_offset(L, false);
1943          beq(scratch, zero_reg, offset);
1944        }
1945        break;
1946      case less:
1947        if (r2.is(zero_reg)) {
1948          offset = shifted_branch_offset(L, false);
1949          bltz(rs, offset);
1950        } else {
1951          slt(scratch, rs, r2);
1952          offset = shifted_branch_offset(L, false);
1953          bne(scratch, zero_reg, offset);
1954        }
1955        break;
1956      case less_equal:
1957        if (r2.is(zero_reg)) {
1958          offset = shifted_branch_offset(L, false);
1959          blez(rs, offset);
1960        } else {
1961          slt(scratch, r2, rs);
1962          offset = shifted_branch_offset(L, false);
1963          beq(scratch, zero_reg, offset);
1964        }
1965        break;
1966      // Unsigned comparison.
1967      case Ugreater:
1968        if (r2.is(zero_reg)) {
1969          offset = shifted_branch_offset(L, false);
1970           bgtz(rs, offset);
1971        } else {
1972          sltu(scratch, r2, rs);
1973          offset = shifted_branch_offset(L, false);
1974          bne(scratch, zero_reg, offset);
1975        }
1976        break;
1977      case Ugreater_equal:
1978        if (r2.is(zero_reg)) {
1979          offset = shifted_branch_offset(L, false);
1980          bgez(rs, offset);
1981        } else {
1982          sltu(scratch, rs, r2);
1983          offset = shifted_branch_offset(L, false);
1984          beq(scratch, zero_reg, offset);
1985        }
1986        break;
1987      case Uless:
1988        if (r2.is(zero_reg)) {
1989          // No code needs to be emitted.
1990          return;
1991        } else {
1992          sltu(scratch, rs, r2);
1993          offset = shifted_branch_offset(L, false);
1994          bne(scratch, zero_reg, offset);
1995        }
1996        break;
1997      case Uless_equal:
1998        if (r2.is(zero_reg)) {
1999          offset = shifted_branch_offset(L, false);
2000          b(offset);
2001        } else {
2002          sltu(scratch, r2, rs);
2003          offset = shifted_branch_offset(L, false);
2004          beq(scratch, zero_reg, offset);
2005        }
2006        break;
2007      default:
2008        UNREACHABLE();
2009    }
2010  } else {
2011    // Be careful to always use shifted_branch_offset only just before the
2012    // branch instruction, as the location will be remember for patching the
2013    // target.
2014    switch (cond) {
2015      case cc_always:
2016        offset = shifted_branch_offset(L, false);
2017        b(offset);
2018        break;
2019      case eq:
2020        ASSERT(!scratch.is(rs));
2021        r2 = scratch;
2022        li(r2, rt);
2023        offset = shifted_branch_offset(L, false);
2024        beq(rs, r2, offset);
2025        break;
2026      case ne:
2027        ASSERT(!scratch.is(rs));
2028        r2 = scratch;
2029        li(r2, rt);
2030        offset = shifted_branch_offset(L, false);
2031        bne(rs, r2, offset);
2032        break;
2033      // Signed comparison.
2034      case greater:
2035        if (rt.imm32_ == 0) {
2036          offset = shifted_branch_offset(L, false);
2037          bgtz(rs, offset);
2038        } else {
2039          ASSERT(!scratch.is(rs));
2040          r2 = scratch;
2041          li(r2, rt);
2042          slt(scratch, r2, rs);
2043          offset = shifted_branch_offset(L, false);
2044          bne(scratch, zero_reg, offset);
2045        }
2046        break;
2047      case greater_equal:
2048        if (rt.imm32_ == 0) {
2049          offset = shifted_branch_offset(L, false);
2050          bgez(rs, offset);
2051        } else if (is_int16(rt.imm32_)) {
2052          slti(scratch, rs, rt.imm32_);
2053          offset = shifted_branch_offset(L, false);
2054          beq(scratch, zero_reg, offset);
2055        } else {
2056          ASSERT(!scratch.is(rs));
2057          r2 = scratch;
2058          li(r2, rt);
2059          slt(scratch, rs, r2);
2060          offset = shifted_branch_offset(L, false);
2061          beq(scratch, zero_reg, offset);
2062        }
2063        break;
2064      case less:
2065        if (rt.imm32_ == 0) {
2066          offset = shifted_branch_offset(L, false);
2067          bltz(rs, offset);
2068        } else if (is_int16(rt.imm32_)) {
2069          slti(scratch, rs, rt.imm32_);
2070          offset = shifted_branch_offset(L, false);
2071          bne(scratch, zero_reg, offset);
2072        } else {
2073          ASSERT(!scratch.is(rs));
2074          r2 = scratch;
2075          li(r2, rt);
2076          slt(scratch, rs, r2);
2077          offset = shifted_branch_offset(L, false);
2078          bne(scratch, zero_reg, offset);
2079        }
2080        break;
2081      case less_equal:
2082        if (rt.imm32_ == 0) {
2083          offset = shifted_branch_offset(L, false);
2084          blez(rs, offset);
2085        } else {
2086          ASSERT(!scratch.is(rs));
2087          r2 = scratch;
2088          li(r2, rt);
2089          slt(scratch, r2, rs);
2090          offset = shifted_branch_offset(L, false);
2091          beq(scratch, zero_reg, offset);
2092        }
2093        break;
2094      // Unsigned comparison.
2095      case Ugreater:
2096        if (rt.imm32_ == 0) {
2097          offset = shifted_branch_offset(L, false);
2098          bgtz(rs, offset);
2099        } else {
2100          ASSERT(!scratch.is(rs));
2101          r2 = scratch;
2102          li(r2, rt);
2103          sltu(scratch, r2, rs);
2104          offset = shifted_branch_offset(L, false);
2105          bne(scratch, zero_reg, offset);
2106        }
2107        break;
2108      case Ugreater_equal:
2109        if (rt.imm32_ == 0) {
2110          offset = shifted_branch_offset(L, false);
2111          bgez(rs, offset);
2112        } else if (is_int16(rt.imm32_)) {
2113          sltiu(scratch, rs, rt.imm32_);
2114          offset = shifted_branch_offset(L, false);
2115          beq(scratch, zero_reg, offset);
2116        } else {
2117          ASSERT(!scratch.is(rs));
2118          r2 = scratch;
2119          li(r2, rt);
2120          sltu(scratch, rs, r2);
2121          offset = shifted_branch_offset(L, false);
2122          beq(scratch, zero_reg, offset);
2123        }
2124        break;
2125     case Uless:
2126        if (rt.imm32_ == 0) {
2127          // No code needs to be emitted.
2128          return;
2129        } else if (is_int16(rt.imm32_)) {
2130          sltiu(scratch, rs, rt.imm32_);
2131          offset = shifted_branch_offset(L, false);
2132          bne(scratch, zero_reg, offset);
2133        } else {
2134          ASSERT(!scratch.is(rs));
2135          r2 = scratch;
2136          li(r2, rt);
2137          sltu(scratch, rs, r2);
2138          offset = shifted_branch_offset(L, false);
2139          bne(scratch, zero_reg, offset);
2140        }
2141        break;
2142      case Uless_equal:
2143        if (rt.imm32_ == 0) {
2144          offset = shifted_branch_offset(L, false);
2145          b(offset);
2146        } else {
2147          ASSERT(!scratch.is(rs));
2148          r2 = scratch;
2149          li(r2, rt);
2150          sltu(scratch, r2, rs);
2151          offset = shifted_branch_offset(L, false);
2152          beq(scratch, zero_reg, offset);
2153        }
2154        break;
2155      default:
2156        UNREACHABLE();
2157    }
2158  }
2159  // Check that offset could actually hold on an int16_t.
2160  ASSERT(is_int16(offset));
2161  // Emit a nop in the branch delay slot if required.
2162  if (bdslot == PROTECT)
2163    nop();
2164}
2165
2166
2167void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2168  BranchAndLinkShort(offset, bdslot);
2169}
2170
2171
2172void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2173                                   const Operand& rt,
2174                                   BranchDelaySlot bdslot) {
2175  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2176}
2177
2178
2179void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2180  if (L->is_bound()) {
2181    if (is_near(L)) {
2182      BranchAndLinkShort(L, bdslot);
2183    } else {
2184      Jalr(L, bdslot);
2185    }
2186  } else {
2187    if (is_trampoline_emitted()) {
2188      Jalr(L, bdslot);
2189    } else {
2190      BranchAndLinkShort(L, bdslot);
2191    }
2192  }
2193}
2194
2195
2196void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2197                                   const Operand& rt,
2198                                   BranchDelaySlot bdslot) {
2199  if (L->is_bound()) {
2200    if (is_near(L)) {
2201      BranchAndLinkShort(L, cond, rs, rt, bdslot);
2202    } else {
2203      Label skip;
2204      Condition neg_cond = NegateCondition(cond);
2205      BranchShort(&skip, neg_cond, rs, rt);
2206      Jalr(L, bdslot);
2207      bind(&skip);
2208    }
2209  } else {
2210    if (is_trampoline_emitted()) {
2211      Label skip;
2212      Condition neg_cond = NegateCondition(cond);
2213      BranchShort(&skip, neg_cond, rs, rt);
2214      Jalr(L, bdslot);
2215      bind(&skip);
2216    } else {
2217      BranchAndLinkShort(L, cond, rs, rt, bdslot);
2218    }
2219  }
2220}
2221
2222
2223// We need to use a bgezal or bltzal, but they can't be used directly with the
2224// slt instructions. We could use sub or add instead but we would miss overflow
2225// cases, so we keep slt and add an intermediate third instruction.
2226void MacroAssembler::BranchAndLinkShort(int16_t offset,
2227                                        BranchDelaySlot bdslot) {
2228  bal(offset);
2229
2230  // Emit a nop in the branch delay slot if required.
2231  if (bdslot == PROTECT)
2232    nop();
2233}
2234
2235
2236void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2237                                        Register rs, const Operand& rt,
2238                                        BranchDelaySlot bdslot) {
2239  BRANCH_ARGS_CHECK(cond, rs, rt);
2240  Register r2 = no_reg;
2241  Register scratch = at;
2242
2243  if (rt.is_reg()) {
2244    r2 = rt.rm_;
2245  } else if (cond != cc_always) {
2246    r2 = scratch;
2247    li(r2, rt);
2248  }
2249
2250  switch (cond) {
2251    case cc_always:
2252      bal(offset);
2253      break;
2254    case eq:
2255      bne(rs, r2, 2);
2256      nop();
2257      bal(offset);
2258      break;
2259    case ne:
2260      beq(rs, r2, 2);
2261      nop();
2262      bal(offset);
2263      break;
2264
2265    // Signed comparison.
2266    case greater:
2267      slt(scratch, r2, rs);
2268      addiu(scratch, scratch, -1);
2269      bgezal(scratch, offset);
2270      break;
2271    case greater_equal:
2272      slt(scratch, rs, r2);
2273      addiu(scratch, scratch, -1);
2274      bltzal(scratch, offset);
2275      break;
2276    case less:
2277      slt(scratch, rs, r2);
2278      addiu(scratch, scratch, -1);
2279      bgezal(scratch, offset);
2280      break;
2281    case less_equal:
2282      slt(scratch, r2, rs);
2283      addiu(scratch, scratch, -1);
2284      bltzal(scratch, offset);
2285      break;
2286
2287    // Unsigned comparison.
2288    case Ugreater:
2289      sltu(scratch, r2, rs);
2290      addiu(scratch, scratch, -1);
2291      bgezal(scratch, offset);
2292      break;
2293    case Ugreater_equal:
2294      sltu(scratch, rs, r2);
2295      addiu(scratch, scratch, -1);
2296      bltzal(scratch, offset);
2297      break;
2298    case Uless:
2299      sltu(scratch, rs, r2);
2300      addiu(scratch, scratch, -1);
2301      bgezal(scratch, offset);
2302      break;
2303    case Uless_equal:
2304      sltu(scratch, r2, rs);
2305      addiu(scratch, scratch, -1);
2306      bltzal(scratch, offset);
2307      break;
2308
2309    default:
2310      UNREACHABLE();
2311  }
2312  // Emit a nop in the branch delay slot if required.
2313  if (bdslot == PROTECT)
2314    nop();
2315}
2316
2317
2318void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2319  bal(shifted_branch_offset(L, false));
2320
2321  // Emit a nop in the branch delay slot if required.
2322  if (bdslot == PROTECT)
2323    nop();
2324}
2325
2326
2327void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2328                                        const Operand& rt,
2329                                        BranchDelaySlot bdslot) {
2330  BRANCH_ARGS_CHECK(cond, rs, rt);
2331
2332  int32_t offset;
2333  Register r2 = no_reg;
2334  Register scratch = at;
2335  if (rt.is_reg()) {
2336    r2 = rt.rm_;
2337  } else if (cond != cc_always) {
2338    r2 = scratch;
2339    li(r2, rt);
2340  }
2341
2342  switch (cond) {
2343    case cc_always:
2344      offset = shifted_branch_offset(L, false);
2345      bal(offset);
2346      break;
2347    case eq:
2348      bne(rs, r2, 2);
2349      nop();
2350      offset = shifted_branch_offset(L, false);
2351      bal(offset);
2352      break;
2353    case ne:
2354      beq(rs, r2, 2);
2355      nop();
2356      offset = shifted_branch_offset(L, false);
2357      bal(offset);
2358      break;
2359
2360    // Signed comparison.
2361    case greater:
2362      slt(scratch, r2, rs);
2363      addiu(scratch, scratch, -1);
2364      offset = shifted_branch_offset(L, false);
2365      bgezal(scratch, offset);
2366      break;
2367    case greater_equal:
2368      slt(scratch, rs, r2);
2369      addiu(scratch, scratch, -1);
2370      offset = shifted_branch_offset(L, false);
2371      bltzal(scratch, offset);
2372      break;
2373    case less:
2374      slt(scratch, rs, r2);
2375      addiu(scratch, scratch, -1);
2376      offset = shifted_branch_offset(L, false);
2377      bgezal(scratch, offset);
2378      break;
2379    case less_equal:
2380      slt(scratch, r2, rs);
2381      addiu(scratch, scratch, -1);
2382      offset = shifted_branch_offset(L, false);
2383      bltzal(scratch, offset);
2384      break;
2385
2386    // Unsigned comparison.
2387    case Ugreater:
2388      sltu(scratch, r2, rs);
2389      addiu(scratch, scratch, -1);
2390      offset = shifted_branch_offset(L, false);
2391      bgezal(scratch, offset);
2392      break;
2393    case Ugreater_equal:
2394      sltu(scratch, rs, r2);
2395      addiu(scratch, scratch, -1);
2396      offset = shifted_branch_offset(L, false);
2397      bltzal(scratch, offset);
2398      break;
2399    case Uless:
2400      sltu(scratch, rs, r2);
2401      addiu(scratch, scratch, -1);
2402      offset = shifted_branch_offset(L, false);
2403      bgezal(scratch, offset);
2404      break;
2405    case Uless_equal:
2406      sltu(scratch, r2, rs);
2407      addiu(scratch, scratch, -1);
2408      offset = shifted_branch_offset(L, false);
2409      bltzal(scratch, offset);
2410      break;
2411
2412    default:
2413      UNREACHABLE();
2414  }
2415
2416  // Check that offset could actually hold on an int16_t.
2417  ASSERT(is_int16(offset));
2418
2419  // Emit a nop in the branch delay slot if required.
2420  if (bdslot == PROTECT)
2421    nop();
2422}
2423
2424
2425void MacroAssembler::Jump(Register target,
2426                          Condition cond,
2427                          Register rs,
2428                          const Operand& rt,
2429                          BranchDelaySlot bd) {
2430  BlockTrampolinePoolScope block_trampoline_pool(this);
2431  if (cond == cc_always) {
2432    jr(target);
2433  } else {
2434    BRANCH_ARGS_CHECK(cond, rs, rt);
2435    Branch(2, NegateCondition(cond), rs, rt);
2436    jr(target);
2437  }
2438  // Emit a nop in the branch delay slot if required.
2439  if (bd == PROTECT)
2440    nop();
2441}
2442
2443
2444void MacroAssembler::Jump(intptr_t target,
2445                          RelocInfo::Mode rmode,
2446                          Condition cond,
2447                          Register rs,
2448                          const Operand& rt,
2449                          BranchDelaySlot bd) {
2450  Label skip;
2451  if (cond != cc_always) {
2452    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2453  }
2454  // The first instruction of 'li' may be placed in the delay slot.
2455  // This is not an issue, t9 is expected to be clobbered anyway.
2456  li(t9, Operand(target, rmode));
2457  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2458  bind(&skip);
2459}
2460
2461
2462void MacroAssembler::Jump(Address target,
2463                          RelocInfo::Mode rmode,
2464                          Condition cond,
2465                          Register rs,
2466                          const Operand& rt,
2467                          BranchDelaySlot bd) {
2468  ASSERT(!RelocInfo::IsCodeTarget(rmode));
2469  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2470}
2471
2472
2473void MacroAssembler::Jump(Handle<Code> code,
2474                          RelocInfo::Mode rmode,
2475                          Condition cond,
2476                          Register rs,
2477                          const Operand& rt,
2478                          BranchDelaySlot bd) {
2479  ASSERT(RelocInfo::IsCodeTarget(rmode));
2480  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2481}
2482
2483
2484int MacroAssembler::CallSize(Register target,
2485                             Condition cond,
2486                             Register rs,
2487                             const Operand& rt,
2488                             BranchDelaySlot bd) {
2489  int size = 0;
2490
2491  if (cond == cc_always) {
2492    size += 1;
2493  } else {
2494    size += 3;
2495  }
2496
2497  if (bd == PROTECT)
2498    size += 1;
2499
2500  return size * kInstrSize;
2501}
2502
2503
2504// Note: To call gcc-compiled C code on mips, you must call thru t9.
2505void MacroAssembler::Call(Register target,
2506                          Condition cond,
2507                          Register rs,
2508                          const Operand& rt,
2509                          BranchDelaySlot bd) {
2510  BlockTrampolinePoolScope block_trampoline_pool(this);
2511  Label start;
2512  bind(&start);
2513  if (cond == cc_always) {
2514    jalr(target);
2515  } else {
2516    BRANCH_ARGS_CHECK(cond, rs, rt);
2517    Branch(2, NegateCondition(cond), rs, rt);
2518    jalr(target);
2519  }
2520  // Emit a nop in the branch delay slot if required.
2521  if (bd == PROTECT)
2522    nop();
2523
2524  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2525            SizeOfCodeGeneratedSince(&start));
2526}
2527
2528
2529int MacroAssembler::CallSize(Address target,
2530                             RelocInfo::Mode rmode,
2531                             Condition cond,
2532                             Register rs,
2533                             const Operand& rt,
2534                             BranchDelaySlot bd) {
2535  int size = CallSize(t9, cond, rs, rt, bd);
2536  return size + 2 * kInstrSize;
2537}
2538
2539
2540void MacroAssembler::Call(Address target,
2541                          RelocInfo::Mode rmode,
2542                          Condition cond,
2543                          Register rs,
2544                          const Operand& rt,
2545                          BranchDelaySlot bd) {
2546  BlockTrampolinePoolScope block_trampoline_pool(this);
2547  Label start;
2548  bind(&start);
2549  int32_t target_int = reinterpret_cast<int32_t>(target);
2550  // Must record previous source positions before the
2551  // li() generates a new code target.
2552  positions_recorder()->WriteRecordedPositions();
2553  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2554  Call(t9, cond, rs, rt, bd);
2555  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2556            SizeOfCodeGeneratedSince(&start));
2557}
2558
2559
2560int MacroAssembler::CallSize(Handle<Code> code,
2561                             RelocInfo::Mode rmode,
2562                             unsigned ast_id,
2563                             Condition cond,
2564                             Register rs,
2565                             const Operand& rt,
2566                             BranchDelaySlot bd) {
2567  return CallSize(reinterpret_cast<Address>(code.location()),
2568      rmode, cond, rs, rt, bd);
2569}
2570
2571
2572void MacroAssembler::Call(Handle<Code> code,
2573                          RelocInfo::Mode rmode,
2574                          unsigned ast_id,
2575                          Condition cond,
2576                          Register rs,
2577                          const Operand& rt,
2578                          BranchDelaySlot bd) {
2579  BlockTrampolinePoolScope block_trampoline_pool(this);
2580  Label start;
2581  bind(&start);
2582  ASSERT(RelocInfo::IsCodeTarget(rmode));
2583  if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
2584    SetRecordedAstId(ast_id);
2585    rmode = RelocInfo::CODE_TARGET_WITH_ID;
2586  }
2587  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2588  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2589            SizeOfCodeGeneratedSince(&start));
2590}
2591
2592
2593void MacroAssembler::Ret(Condition cond,
2594                         Register rs,
2595                         const Operand& rt,
2596                         BranchDelaySlot bd) {
2597  Jump(ra, cond, rs, rt, bd);
2598}
2599
2600
2601void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2602  BlockTrampolinePoolScope block_trampoline_pool(this);
2603
2604  uint32_t imm28;
2605  imm28 = jump_address(L);
2606  imm28 &= kImm28Mask;
2607  { BlockGrowBufferScope block_buf_growth(this);
2608    // Buffer growth (and relocation) must be blocked for internal references
2609    // until associated instructions are emitted and available to be patched.
2610    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2611    j(imm28);
2612  }
2613  // Emit a nop in the branch delay slot if required.
2614  if (bdslot == PROTECT)
2615    nop();
2616}
2617
2618
2619void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2620  BlockTrampolinePoolScope block_trampoline_pool(this);
2621
2622  uint32_t imm32;
2623  imm32 = jump_address(L);
2624  { BlockGrowBufferScope block_buf_growth(this);
2625    // Buffer growth (and relocation) must be blocked for internal references
2626    // until associated instructions are emitted and available to be patched.
2627    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2628    lui(at, (imm32 & kHiMask) >> kLuiShift);
2629    ori(at, at, (imm32 & kImm16Mask));
2630  }
2631  jr(at);
2632
2633  // Emit a nop in the branch delay slot if required.
2634  if (bdslot == PROTECT)
2635    nop();
2636}
2637
2638
2639void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2640  BlockTrampolinePoolScope block_trampoline_pool(this);
2641
2642  uint32_t imm32;
2643  imm32 = jump_address(L);
2644  { BlockGrowBufferScope block_buf_growth(this);
2645    // Buffer growth (and relocation) must be blocked for internal references
2646    // until associated instructions are emitted and available to be patched.
2647    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2648    lui(at, (imm32 & kHiMask) >> kLuiShift);
2649    ori(at, at, (imm32 & kImm16Mask));
2650  }
2651  jalr(at);
2652
2653  // Emit a nop in the branch delay slot if required.
2654  if (bdslot == PROTECT)
2655    nop();
2656}
2657
2658void MacroAssembler::DropAndRet(int drop) {
2659  Ret(USE_DELAY_SLOT);
2660  addiu(sp, sp, drop * kPointerSize);
2661}
2662
2663void MacroAssembler::DropAndRet(int drop,
2664                                Condition cond,
2665                                Register r1,
2666                                const Operand& r2) {
2667  // Both Drop and Ret need to be conditional.
2668  Label skip;
2669  if (cond != cc_always) {
2670    Branch(&skip, NegateCondition(cond), r1, r2);
2671  }
2672
2673  Drop(drop);
2674  Ret();
2675
2676  if (cond != cc_always) {
2677    bind(&skip);
2678  }
2679}
2680
2681
2682void MacroAssembler::Drop(int count,
2683                          Condition cond,
2684                          Register reg,
2685                          const Operand& op) {
2686  if (count <= 0) {
2687    return;
2688  }
2689
2690  Label skip;
2691
2692  if (cond != al) {
2693     Branch(&skip, NegateCondition(cond), reg, op);
2694  }
2695
2696  addiu(sp, sp, count * kPointerSize);
2697
2698  if (cond != al) {
2699    bind(&skip);
2700  }
2701}
2702
2703
2704
2705void MacroAssembler::Swap(Register reg1,
2706                          Register reg2,
2707                          Register scratch) {
2708  if (scratch.is(no_reg)) {
2709    Xor(reg1, reg1, Operand(reg2));
2710    Xor(reg2, reg2, Operand(reg1));
2711    Xor(reg1, reg1, Operand(reg2));
2712  } else {
2713    mov(scratch, reg1);
2714    mov(reg1, reg2);
2715    mov(reg2, scratch);
2716  }
2717}
2718
2719
2720void MacroAssembler::Call(Label* target) {
2721  BranchAndLink(target);
2722}
2723
2724
2725void MacroAssembler::Push(Handle<Object> handle) {
2726  li(at, Operand(handle));
2727  push(at);
2728}
2729
2730
2731#ifdef ENABLE_DEBUGGER_SUPPORT
2732
2733void MacroAssembler::DebugBreak() {
2734  PrepareCEntryArgs(0);
2735  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2736  CEntryStub ces(1);
2737  ASSERT(AllowThisStubCall(&ces));
2738  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2739}
2740
2741#endif  // ENABLE_DEBUGGER_SUPPORT
2742
2743
2744// ---------------------------------------------------------------------------
2745// Exception handling.
2746
2747void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2748                                    int handler_index) {
2749  // Adjust this code if not the case.
2750  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2751  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2752  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2753  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2754  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2755  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2756
2757  // For the JSEntry handler, we must preserve a0-a3 and s0.
2758  // t1-t3 are available. We will build up the handler from the bottom by
2759  // pushing on the stack.
2760  // Set up the code object (t1) and the state (t2) for pushing.
2761  unsigned state =
2762      StackHandler::IndexField::encode(handler_index) |
2763      StackHandler::KindField::encode(kind);
2764  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2765  li(t2, Operand(state));
2766
2767  // Push the frame pointer, context, state, and code object.
2768  if (kind == StackHandler::JS_ENTRY) {
2769    ASSERT_EQ(Smi::FromInt(0), 0);
2770    // The second zero_reg indicates no context.
2771    // The first zero_reg is the NULL frame pointer.
2772    // The operands are reversed to match the order of MultiPush/Pop.
2773    Push(zero_reg, zero_reg, t2, t1);
2774  } else {
2775    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2776  }
2777
2778  // Link the current handler as the next handler.
2779  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2780  lw(t1, MemOperand(t2));
2781  push(t1);
2782  // Set this new handler as the current one.
2783  sw(sp, MemOperand(t2));
2784}
2785
2786
2787void MacroAssembler::PopTryHandler() {
2788  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2789  pop(a1);
2790  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2791  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2792  sw(a1, MemOperand(at));
2793}
2794
2795
2796void MacroAssembler::JumpToHandlerEntry() {
2797  // Compute the handler entry address and jump to it.  The handler table is
2798  // a fixed array of (smi-tagged) code offsets.
2799  // v0 = exception, a1 = code object, a2 = state.
2800  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
2801  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2802  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
2803  sll(a2, a2, kPointerSizeLog2);
2804  Addu(a2, a3, a2);
2805  lw(a2, MemOperand(a2));  // Smi-tagged offset.
2806  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
2807  sra(t9, a2, kSmiTagSize);
2808  Addu(t9, t9, a1);
2809  Jump(t9);  // Jump.
2810}
2811
2812
2813void MacroAssembler::Throw(Register value) {
2814  // Adjust this code if not the case.
2815  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2816  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2817  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2818  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2819  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2820  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2821
2822  // The exception is expected in v0.
2823  Move(v0, value);
2824
2825  // Drop the stack pointer to the top of the top handler.
2826  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2827                                   isolate())));
2828  lw(sp, MemOperand(a3));
2829
2830  // Restore the next handler.
2831  pop(a2);
2832  sw(a2, MemOperand(a3));
2833
2834  // Get the code object (a1) and state (a2).  Restore the context and frame
2835  // pointer.
2836  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2837
2838  // If the handler is a JS frame, restore the context to the frame.
2839  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2840  // or cp.
2841  Label done;
2842  Branch(&done, eq, cp, Operand(zero_reg));
2843  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2844  bind(&done);
2845
2846  JumpToHandlerEntry();
2847}
2848
2849
2850void MacroAssembler::ThrowUncatchable(Register value) {
2851  // Adjust this code if not the case.
2852  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2853  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2854  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2855  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2856  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2857  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2858
2859  // The exception is expected in v0.
2860  if (!value.is(v0)) {
2861    mov(v0, value);
2862  }
2863  // Drop the stack pointer to the top of the top stack handler.
2864  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2865  lw(sp, MemOperand(a3));
2866
2867  // Unwind the handlers until the ENTRY handler is found.
2868  Label fetch_next, check_kind;
2869  jmp(&check_kind);
2870  bind(&fetch_next);
2871  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2872
2873  bind(&check_kind);
2874  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2875  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2876  And(a2, a2, Operand(StackHandler::KindField::kMask));
2877  Branch(&fetch_next, ne, a2, Operand(zero_reg));
2878
2879  // Set the top handler address to next handler past the top ENTRY handler.
2880  pop(a2);
2881  sw(a2, MemOperand(a3));
2882
2883  // Get the code object (a1) and state (a2).  Clear the context and frame
2884  // pointer (0 was saved in the handler).
2885  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2886
2887  JumpToHandlerEntry();
2888}
2889
2890
2891void MacroAssembler::AllocateInNewSpace(int object_size,
2892                                        Register result,
2893                                        Register scratch1,
2894                                        Register scratch2,
2895                                        Label* gc_required,
2896                                        AllocationFlags flags) {
2897  if (!FLAG_inline_new) {
2898    if (emit_debug_code()) {
2899      // Trash the registers to simulate an allocation failure.
2900      li(result, 0x7091);
2901      li(scratch1, 0x7191);
2902      li(scratch2, 0x7291);
2903    }
2904    jmp(gc_required);
2905    return;
2906  }
2907
2908  ASSERT(!result.is(scratch1));
2909  ASSERT(!result.is(scratch2));
2910  ASSERT(!scratch1.is(scratch2));
2911  ASSERT(!scratch1.is(t9));
2912  ASSERT(!scratch2.is(t9));
2913  ASSERT(!result.is(t9));
2914
2915  // Make object size into bytes.
2916  if ((flags & SIZE_IN_WORDS) != 0) {
2917    object_size *= kPointerSize;
2918  }
2919  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2920
2921  // Check relative positions of allocation top and limit addresses.
2922  // ARM adds additional checks to make sure the ldm instruction can be
2923  // used. On MIPS we don't have ldm so we don't need additional checks either.
2924  ExternalReference new_space_allocation_top =
2925      ExternalReference::new_space_allocation_top_address(isolate());
2926  ExternalReference new_space_allocation_limit =
2927      ExternalReference::new_space_allocation_limit_address(isolate());
2928  intptr_t top   =
2929      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
2930  intptr_t limit =
2931      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
2932  ASSERT((limit - top) == kPointerSize);
2933
2934  // Set up allocation top address and object size registers.
2935  Register topaddr = scratch1;
2936  Register obj_size_reg = scratch2;
2937  li(topaddr, Operand(new_space_allocation_top));
2938  li(obj_size_reg, Operand(object_size));
2939
2940  // This code stores a temporary value in t9.
2941  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2942    // Load allocation top into result and allocation limit into t9.
2943    lw(result, MemOperand(topaddr));
2944    lw(t9, MemOperand(topaddr, kPointerSize));
2945  } else {
2946    if (emit_debug_code()) {
2947      // Assert that result actually contains top on entry. t9 is used
2948      // immediately below so this use of t9 does not cause difference with
2949      // respect to register content between debug and release mode.
2950      lw(t9, MemOperand(topaddr));
2951      Check(eq, "Unexpected allocation top", result, Operand(t9));
2952    }
2953    // Load allocation limit into t9. Result already contains allocation top.
2954    lw(t9, MemOperand(topaddr, limit - top));
2955  }
2956
2957  // Calculate new top and bail out if new space is exhausted. Use result
2958  // to calculate the new top.
2959  Addu(scratch2, result, Operand(obj_size_reg));
2960  Branch(gc_required, Ugreater, scratch2, Operand(t9));
2961  sw(scratch2, MemOperand(topaddr));
2962
2963  // Tag object if requested.
2964  if ((flags & TAG_OBJECT) != 0) {
2965    Addu(result, result, Operand(kHeapObjectTag));
2966  }
2967}
2968
2969
2970void MacroAssembler::AllocateInNewSpace(Register object_size,
2971                                        Register result,
2972                                        Register scratch1,
2973                                        Register scratch2,
2974                                        Label* gc_required,
2975                                        AllocationFlags flags) {
2976  if (!FLAG_inline_new) {
2977    if (emit_debug_code()) {
2978      // Trash the registers to simulate an allocation failure.
2979      li(result, 0x7091);
2980      li(scratch1, 0x7191);
2981      li(scratch2, 0x7291);
2982    }
2983    jmp(gc_required);
2984    return;
2985  }
2986
2987  ASSERT(!result.is(scratch1));
2988  ASSERT(!result.is(scratch2));
2989  ASSERT(!scratch1.is(scratch2));
2990  ASSERT(!object_size.is(t9));
2991  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2992
2993  // Check relative positions of allocation top and limit addresses.
2994  // ARM adds additional checks to make sure the ldm instruction can be
2995  // used. On MIPS we don't have ldm so we don't need additional checks either.
2996  ExternalReference new_space_allocation_top =
2997      ExternalReference::new_space_allocation_top_address(isolate());
2998  ExternalReference new_space_allocation_limit =
2999      ExternalReference::new_space_allocation_limit_address(isolate());
3000  intptr_t top   =
3001      reinterpret_cast<intptr_t>(new_space_allocation_top.address());
3002  intptr_t limit =
3003      reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
3004  ASSERT((limit - top) == kPointerSize);
3005
3006  // Set up allocation top address and object size registers.
3007  Register topaddr = scratch1;
3008  li(topaddr, Operand(new_space_allocation_top));
3009
3010  // This code stores a temporary value in t9.
3011  if ((flags & RESULT_CONTAINS_TOP) == 0) {
3012    // Load allocation top into result and allocation limit into t9.
3013    lw(result, MemOperand(topaddr));
3014    lw(t9, MemOperand(topaddr, kPointerSize));
3015  } else {
3016    if (emit_debug_code()) {
3017      // Assert that result actually contains top on entry. t9 is used
3018      // immediately below so this use of t9 does not cause difference with
3019      // respect to register content between debug and release mode.
3020      lw(t9, MemOperand(topaddr));
3021      Check(eq, "Unexpected allocation top", result, Operand(t9));
3022    }
3023    // Load allocation limit into t9. Result already contains allocation top.
3024    lw(t9, MemOperand(topaddr, limit - top));
3025  }
3026
3027  // Calculate new top and bail out if new space is exhausted. Use result
3028  // to calculate the new top. Object size may be in words so a shift is
3029  // required to get the number of bytes.
3030  if ((flags & SIZE_IN_WORDS) != 0) {
3031    sll(scratch2, object_size, kPointerSizeLog2);
3032    Addu(scratch2, result, scratch2);
3033  } else {
3034    Addu(scratch2, result, Operand(object_size));
3035  }
3036  Branch(gc_required, Ugreater, scratch2, Operand(t9));
3037
3038  // Update allocation top. result temporarily holds the new top.
3039  if (emit_debug_code()) {
3040    And(t9, scratch2, Operand(kObjectAlignmentMask));
3041    Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
3042  }
3043  sw(scratch2, MemOperand(topaddr));
3044
3045  // Tag object if requested.
3046  if ((flags & TAG_OBJECT) != 0) {
3047    Addu(result, result, Operand(kHeapObjectTag));
3048  }
3049}
3050
3051
3052void MacroAssembler::UndoAllocationInNewSpace(Register object,
3053                                              Register scratch) {
3054  ExternalReference new_space_allocation_top =
3055      ExternalReference::new_space_allocation_top_address(isolate());
3056
3057  // Make sure the object has no tag before resetting top.
3058  And(object, object, Operand(~kHeapObjectTagMask));
3059#ifdef DEBUG
3060  // Check that the object un-allocated is below the current top.
3061  li(scratch, Operand(new_space_allocation_top));
3062  lw(scratch, MemOperand(scratch));
3063  Check(less, "Undo allocation of non allocated memory",
3064      object, Operand(scratch));
3065#endif
3066  // Write the address of the object to un-allocate as the current top.
3067  li(scratch, Operand(new_space_allocation_top));
3068  sw(object, MemOperand(scratch));
3069}
3070
3071
3072void MacroAssembler::AllocateTwoByteString(Register result,
3073                                           Register length,
3074                                           Register scratch1,
3075                                           Register scratch2,
3076                                           Register scratch3,
3077                                           Label* gc_required) {
3078  // Calculate the number of bytes needed for the characters in the string while
3079  // observing object alignment.
3080  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3081  sll(scratch1, length, 1);  // Length in bytes, not chars.
3082  addiu(scratch1, scratch1,
3083       kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3084  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3085
3086  // Allocate two-byte string in new space.
3087  AllocateInNewSpace(scratch1,
3088                     result,
3089                     scratch2,
3090                     scratch3,
3091                     gc_required,
3092                     TAG_OBJECT);
3093
3094  // Set the map, length and hash field.
3095  InitializeNewString(result,
3096                      length,
3097                      Heap::kStringMapRootIndex,
3098                      scratch1,
3099                      scratch2);
3100}
3101
3102
3103void MacroAssembler::AllocateAsciiString(Register result,
3104                                         Register length,
3105                                         Register scratch1,
3106                                         Register scratch2,
3107                                         Register scratch3,
3108                                         Label* gc_required) {
3109  // Calculate the number of bytes needed for the characters in the string
3110  // while observing object alignment.
3111  ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
3112  ASSERT(kCharSize == 1);
3113  addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
3114  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3115
3116  // Allocate ASCII string in new space.
3117  AllocateInNewSpace(scratch1,
3118                     result,
3119                     scratch2,
3120                     scratch3,
3121                     gc_required,
3122                     TAG_OBJECT);
3123
3124  // Set the map, length and hash field.
3125  InitializeNewString(result,
3126                      length,
3127                      Heap::kAsciiStringMapRootIndex,
3128                      scratch1,
3129                      scratch2);
3130}
3131
3132
3133void MacroAssembler::AllocateTwoByteConsString(Register result,
3134                                               Register length,
3135                                               Register scratch1,
3136                                               Register scratch2,
3137                                               Label* gc_required) {
3138  AllocateInNewSpace(ConsString::kSize,
3139                     result,
3140                     scratch1,
3141                     scratch2,
3142                     gc_required,
3143                     TAG_OBJECT);
3144  InitializeNewString(result,
3145                      length,
3146                      Heap::kConsStringMapRootIndex,
3147                      scratch1,
3148                      scratch2);
3149}
3150
3151
3152void MacroAssembler::AllocateAsciiConsString(Register result,
3153                                             Register length,
3154                                             Register scratch1,
3155                                             Register scratch2,
3156                                             Label* gc_required) {
3157  AllocateInNewSpace(ConsString::kSize,
3158                     result,
3159                     scratch1,
3160                     scratch2,
3161                     gc_required,
3162                     TAG_OBJECT);
3163  InitializeNewString(result,
3164                      length,
3165                      Heap::kConsAsciiStringMapRootIndex,
3166                      scratch1,
3167                      scratch2);
3168}
3169
3170
3171void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3172                                                 Register length,
3173                                                 Register scratch1,
3174                                                 Register scratch2,
3175                                                 Label* gc_required) {
3176  AllocateInNewSpace(SlicedString::kSize,
3177                     result,
3178                     scratch1,
3179                     scratch2,
3180                     gc_required,
3181                     TAG_OBJECT);
3182
3183  InitializeNewString(result,
3184                      length,
3185                      Heap::kSlicedStringMapRootIndex,
3186                      scratch1,
3187                      scratch2);
3188}
3189
3190
3191void MacroAssembler::AllocateAsciiSlicedString(Register result,
3192                                               Register length,
3193                                               Register scratch1,
3194                                               Register scratch2,
3195                                               Label* gc_required) {
3196  AllocateInNewSpace(SlicedString::kSize,
3197                     result,
3198                     scratch1,
3199                     scratch2,
3200                     gc_required,
3201                     TAG_OBJECT);
3202
3203  InitializeNewString(result,
3204                      length,
3205                      Heap::kSlicedAsciiStringMapRootIndex,
3206                      scratch1,
3207                      scratch2);
3208}
3209
3210
3211// Allocates a heap number or jumps to the label if the young space is full and
3212// a scavenge is needed.
3213void MacroAssembler::AllocateHeapNumber(Register result,
3214                                        Register scratch1,
3215                                        Register scratch2,
3216                                        Register heap_number_map,
3217                                        Label* need_gc) {
3218  // Allocate an object in the heap for the heap number and tag it as a heap
3219  // object.
3220  AllocateInNewSpace(HeapNumber::kSize,
3221                     result,
3222                     scratch1,
3223                     scratch2,
3224                     need_gc,
3225                     TAG_OBJECT);
3226
3227  // Store heap number map in the allocated object.
3228  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3229  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3230}
3231
3232
3233void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3234                                                 FPURegister value,
3235                                                 Register scratch1,
3236                                                 Register scratch2,
3237                                                 Label* gc_required) {
3238  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3239  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3240  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3241}
3242
3243
3244// Copies a fixed number of fields of heap objects from src to dst.
3245void MacroAssembler::CopyFields(Register dst,
3246                                Register src,
3247                                RegList temps,
3248                                int field_count) {
3249  ASSERT((temps & dst.bit()) == 0);
3250  ASSERT((temps & src.bit()) == 0);
3251  // Primitive implementation using only one temporary register.
3252
3253  Register tmp = no_reg;
3254  // Find a temp register in temps list.
3255  for (int i = 0; i < kNumRegisters; i++) {
3256    if ((temps & (1 << i)) != 0) {
3257      tmp.code_ = i;
3258      break;
3259    }
3260  }
3261  ASSERT(!tmp.is(no_reg));
3262
3263  for (int i = 0; i < field_count; i++) {
3264    lw(tmp, FieldMemOperand(src, i * kPointerSize));
3265    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
3266  }
3267}
3268
3269
3270void MacroAssembler::CopyBytes(Register src,
3271                               Register dst,
3272                               Register length,
3273                               Register scratch) {
3274  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3275
3276  // Align src before copying in word size chunks.
3277  bind(&align_loop);
3278  Branch(&done, eq, length, Operand(zero_reg));
3279  bind(&align_loop_1);
3280  And(scratch, src, kPointerSize - 1);
3281  Branch(&word_loop, eq, scratch, Operand(zero_reg));
3282  lbu(scratch, MemOperand(src));
3283  Addu(src, src, 1);
3284  sb(scratch, MemOperand(dst));
3285  Addu(dst, dst, 1);
3286  Subu(length, length, Operand(1));
3287  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3288
3289  // Copy bytes in word size chunks.
3290  bind(&word_loop);
3291  if (emit_debug_code()) {
3292    And(scratch, src, kPointerSize - 1);
3293    Assert(eq, "Expecting alignment for CopyBytes",
3294        scratch, Operand(zero_reg));
3295  }
3296  Branch(&byte_loop, lt, length, Operand(kPointerSize));
3297  lw(scratch, MemOperand(src));
3298  Addu(src, src, kPointerSize);
3299
3300  // TODO(kalmard) check if this can be optimized to use sw in most cases.
3301  // Can't use unaligned access - copy byte by byte.
3302  sb(scratch, MemOperand(dst, 0));
3303  srl(scratch, scratch, 8);
3304  sb(scratch, MemOperand(dst, 1));
3305  srl(scratch, scratch, 8);
3306  sb(scratch, MemOperand(dst, 2));
3307  srl(scratch, scratch, 8);
3308  sb(scratch, MemOperand(dst, 3));
3309  Addu(dst, dst, 4);
3310
3311  Subu(length, length, Operand(kPointerSize));
3312  Branch(&word_loop);
3313
3314  // Copy the last bytes if any left.
3315  bind(&byte_loop);
3316  Branch(&done, eq, length, Operand(zero_reg));
3317  bind(&byte_loop_1);
3318  lbu(scratch, MemOperand(src));
3319  Addu(src, src, 1);
3320  sb(scratch, MemOperand(dst));
3321  Addu(dst, dst, 1);
3322  Subu(length, length, Operand(1));
3323  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3324  bind(&done);
3325}
3326
3327
3328void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3329                                                Register end_offset,
3330                                                Register filler) {
3331  Label loop, entry;
3332  Branch(&entry);
3333  bind(&loop);
3334  sw(filler, MemOperand(start_offset));
3335  Addu(start_offset, start_offset, kPointerSize);
3336  bind(&entry);
3337  Branch(&loop, lt, start_offset, Operand(end_offset));
3338}
3339
3340
3341void MacroAssembler::CheckFastElements(Register map,
3342                                       Register scratch,
3343                                       Label* fail) {
3344  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
3345  STATIC_ASSERT(FAST_ELEMENTS == 1);
3346  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3347  Branch(fail, hi, scratch, Operand(Map::kMaximumBitField2FastElementValue));
3348}
3349
3350
3351void MacroAssembler::CheckFastObjectElements(Register map,
3352                                             Register scratch,
3353                                             Label* fail) {
3354  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
3355  STATIC_ASSERT(FAST_ELEMENTS == 1);
3356  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3357  Branch(fail, ls, scratch,
3358         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
3359  Branch(fail, hi, scratch,
3360         Operand(Map::kMaximumBitField2FastElementValue));
3361}
3362
3363
3364void MacroAssembler::CheckFastSmiOnlyElements(Register map,
3365                                              Register scratch,
3366                                              Label* fail) {
3367  STATIC_ASSERT(FAST_SMI_ONLY_ELEMENTS == 0);
3368  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3369  Branch(fail, hi, scratch,
3370         Operand(Map::kMaximumBitField2FastSmiOnlyElementValue));
3371}
3372
3373
3374void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3375                                                 Register key_reg,
3376                                                 Register receiver_reg,
3377                                                 Register elements_reg,
3378                                                 Register scratch1,
3379                                                 Register scratch2,
3380                                                 Register scratch3,
3381                                                 Register scratch4,
3382                                                 Label* fail) {
3383  Label smi_value, maybe_nan, have_double_value, is_nan, done;
3384  Register mantissa_reg = scratch2;
3385  Register exponent_reg = scratch3;
3386
3387  // Handle smi values specially.
3388  JumpIfSmi(value_reg, &smi_value);
3389
3390  // Ensure that the object is a heap number
3391  CheckMap(value_reg,
3392           scratch1,
3393           Heap::kHeapNumberMapRootIndex,
3394           fail,
3395           DONT_DO_SMI_CHECK);
3396
3397  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3398  // in the exponent.
3399  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3400  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3401  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3402
3403  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3404
3405  bind(&have_double_value);
3406  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3407  Addu(scratch1, scratch1, elements_reg);
3408  sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
3409  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
3410  sw(exponent_reg, FieldMemOperand(scratch1, offset));
3411  jmp(&done);
3412
3413  bind(&maybe_nan);
3414  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
3415  // it's an Infinity, and the non-NaN code path applies.
3416  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3417  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3418  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3419  bind(&is_nan);
3420  // Load canonical NaN for storing into the double array.
3421  uint64_t nan_int64 = BitCast<uint64_t>(
3422      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3423  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
3424  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
3425  jmp(&have_double_value);
3426
3427  bind(&smi_value);
3428  Addu(scratch1, elements_reg,
3429      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3430  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3431  Addu(scratch1, scratch1, scratch2);
3432  // scratch1 is now effective address of the double element
3433
3434  FloatingPointHelper::Destination destination;
3435  if (CpuFeatures::IsSupported(FPU)) {
3436    destination = FloatingPointHelper::kFPURegisters;
3437  } else {
3438    destination = FloatingPointHelper::kCoreRegisters;
3439  }
3440
3441  Register untagged_value = receiver_reg;
3442  SmiUntag(untagged_value, value_reg);
3443  FloatingPointHelper::ConvertIntToDouble(this,
3444                                          untagged_value,
3445                                          destination,
3446                                          f0,
3447                                          mantissa_reg,
3448                                          exponent_reg,
3449                                          scratch4,
3450                                          f2);
3451  if (destination == FloatingPointHelper::kFPURegisters) {
3452    CpuFeatures::Scope scope(FPU);
3453    sdc1(f0, MemOperand(scratch1, 0));
3454  } else {
3455    sw(mantissa_reg, MemOperand(scratch1, 0));
3456    sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
3457  }
3458  bind(&done);
3459}
3460
3461
3462void MacroAssembler::CompareMapAndBranch(Register obj,
3463                                         Register scratch,
3464                                         Handle<Map> map,
3465                                         Label* early_success,
3466                                         Condition cond,
3467                                         Label* branch_to,
3468                                         CompareMapMode mode) {
3469  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3470  Operand right = Operand(map);
3471  if (mode == ALLOW_ELEMENT_TRANSITION_MAPS) {
3472    Map* transitioned_fast_element_map(
3473        map->LookupElementsTransitionMap(FAST_ELEMENTS, NULL));
3474    ASSERT(transitioned_fast_element_map == NULL ||
3475           map->elements_kind() != FAST_ELEMENTS);
3476    if (transitioned_fast_element_map != NULL) {
3477      Branch(early_success, eq, scratch, right);
3478      right = Operand(Handle<Map>(transitioned_fast_element_map));
3479    }
3480
3481    Map* transitioned_double_map(
3482        map->LookupElementsTransitionMap(FAST_DOUBLE_ELEMENTS, NULL));
3483    ASSERT(transitioned_double_map == NULL ||
3484           map->elements_kind() == FAST_SMI_ONLY_ELEMENTS);
3485    if (transitioned_double_map != NULL) {
3486      Branch(early_success, eq, scratch, right);
3487      right = Operand(Handle<Map>(transitioned_double_map));
3488    }
3489  }
3490
3491  Branch(branch_to, cond, scratch, right);
3492}
3493
3494
3495void MacroAssembler::CheckMap(Register obj,
3496                              Register scratch,
3497                              Handle<Map> map,
3498                              Label* fail,
3499                              SmiCheckType smi_check_type,
3500                              CompareMapMode mode) {
3501  if (smi_check_type == DO_SMI_CHECK) {
3502    JumpIfSmi(obj, fail);
3503  }
3504  Label success;
3505  CompareMapAndBranch(obj, scratch, map, &success, ne, fail, mode);
3506  bind(&success);
3507}
3508
3509
3510void MacroAssembler::DispatchMap(Register obj,
3511                                 Register scratch,
3512                                 Handle<Map> map,
3513                                 Handle<Code> success,
3514                                 SmiCheckType smi_check_type) {
3515  Label fail;
3516  if (smi_check_type == DO_SMI_CHECK) {
3517    JumpIfSmi(obj, &fail);
3518  }
3519  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3520  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3521  bind(&fail);
3522}
3523
3524
3525void MacroAssembler::CheckMap(Register obj,
3526                              Register scratch,
3527                              Heap::RootListIndex index,
3528                              Label* fail,
3529                              SmiCheckType smi_check_type) {
3530  if (smi_check_type == DO_SMI_CHECK) {
3531    JumpIfSmi(obj, fail);
3532  }
3533  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3534  LoadRoot(at, index);
3535  Branch(fail, ne, scratch, Operand(at));
3536}
3537
3538
3539void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
3540  CpuFeatures::Scope scope(FPU);
3541  if (IsMipsSoftFloatABI) {
3542    Move(dst, v0, v1);
3543  } else {
3544    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
3545  }
3546}
3547
3548
3549void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
3550  CpuFeatures::Scope scope(FPU);
3551  if (!IsMipsSoftFloatABI) {
3552    Move(f12, dreg);
3553  } else {
3554    Move(a0, a1, dreg);
3555  }
3556}
3557
3558
3559void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
3560                                             DoubleRegister dreg2) {
3561  CpuFeatures::Scope scope(FPU);
3562  if (!IsMipsSoftFloatABI) {
3563    if (dreg2.is(f12)) {
3564      ASSERT(!dreg1.is(f14));
3565      Move(f14, dreg2);
3566      Move(f12, dreg1);
3567    } else {
3568      Move(f12, dreg1);
3569      Move(f14, dreg2);
3570    }
3571  } else {
3572    Move(a0, a1, dreg1);
3573    Move(a2, a3, dreg2);
3574  }
3575}
3576
3577
3578void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
3579                                             Register reg) {
3580  CpuFeatures::Scope scope(FPU);
3581  if (!IsMipsSoftFloatABI) {
3582    Move(f12, dreg);
3583    Move(a2, reg);
3584  } else {
3585    Move(a2, reg);
3586    Move(a0, a1, dreg);
3587  }
3588}
3589
3590
3591void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3592  // This macro takes the dst register to make the code more readable
3593  // at the call sites. However, the dst register has to be t1 to
3594  // follow the calling convention which requires the call type to be
3595  // in t1.
3596  ASSERT(dst.is(t1));
3597  if (call_kind == CALL_AS_FUNCTION) {
3598    li(dst, Operand(Smi::FromInt(1)));
3599  } else {
3600    li(dst, Operand(Smi::FromInt(0)));
3601  }
3602}
3603
3604
3605// -----------------------------------------------------------------------------
3606// JavaScript invokes.
3607
3608void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3609                                    const ParameterCount& actual,
3610                                    Handle<Code> code_constant,
3611                                    Register code_reg,
3612                                    Label* done,
3613                                    bool* definitely_mismatches,
3614                                    InvokeFlag flag,
3615                                    const CallWrapper& call_wrapper,
3616                                    CallKind call_kind) {
3617  bool definitely_matches = false;
3618  *definitely_mismatches = false;
3619  Label regular_invoke;
3620
3621  // Check whether the expected and actual arguments count match. If not,
3622  // setup registers according to contract with ArgumentsAdaptorTrampoline:
3623  //  a0: actual arguments count
3624  //  a1: function (passed through to callee)
3625  //  a2: expected arguments count
3626  //  a3: callee code entry
3627
3628  // The code below is made a lot easier because the calling code already sets
3629  // up actual and expected registers according to the contract if values are
3630  // passed in registers.
3631  ASSERT(actual.is_immediate() || actual.reg().is(a0));
3632  ASSERT(expected.is_immediate() || expected.reg().is(a2));
3633  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
3634
3635  if (expected.is_immediate()) {
3636    ASSERT(actual.is_immediate());
3637    if (expected.immediate() == actual.immediate()) {
3638      definitely_matches = true;
3639    } else {
3640      li(a0, Operand(actual.immediate()));
3641      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3642      if (expected.immediate() == sentinel) {
3643        // Don't worry about adapting arguments for builtins that
3644        // don't want that done. Skip adaption code by making it look
3645        // like we have a match between expected and actual number of
3646        // arguments.
3647        definitely_matches = true;
3648      } else {
3649        *definitely_mismatches = true;
3650        li(a2, Operand(expected.immediate()));
3651      }
3652    }
3653  } else if (actual.is_immediate()) {
3654    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3655    li(a0, Operand(actual.immediate()));
3656  } else {
3657    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3658  }
3659
3660  if (!definitely_matches) {
3661    if (!code_constant.is_null()) {
3662      li(a3, Operand(code_constant));
3663      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3664    }
3665
3666    Handle<Code> adaptor =
3667        isolate()->builtins()->ArgumentsAdaptorTrampoline();
3668    if (flag == CALL_FUNCTION) {
3669      call_wrapper.BeforeCall(CallSize(adaptor));
3670      SetCallKind(t1, call_kind);
3671      Call(adaptor);
3672      call_wrapper.AfterCall();
3673      if (!*definitely_mismatches) {
3674        Branch(done);
3675      }
3676    } else {
3677      SetCallKind(t1, call_kind);
3678      Jump(adaptor, RelocInfo::CODE_TARGET);
3679    }
3680    bind(&regular_invoke);
3681  }
3682}
3683
3684
3685void MacroAssembler::InvokeCode(Register code,
3686                                const ParameterCount& expected,
3687                                const ParameterCount& actual,
3688                                InvokeFlag flag,
3689                                const CallWrapper& call_wrapper,
3690                                CallKind call_kind) {
3691  // You can't call a function without a valid frame.
3692  ASSERT(flag == JUMP_FUNCTION || has_frame());
3693
3694  Label done;
3695
3696  bool definitely_mismatches = false;
3697  InvokePrologue(expected, actual, Handle<Code>::null(), code,
3698                 &done, &definitely_mismatches, flag,
3699                 call_wrapper, call_kind);
3700  if (!definitely_mismatches) {
3701    if (flag == CALL_FUNCTION) {
3702      call_wrapper.BeforeCall(CallSize(code));
3703      SetCallKind(t1, call_kind);
3704      Call(code);
3705      call_wrapper.AfterCall();
3706    } else {
3707      ASSERT(flag == JUMP_FUNCTION);
3708      SetCallKind(t1, call_kind);
3709      Jump(code);
3710    }
3711    // Continue here if InvokePrologue does handle the invocation due to
3712    // mismatched parameter counts.
3713    bind(&done);
3714  }
3715}
3716
3717
3718void MacroAssembler::InvokeCode(Handle<Code> code,
3719                                const ParameterCount& expected,
3720                                const ParameterCount& actual,
3721                                RelocInfo::Mode rmode,
3722                                InvokeFlag flag,
3723                                CallKind call_kind) {
3724  // You can't call a function without a valid frame.
3725  ASSERT(flag == JUMP_FUNCTION || has_frame());
3726
3727  Label done;
3728
3729  bool definitely_mismatches = false;
3730  InvokePrologue(expected, actual, code, no_reg,
3731                 &done, &definitely_mismatches, flag,
3732                 NullCallWrapper(), call_kind);
3733  if (!definitely_mismatches) {
3734    if (flag == CALL_FUNCTION) {
3735      SetCallKind(t1, call_kind);
3736      Call(code, rmode);
3737    } else {
3738      SetCallKind(t1, call_kind);
3739      Jump(code, rmode);
3740    }
3741    // Continue here if InvokePrologue does handle the invocation due to
3742    // mismatched parameter counts.
3743    bind(&done);
3744  }
3745}
3746
3747
3748void MacroAssembler::InvokeFunction(Register function,
3749                                    const ParameterCount& actual,
3750                                    InvokeFlag flag,
3751                                    const CallWrapper& call_wrapper,
3752                                    CallKind call_kind) {
3753  // You can't call a function without a valid frame.
3754  ASSERT(flag == JUMP_FUNCTION || has_frame());
3755
3756  // Contract with called JS functions requires that function is passed in a1.
3757  ASSERT(function.is(a1));
3758  Register expected_reg = a2;
3759  Register code_reg = a3;
3760
3761  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3762  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3763  lw(expected_reg,
3764      FieldMemOperand(code_reg,
3765                      SharedFunctionInfo::kFormalParameterCountOffset));
3766  sra(expected_reg, expected_reg, kSmiTagSize);
3767  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3768
3769  ParameterCount expected(expected_reg);
3770  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3771}
3772
3773
3774void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3775                                    const ParameterCount& actual,
3776                                    InvokeFlag flag,
3777                                    const CallWrapper& call_wrapper,
3778                                    CallKind call_kind) {
3779  // You can't call a function without a valid frame.
3780  ASSERT(flag == JUMP_FUNCTION || has_frame());
3781
3782  // Get the function and setup the context.
3783  LoadHeapObject(a1, function);
3784  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3785
3786  ParameterCount expected(function->shared()->formal_parameter_count());
3787  // We call indirectly through the code field in the function to
3788  // allow recompilation to take effect without changing any of the
3789  // call sites.
3790  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3791  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
3792}
3793
3794
3795void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3796                                          Register map,
3797                                          Register scratch,
3798                                          Label* fail) {
3799  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3800  IsInstanceJSObjectType(map, scratch, fail);
3801}
3802
3803
3804void MacroAssembler::IsInstanceJSObjectType(Register map,
3805                                            Register scratch,
3806                                            Label* fail) {
3807  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3808  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3809  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3810}
3811
3812
3813void MacroAssembler::IsObjectJSStringType(Register object,
3814                                          Register scratch,
3815                                          Label* fail) {
3816  ASSERT(kNotStringTag != 0);
3817
3818  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3819  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3820  And(scratch, scratch, Operand(kIsNotStringMask));
3821  Branch(fail, ne, scratch, Operand(zero_reg));
3822}
3823
3824
3825// ---------------------------------------------------------------------------
3826// Support functions.
3827
3828
3829void MacroAssembler::TryGetFunctionPrototype(Register function,
3830                                             Register result,
3831                                             Register scratch,
3832                                             Label* miss,
3833                                             bool miss_on_bound_function) {
3834  // Check that the receiver isn't a smi.
3835  JumpIfSmi(function, miss);
3836
3837  // Check that the function really is a function.  Load map into result reg.
3838  GetObjectType(function, result, scratch);
3839  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3840
3841  if (miss_on_bound_function) {
3842    lw(scratch,
3843       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3844    lw(scratch,
3845       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3846    And(scratch, scratch,
3847        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3848    Branch(miss, ne, scratch, Operand(zero_reg));
3849  }
3850
3851  // Make sure that the function has an instance prototype.
3852  Label non_instance;
3853  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3854  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3855  Branch(&non_instance, ne, scratch, Operand(zero_reg));
3856
3857  // Get the prototype or initial map from the function.
3858  lw(result,
3859     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3860
3861  // If the prototype or initial map is the hole, don't return it and
3862  // simply miss the cache instead. This will allow us to allocate a
3863  // prototype object on-demand in the runtime system.
3864  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3865  Branch(miss, eq, result, Operand(t8));
3866
3867  // If the function does not have an initial map, we're done.
3868  Label done;
3869  GetObjectType(result, scratch, scratch);
3870  Branch(&done, ne, scratch, Operand(MAP_TYPE));
3871
3872  // Get the prototype from the initial map.
3873  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3874  jmp(&done);
3875
3876  // Non-instance prototype: Fetch prototype from constructor field
3877  // in initial map.
3878  bind(&non_instance);
3879  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3880
3881  // All done.
3882  bind(&done);
3883}
3884
3885
3886void MacroAssembler::GetObjectType(Register object,
3887                                   Register map,
3888                                   Register type_reg) {
3889  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3890  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3891}
3892
3893
3894// -----------------------------------------------------------------------------
3895// Runtime calls.
3896
3897void MacroAssembler::CallStub(CodeStub* stub,
3898                              Condition cond,
3899                              Register r1,
3900                              const Operand& r2,
3901                              BranchDelaySlot bd) {
3902  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
3903  Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
3904}
3905
3906
3907void MacroAssembler::TailCallStub(CodeStub* stub) {
3908  ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
3909  Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
3910}
3911
3912
3913static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3914  return ref0.address() - ref1.address();
3915}
3916
3917
3918void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
3919                                              int stack_space) {
3920  ExternalReference next_address =
3921      ExternalReference::handle_scope_next_address();
3922  const int kNextOffset = 0;
3923  const int kLimitOffset = AddressOffset(
3924      ExternalReference::handle_scope_limit_address(),
3925      next_address);
3926  const int kLevelOffset = AddressOffset(
3927      ExternalReference::handle_scope_level_address(),
3928      next_address);
3929
3930  // Allocate HandleScope in callee-save registers.
3931  li(s3, Operand(next_address));
3932  lw(s0, MemOperand(s3, kNextOffset));
3933  lw(s1, MemOperand(s3, kLimitOffset));
3934  lw(s2, MemOperand(s3, kLevelOffset));
3935  Addu(s2, s2, Operand(1));
3936  sw(s2, MemOperand(s3, kLevelOffset));
3937
3938  // The O32 ABI requires us to pass a pointer in a0 where the returned struct
3939  // (4 bytes) will be placed. This is also built into the Simulator.
3940  // Set up the pointer to the returned value (a0). It was allocated in
3941  // EnterExitFrame.
3942  addiu(a0, fp, ExitFrameConstants::kStackSpaceOffset);
3943
3944  // Native call returns to the DirectCEntry stub which redirects to the
3945  // return address pushed on stack (could have moved after GC).
3946  // DirectCEntry stub itself is generated early and never moves.
3947  DirectCEntryStub stub;
3948  stub.GenerateCall(this, function);
3949
3950  // As mentioned above, on MIPS a pointer is returned - we need to dereference
3951  // it to get the actual return value (which is also a pointer).
3952  lw(v0, MemOperand(v0));
3953
3954  Label promote_scheduled_exception;
3955  Label delete_allocated_handles;
3956  Label leave_exit_frame;
3957
3958  // If result is non-zero, dereference to get the result value
3959  // otherwise set it to undefined.
3960  Label skip;
3961  LoadRoot(a0, Heap::kUndefinedValueRootIndex);
3962  Branch(&skip, eq, v0, Operand(zero_reg));
3963  lw(a0, MemOperand(v0));
3964  bind(&skip);
3965  mov(v0, a0);
3966
3967  // No more valid handles (the result handle was the last one). Restore
3968  // previous handle scope.
3969  sw(s0, MemOperand(s3, kNextOffset));
3970  if (emit_debug_code()) {
3971    lw(a1, MemOperand(s3, kLevelOffset));
3972    Check(eq, "Unexpected level after return from api call", a1, Operand(s2));
3973  }
3974  Subu(s2, s2, Operand(1));
3975  sw(s2, MemOperand(s3, kLevelOffset));
3976  lw(at, MemOperand(s3, kLimitOffset));
3977  Branch(&delete_allocated_handles, ne, s1, Operand(at));
3978
3979  // Check if the function scheduled an exception.
3980  bind(&leave_exit_frame);
3981  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3982  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3983  lw(t1, MemOperand(at));
3984  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3985  li(s0, Operand(stack_space));
3986  LeaveExitFrame(false, s0, true);
3987
3988  bind(&promote_scheduled_exception);
3989  TailCallExternalReference(
3990      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
3991      0,
3992      1);
3993
3994  // HandleScope limit has changed. Delete allocated extensions.
3995  bind(&delete_allocated_handles);
3996  sw(s1, MemOperand(s3, kLimitOffset));
3997  mov(s0, v0);
3998  mov(a0, v0);
3999  PrepareCallCFunction(1, s1);
4000  li(a0, Operand(ExternalReference::isolate_address()));
4001  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4002      1);
4003  mov(v0, s0);
4004  jmp(&leave_exit_frame);
4005}
4006
4007
4008bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4009  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
4010  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe();
4011}
4012
4013
4014void MacroAssembler::IllegalOperation(int num_arguments) {
4015  if (num_arguments > 0) {
4016    addiu(sp, sp, num_arguments * kPointerSize);
4017  }
4018  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
4019}
4020
4021
4022void MacroAssembler::IndexFromHash(Register hash,
4023                                   Register index) {
4024  // If the hash field contains an array index pick it out. The assert checks
4025  // that the constants for the maximum number of digits for an array index
4026  // cached in the hash field and the number of bits reserved for it does not
4027  // conflict.
4028  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
4029         (1 << String::kArrayIndexValueBits));
4030  // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
4031  // the low kHashShift bits.
4032  STATIC_ASSERT(kSmiTag == 0);
4033  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4034  sll(index, hash, kSmiTagSize);
4035}
4036
4037
4038void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4039                                               FPURegister result,
4040                                               Register scratch1,
4041                                               Register scratch2,
4042                                               Register heap_number_map,
4043                                               Label* not_number,
4044                                               ObjectToDoubleFlags flags) {
4045  Label done;
4046  if ((flags & OBJECT_NOT_SMI) == 0) {
4047    Label not_smi;
4048    JumpIfNotSmi(object, &not_smi);
4049    // Remove smi tag and convert to double.
4050    sra(scratch1, object, kSmiTagSize);
4051    mtc1(scratch1, result);
4052    cvt_d_w(result, result);
4053    Branch(&done);
4054    bind(&not_smi);
4055  }
4056  // Check for heap number and load double value from it.
4057  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4058  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4059
4060  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4061    // If exponent is all ones the number is either a NaN or +/-Infinity.
4062    Register exponent = scratch1;
4063    Register mask_reg = scratch2;
4064    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4065    li(mask_reg, HeapNumber::kExponentMask);
4066
4067    And(exponent, exponent, mask_reg);
4068    Branch(not_number, eq, exponent, Operand(mask_reg));
4069  }
4070  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4071  bind(&done);
4072}
4073
4074
4075void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4076                                            FPURegister value,
4077                                            Register scratch1) {
4078  sra(scratch1, smi, kSmiTagSize);
4079  mtc1(scratch1, value);
4080  cvt_d_w(value, value);
4081}
4082
4083
4084void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4085                                             Register left,
4086                                             Register right,
4087                                             Register overflow_dst,
4088                                             Register scratch) {
4089  ASSERT(!dst.is(overflow_dst));
4090  ASSERT(!dst.is(scratch));
4091  ASSERT(!overflow_dst.is(scratch));
4092  ASSERT(!overflow_dst.is(left));
4093  ASSERT(!overflow_dst.is(right));
4094
4095  if (left.is(right) && dst.is(left)) {
4096    ASSERT(!dst.is(t9));
4097    ASSERT(!scratch.is(t9));
4098    ASSERT(!left.is(t9));
4099    ASSERT(!right.is(t9));
4100    ASSERT(!overflow_dst.is(t9));
4101    mov(t9, right);
4102    right = t9;
4103  }
4104
4105  if (dst.is(left)) {
4106    mov(scratch, left);  // Preserve left.
4107    addu(dst, left, right);  // Left is overwritten.
4108    xor_(scratch, dst, scratch);  // Original left.
4109    xor_(overflow_dst, dst, right);
4110    and_(overflow_dst, overflow_dst, scratch);
4111  } else if (dst.is(right)) {
4112    mov(scratch, right);  // Preserve right.
4113    addu(dst, left, right);  // Right is overwritten.
4114    xor_(scratch, dst, scratch);  // Original right.
4115    xor_(overflow_dst, dst, left);
4116    and_(overflow_dst, overflow_dst, scratch);
4117  } else {
4118    addu(dst, left, right);
4119    xor_(overflow_dst, dst, left);
4120    xor_(scratch, dst, right);
4121    and_(overflow_dst, scratch, overflow_dst);
4122  }
4123}
4124
4125
4126void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4127                                             Register left,
4128                                             Register right,
4129                                             Register overflow_dst,
4130                                             Register scratch) {
4131  ASSERT(!dst.is(overflow_dst));
4132  ASSERT(!dst.is(scratch));
4133  ASSERT(!overflow_dst.is(scratch));
4134  ASSERT(!overflow_dst.is(left));
4135  ASSERT(!overflow_dst.is(right));
4136  ASSERT(!scratch.is(left));
4137  ASSERT(!scratch.is(right));
4138
4139  // This happens with some crankshaft code. Since Subu works fine if
4140  // left == right, let's not make that restriction here.
4141  if (left.is(right)) {
4142    mov(dst, zero_reg);
4143    mov(overflow_dst, zero_reg);
4144    return;
4145  }
4146
4147  if (dst.is(left)) {
4148    mov(scratch, left);  // Preserve left.
4149    subu(dst, left, right);  // Left is overwritten.
4150    xor_(overflow_dst, dst, scratch);  // scratch is original left.
4151    xor_(scratch, scratch, right);  // scratch is original left.
4152    and_(overflow_dst, scratch, overflow_dst);
4153  } else if (dst.is(right)) {
4154    mov(scratch, right);  // Preserve right.
4155    subu(dst, left, right);  // Right is overwritten.
4156    xor_(overflow_dst, dst, left);
4157    xor_(scratch, left, scratch);  // Original right.
4158    and_(overflow_dst, scratch, overflow_dst);
4159  } else {
4160    subu(dst, left, right);
4161    xor_(overflow_dst, dst, left);
4162    xor_(scratch, left, right);
4163    and_(overflow_dst, scratch, overflow_dst);
4164  }
4165}
4166
4167
4168void MacroAssembler::CallRuntime(const Runtime::Function* f,
4169                                 int num_arguments) {
4170  // All parameters are on the stack. v0 has the return value after call.
4171
4172  // If the expected number of arguments of the runtime function is
4173  // constant, we check that the actual number of arguments match the
4174  // expectation.
4175  if (f->nargs >= 0 && f->nargs != num_arguments) {
4176    IllegalOperation(num_arguments);
4177    return;
4178  }
4179
4180  // TODO(1236192): Most runtime routines don't need the number of
4181  // arguments passed in because it is constant. At some point we
4182  // should remove this need and make the runtime routine entry code
4183  // smarter.
4184  PrepareCEntryArgs(num_arguments);
4185  PrepareCEntryFunction(ExternalReference(f, isolate()));
4186  CEntryStub stub(1);
4187  CallStub(&stub);
4188}
4189
4190
4191void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
4192  const Runtime::Function* function = Runtime::FunctionForId(id);
4193  PrepareCEntryArgs(function->nargs);
4194  PrepareCEntryFunction(ExternalReference(function, isolate()));
4195  CEntryStub stub(1, kSaveFPRegs);
4196  CallStub(&stub);
4197}
4198
4199
4200void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
4201  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
4202}
4203
4204
4205void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4206                                           int num_arguments,
4207                                           BranchDelaySlot bd) {
4208  PrepareCEntryArgs(num_arguments);
4209  PrepareCEntryFunction(ext);
4210
4211  CEntryStub stub(1);
4212  CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
4213}
4214
4215
4216void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4217                                               int num_arguments,
4218                                               int result_size) {
4219  // TODO(1236192): Most runtime routines don't need the number of
4220  // arguments passed in because it is constant. At some point we
4221  // should remove this need and make the runtime routine entry code
4222  // smarter.
4223  PrepareCEntryArgs(num_arguments);
4224  JumpToExternalReference(ext);
4225}
4226
4227
4228void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4229                                     int num_arguments,
4230                                     int result_size) {
4231  TailCallExternalReference(ExternalReference(fid, isolate()),
4232                            num_arguments,
4233                            result_size);
4234}
4235
4236
4237void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4238                                             BranchDelaySlot bd) {
4239  PrepareCEntryFunction(builtin);
4240  CEntryStub stub(1);
4241  Jump(stub.GetCode(),
4242       RelocInfo::CODE_TARGET,
4243       al,
4244       zero_reg,
4245       Operand(zero_reg),
4246       bd);
4247}
4248
4249
4250void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4251                                   InvokeFlag flag,
4252                                   const CallWrapper& call_wrapper) {
4253  // You can't call a builtin without a valid frame.
4254  ASSERT(flag == JUMP_FUNCTION || has_frame());
4255
4256  GetBuiltinEntry(t9, id);
4257  if (flag == CALL_FUNCTION) {
4258    call_wrapper.BeforeCall(CallSize(t9));
4259    SetCallKind(t1, CALL_AS_METHOD);
4260    Call(t9);
4261    call_wrapper.AfterCall();
4262  } else {
4263    ASSERT(flag == JUMP_FUNCTION);
4264    SetCallKind(t1, CALL_AS_METHOD);
4265    Jump(t9);
4266  }
4267}
4268
4269
4270void MacroAssembler::GetBuiltinFunction(Register target,
4271                                        Builtins::JavaScript id) {
4272  // Load the builtins object into target register.
4273  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4274  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4275  // Load the JavaScript builtin function from the builtins object.
4276  lw(target, FieldMemOperand(target,
4277                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4278}
4279
4280
4281void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4282  ASSERT(!target.is(a1));
4283  GetBuiltinFunction(a1, id);
4284  // Load the code entry point from the builtins object.
4285  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4286}
4287
4288
4289void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4290                                Register scratch1, Register scratch2) {
4291  if (FLAG_native_code_counters && counter->Enabled()) {
4292    li(scratch1, Operand(value));
4293    li(scratch2, Operand(ExternalReference(counter)));
4294    sw(scratch1, MemOperand(scratch2));
4295  }
4296}
4297
4298
4299void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4300                                      Register scratch1, Register scratch2) {
4301  ASSERT(value > 0);
4302  if (FLAG_native_code_counters && counter->Enabled()) {
4303    li(scratch2, Operand(ExternalReference(counter)));
4304    lw(scratch1, MemOperand(scratch2));
4305    Addu(scratch1, scratch1, Operand(value));
4306    sw(scratch1, MemOperand(scratch2));
4307  }
4308}
4309
4310
4311void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4312                                      Register scratch1, Register scratch2) {
4313  ASSERT(value > 0);
4314  if (FLAG_native_code_counters && counter->Enabled()) {
4315    li(scratch2, Operand(ExternalReference(counter)));
4316    lw(scratch1, MemOperand(scratch2));
4317    Subu(scratch1, scratch1, Operand(value));
4318    sw(scratch1, MemOperand(scratch2));
4319  }
4320}
4321
4322
4323// -----------------------------------------------------------------------------
4324// Debugging.
4325
4326void MacroAssembler::Assert(Condition cc, const char* msg,
4327                            Register rs, Operand rt) {
4328  if (emit_debug_code())
4329    Check(cc, msg, rs, rt);
4330}
4331
4332
4333void MacroAssembler::AssertRegisterIsRoot(Register reg,
4334                                          Heap::RootListIndex index) {
4335  if (emit_debug_code()) {
4336    LoadRoot(at, index);
4337    Check(eq, "Register did not match expected root", reg, Operand(at));
4338  }
4339}
4340
4341
4342void MacroAssembler::AssertFastElements(Register elements) {
4343  if (emit_debug_code()) {
4344    ASSERT(!elements.is(at));
4345    Label ok;
4346    push(elements);
4347    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4348    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4349    Branch(&ok, eq, elements, Operand(at));
4350    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4351    Branch(&ok, eq, elements, Operand(at));
4352    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4353    Branch(&ok, eq, elements, Operand(at));
4354    Abort("JSObject with fast elements map has slow elements");
4355    bind(&ok);
4356    pop(elements);
4357  }
4358}
4359
4360
4361void MacroAssembler::Check(Condition cc, const char* msg,
4362                           Register rs, Operand rt) {
4363  Label L;
4364  Branch(&L, cc, rs, rt);
4365  Abort(msg);
4366  // Will not return here.
4367  bind(&L);
4368}
4369
4370
4371void MacroAssembler::Abort(const char* msg) {
4372  Label abort_start;
4373  bind(&abort_start);
4374  // We want to pass the msg string like a smi to avoid GC
4375  // problems, however msg is not guaranteed to be aligned
4376  // properly. Instead, we pass an aligned pointer that is
4377  // a proper v8 smi, but also pass the alignment difference
4378  // from the real pointer as a smi.
4379  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4380  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4381  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4382#ifdef DEBUG
4383  if (msg != NULL) {
4384    RecordComment("Abort message: ");
4385    RecordComment(msg);
4386  }
4387#endif
4388
4389  li(a0, Operand(p0));
4390  push(a0);
4391  li(a0, Operand(Smi::FromInt(p1 - p0)));
4392  push(a0);
4393  // Disable stub call restrictions to always allow calls to abort.
4394  if (!has_frame_) {
4395    // We don't actually want to generate a pile of code for this, so just
4396    // claim there is a stack frame, without generating one.
4397    FrameScope scope(this, StackFrame::NONE);
4398    CallRuntime(Runtime::kAbort, 2);
4399  } else {
4400    CallRuntime(Runtime::kAbort, 2);
4401  }
4402  // Will not return here.
4403  if (is_trampoline_pool_blocked()) {
4404    // If the calling code cares about the exact number of
4405    // instructions generated, we insert padding here to keep the size
4406    // of the Abort macro constant.
4407    // Currently in debug mode with debug_code enabled the number of
4408    // generated instructions is 14, so we use this as a maximum value.
4409    static const int kExpectedAbortInstructions = 14;
4410    int abort_instructions = InstructionsGeneratedSince(&abort_start);
4411    ASSERT(abort_instructions <= kExpectedAbortInstructions);
4412    while (abort_instructions++ < kExpectedAbortInstructions) {
4413      nop();
4414    }
4415  }
4416}
4417
4418
4419void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4420  if (context_chain_length > 0) {
4421    // Move up the chain of contexts to the context containing the slot.
4422    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4423    for (int i = 1; i < context_chain_length; i++) {
4424      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4425    }
4426  } else {
4427    // Slot is in the current function context.  Move it into the
4428    // destination register in case we store into it (the write barrier
4429    // cannot be allowed to destroy the context in esi).
4430    Move(dst, cp);
4431  }
4432}
4433
4434
4435void MacroAssembler::LoadTransitionedArrayMapConditional(
4436    ElementsKind expected_kind,
4437    ElementsKind transitioned_kind,
4438    Register map_in_out,
4439    Register scratch,
4440    Label* no_map_match) {
4441  // Load the global or builtins object from the current context.
4442  lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4443  lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
4444
4445  // Check that the function's map is the same as the expected cached map.
4446  int expected_index =
4447      Context::GetContextMapIndexFromElementsKind(expected_kind);
4448  lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
4449  Branch(no_map_match, ne, map_in_out, Operand(at));
4450
4451  // Use the transitioned cached map.
4452  int trans_index =
4453      Context::GetContextMapIndexFromElementsKind(transitioned_kind);
4454  lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
4455}
4456
4457
4458void MacroAssembler::LoadInitialArrayMap(
4459    Register function_in, Register scratch, Register map_out) {
4460  ASSERT(!function_in.is(map_out));
4461  Label done;
4462  lw(map_out, FieldMemOperand(function_in,
4463                              JSFunction::kPrototypeOrInitialMapOffset));
4464  if (!FLAG_smi_only_arrays) {
4465    LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
4466                                        FAST_ELEMENTS,
4467                                        map_out,
4468                                        scratch,
4469                                        &done);
4470  }
4471  bind(&done);
4472}
4473
4474
4475void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4476  // Load the global or builtins object from the current context.
4477  lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4478  // Load the global context from the global or builtins object.
4479  lw(function, FieldMemOperand(function,
4480                               GlobalObject::kGlobalContextOffset));
4481  // Load the function from the global context.
4482  lw(function, MemOperand(function, Context::SlotOffset(index)));
4483}
4484
4485
4486void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4487                                                  Register map,
4488                                                  Register scratch) {
4489  // Load the initial map. The global functions all have initial maps.
4490  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4491  if (emit_debug_code()) {
4492    Label ok, fail;
4493    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4494    Branch(&ok);
4495    bind(&fail);
4496    Abort("Global functions must have initial map");
4497    bind(&ok);
4498  }
4499}
4500
4501
4502void MacroAssembler::EnterFrame(StackFrame::Type type) {
4503  addiu(sp, sp, -5 * kPointerSize);
4504  li(t8, Operand(Smi::FromInt(type)));
4505  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4506  sw(ra, MemOperand(sp, 4 * kPointerSize));
4507  sw(fp, MemOperand(sp, 3 * kPointerSize));
4508  sw(cp, MemOperand(sp, 2 * kPointerSize));
4509  sw(t8, MemOperand(sp, 1 * kPointerSize));
4510  sw(t9, MemOperand(sp, 0 * kPointerSize));
4511  addiu(fp, sp, 3 * kPointerSize);
4512}
4513
4514
4515void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4516  mov(sp, fp);
4517  lw(fp, MemOperand(sp, 0 * kPointerSize));
4518  lw(ra, MemOperand(sp, 1 * kPointerSize));
4519  addiu(sp, sp, 2 * kPointerSize);
4520}
4521
4522
4523void MacroAssembler::EnterExitFrame(bool save_doubles,
4524                                    int stack_space) {
4525  // Set up the frame structure on the stack.
4526  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4527  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4528  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4529
4530  // This is how the stack will look:
4531  // fp + 2 (==kCallerSPDisplacement) - old stack's end
4532  // [fp + 1 (==kCallerPCOffset)] - saved old ra
4533  // [fp + 0 (==kCallerFPOffset)] - saved old fp
4534  // [fp - 1 (==kSPOffset)] - sp of the called function
4535  // [fp - 2 (==kCodeOffset)] - CodeObject
4536  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4537  //   new stack (will contain saved ra)
4538
4539  // Save registers.
4540  addiu(sp, sp, -4 * kPointerSize);
4541  sw(ra, MemOperand(sp, 3 * kPointerSize));
4542  sw(fp, MemOperand(sp, 2 * kPointerSize));
4543  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
4544
4545  if (emit_debug_code()) {
4546    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4547  }
4548
4549  // Accessed from ExitFrame::code_slot.
4550  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4551  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4552
4553  // Save the frame pointer and the context in top.
4554  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4555  sw(fp, MemOperand(t8));
4556  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4557  sw(cp, MemOperand(t8));
4558
4559  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4560  if (save_doubles) {
4561    // The stack  must be allign to 0 modulo 8 for stores with sdc1.
4562    ASSERT(kDoubleSize == frame_alignment);
4563    if (frame_alignment > 0) {
4564      ASSERT(IsPowerOf2(frame_alignment));
4565      And(sp, sp, Operand(-frame_alignment));  // Align stack.
4566    }
4567    int space = FPURegister::kNumRegisters * kDoubleSize;
4568    Subu(sp, sp, Operand(space));
4569    // Remember: we only need to save every 2nd double FPU value.
4570    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4571      FPURegister reg = FPURegister::from_code(i);
4572      sdc1(reg, MemOperand(sp, i * kDoubleSize));
4573    }
4574  }
4575
4576  // Reserve place for the return address, stack space and an optional slot
4577  // (used by the DirectCEntryStub to hold the return value if a struct is
4578  // returned) and align the frame preparing for calling the runtime function.
4579  ASSERT(stack_space >= 0);
4580  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4581  if (frame_alignment > 0) {
4582    ASSERT(IsPowerOf2(frame_alignment));
4583    And(sp, sp, Operand(-frame_alignment));  // Align stack.
4584  }
4585
4586  // Set the exit frame sp value to point just before the return address
4587  // location.
4588  addiu(at, sp, kPointerSize);
4589  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4590}
4591
4592
4593void MacroAssembler::LeaveExitFrame(bool save_doubles,
4594                                    Register argument_count,
4595                                    bool do_return) {
4596  // Optionally restore all double registers.
4597  if (save_doubles) {
4598    // Remember: we only need to restore every 2nd double FPU value.
4599    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4600    for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
4601      FPURegister reg = FPURegister::from_code(i);
4602      ldc1(reg, MemOperand(t8, i  * kDoubleSize + kPointerSize));
4603    }
4604  }
4605
4606  // Clear top frame.
4607  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4608  sw(zero_reg, MemOperand(t8));
4609
4610  // Restore current context from top and clear it in debug mode.
4611  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4612  lw(cp, MemOperand(t8));
4613#ifdef DEBUG
4614  sw(a3, MemOperand(t8));
4615#endif
4616
4617  // Pop the arguments, restore registers, and return.
4618  mov(sp, fp);  // Respect ABI stack constraint.
4619  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4620  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4621
4622  if (argument_count.is_valid()) {
4623    sll(t8, argument_count, kPointerSizeLog2);
4624    addu(sp, sp, t8);
4625  }
4626
4627  if (do_return) {
4628    Ret(USE_DELAY_SLOT);
4629    // If returning, the instruction in the delay slot will be the addiu below.
4630  }
4631  addiu(sp, sp, 8);
4632}
4633
4634
4635void MacroAssembler::InitializeNewString(Register string,
4636                                         Register length,
4637                                         Heap::RootListIndex map_index,
4638                                         Register scratch1,
4639                                         Register scratch2) {
4640  sll(scratch1, length, kSmiTagSize);
4641  LoadRoot(scratch2, map_index);
4642  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4643  li(scratch1, Operand(String::kEmptyHashField));
4644  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4645  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4646}
4647
4648
4649int MacroAssembler::ActivationFrameAlignment() {
4650#if defined(V8_HOST_ARCH_MIPS)
4651  // Running on the real platform. Use the alignment as mandated by the local
4652  // environment.
4653  // Note: This will break if we ever start generating snapshots on one Mips
4654  // platform for another Mips platform with a different alignment.
4655  return OS::ActivationFrameAlignment();
4656#else  // defined(V8_HOST_ARCH_MIPS)
4657  // If we are using the simulator then we should always align to the expected
4658  // alignment. As the simulator is used to generate snapshots we do not know
4659  // if the target platform will need alignment, so this is controlled from a
4660  // flag.
4661  return FLAG_sim_stack_alignment;
4662#endif  // defined(V8_HOST_ARCH_MIPS)
4663}
4664
4665
4666void MacroAssembler::AssertStackIsAligned() {
4667  if (emit_debug_code()) {
4668      const int frame_alignment = ActivationFrameAlignment();
4669      const int frame_alignment_mask = frame_alignment - 1;
4670
4671      if (frame_alignment > kPointerSize) {
4672        Label alignment_as_expected;
4673        ASSERT(IsPowerOf2(frame_alignment));
4674        andi(at, sp, frame_alignment_mask);
4675        Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4676        // Don't use Check here, as it will call Runtime_Abort re-entering here.
4677        stop("Unexpected stack alignment");
4678        bind(&alignment_as_expected);
4679      }
4680    }
4681}
4682
4683
4684void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4685    Register reg,
4686    Register scratch,
4687    Label* not_power_of_two_or_zero) {
4688  Subu(scratch, reg, Operand(1));
4689  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4690         scratch, Operand(zero_reg));
4691  and_(at, scratch, reg);  // In the delay slot.
4692  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4693}
4694
4695
4696void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4697  ASSERT(!reg.is(overflow));
4698  mov(overflow, reg);  // Save original value.
4699  SmiTag(reg);
4700  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
4701}
4702
4703
4704void MacroAssembler::SmiTagCheckOverflow(Register dst,
4705                                         Register src,
4706                                         Register overflow) {
4707  if (dst.is(src)) {
4708    // Fall back to slower case.
4709    SmiTagCheckOverflow(dst, overflow);
4710  } else {
4711    ASSERT(!dst.is(src));
4712    ASSERT(!dst.is(overflow));
4713    ASSERT(!src.is(overflow));
4714    SmiTag(dst, src);
4715    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
4716  }
4717}
4718
4719
4720void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4721                                       Register src,
4722                                       Label* smi_case) {
4723  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4724  SmiUntag(dst, src);
4725}
4726
4727
4728void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4729                                          Register src,
4730                                          Label* non_smi_case) {
4731  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4732  SmiUntag(dst, src);
4733}
4734
4735void MacroAssembler::JumpIfSmi(Register value,
4736                               Label* smi_label,
4737                               Register scratch,
4738                               BranchDelaySlot bd) {
4739  ASSERT_EQ(0, kSmiTag);
4740  andi(scratch, value, kSmiTagMask);
4741  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4742}
4743
4744void MacroAssembler::JumpIfNotSmi(Register value,
4745                                  Label* not_smi_label,
4746                                  Register scratch,
4747                                  BranchDelaySlot bd) {
4748  ASSERT_EQ(0, kSmiTag);
4749  andi(scratch, value, kSmiTagMask);
4750  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4751}
4752
4753
4754void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4755                                      Register reg2,
4756                                      Label* on_not_both_smi) {
4757  STATIC_ASSERT(kSmiTag == 0);
4758  ASSERT_EQ(1, kSmiTagMask);
4759  or_(at, reg1, reg2);
4760  JumpIfNotSmi(at, on_not_both_smi);
4761}
4762
4763
4764void MacroAssembler::JumpIfEitherSmi(Register reg1,
4765                                     Register reg2,
4766                                     Label* on_either_smi) {
4767  STATIC_ASSERT(kSmiTag == 0);
4768  ASSERT_EQ(1, kSmiTagMask);
4769  // Both Smi tags must be 1 (not Smi).
4770  and_(at, reg1, reg2);
4771  JumpIfSmi(at, on_either_smi);
4772}
4773
4774
4775void MacroAssembler::AbortIfSmi(Register object) {
4776  STATIC_ASSERT(kSmiTag == 0);
4777  andi(at, object, kSmiTagMask);
4778  Assert(ne, "Operand is a smi", at, Operand(zero_reg));
4779}
4780
4781
4782void MacroAssembler::AbortIfNotSmi(Register object) {
4783  STATIC_ASSERT(kSmiTag == 0);
4784  andi(at, object, kSmiTagMask);
4785  Assert(eq, "Operand is a smi", at, Operand(zero_reg));
4786}
4787
4788
4789void MacroAssembler::AbortIfNotString(Register object) {
4790  STATIC_ASSERT(kSmiTag == 0);
4791  And(t0, object, Operand(kSmiTagMask));
4792  Assert(ne, "Operand is not a string", t0, Operand(zero_reg));
4793  push(object);
4794  lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4795  lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4796  Assert(lo, "Operand is not a string", object, Operand(FIRST_NONSTRING_TYPE));
4797  pop(object);
4798}
4799
4800
4801void MacroAssembler::AbortIfNotRootValue(Register src,
4802                                         Heap::RootListIndex root_value_index,
4803                                         const char* message) {
4804  ASSERT(!src.is(at));
4805  LoadRoot(at, root_value_index);
4806  Assert(eq, message, src, Operand(at));
4807}
4808
4809
4810void MacroAssembler::JumpIfNotHeapNumber(Register object,
4811                                         Register heap_number_map,
4812                                         Register scratch,
4813                                         Label* on_not_heap_number) {
4814  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4815  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4816  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4817}
4818
4819
4820void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
4821    Register first,
4822    Register second,
4823    Register scratch1,
4824    Register scratch2,
4825    Label* failure) {
4826  // Test that both first and second are sequential ASCII strings.
4827  // Assume that they are non-smis.
4828  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
4829  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
4830  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
4831  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
4832
4833  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
4834                                               scratch2,
4835                                               scratch1,
4836                                               scratch2,
4837                                               failure);
4838}
4839
4840
4841void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
4842                                                         Register second,
4843                                                         Register scratch1,
4844                                                         Register scratch2,
4845                                                         Label* failure) {
4846  // Check that neither is a smi.
4847  STATIC_ASSERT(kSmiTag == 0);
4848  And(scratch1, first, Operand(second));
4849  JumpIfSmi(scratch1, failure);
4850  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
4851                                             second,
4852                                             scratch1,
4853                                             scratch2,
4854                                             failure);
4855}
4856
4857
4858void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
4859    Register first,
4860    Register second,
4861    Register scratch1,
4862    Register scratch2,
4863    Label* failure) {
4864  int kFlatAsciiStringMask =
4865      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4866  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4867  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
4868  andi(scratch1, first, kFlatAsciiStringMask);
4869  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
4870  andi(scratch2, second, kFlatAsciiStringMask);
4871  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
4872}
4873
4874
4875void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
4876                                                            Register scratch,
4877                                                            Label* failure) {
4878  int kFlatAsciiStringMask =
4879      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
4880  int kFlatAsciiStringTag = ASCII_STRING_TYPE;
4881  And(scratch, type, Operand(kFlatAsciiStringMask));
4882  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
4883}
4884
4885
4886static const int kRegisterPassedArguments = 4;
4887
4888int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
4889                                              int num_double_arguments) {
4890  int stack_passed_words = 0;
4891  num_reg_arguments += 2 * num_double_arguments;
4892
4893  // Up to four simple arguments are passed in registers a0..a3.
4894  if (num_reg_arguments > kRegisterPassedArguments) {
4895    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
4896  }
4897  stack_passed_words += kCArgSlotCount;
4898  return stack_passed_words;
4899}
4900
4901
4902void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4903                                          int num_double_arguments,
4904                                          Register scratch) {
4905  int frame_alignment = ActivationFrameAlignment();
4906
4907  // Up to four simple arguments are passed in registers a0..a3.
4908  // Those four arguments must have reserved argument slots on the stack for
4909  // mips, even though those argument slots are not normally used.
4910  // Remaining arguments are pushed on the stack, above (higher address than)
4911  // the argument slots.
4912  int stack_passed_arguments = CalculateStackPassedWords(
4913      num_reg_arguments, num_double_arguments);
4914  if (frame_alignment > kPointerSize) {
4915    // Make stack end at alignment and make room for num_arguments - 4 words
4916    // and the original value of sp.
4917    mov(scratch, sp);
4918    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
4919    ASSERT(IsPowerOf2(frame_alignment));
4920    And(sp, sp, Operand(-frame_alignment));
4921    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
4922  } else {
4923    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
4924  }
4925}
4926
4927
4928void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
4929                                          Register scratch) {
4930  PrepareCallCFunction(num_reg_arguments, 0, scratch);
4931}
4932
4933
4934void MacroAssembler::CallCFunction(ExternalReference function,
4935                                   int num_reg_arguments,
4936                                   int num_double_arguments) {
4937  li(t8, Operand(function));
4938  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
4939}
4940
4941
4942void MacroAssembler::CallCFunction(Register function,
4943                                   int num_reg_arguments,
4944                                   int num_double_arguments) {
4945  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
4946}
4947
4948
4949void MacroAssembler::CallCFunction(ExternalReference function,
4950                                   int num_arguments) {
4951  CallCFunction(function, num_arguments, 0);
4952}
4953
4954
4955void MacroAssembler::CallCFunction(Register function,
4956                                   int num_arguments) {
4957  CallCFunction(function, num_arguments, 0);
4958}
4959
4960
4961void MacroAssembler::CallCFunctionHelper(Register function,
4962                                         int num_reg_arguments,
4963                                         int num_double_arguments) {
4964  ASSERT(has_frame());
4965  // Make sure that the stack is aligned before calling a C function unless
4966  // running in the simulator. The simulator has its own alignment check which
4967  // provides more information.
4968  // The argument stots are presumed to have been set up by
4969  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
4970
4971#if defined(V8_HOST_ARCH_MIPS)
4972  if (emit_debug_code()) {
4973    int frame_alignment = OS::ActivationFrameAlignment();
4974    int frame_alignment_mask = frame_alignment - 1;
4975    if (frame_alignment > kPointerSize) {
4976      ASSERT(IsPowerOf2(frame_alignment));
4977      Label alignment_as_expected;
4978      And(at, sp, Operand(frame_alignment_mask));
4979      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4980      // Don't use Check here, as it will call Runtime_Abort possibly
4981      // re-entering here.
4982      stop("Unexpected alignment in CallCFunction");
4983      bind(&alignment_as_expected);
4984    }
4985  }
4986#endif  // V8_HOST_ARCH_MIPS
4987
4988  // Just call directly. The function called cannot cause a GC, or
4989  // allow preemption, so the return address in the link register
4990  // stays correct.
4991
4992  if (!function.is(t9)) {
4993    mov(t9, function);
4994    function = t9;
4995  }
4996
4997  Call(function);
4998
4999  int stack_passed_arguments = CalculateStackPassedWords(
5000      num_reg_arguments, num_double_arguments);
5001
5002  if (OS::ActivationFrameAlignment() > kPointerSize) {
5003    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5004  } else {
5005    Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
5006  }
5007}
5008
5009
5010#undef BRANCH_ARGS_CHECK
5011
5012
5013void MacroAssembler::PatchRelocatedValue(Register li_location,
5014                                         Register scratch,
5015                                         Register new_value) {
5016  lw(scratch, MemOperand(li_location));
5017  // At this point scratch is a lui(at, ...) instruction.
5018  if (emit_debug_code()) {
5019    And(scratch, scratch, kOpcodeMask);
5020    Check(eq, "The instruction to patch should be a lui.",
5021        scratch, Operand(LUI));
5022    lw(scratch, MemOperand(li_location));
5023  }
5024  srl(t9, new_value, kImm16Bits);
5025  Ins(scratch, t9, 0, kImm16Bits);
5026  sw(scratch, MemOperand(li_location));
5027
5028  lw(scratch, MemOperand(li_location, kInstrSize));
5029  // scratch is now ori(at, ...).
5030  if (emit_debug_code()) {
5031    And(scratch, scratch, kOpcodeMask);
5032    Check(eq, "The instruction to patch should be an ori.",
5033        scratch, Operand(ORI));
5034    lw(scratch, MemOperand(li_location, kInstrSize));
5035  }
5036  Ins(scratch, new_value, 0, kImm16Bits);
5037  sw(scratch, MemOperand(li_location, kInstrSize));
5038
5039  // Update the I-cache so the new lui and ori can be executed.
5040  FlushICache(li_location, 2);
5041}
5042
5043void MacroAssembler::GetRelocatedValue(Register li_location,
5044                                       Register value,
5045                                       Register scratch) {
5046  lw(value, MemOperand(li_location));
5047  if (emit_debug_code()) {
5048    And(value, value, kOpcodeMask);
5049    Check(eq, "The instruction should be a lui.",
5050        value, Operand(LUI));
5051    lw(value, MemOperand(li_location));
5052  }
5053
5054  // value now holds a lui instruction. Extract the immediate.
5055  sll(value, value, kImm16Bits);
5056
5057  lw(scratch, MemOperand(li_location, kInstrSize));
5058  if (emit_debug_code()) {
5059    And(scratch, scratch, kOpcodeMask);
5060    Check(eq, "The instruction should be an ori.",
5061        scratch, Operand(ORI));
5062    lw(scratch, MemOperand(li_location, kInstrSize));
5063  }
5064  // "scratch" now holds an ori instruction. Extract the immediate.
5065  andi(scratch, scratch, kImm16Mask);
5066
5067  // Merge the results.
5068  or_(value, value, scratch);
5069}
5070
5071
5072void MacroAssembler::CheckPageFlag(
5073    Register object,
5074    Register scratch,
5075    int mask,
5076    Condition cc,
5077    Label* condition_met) {
5078  And(scratch, object, Operand(~Page::kPageAlignmentMask));
5079  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5080  And(scratch, scratch, Operand(mask));
5081  Branch(condition_met, cc, scratch, Operand(zero_reg));
5082}
5083
5084
5085void MacroAssembler::JumpIfBlack(Register object,
5086                                 Register scratch0,
5087                                 Register scratch1,
5088                                 Label* on_black) {
5089  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
5090  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5091}
5092
5093
5094void MacroAssembler::HasColor(Register object,
5095                              Register bitmap_scratch,
5096                              Register mask_scratch,
5097                              Label* has_color,
5098                              int first_bit,
5099                              int second_bit) {
5100  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5101  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5102
5103  GetMarkBits(object, bitmap_scratch, mask_scratch);
5104
5105  Label other_color, word_boundary;
5106  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5107  And(t8, t9, Operand(mask_scratch));
5108  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5109  // Shift left 1 by adding.
5110  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5111  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5112  And(t8, t9, Operand(mask_scratch));
5113  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5114  jmp(&other_color);
5115
5116  bind(&word_boundary);
5117  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5118  And(t9, t9, Operand(1));
5119  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5120  bind(&other_color);
5121}
5122
5123
5124// Detect some, but not all, common pointer-free objects.  This is used by the
5125// incremental write barrier which doesn't care about oddballs (they are always
5126// marked black immediately so this code is not hit).
5127void MacroAssembler::JumpIfDataObject(Register value,
5128                                      Register scratch,
5129                                      Label* not_data_object) {
5130  ASSERT(!AreAliased(value, scratch, t8, no_reg));
5131  Label is_data_object;
5132  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5133  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5134  Branch(&is_data_object, eq, t8, Operand(scratch));
5135  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5136  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5137  // If it's a string and it's not a cons string then it's an object containing
5138  // no GC pointers.
5139  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5140  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5141  Branch(not_data_object, ne, t8, Operand(zero_reg));
5142  bind(&is_data_object);
5143}
5144
5145
5146void MacroAssembler::GetMarkBits(Register addr_reg,
5147                                 Register bitmap_reg,
5148                                 Register mask_reg) {
5149  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5150  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5151  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5152  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5153  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5154  sll(t8, t8, kPointerSizeLog2);
5155  Addu(bitmap_reg, bitmap_reg, t8);
5156  li(t8, Operand(1));
5157  sllv(mask_reg, t8, mask_reg);
5158}
5159
5160
5161void MacroAssembler::EnsureNotWhite(
5162    Register value,
5163    Register bitmap_scratch,
5164    Register mask_scratch,
5165    Register load_scratch,
5166    Label* value_is_white_and_not_data) {
5167  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5168  GetMarkBits(value, bitmap_scratch, mask_scratch);
5169
5170  // If the value is black or grey we don't need to do anything.
5171  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5172  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5173  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5174  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5175
5176  Label done;
5177
5178  // Since both black and grey have a 1 in the first position and white does
5179  // not have a 1 there we only need to check one bit.
5180  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5181  And(t8, mask_scratch, load_scratch);
5182  Branch(&done, ne, t8, Operand(zero_reg));
5183
5184  if (emit_debug_code()) {
5185    // Check for impossible bit pattern.
5186    Label ok;
5187    // sll may overflow, making the check conservative.
5188    sll(t8, mask_scratch, 1);
5189    And(t8, load_scratch, t8);
5190    Branch(&ok, eq, t8, Operand(zero_reg));
5191    stop("Impossible marking bit pattern");
5192    bind(&ok);
5193  }
5194
5195  // Value is white.  We check whether it is data that doesn't need scanning.
5196  // Currently only checks for HeapNumber and non-cons strings.
5197  Register map = load_scratch;  // Holds map while checking type.
5198  Register length = load_scratch;  // Holds length of object after testing type.
5199  Label is_data_object;
5200
5201  // Check for heap-number
5202  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5203  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5204  {
5205    Label skip;
5206    Branch(&skip, ne, t8, Operand(map));
5207    li(length, HeapNumber::kSize);
5208    Branch(&is_data_object);
5209    bind(&skip);
5210  }
5211
5212  // Check for strings.
5213  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5214  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5215  // If it's a string and it's not a cons string then it's an object containing
5216  // no GC pointers.
5217  Register instance_type = load_scratch;
5218  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5219  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5220  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5221  // It's a non-indirect (non-cons and non-slice) string.
5222  // If it's external, the length is just ExternalString::kSize.
5223  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5224  // External strings are the only ones with the kExternalStringTag bit
5225  // set.
5226  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5227  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5228  And(t8, instance_type, Operand(kExternalStringTag));
5229  {
5230    Label skip;
5231    Branch(&skip, eq, t8, Operand(zero_reg));
5232    li(length, ExternalString::kSize);
5233    Branch(&is_data_object);
5234    bind(&skip);
5235  }
5236
5237  // Sequential string, either ASCII or UC16.
5238  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5239  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5240  // getting the length multiplied by 2.
5241  ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
5242  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5243  lw(t9, FieldMemOperand(value, String::kLengthOffset));
5244  And(t8, instance_type, Operand(kStringEncodingMask));
5245  {
5246    Label skip;
5247    Branch(&skip, eq, t8, Operand(zero_reg));
5248    srl(t9, t9, 1);
5249    bind(&skip);
5250  }
5251  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5252  And(length, length, Operand(~kObjectAlignmentMask));
5253
5254  bind(&is_data_object);
5255  // Value is a data object, and it is white.  Mark it black.  Since we know
5256  // that the object is white we can make it black by flipping one bit.
5257  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5258  Or(t8, t8, Operand(mask_scratch));
5259  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5260
5261  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5262  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5263  Addu(t8, t8, Operand(length));
5264  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5265
5266  bind(&done);
5267}
5268
5269
5270void MacroAssembler::LoadInstanceDescriptors(Register map,
5271                                             Register descriptors) {
5272  lw(descriptors,
5273     FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
5274  Label not_smi;
5275  JumpIfNotSmi(descriptors, &not_smi);
5276  LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
5277  bind(&not_smi);
5278}
5279
5280
5281void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5282  Label next;
5283  // Preload a couple of values used in the loop.
5284  Register  empty_fixed_array_value = t2;
5285  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5286  Register empty_descriptor_array_value = t3;
5287  LoadRoot(empty_descriptor_array_value,
5288           Heap::kEmptyDescriptorArrayRootIndex);
5289  mov(a1, a0);
5290  bind(&next);
5291
5292  // Check that there are no elements.  Register a1 contains the
5293  // current JS object we've reached through the prototype chain.
5294  lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
5295  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
5296
5297  // Check that instance descriptors are not empty so that we can
5298  // check for an enum cache.  Leave the map in a2 for the subsequent
5299  // prototype load.
5300  lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
5301  lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
5302  JumpIfSmi(a3, call_runtime);
5303
5304  // Check that there is an enum cache in the non-empty instance
5305  // descriptors (a3).  This is the case if the next enumeration
5306  // index field does not contain a smi.
5307  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
5308  JumpIfSmi(a3, call_runtime);
5309
5310  // For all objects but the receiver, check that the cache is empty.
5311  Label check_prototype;
5312  Branch(&check_prototype, eq, a1, Operand(a0));
5313  lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
5314  Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
5315
5316  // Load the prototype from the map and loop if non-null.
5317  bind(&check_prototype);
5318  lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
5319  Branch(&next, ne, a1, Operand(null_value));
5320}
5321
5322
5323void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5324  ASSERT(!output_reg.is(input_reg));
5325  Label done;
5326  li(output_reg, Operand(255));
5327  // Normal branch: nop in delay slot.
5328  Branch(&done, gt, input_reg, Operand(output_reg));
5329  // Use delay slot in this branch.
5330  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5331  mov(output_reg, zero_reg);  // In delay slot.
5332  mov(output_reg, input_reg);  // Value is in range 0..255.
5333  bind(&done);
5334}
5335
5336
5337void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5338                                        DoubleRegister input_reg,
5339                                        DoubleRegister temp_double_reg) {
5340  Label above_zero;
5341  Label done;
5342  Label in_bounds;
5343
5344  Move(temp_double_reg, 0.0);
5345  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5346
5347  // Double value is less than zero, NaN or Inf, return 0.
5348  mov(result_reg, zero_reg);
5349  Branch(&done);
5350
5351  // Double value is >= 255, return 255.
5352  bind(&above_zero);
5353  Move(temp_double_reg, 255.0);
5354  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5355  li(result_reg, Operand(255));
5356  Branch(&done);
5357
5358  // In 0-255 range, round and truncate.
5359  bind(&in_bounds);
5360  round_w_d(temp_double_reg, input_reg);
5361  mfc1(result_reg, temp_double_reg);
5362  bind(&done);
5363}
5364
5365
5366bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5367  if (r1.is(r2)) return true;
5368  if (r1.is(r3)) return true;
5369  if (r1.is(r4)) return true;
5370  if (r2.is(r3)) return true;
5371  if (r2.is(r4)) return true;
5372  if (r3.is(r4)) return true;
5373  return false;
5374}
5375
5376
5377CodePatcher::CodePatcher(byte* address, int instructions)
5378    : address_(address),
5379      instructions_(instructions),
5380      size_(instructions * Assembler::kInstrSize),
5381      masm_(Isolate::Current(), address, size_ + Assembler::kGap) {
5382  // Create a new macro assembler pointing to the address of the code to patch.
5383  // The size is adjusted with kGap on order for the assembler to generate size
5384  // bytes of instructions without failing with buffer size constraints.
5385  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5386}
5387
5388
5389CodePatcher::~CodePatcher() {
5390  // Indicate that code has changed.
5391  CPU::FlushICache(address_, size_);
5392
5393  // Check that the code was patched as expected.
5394  ASSERT(masm_.pc_ == address_ + size_);
5395  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5396}
5397
5398
5399void CodePatcher::Emit(Instr instr) {
5400  masm()->emit(instr);
5401}
5402
5403
5404void CodePatcher::Emit(Address addr) {
5405  masm()->emit(reinterpret_cast<Instr>(addr));
5406}
5407
5408
5409void CodePatcher::ChangeBranchCondition(Condition cond) {
5410  Instr instr = Assembler::instr_at(masm_.pc_);
5411  ASSERT(Assembler::IsBranch(instr));
5412  uint32_t opcode = Assembler::GetOpcodeField(instr);
5413  // Currently only the 'eq' and 'ne' cond values are supported and the simple
5414  // branch instructions (with opcode being the branch type).
5415  // There are some special cases (see Assembler::IsBranch()) so extending this
5416  // would be tricky.
5417  ASSERT(opcode == BEQ ||
5418         opcode == BNE ||
5419        opcode == BLEZ ||
5420        opcode == BGTZ ||
5421        opcode == BEQL ||
5422        opcode == BNEL ||
5423       opcode == BLEZL ||
5424       opcode == BGTZL);
5425  opcode = (cond == eq) ? BEQ : BNE;
5426  instr = (instr & ~kOpcodeMask) | opcode;
5427  masm_.emit(instr);
5428}
5429
5430
5431} }  // namespace v8::internal
5432
5433#endif  // V8_TARGET_ARCH_MIPS
5434