1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include <limits.h>  // For LONG_MIN, LONG_MAX.
6
7#if V8_TARGET_ARCH_MIPS64
8
9#include "src/base/division-by-constant.h"
10#include "src/bootstrapper.h"
11#include "src/codegen.h"
12#include "src/debug/debug.h"
13#include "src/mips64/macro-assembler-mips64.h"
14#include "src/register-configuration.h"
15#include "src/runtime/runtime.h"
16
17namespace v8 {
18namespace internal {
19
20// Floating point constants.
21const uint64_t kDoubleSignMask = Double::kSignMask;
22const uint32_t kDoubleExponentShift = HeapNumber::kMantissaBits;
23const uint32_t kDoubleNaNShift = kDoubleExponentShift - 1;
24const uint64_t kDoubleNaNMask = Double::kExponentMask | (1L << kDoubleNaNShift);
25
26const uint32_t kSingleSignMask = kBinary32SignMask;
27const uint32_t kSingleExponentMask = kBinary32ExponentMask;
28const uint32_t kSingleExponentShift = kBinary32ExponentShift;
29const uint32_t kSingleNaNShift = kSingleExponentShift - 1;
30const uint32_t kSingleNaNMask = kSingleExponentMask | (1 << kSingleNaNShift);
31
32MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size,
33                               CodeObjectRequired create_code_object)
34    : Assembler(arg_isolate, buffer, size),
35      generating_stub_(false),
36      has_frame_(false),
37      has_double_zero_reg_set_(false) {
38  if (create_code_object == CodeObjectRequired::kYes) {
39    code_object_ =
40        Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
41  }
42}
43
44void MacroAssembler::Load(Register dst,
45                          const MemOperand& src,
46                          Representation r) {
47  DCHECK(!r.IsDouble());
48  if (r.IsInteger8()) {
49    lb(dst, src);
50  } else if (r.IsUInteger8()) {
51    lbu(dst, src);
52  } else if (r.IsInteger16()) {
53    lh(dst, src);
54  } else if (r.IsUInteger16()) {
55    lhu(dst, src);
56  } else if (r.IsInteger32()) {
57    lw(dst, src);
58  } else {
59    ld(dst, src);
60  }
61}
62
63
64void MacroAssembler::Store(Register src,
65                           const MemOperand& dst,
66                           Representation r) {
67  DCHECK(!r.IsDouble());
68  if (r.IsInteger8() || r.IsUInteger8()) {
69    sb(src, dst);
70  } else if (r.IsInteger16() || r.IsUInteger16()) {
71    sh(src, dst);
72  } else if (r.IsInteger32()) {
73    sw(src, dst);
74  } else {
75    if (r.IsHeapObject()) {
76      AssertNotSmi(src);
77    } else if (r.IsSmi()) {
78      AssertSmi(src);
79    }
80    sd(src, dst);
81  }
82}
83
84
85void MacroAssembler::LoadRoot(Register destination,
86                              Heap::RootListIndex index) {
87  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
88}
89
90
91void MacroAssembler::LoadRoot(Register destination,
92                              Heap::RootListIndex index,
93                              Condition cond,
94                              Register src1, const Operand& src2) {
95  Branch(2, NegateCondition(cond), src1, src2);
96  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
97}
98
99
100void MacroAssembler::StoreRoot(Register source,
101                               Heap::RootListIndex index) {
102  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
103  sd(source, MemOperand(s6, index << kPointerSizeLog2));
104}
105
106
107void MacroAssembler::StoreRoot(Register source,
108                               Heap::RootListIndex index,
109                               Condition cond,
110                               Register src1, const Operand& src2) {
111  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
112  Branch(2, NegateCondition(cond), src1, src2);
113  sd(source, MemOperand(s6, index << kPointerSizeLog2));
114}
115
116void MacroAssembler::PushCommonFrame(Register marker_reg) {
117  if (marker_reg.is_valid()) {
118    Push(ra, fp, marker_reg);
119    Daddu(fp, sp, Operand(kPointerSize));
120  } else {
121    Push(ra, fp);
122    mov(fp, sp);
123  }
124}
125
126void MacroAssembler::PopCommonFrame(Register marker_reg) {
127  if (marker_reg.is_valid()) {
128    Pop(ra, fp, marker_reg);
129  } else {
130    Pop(ra, fp);
131  }
132}
133
134void MacroAssembler::PushStandardFrame(Register function_reg) {
135  int offset = -StandardFrameConstants::kContextOffset;
136  if (function_reg.is_valid()) {
137    Push(ra, fp, cp, function_reg);
138    offset += kPointerSize;
139  } else {
140    Push(ra, fp, cp);
141  }
142  Daddu(fp, sp, Operand(offset));
143}
144
145// Push and pop all registers that can hold pointers.
146void MacroAssembler::PushSafepointRegisters() {
147  // Safepoints expect a block of kNumSafepointRegisters values on the
148  // stack, so adjust the stack for unsaved registers.
149  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
150  DCHECK(num_unsaved >= 0);
151  if (num_unsaved > 0) {
152    Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
153  }
154  MultiPush(kSafepointSavedRegisters);
155}
156
157
158void MacroAssembler::PopSafepointRegisters() {
159  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
160  MultiPop(kSafepointSavedRegisters);
161  if (num_unsaved > 0) {
162    Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
163  }
164}
165
166
167void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
168  sd(src, SafepointRegisterSlot(dst));
169}
170
171
172void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
173  ld(dst, SafepointRegisterSlot(src));
174}
175
176
177int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
178  // The registers are pushed starting with the highest encoding,
179  // which means that lowest encodings are closest to the stack pointer.
180  return kSafepointRegisterStackIndexMap[reg_code];
181}
182
183
184MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
185  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
186}
187
188
189MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
190  UNIMPLEMENTED_MIPS();
191  // General purpose registers are pushed last on the stack.
192  int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize;
193  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
194  return MemOperand(sp, doubles_size + register_offset);
195}
196
197
198void MacroAssembler::InNewSpace(Register object,
199                                Register scratch,
200                                Condition cc,
201                                Label* branch) {
202  DCHECK(cc == eq || cc == ne);
203  CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch);
204}
205
206
207// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
208// The register 'object' contains a heap object pointer.  The heap object
209// tag is shifted away.
210void MacroAssembler::RecordWriteField(
211    Register object,
212    int offset,
213    Register value,
214    Register dst,
215    RAStatus ra_status,
216    SaveFPRegsMode save_fp,
217    RememberedSetAction remembered_set_action,
218    SmiCheck smi_check,
219    PointersToHereCheck pointers_to_here_check_for_value) {
220  DCHECK(!AreAliased(value, dst, t8, object));
221  // First, check if a write barrier is even needed. The tests below
222  // catch stores of Smis.
223  Label done;
224
225  // Skip barrier if writing a smi.
226  if (smi_check == INLINE_SMI_CHECK) {
227    JumpIfSmi(value, &done);
228  }
229
230  // Although the object register is tagged, the offset is relative to the start
231  // of the object, so so offset must be a multiple of kPointerSize.
232  DCHECK(IsAligned(offset, kPointerSize));
233
234  Daddu(dst, object, Operand(offset - kHeapObjectTag));
235  if (emit_debug_code()) {
236    Label ok;
237    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
238    Branch(&ok, eq, t8, Operand(zero_reg));
239    stop("Unaligned cell in write barrier");
240    bind(&ok);
241  }
242
243  RecordWrite(object,
244              dst,
245              value,
246              ra_status,
247              save_fp,
248              remembered_set_action,
249              OMIT_SMI_CHECK,
250              pointers_to_here_check_for_value);
251
252  bind(&done);
253
254  // Clobber clobbered input registers when running with the debug-code flag
255  // turned on to provoke errors.
256  if (emit_debug_code()) {
257    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
258    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
259  }
260}
261
262
263// Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
264void MacroAssembler::RecordWriteForMap(Register object,
265                                       Register map,
266                                       Register dst,
267                                       RAStatus ra_status,
268                                       SaveFPRegsMode fp_mode) {
269  if (emit_debug_code()) {
270    DCHECK(!dst.is(at));
271    ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
272    Check(eq,
273          kWrongAddressOrValuePassedToRecordWrite,
274          dst,
275          Operand(isolate()->factory()->meta_map()));
276  }
277
278  if (!FLAG_incremental_marking) {
279    return;
280  }
281
282  if (emit_debug_code()) {
283    ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
284    Check(eq,
285          kWrongAddressOrValuePassedToRecordWrite,
286          map,
287          Operand(at));
288  }
289
290  Label done;
291
292  // A single check of the map's pages interesting flag suffices, since it is
293  // only set during incremental collection, and then it's also guaranteed that
294  // the from object's page's interesting flag is also set.  This optimization
295  // relies on the fact that maps can never be in new space.
296  CheckPageFlag(map,
297                map,  // Used as scratch.
298                MemoryChunk::kPointersToHereAreInterestingMask,
299                eq,
300                &done);
301
302  Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
303  if (emit_debug_code()) {
304    Label ok;
305    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
306    Branch(&ok, eq, at, Operand(zero_reg));
307    stop("Unaligned cell in write barrier");
308    bind(&ok);
309  }
310
311  // Record the actual write.
312  if (ra_status == kRAHasNotBeenSaved) {
313    push(ra);
314  }
315  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
316                       fp_mode);
317  CallStub(&stub);
318  if (ra_status == kRAHasNotBeenSaved) {
319    pop(ra);
320  }
321
322  bind(&done);
323
324  // Count number of write barriers in generated code.
325  isolate()->counters()->write_barriers_static()->Increment();
326  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
327
328  // Clobber clobbered registers when running with the debug-code flag
329  // turned on to provoke errors.
330  if (emit_debug_code()) {
331    li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
332    li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
333  }
334}
335
336
337// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
338// The register 'object' contains a heap object pointer.  The heap object
339// tag is shifted away.
340void MacroAssembler::RecordWrite(
341    Register object,
342    Register address,
343    Register value,
344    RAStatus ra_status,
345    SaveFPRegsMode fp_mode,
346    RememberedSetAction remembered_set_action,
347    SmiCheck smi_check,
348    PointersToHereCheck pointers_to_here_check_for_value) {
349  DCHECK(!AreAliased(object, address, value, t8));
350  DCHECK(!AreAliased(object, address, value, t9));
351
352  if (emit_debug_code()) {
353    ld(at, MemOperand(address));
354    Assert(
355        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
356  }
357
358  if (remembered_set_action == OMIT_REMEMBERED_SET &&
359      !FLAG_incremental_marking) {
360    return;
361  }
362
363  // First, check if a write barrier is even needed. The tests below
364  // catch stores of smis and stores into the young generation.
365  Label done;
366
367  if (smi_check == INLINE_SMI_CHECK) {
368    DCHECK_EQ(0, kSmiTag);
369    JumpIfSmi(value, &done);
370  }
371
372  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
373    CheckPageFlag(value,
374                  value,  // Used as scratch.
375                  MemoryChunk::kPointersToHereAreInterestingMask,
376                  eq,
377                  &done);
378  }
379  CheckPageFlag(object,
380                value,  // Used as scratch.
381                MemoryChunk::kPointersFromHereAreInterestingMask,
382                eq,
383                &done);
384
385  // Record the actual write.
386  if (ra_status == kRAHasNotBeenSaved) {
387    push(ra);
388  }
389  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
390                       fp_mode);
391  CallStub(&stub);
392  if (ra_status == kRAHasNotBeenSaved) {
393    pop(ra);
394  }
395
396  bind(&done);
397
398  // Count number of write barriers in generated code.
399  isolate()->counters()->write_barriers_static()->Increment();
400  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
401                   value);
402
403  // Clobber clobbered registers when running with the debug-code flag
404  // turned on to provoke errors.
405  if (emit_debug_code()) {
406    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
407    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
408  }
409}
410
411void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
412                                               Register code_entry,
413                                               Register scratch) {
414  const int offset = JSFunction::kCodeEntryOffset;
415
416  // Since a code entry (value) is always in old space, we don't need to update
417  // remembered set. If incremental marking is off, there is nothing for us to
418  // do.
419  if (!FLAG_incremental_marking) return;
420
421  DCHECK(js_function.is(a1));
422  DCHECK(code_entry.is(a4));
423  DCHECK(scratch.is(a5));
424  AssertNotSmi(js_function);
425
426  if (emit_debug_code()) {
427    Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
428    ld(at, MemOperand(scratch));
429    Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
430           Operand(code_entry));
431  }
432
433  // First, check if a write barrier is even needed. The tests below
434  // catch stores of Smis and stores into young gen.
435  Label done;
436
437  CheckPageFlag(code_entry, scratch,
438                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
439  CheckPageFlag(js_function, scratch,
440                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
441
442  const Register dst = scratch;
443  Daddu(dst, js_function, Operand(offset - kHeapObjectTag));
444
445  // Save caller-saved registers. js_function and code_entry are in the
446  // caller-saved register list.
447  DCHECK(kJSCallerSaved & js_function.bit());
448  DCHECK(kJSCallerSaved & code_entry.bit());
449  MultiPush(kJSCallerSaved | ra.bit());
450
451  int argument_count = 3;
452
453  PrepareCallCFunction(argument_count, code_entry);
454
455  Move(a0, js_function);
456  Move(a1, dst);
457  li(a2, Operand(ExternalReference::isolate_address(isolate())));
458
459  {
460    AllowExternalCallThatCantCauseGC scope(this);
461    CallCFunction(
462        ExternalReference::incremental_marking_record_write_code_entry_function(
463            isolate()),
464        argument_count);
465  }
466
467  // Restore caller-saved registers.
468  MultiPop(kJSCallerSaved | ra.bit());
469
470  bind(&done);
471}
472
473void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
474                                         Register address,
475                                         Register scratch,
476                                         SaveFPRegsMode fp_mode,
477                                         RememberedSetFinalAction and_then) {
478  Label done;
479  if (emit_debug_code()) {
480    Label ok;
481    JumpIfNotInNewSpace(object, scratch, &ok);
482    stop("Remembered set pointer is in new space");
483    bind(&ok);
484  }
485  // Load store buffer top.
486  ExternalReference store_buffer =
487      ExternalReference::store_buffer_top(isolate());
488  li(t8, Operand(store_buffer));
489  ld(scratch, MemOperand(t8));
490  // Store pointer to buffer and increment buffer top.
491  sd(address, MemOperand(scratch));
492  Daddu(scratch, scratch, kPointerSize);
493  // Write back new top of buffer.
494  sd(scratch, MemOperand(t8));
495  // Call stub on end of buffer.
496  // Check for end of buffer.
497  And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
498  DCHECK(!scratch.is(t8));
499  if (and_then == kFallThroughAtEnd) {
500    Branch(&done, ne, t8, Operand(zero_reg));
501  } else {
502    DCHECK(and_then == kReturnAtEnd);
503    Ret(ne, t8, Operand(zero_reg));
504  }
505  push(ra);
506  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
507  CallStub(&store_buffer_overflow);
508  pop(ra);
509  bind(&done);
510  if (and_then == kReturnAtEnd) {
511    Ret();
512  }
513}
514
515
516// -----------------------------------------------------------------------------
517// Allocation support.
518
519
520// Compute the hash code from the untagged key.  This must be kept in sync with
521// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
522// code-stub-hydrogen.cc
523void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
524  // First of all we assign the hash seed to scratch.
525  LoadRoot(scratch, Heap::kHashSeedRootIndex);
526  SmiUntag(scratch);
527
528  // Xor original key with a seed.
529  xor_(reg0, reg0, scratch);
530
531  // Compute the hash code from the untagged key.  This must be kept in sync
532  // with ComputeIntegerHash in utils.h.
533  //
534  // hash = ~hash + (hash << 15);
535  // The algorithm uses 32-bit integer values.
536  nor(scratch, reg0, zero_reg);
537  Lsa(reg0, scratch, reg0, 15);
538
539  // hash = hash ^ (hash >> 12);
540  srl(at, reg0, 12);
541  xor_(reg0, reg0, at);
542
543  // hash = hash + (hash << 2);
544  Lsa(reg0, reg0, reg0, 2);
545
546  // hash = hash ^ (hash >> 4);
547  srl(at, reg0, 4);
548  xor_(reg0, reg0, at);
549
550  // hash = hash * 2057;
551  sll(scratch, reg0, 11);
552  Lsa(reg0, reg0, reg0, 3);
553  addu(reg0, reg0, scratch);
554
555  // hash = hash ^ (hash >> 16);
556  srl(at, reg0, 16);
557  xor_(reg0, reg0, at);
558  And(reg0, reg0, Operand(0x3fffffff));
559}
560
561// ---------------------------------------------------------------------------
562// Instruction macros.
563
564void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
565  if (rt.is_reg()) {
566    addu(rd, rs, rt.rm());
567  } else {
568    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
569      addiu(rd, rs, static_cast<int32_t>(rt.imm64_));
570    } else {
571      // li handles the relocation.
572      DCHECK(!rs.is(at));
573      li(at, rt);
574      addu(rd, rs, at);
575    }
576  }
577}
578
579
580void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
581  if (rt.is_reg()) {
582    daddu(rd, rs, rt.rm());
583  } else {
584    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
585      daddiu(rd, rs, static_cast<int32_t>(rt.imm64_));
586    } else {
587      // li handles the relocation.
588      DCHECK(!rs.is(at));
589      li(at, rt);
590      daddu(rd, rs, at);
591    }
592  }
593}
594
595
596void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
597  if (rt.is_reg()) {
598    subu(rd, rs, rt.rm());
599  } else {
600    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
601      addiu(rd, rs, static_cast<int32_t>(
602                        -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
603    } else {
604      // li handles the relocation.
605      DCHECK(!rs.is(at));
606      li(at, rt);
607      subu(rd, rs, at);
608    }
609  }
610}
611
612
613void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
614  if (rt.is_reg()) {
615    dsubu(rd, rs, rt.rm());
616  } else {
617    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
618      daddiu(rd, rs,
619             static_cast<int32_t>(
620                 -rt.imm64_));  // No subiu instr, use addiu(x, y, -imm).
621    } else {
622      // li handles the relocation.
623      DCHECK(!rs.is(at));
624      li(at, rt);
625      dsubu(rd, rs, at);
626    }
627  }
628}
629
630
631void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
632  if (rt.is_reg()) {
633    mul(rd, rs, rt.rm());
634  } else {
635    // li handles the relocation.
636    DCHECK(!rs.is(at));
637    li(at, rt);
638    mul(rd, rs, at);
639  }
640}
641
642
643void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
644  if (rt.is_reg()) {
645    if (kArchVariant != kMips64r6) {
646      mult(rs, rt.rm());
647      mfhi(rd);
648    } else {
649      muh(rd, rs, rt.rm());
650    }
651  } else {
652    // li handles the relocation.
653    DCHECK(!rs.is(at));
654    li(at, rt);
655    if (kArchVariant != kMips64r6) {
656      mult(rs, at);
657      mfhi(rd);
658    } else {
659      muh(rd, rs, at);
660    }
661  }
662}
663
664
665void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
666  if (rt.is_reg()) {
667    if (kArchVariant != kMips64r6) {
668      multu(rs, rt.rm());
669      mfhi(rd);
670    } else {
671      muhu(rd, rs, rt.rm());
672    }
673  } else {
674    // li handles the relocation.
675    DCHECK(!rs.is(at));
676    li(at, rt);
677    if (kArchVariant != kMips64r6) {
678      multu(rs, at);
679      mfhi(rd);
680    } else {
681      muhu(rd, rs, at);
682    }
683  }
684}
685
686
687void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
688  if (rt.is_reg()) {
689    if (kArchVariant == kMips64r6) {
690      dmul(rd, rs, rt.rm());
691    } else {
692      dmult(rs, rt.rm());
693      mflo(rd);
694    }
695  } else {
696    // li handles the relocation.
697    DCHECK(!rs.is(at));
698    li(at, rt);
699    if (kArchVariant == kMips64r6) {
700      dmul(rd, rs, at);
701    } else {
702      dmult(rs, at);
703      mflo(rd);
704    }
705  }
706}
707
708
709void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
710  if (rt.is_reg()) {
711    if (kArchVariant == kMips64r6) {
712      dmuh(rd, rs, rt.rm());
713    } else {
714      dmult(rs, rt.rm());
715      mfhi(rd);
716    }
717  } else {
718    // li handles the relocation.
719    DCHECK(!rs.is(at));
720    li(at, rt);
721    if (kArchVariant == kMips64r6) {
722      dmuh(rd, rs, at);
723    } else {
724      dmult(rs, at);
725      mfhi(rd);
726    }
727  }
728}
729
730
731void MacroAssembler::Mult(Register rs, const Operand& rt) {
732  if (rt.is_reg()) {
733    mult(rs, rt.rm());
734  } else {
735    // li handles the relocation.
736    DCHECK(!rs.is(at));
737    li(at, rt);
738    mult(rs, at);
739  }
740}
741
742
743void MacroAssembler::Dmult(Register rs, const Operand& rt) {
744  if (rt.is_reg()) {
745    dmult(rs, rt.rm());
746  } else {
747    // li handles the relocation.
748    DCHECK(!rs.is(at));
749    li(at, rt);
750    dmult(rs, at);
751  }
752}
753
754
755void MacroAssembler::Multu(Register rs, const Operand& rt) {
756  if (rt.is_reg()) {
757    multu(rs, rt.rm());
758  } else {
759    // li handles the relocation.
760    DCHECK(!rs.is(at));
761    li(at, rt);
762    multu(rs, at);
763  }
764}
765
766
767void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
768  if (rt.is_reg()) {
769    dmultu(rs, rt.rm());
770  } else {
771    // li handles the relocation.
772    DCHECK(!rs.is(at));
773    li(at, rt);
774    dmultu(rs, at);
775  }
776}
777
778
779void MacroAssembler::Div(Register rs, const Operand& rt) {
780  if (rt.is_reg()) {
781    div(rs, rt.rm());
782  } else {
783    // li handles the relocation.
784    DCHECK(!rs.is(at));
785    li(at, rt);
786    div(rs, at);
787  }
788}
789
790
791void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
792  if (rt.is_reg()) {
793    if (kArchVariant != kMips64r6) {
794      div(rs, rt.rm());
795      mflo(res);
796    } else {
797      div(res, rs, rt.rm());
798    }
799  } else {
800    // li handles the relocation.
801    DCHECK(!rs.is(at));
802    li(at, rt);
803    if (kArchVariant != kMips64r6) {
804      div(rs, at);
805      mflo(res);
806    } else {
807      div(res, rs, at);
808    }
809  }
810}
811
812
813void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
814  if (rt.is_reg()) {
815    if (kArchVariant != kMips64r6) {
816      div(rs, rt.rm());
817      mfhi(rd);
818    } else {
819      mod(rd, rs, rt.rm());
820    }
821  } else {
822    // li handles the relocation.
823    DCHECK(!rs.is(at));
824    li(at, rt);
825    if (kArchVariant != kMips64r6) {
826      div(rs, at);
827      mfhi(rd);
828    } else {
829      mod(rd, rs, at);
830    }
831  }
832}
833
834
835void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
836  if (rt.is_reg()) {
837    if (kArchVariant != kMips64r6) {
838      divu(rs, rt.rm());
839      mfhi(rd);
840    } else {
841      modu(rd, rs, rt.rm());
842    }
843  } else {
844    // li handles the relocation.
845    DCHECK(!rs.is(at));
846    li(at, rt);
847    if (kArchVariant != kMips64r6) {
848      divu(rs, at);
849      mfhi(rd);
850    } else {
851      modu(rd, rs, at);
852    }
853  }
854}
855
856
857void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
858  if (rt.is_reg()) {
859    ddiv(rs, rt.rm());
860  } else {
861    // li handles the relocation.
862    DCHECK(!rs.is(at));
863    li(at, rt);
864    ddiv(rs, at);
865  }
866}
867
868
869void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
870  if (kArchVariant != kMips64r6) {
871    if (rt.is_reg()) {
872      ddiv(rs, rt.rm());
873      mflo(rd);
874    } else {
875      // li handles the relocation.
876      DCHECK(!rs.is(at));
877      li(at, rt);
878      ddiv(rs, at);
879      mflo(rd);
880    }
881  } else {
882    if (rt.is_reg()) {
883      ddiv(rd, rs, rt.rm());
884    } else {
885      // li handles the relocation.
886      DCHECK(!rs.is(at));
887      li(at, rt);
888      ddiv(rd, rs, at);
889    }
890  }
891}
892
893
894void MacroAssembler::Divu(Register rs, const Operand& rt) {
895  if (rt.is_reg()) {
896    divu(rs, rt.rm());
897  } else {
898    // li handles the relocation.
899    DCHECK(!rs.is(at));
900    li(at, rt);
901    divu(rs, at);
902  }
903}
904
905
906void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
907  if (rt.is_reg()) {
908    if (kArchVariant != kMips64r6) {
909      divu(rs, rt.rm());
910      mflo(res);
911    } else {
912      divu(res, rs, rt.rm());
913    }
914  } else {
915    // li handles the relocation.
916    DCHECK(!rs.is(at));
917    li(at, rt);
918    if (kArchVariant != kMips64r6) {
919      divu(rs, at);
920      mflo(res);
921    } else {
922      divu(res, rs, at);
923    }
924  }
925}
926
927
928void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
929  if (rt.is_reg()) {
930    ddivu(rs, rt.rm());
931  } else {
932    // li handles the relocation.
933    DCHECK(!rs.is(at));
934    li(at, rt);
935    ddivu(rs, at);
936  }
937}
938
939
940void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
941  if (rt.is_reg()) {
942    if (kArchVariant != kMips64r6) {
943      ddivu(rs, rt.rm());
944      mflo(res);
945    } else {
946      ddivu(res, rs, rt.rm());
947    }
948  } else {
949    // li handles the relocation.
950    DCHECK(!rs.is(at));
951    li(at, rt);
952    if (kArchVariant != kMips64r6) {
953      ddivu(rs, at);
954      mflo(res);
955    } else {
956      ddivu(res, rs, at);
957    }
958  }
959}
960
961
962void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
963  if (kArchVariant != kMips64r6) {
964    if (rt.is_reg()) {
965      ddiv(rs, rt.rm());
966      mfhi(rd);
967    } else {
968      // li handles the relocation.
969      DCHECK(!rs.is(at));
970      li(at, rt);
971      ddiv(rs, at);
972      mfhi(rd);
973    }
974  } else {
975    if (rt.is_reg()) {
976      dmod(rd, rs, rt.rm());
977    } else {
978      // li handles the relocation.
979      DCHECK(!rs.is(at));
980      li(at, rt);
981      dmod(rd, rs, at);
982    }
983  }
984}
985
986
987void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
988  if (kArchVariant != kMips64r6) {
989    if (rt.is_reg()) {
990      ddivu(rs, rt.rm());
991      mfhi(rd);
992    } else {
993      // li handles the relocation.
994      DCHECK(!rs.is(at));
995      li(at, rt);
996      ddivu(rs, at);
997      mfhi(rd);
998    }
999  } else {
1000    if (rt.is_reg()) {
1001      dmodu(rd, rs, rt.rm());
1002    } else {
1003      // li handles the relocation.
1004      DCHECK(!rs.is(at));
1005      li(at, rt);
1006      dmodu(rd, rs, at);
1007    }
1008  }
1009}
1010
1011
1012void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
1013  if (rt.is_reg()) {
1014    and_(rd, rs, rt.rm());
1015  } else {
1016    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1017      andi(rd, rs, static_cast<int32_t>(rt.imm64_));
1018    } else {
1019      // li handles the relocation.
1020      DCHECK(!rs.is(at));
1021      li(at, rt);
1022      and_(rd, rs, at);
1023    }
1024  }
1025}
1026
1027
1028void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
1029  if (rt.is_reg()) {
1030    or_(rd, rs, rt.rm());
1031  } else {
1032    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1033      ori(rd, rs, static_cast<int32_t>(rt.imm64_));
1034    } else {
1035      // li handles the relocation.
1036      DCHECK(!rs.is(at));
1037      li(at, rt);
1038      or_(rd, rs, at);
1039    }
1040  }
1041}
1042
1043
1044void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
1045  if (rt.is_reg()) {
1046    xor_(rd, rs, rt.rm());
1047  } else {
1048    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1049      xori(rd, rs, static_cast<int32_t>(rt.imm64_));
1050    } else {
1051      // li handles the relocation.
1052      DCHECK(!rs.is(at));
1053      li(at, rt);
1054      xor_(rd, rs, at);
1055    }
1056  }
1057}
1058
1059
1060void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
1061  if (rt.is_reg()) {
1062    nor(rd, rs, rt.rm());
1063  } else {
1064    // li handles the relocation.
1065    DCHECK(!rs.is(at));
1066    li(at, rt);
1067    nor(rd, rs, at);
1068  }
1069}
1070
1071
1072void MacroAssembler::Neg(Register rs, const Operand& rt) {
1073  DCHECK(rt.is_reg());
1074  DCHECK(!at.is(rs));
1075  DCHECK(!at.is(rt.rm()));
1076  li(at, -1);
1077  xor_(rs, rt.rm(), at);
1078}
1079
1080
1081void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
1082  if (rt.is_reg()) {
1083    slt(rd, rs, rt.rm());
1084  } else {
1085    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1086      slti(rd, rs, static_cast<int32_t>(rt.imm64_));
1087    } else {
1088      // li handles the relocation.
1089      DCHECK(!rs.is(at));
1090      li(at, rt);
1091      slt(rd, rs, at);
1092    }
1093  }
1094}
1095
1096
1097void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
1098  if (rt.is_reg()) {
1099    sltu(rd, rs, rt.rm());
1100  } else {
1101    const uint64_t int16_min = std::numeric_limits<int16_t>::min();
1102    if (is_uint15(rt.imm64_) && !MustUseReg(rt.rmode_)) {
1103      // Imm range is: [0, 32767].
1104      sltiu(rd, rs, static_cast<int32_t>(rt.imm64_));
1105    } else if (is_uint15(rt.imm64_ - int16_min) && !MustUseReg(rt.rmode_)) {
1106      // Imm range is: [max_unsigned-32767,max_unsigned].
1107      sltiu(rd, rs, static_cast<uint16_t>(rt.imm64_));
1108    } else {
1109      // li handles the relocation.
1110      DCHECK(!rs.is(at));
1111      li(at, rt);
1112      sltu(rd, rs, at);
1113    }
1114  }
1115}
1116
1117
1118void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
1119  if (rt.is_reg()) {
1120    rotrv(rd, rs, rt.rm());
1121  } else {
1122    int64_t ror_value = rt.imm64_ % 32;
1123    if (ror_value < 0) {
1124      ror_value += 32;
1125    }
1126    rotr(rd, rs, ror_value);
1127  }
1128}
1129
1130
1131void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
1132  if (rt.is_reg()) {
1133    drotrv(rd, rs, rt.rm());
1134  } else {
1135    int64_t dror_value = rt.imm64_ % 64;
1136    if (dror_value < 0) dror_value += 64;
1137    if (dror_value <= 31) {
1138      drotr(rd, rs, dror_value);
1139    } else {
1140      drotr32(rd, rs, dror_value - 32);
1141    }
1142  }
1143}
1144
1145
1146void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
1147    pref(hint, rs);
1148}
1149
1150
1151void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
1152                         Register scratch) {
1153  DCHECK(sa >= 1 && sa <= 31);
1154  if (kArchVariant == kMips64r6 && sa <= 4) {
1155    lsa(rd, rt, rs, sa - 1);
1156  } else {
1157    Register tmp = rd.is(rt) ? scratch : rd;
1158    DCHECK(!tmp.is(rt));
1159    sll(tmp, rs, sa);
1160    Addu(rd, rt, tmp);
1161  }
1162}
1163
1164
1165void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa,
1166                          Register scratch) {
1167  DCHECK(sa >= 1 && sa <= 31);
1168  if (kArchVariant == kMips64r6 && sa <= 4) {
1169    dlsa(rd, rt, rs, sa - 1);
1170  } else {
1171    Register tmp = rd.is(rt) ? scratch : rd;
1172    DCHECK(!tmp.is(rt));
1173    dsll(tmp, rs, sa);
1174    Daddu(rd, rt, tmp);
1175  }
1176}
1177
1178void MacroAssembler::Bovc(Register rs, Register rt, Label* L) {
1179  if (is_trampoline_emitted()) {
1180    Label skip;
1181    bnvc(rs, rt, &skip);
1182    BranchLong(L, PROTECT);
1183    bind(&skip);
1184  } else {
1185    bovc(rs, rt, L);
1186  }
1187}
1188
1189void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) {
1190  if (is_trampoline_emitted()) {
1191    Label skip;
1192    bovc(rs, rt, &skip);
1193    BranchLong(L, PROTECT);
1194    bind(&skip);
1195  } else {
1196    bnvc(rs, rt, L);
1197  }
1198}
1199
1200// ------------Pseudo-instructions-------------
1201
1202// Change endianness
1203void MacroAssembler::ByteSwapSigned(Register dest, Register src,
1204                                    int operand_size) {
1205  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4 ||
1206         operand_size == 8);
1207  DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2);
1208  if (operand_size == 1) {
1209    seb(src, src);
1210    sll(src, src, 0);
1211    dsbh(dest, src);
1212    dshd(dest, dest);
1213  } else if (operand_size == 2) {
1214    seh(src, src);
1215    sll(src, src, 0);
1216    dsbh(dest, src);
1217    dshd(dest, dest);
1218  } else if (operand_size == 4) {
1219    sll(src, src, 0);
1220    dsbh(dest, src);
1221    dshd(dest, dest);
1222  } else {
1223    dsbh(dest, src);
1224    dshd(dest, dest);
1225  }
1226}
1227
1228void MacroAssembler::ByteSwapUnsigned(Register dest, Register src,
1229                                      int operand_size) {
1230  DCHECK(operand_size == 1 || operand_size == 2 || operand_size == 4);
1231  if (operand_size == 1) {
1232    andi(src, src, 0xFF);
1233    dsbh(dest, src);
1234    dshd(dest, dest);
1235  } else if (operand_size == 2) {
1236    andi(src, src, 0xFFFF);
1237    dsbh(dest, src);
1238    dshd(dest, dest);
1239  } else {
1240    dsll32(src, src, 0);
1241    dsrl32(src, src, 0);
1242    dsbh(dest, src);
1243    dshd(dest, dest);
1244  }
1245}
1246
1247void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
1248  DCHECK(!rd.is(at));
1249  DCHECK(!rs.rm().is(at));
1250  if (kArchVariant == kMips64r6) {
1251    lw(rd, rs);
1252  } else {
1253    DCHECK(kArchVariant == kMips64r2);
1254    if (is_int16(rs.offset() + kMipsLwrOffset) &&
1255        is_int16(rs.offset() + kMipsLwlOffset)) {
1256      if (!rd.is(rs.rm())) {
1257        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1258        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1259      } else {
1260        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
1261        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
1262        mov(rd, at);
1263      }
1264    } else {  // Offset > 16 bits, use multiple instructions to load.
1265      LoadRegPlusOffsetToAt(rs);
1266      lwr(rd, MemOperand(at, kMipsLwrOffset));
1267      lwl(rd, MemOperand(at, kMipsLwlOffset));
1268    }
1269  }
1270}
1271
1272void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
1273  if (kArchVariant == kMips64r6) {
1274    lwu(rd, rs);
1275  } else {
1276    DCHECK(kArchVariant == kMips64r2);
1277    Ulw(rd, rs);
1278    Dext(rd, rd, 0, 32);
1279  }
1280}
1281
1282
1283void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
1284  DCHECK(!rd.is(at));
1285  DCHECK(!rs.rm().is(at));
1286  if (kArchVariant == kMips64r6) {
1287    sw(rd, rs);
1288  } else {
1289    DCHECK(kArchVariant == kMips64r2);
1290    if (is_int16(rs.offset() + kMipsSwrOffset) &&
1291        is_int16(rs.offset() + kMipsSwlOffset)) {
1292      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
1293      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
1294    } else {
1295      LoadRegPlusOffsetToAt(rs);
1296      swr(rd, MemOperand(at, kMipsSwrOffset));
1297      swl(rd, MemOperand(at, kMipsSwlOffset));
1298    }
1299  }
1300}
1301
1302void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
1303  DCHECK(!rd.is(at));
1304  DCHECK(!rs.rm().is(at));
1305  if (kArchVariant == kMips64r6) {
1306    lh(rd, rs);
1307  } else {
1308    DCHECK(kArchVariant == kMips64r2);
1309    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1310#if defined(V8_TARGET_LITTLE_ENDIAN)
1311      lbu(at, rs);
1312      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
1313#elif defined(V8_TARGET_BIG_ENDIAN)
1314      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1315      lb(rd, rs);
1316#endif
1317    } else {  // Offset > 16 bits, use multiple instructions to load.
1318      LoadRegPlusOffsetToAt(rs);
1319#if defined(V8_TARGET_LITTLE_ENDIAN)
1320      lb(rd, MemOperand(at, 1));
1321      lbu(at, MemOperand(at, 0));
1322#elif defined(V8_TARGET_BIG_ENDIAN)
1323      lb(rd, MemOperand(at, 0));
1324      lbu(at, MemOperand(at, 1));
1325#endif
1326    }
1327    dsll(rd, rd, 8);
1328    or_(rd, rd, at);
1329  }
1330}
1331
1332void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
1333  DCHECK(!rd.is(at));
1334  DCHECK(!rs.rm().is(at));
1335  if (kArchVariant == kMips64r6) {
1336    lhu(rd, rs);
1337  } else {
1338    DCHECK(kArchVariant == kMips64r2);
1339    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
1340#if defined(V8_TARGET_LITTLE_ENDIAN)
1341      lbu(at, rs);
1342      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
1343#elif defined(V8_TARGET_BIG_ENDIAN)
1344      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
1345      lbu(rd, rs);
1346#endif
1347    } else {  // Offset > 16 bits, use multiple instructions to load.
1348      LoadRegPlusOffsetToAt(rs);
1349#if defined(V8_TARGET_LITTLE_ENDIAN)
1350      lbu(rd, MemOperand(at, 1));
1351      lbu(at, MemOperand(at, 0));
1352#elif defined(V8_TARGET_BIG_ENDIAN)
1353      lbu(rd, MemOperand(at, 0));
1354      lbu(at, MemOperand(at, 1));
1355#endif
1356    }
1357    dsll(rd, rd, 8);
1358    or_(rd, rd, at);
1359  }
1360}
1361
1362void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
1363  DCHECK(!rd.is(at));
1364  DCHECK(!rs.rm().is(at));
1365  DCHECK(!rs.rm().is(scratch));
1366  DCHECK(!scratch.is(at));
1367  if (kArchVariant == kMips64r6) {
1368    sh(rd, rs);
1369  } else {
1370    DCHECK(kArchVariant == kMips64r2);
1371    MemOperand source = rs;
1372    // If offset > 16 bits, load address to at with offset 0.
1373    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
1374      LoadRegPlusOffsetToAt(rs);
1375      source = MemOperand(at, 0);
1376    }
1377
1378    if (!scratch.is(rd)) {
1379      mov(scratch, rd);
1380    }
1381
1382#if defined(V8_TARGET_LITTLE_ENDIAN)
1383    sb(scratch, source);
1384    srl(scratch, scratch, 8);
1385    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1386#elif defined(V8_TARGET_BIG_ENDIAN)
1387    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
1388    srl(scratch, scratch, 8);
1389    sb(scratch, source);
1390#endif
1391  }
1392}
1393
1394void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
1395  DCHECK(!rd.is(at));
1396  DCHECK(!rs.rm().is(at));
1397  if (kArchVariant == kMips64r6) {
1398    ld(rd, rs);
1399  } else {
1400    DCHECK(kArchVariant == kMips64r2);
1401    if (is_int16(rs.offset() + kMipsLdrOffset) &&
1402        is_int16(rs.offset() + kMipsLdlOffset)) {
1403      if (!rd.is(rs.rm())) {
1404        ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1405        ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1406      } else {
1407        ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset));
1408        ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset));
1409        mov(rd, at);
1410      }
1411    } else {  // Offset > 16 bits, use multiple instructions to load.
1412      LoadRegPlusOffsetToAt(rs);
1413      ldr(rd, MemOperand(at, kMipsLdrOffset));
1414      ldl(rd, MemOperand(at, kMipsLdlOffset));
1415    }
1416  }
1417}
1418
1419
1420// Load consequent 32-bit word pair in 64-bit reg. and put first word in low
1421// bits,
1422// second word in high bits.
1423void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
1424                                  Register scratch) {
1425  lwu(rd, rs);
1426  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1427  dsll32(scratch, scratch, 0);
1428  Daddu(rd, rd, scratch);
1429}
1430
1431void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
1432  DCHECK(!rd.is(at));
1433  DCHECK(!rs.rm().is(at));
1434  if (kArchVariant == kMips64r6) {
1435    sd(rd, rs);
1436  } else {
1437    DCHECK(kArchVariant == kMips64r2);
1438    if (is_int16(rs.offset() + kMipsSdrOffset) &&
1439        is_int16(rs.offset() + kMipsSdlOffset)) {
1440      sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset));
1441      sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset));
1442    } else {
1443      LoadRegPlusOffsetToAt(rs);
1444      sdr(rd, MemOperand(at, kMipsSdrOffset));
1445      sdl(rd, MemOperand(at, kMipsSdlOffset));
1446    }
1447  }
1448}
1449
1450
1451// Do 64-bit store as two consequent 32-bit stores to unaligned address.
1452void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
1453                                   Register scratch) {
1454  sw(rd, rs);
1455  dsrl32(scratch, rd, 0);
1456  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1457}
1458
1459void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
1460                           Register scratch) {
1461  if (kArchVariant == kMips64r6) {
1462    lwc1(fd, rs);
1463  } else {
1464    DCHECK(kArchVariant == kMips64r2);
1465    Ulw(scratch, rs);
1466    mtc1(scratch, fd);
1467  }
1468}
1469
1470void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
1471                           Register scratch) {
1472  if (kArchVariant == kMips64r6) {
1473    swc1(fd, rs);
1474  } else {
1475    DCHECK(kArchVariant == kMips64r2);
1476    mfc1(scratch, fd);
1477    Usw(scratch, rs);
1478  }
1479}
1480
1481void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
1482                           Register scratch) {
1483  DCHECK(!scratch.is(at));
1484  if (kArchVariant == kMips64r6) {
1485    ldc1(fd, rs);
1486  } else {
1487    DCHECK(kArchVariant == kMips64r2);
1488    Uld(scratch, rs);
1489    dmtc1(scratch, fd);
1490  }
1491}
1492
1493void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
1494                           Register scratch) {
1495  DCHECK(!scratch.is(at));
1496  if (kArchVariant == kMips64r6) {
1497    sdc1(fd, rs);
1498  } else {
1499    DCHECK(kArchVariant == kMips64r2);
1500    dmfc1(scratch, fd);
1501    Usd(scratch, rs);
1502  }
1503}
1504
1505void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
1506  li(dst, Operand(value), mode);
1507}
1508
1509static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) {
1510  if ((imm >> (bitnum - 1)) & 0x1) {
1511    imm = (imm >> bitnum) + 1;
1512  } else {
1513    imm = imm >> bitnum;
1514  }
1515  return imm;
1516}
1517
1518bool MacroAssembler::LiLower32BitHelper(Register rd, Operand j) {
1519  bool higher_bits_sign_extended = false;
1520  if (is_int16(j.imm64_)) {
1521    daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
1522  } else if (!(j.imm64_ & kHiMask)) {
1523    ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
1524  } else if (!(j.imm64_ & kImm16Mask)) {
1525    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1526    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1527      higher_bits_sign_extended = true;
1528    }
1529  } else {
1530    lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1531    ori(rd, rd, (j.imm64_ & kImm16Mask));
1532    if ((j.imm64_ >> (kLuiShift + 15)) & 0x1) {
1533      higher_bits_sign_extended = true;
1534    }
1535  }
1536  return higher_bits_sign_extended;
1537}
1538
1539void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
1540  DCHECK(!j.is_reg());
1541  BlockTrampolinePoolScope block_trampoline_pool(this);
1542  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
1543    // Normal load of an immediate value which does not need Relocation Info.
1544    if (is_int32(j.imm64_)) {
1545      LiLower32BitHelper(rd, j);
1546    } else {
1547      if (kArchVariant == kMips64r6) {
1548        int64_t imm = j.imm64_;
1549        bool higher_bits_sign_extended = LiLower32BitHelper(rd, j);
1550        imm = ShiftAndFixSignExtension(imm, 32);
1551        // If LUI writes 1s to higher bits, we need both DAHI/DATI.
1552        if ((imm & kImm16Mask) ||
1553            (higher_bits_sign_extended && (j.imm64_ > 0))) {
1554          dahi(rd, imm & kImm16Mask);
1555        }
1556        imm = ShiftAndFixSignExtension(imm, 16);
1557        if ((!is_int48(j.imm64_) && (imm & kImm16Mask)) ||
1558            (higher_bits_sign_extended && (j.imm64_ > 0))) {
1559          dati(rd, imm & kImm16Mask);
1560        }
1561      } else {
1562        if (is_int48(j.imm64_)) {
1563          if ((j.imm64_ >> 32) & kImm16Mask) {
1564            lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1565            if ((j.imm64_ >> 16) & kImm16Mask) {
1566              ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1567            }
1568          } else {
1569            ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask);
1570          }
1571          dsll(rd, rd, 16);
1572          if (j.imm64_ & kImm16Mask) {
1573            ori(rd, rd, j.imm64_ & kImm16Mask);
1574          }
1575        } else {
1576          lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1577          if ((j.imm64_ >> 32) & kImm16Mask) {
1578            ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1579          }
1580          if ((j.imm64_ >> 16) & kImm16Mask) {
1581            dsll(rd, rd, 16);
1582            ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1583            if (j.imm64_ & kImm16Mask) {
1584              dsll(rd, rd, 16);
1585              ori(rd, rd, j.imm64_ & kImm16Mask);
1586            } else {
1587              dsll(rd, rd, 16);
1588            }
1589          } else {
1590            if (j.imm64_ & kImm16Mask) {
1591              dsll32(rd, rd, 0);
1592              ori(rd, rd, j.imm64_ & kImm16Mask);
1593            } else {
1594              dsll32(rd, rd, 0);
1595            }
1596          }
1597        }
1598      }
1599    }
1600  } else if (MustUseReg(j.rmode_)) {
1601    RecordRelocInfo(j.rmode_, j.imm64_);
1602    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1603    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1604    dsll(rd, rd, 16);
1605    ori(rd, rd, j.imm64_ & kImm16Mask);
1606  } else if (mode == ADDRESS_LOAD)  {
1607    // We always need the same number of instructions as we may need to patch
1608    // this code to load another value which may need all 4 instructions.
1609    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1610    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1611    dsll(rd, rd, 16);
1612    ori(rd, rd, j.imm64_ & kImm16Mask);
1613  } else {
1614    if (kArchVariant == kMips64r6) {
1615      int64_t imm = j.imm64_;
1616      lui(rd, (imm >> kLuiShift) & kImm16Mask);
1617      if (imm & kImm16Mask) {
1618        ori(rd, rd, (imm & kImm16Mask));
1619      }
1620      if ((imm >> 31) & 0x1) {
1621        imm = (imm >> 32) + 1;
1622      } else {
1623        imm = imm >> 32;
1624      }
1625      dahi(rd, imm & kImm16Mask);
1626      if ((imm >> 15) & 0x1) {
1627        imm = (imm >> 16) + 1;
1628      } else {
1629        imm = imm >> 16;
1630      }
1631      dati(rd, imm & kImm16Mask);
1632    } else {
1633      lui(rd, (j.imm64_ >> 48) & kImm16Mask);
1634      ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1635      dsll(rd, rd, 16);
1636      ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1637      dsll(rd, rd, 16);
1638      ori(rd, rd, j.imm64_ & kImm16Mask);
1639    }
1640  }
1641}
1642
1643
1644void MacroAssembler::MultiPush(RegList regs) {
1645  int16_t num_to_push = NumberOfBitsSet(regs);
1646  int16_t stack_offset = num_to_push * kPointerSize;
1647
1648  Dsubu(sp, sp, Operand(stack_offset));
1649  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1650    if ((regs & (1 << i)) != 0) {
1651      stack_offset -= kPointerSize;
1652      sd(ToRegister(i), MemOperand(sp, stack_offset));
1653    }
1654  }
1655}
1656
1657
1658void MacroAssembler::MultiPushReversed(RegList regs) {
1659  int16_t num_to_push = NumberOfBitsSet(regs);
1660  int16_t stack_offset = num_to_push * kPointerSize;
1661
1662  Dsubu(sp, sp, Operand(stack_offset));
1663  for (int16_t i = 0; i < kNumRegisters; i++) {
1664    if ((regs & (1 << i)) != 0) {
1665      stack_offset -= kPointerSize;
1666      sd(ToRegister(i), MemOperand(sp, stack_offset));
1667    }
1668  }
1669}
1670
1671
1672void MacroAssembler::MultiPop(RegList regs) {
1673  int16_t stack_offset = 0;
1674
1675  for (int16_t i = 0; i < kNumRegisters; i++) {
1676    if ((regs & (1 << i)) != 0) {
1677      ld(ToRegister(i), MemOperand(sp, stack_offset));
1678      stack_offset += kPointerSize;
1679    }
1680  }
1681  daddiu(sp, sp, stack_offset);
1682}
1683
1684
1685void MacroAssembler::MultiPopReversed(RegList regs) {
1686  int16_t stack_offset = 0;
1687
1688  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1689    if ((regs & (1 << i)) != 0) {
1690      ld(ToRegister(i), MemOperand(sp, stack_offset));
1691      stack_offset += kPointerSize;
1692    }
1693  }
1694  daddiu(sp, sp, stack_offset);
1695}
1696
1697
1698void MacroAssembler::MultiPushFPU(RegList regs) {
1699  int16_t num_to_push = NumberOfBitsSet(regs);
1700  int16_t stack_offset = num_to_push * kDoubleSize;
1701
1702  Dsubu(sp, sp, Operand(stack_offset));
1703  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1704    if ((regs & (1 << i)) != 0) {
1705      stack_offset -= kDoubleSize;
1706      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1707    }
1708  }
1709}
1710
1711
1712void MacroAssembler::MultiPushReversedFPU(RegList regs) {
1713  int16_t num_to_push = NumberOfBitsSet(regs);
1714  int16_t stack_offset = num_to_push * kDoubleSize;
1715
1716  Dsubu(sp, sp, Operand(stack_offset));
1717  for (int16_t i = 0; i < kNumRegisters; i++) {
1718    if ((regs & (1 << i)) != 0) {
1719      stack_offset -= kDoubleSize;
1720      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1721    }
1722  }
1723}
1724
1725
1726void MacroAssembler::MultiPopFPU(RegList regs) {
1727  int16_t stack_offset = 0;
1728
1729  for (int16_t i = 0; i < kNumRegisters; i++) {
1730    if ((regs & (1 << i)) != 0) {
1731      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1732      stack_offset += kDoubleSize;
1733    }
1734  }
1735  daddiu(sp, sp, stack_offset);
1736}
1737
1738
1739void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1740  int16_t stack_offset = 0;
1741
1742  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1743    if ((regs & (1 << i)) != 0) {
1744      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1745      stack_offset += kDoubleSize;
1746    }
1747  }
1748  daddiu(sp, sp, stack_offset);
1749}
1750
1751
1752void MacroAssembler::Ext(Register rt,
1753                         Register rs,
1754                         uint16_t pos,
1755                         uint16_t size) {
1756  DCHECK(pos < 32);
1757  DCHECK(pos + size < 33);
1758  ext_(rt, rs, pos, size);
1759}
1760
1761void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
1762                                 uint16_t size) {
1763  DCHECK(pos < 64);
1764  DCHECK(size > 0 && size <= 64);
1765  DCHECK(pos + size <= 64);
1766  if (pos < 32) {
1767    if (size <= 32) {
1768      Dext(rt, rs, pos, size);
1769    } else {
1770      Dextm(rt, rs, pos, size);
1771    }
1772  } else if (pos < 64) {
1773    DCHECK(size <= 32);
1774    Dextu(rt, rs, pos, size);
1775  }
1776}
1777
1778void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
1779                          uint16_t size) {
1780  DCHECK(pos < 32);
1781  DCHECK(size > 0 && size <= 32);
1782  dext_(rt, rs, pos, size);
1783}
1784
1785
1786void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
1787                           uint16_t size) {
1788  DCHECK(pos < 32);
1789  DCHECK(size > 32 && size <= 64);
1790  DCHECK((pos + size) > 32 && (pos + size) <= 64);
1791  dextm(rt, rs, pos, size);
1792}
1793
1794
1795void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
1796                           uint16_t size) {
1797  DCHECK(pos >= 32 && pos < 64);
1798  DCHECK(size > 0 && size <= 32);
1799  DCHECK((pos + size) > 32 && (pos + size) <= 64);
1800  dextu(rt, rs, pos, size);
1801}
1802
1803
1804void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
1805                          uint16_t size) {
1806  DCHECK(pos < 32);
1807  DCHECK(pos + size <= 32);
1808  DCHECK(size != 0);
1809  dins_(rt, rs, pos, size);
1810}
1811
1812
1813void MacroAssembler::Ins(Register rt,
1814                         Register rs,
1815                         uint16_t pos,
1816                         uint16_t size) {
1817  DCHECK(pos < 32);
1818  DCHECK(pos + size <= 32);
1819  DCHECK(size != 0);
1820  ins_(rt, rs, pos, size);
1821}
1822
1823void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
1824  if (kArchVariant == kMips64r6) {
1825    // r6 neg_s changes the sign for NaN-like operands as well.
1826    neg_s(fd, fs);
1827  } else {
1828    DCHECK(kArchVariant == kMips64r2);
1829    Label is_nan, done;
1830    Register scratch1 = t8;
1831    Register scratch2 = t9;
1832    BranchF32(nullptr, &is_nan, eq, fs, fs);
1833    Branch(USE_DELAY_SLOT, &done);
1834    // For NaN input, neg_s will return the same NaN value,
1835    // while the sign has to be changed separately.
1836    neg_s(fd, fs);  // In delay slot.
1837    bind(&is_nan);
1838    mfc1(scratch1, fs);
1839    And(scratch2, scratch1, Operand(~kBinary32SignMask));
1840    And(scratch1, scratch1, Operand(kBinary32SignMask));
1841    Xor(scratch1, scratch1, Operand(kBinary32SignMask));
1842    Or(scratch2, scratch2, scratch1);
1843    mtc1(scratch2, fd);
1844    bind(&done);
1845  }
1846}
1847
1848void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
1849  if (kArchVariant == kMips64r6) {
1850    // r6 neg_d changes the sign for NaN-like operands as well.
1851    neg_d(fd, fs);
1852  } else {
1853    DCHECK(kArchVariant == kMips64r2);
1854    Label is_nan, done;
1855    Register scratch1 = t8;
1856    Register scratch2 = t9;
1857    BranchF64(nullptr, &is_nan, eq, fs, fs);
1858    Branch(USE_DELAY_SLOT, &done);
1859    // For NaN input, neg_d will return the same NaN value,
1860    // while the sign has to be changed separately.
1861    neg_d(fd, fs);  // In delay slot.
1862    bind(&is_nan);
1863    dmfc1(scratch1, fs);
1864    And(scratch2, scratch1, Operand(~Double::kSignMask));
1865    And(scratch1, scratch1, Operand(Double::kSignMask));
1866    Xor(scratch1, scratch1, Operand(Double::kSignMask));
1867    Or(scratch2, scratch2, scratch1);
1868    dmtc1(scratch2, fd);
1869    bind(&done);
1870  }
1871}
1872
1873void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
1874  // Move the data from fs to t8.
1875  mfc1(t8, fs);
1876  Cvt_d_uw(fd, t8);
1877}
1878
1879
1880void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
1881  // Convert rs to a FP value in fd.
1882  DCHECK(!rs.is(t9));
1883  DCHECK(!rs.is(at));
1884
1885  // Zero extend int32 in rs.
1886  Dext(t9, rs, 0, 32);
1887  dmtc1(t9, fd);
1888  cvt_d_l(fd, fd);
1889}
1890
1891
1892void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) {
1893  // Move the data from fs to t8.
1894  dmfc1(t8, fs);
1895  Cvt_d_ul(fd, t8);
1896}
1897
1898
1899void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) {
1900  // Convert rs to a FP value in fd.
1901
1902  DCHECK(!rs.is(t9));
1903  DCHECK(!rs.is(at));
1904
1905  Label msb_clear, conversion_done;
1906
1907  Branch(&msb_clear, ge, rs, Operand(zero_reg));
1908
1909  // Rs >= 2^63
1910  andi(t9, rs, 1);
1911  dsrl(rs, rs, 1);
1912  or_(t9, t9, rs);
1913  dmtc1(t9, fd);
1914  cvt_d_l(fd, fd);
1915  Branch(USE_DELAY_SLOT, &conversion_done);
1916  add_d(fd, fd, fd);  // In delay slot.
1917
1918  bind(&msb_clear);
1919  // Rs < 2^63, we can do simple conversion.
1920  dmtc1(rs, fd);
1921  cvt_d_l(fd, fd);
1922
1923  bind(&conversion_done);
1924}
1925
1926void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) {
1927  // Move the data from fs to t8.
1928  mfc1(t8, fs);
1929  Cvt_s_uw(fd, t8);
1930}
1931
1932void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) {
1933  // Convert rs to a FP value in fd.
1934  DCHECK(!rs.is(t9));
1935  DCHECK(!rs.is(at));
1936
1937  // Zero extend int32 in rs.
1938  Dext(t9, rs, 0, 32);
1939  dmtc1(t9, fd);
1940  cvt_s_l(fd, fd);
1941}
1942
1943void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) {
1944  // Move the data from fs to t8.
1945  dmfc1(t8, fs);
1946  Cvt_s_ul(fd, t8);
1947}
1948
1949
1950void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) {
1951  // Convert rs to a FP value in fd.
1952
1953  DCHECK(!rs.is(t9));
1954  DCHECK(!rs.is(at));
1955
1956  Label positive, conversion_done;
1957
1958  Branch(&positive, ge, rs, Operand(zero_reg));
1959
1960  // Rs >= 2^31.
1961  andi(t9, rs, 1);
1962  dsrl(rs, rs, 1);
1963  or_(t9, t9, rs);
1964  dmtc1(t9, fd);
1965  cvt_s_l(fd, fd);
1966  Branch(USE_DELAY_SLOT, &conversion_done);
1967  add_s(fd, fd, fd);  // In delay slot.
1968
1969  bind(&positive);
1970  // Rs < 2^31, we can do simple conversion.
1971  dmtc1(rs, fd);
1972  cvt_s_l(fd, fd);
1973
1974  bind(&conversion_done);
1975}
1976
1977
1978void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1979  round_l_d(fd, fs);
1980}
1981
1982
1983void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1984  floor_l_d(fd, fs);
1985}
1986
1987
1988void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1989  ceil_l_d(fd, fs);
1990}
1991
1992
1993void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1994  trunc_l_d(fd, fs);
1995}
1996
1997
1998void MacroAssembler::Trunc_l_ud(FPURegister fd,
1999                                FPURegister fs,
2000                                FPURegister scratch) {
2001  // Load to GPR.
2002  dmfc1(t8, fs);
2003  // Reset sign bit.
2004  li(at, 0x7fffffffffffffff);
2005  and_(t8, t8, at);
2006  dmtc1(t8, fs);
2007  trunc_l_d(fd, fs);
2008}
2009
2010
2011void MacroAssembler::Trunc_uw_d(FPURegister fd,
2012                                FPURegister fs,
2013                                FPURegister scratch) {
2014  Trunc_uw_d(fs, t8, scratch);
2015  mtc1(t8, fd);
2016}
2017
2018void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs,
2019                                FPURegister scratch) {
2020  Trunc_uw_s(fs, t8, scratch);
2021  mtc1(t8, fd);
2022}
2023
2024void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs,
2025                                FPURegister scratch, Register result) {
2026  Trunc_ul_d(fs, t8, scratch, result);
2027  dmtc1(t8, fd);
2028}
2029
2030
2031void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs,
2032                                FPURegister scratch, Register result) {
2033  Trunc_ul_s(fs, t8, scratch, result);
2034  dmtc1(t8, fd);
2035}
2036
2037
2038void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
2039  trunc_w_d(fd, fs);
2040}
2041
2042
2043void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
2044  round_w_d(fd, fs);
2045}
2046
2047
2048void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
2049  floor_w_d(fd, fs);
2050}
2051
2052
2053void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
2054  ceil_w_d(fd, fs);
2055}
2056
2057
2058void MacroAssembler::Trunc_uw_d(FPURegister fd,
2059                                Register rs,
2060                                FPURegister scratch) {
2061  DCHECK(!fd.is(scratch));
2062  DCHECK(!rs.is(at));
2063
2064  // Load 2^31 into scratch as its float representation.
2065  li(at, 0x41E00000);
2066  mtc1(zero_reg, scratch);
2067  mthc1(at, scratch);
2068  // Test if scratch > fd.
2069  // If fd < 2^31 we can convert it normally.
2070  Label simple_convert;
2071  BranchF(&simple_convert, NULL, lt, fd, scratch);
2072
2073  // First we subtract 2^31 from fd, then trunc it to rs
2074  // and add 2^31 to rs.
2075  sub_d(scratch, fd, scratch);
2076  trunc_w_d(scratch, scratch);
2077  mfc1(rs, scratch);
2078  Or(rs, rs, 1 << 31);
2079
2080  Label done;
2081  Branch(&done);
2082  // Simple conversion.
2083  bind(&simple_convert);
2084  trunc_w_d(scratch, fd);
2085  mfc1(rs, scratch);
2086
2087  bind(&done);
2088}
2089
2090void MacroAssembler::Trunc_uw_s(FPURegister fd, Register rs,
2091                                FPURegister scratch) {
2092  DCHECK(!fd.is(scratch));
2093  DCHECK(!rs.is(at));
2094
2095  // Load 2^31 into scratch as its float representation.
2096  li(at, 0x4F000000);
2097  mtc1(at, scratch);
2098  // Test if scratch > fd.
2099  // If fd < 2^31 we can convert it normally.
2100  Label simple_convert;
2101  BranchF32(&simple_convert, NULL, lt, fd, scratch);
2102
2103  // First we subtract 2^31 from fd, then trunc it to rs
2104  // and add 2^31 to rs.
2105  sub_s(scratch, fd, scratch);
2106  trunc_w_s(scratch, scratch);
2107  mfc1(rs, scratch);
2108  Or(rs, rs, 1 << 31);
2109
2110  Label done;
2111  Branch(&done);
2112  // Simple conversion.
2113  bind(&simple_convert);
2114  trunc_w_s(scratch, fd);
2115  mfc1(rs, scratch);
2116
2117  bind(&done);
2118}
2119
2120void MacroAssembler::Trunc_ul_d(FPURegister fd, Register rs,
2121                                FPURegister scratch, Register result) {
2122  DCHECK(!fd.is(scratch));
2123  DCHECK(!AreAliased(rs, result, at));
2124
2125  Label simple_convert, done, fail;
2126  if (result.is_valid()) {
2127    mov(result, zero_reg);
2128    Move(scratch, -1.0);
2129    // If fd =< -1 or unordered, then the conversion fails.
2130    BranchF(&fail, &fail, le, fd, scratch);
2131  }
2132
2133  // Load 2^63 into scratch as its double representation.
2134  li(at, 0x43e0000000000000);
2135  dmtc1(at, scratch);
2136
2137  // Test if scratch > fd.
2138  // If fd < 2^63 we can convert it normally.
2139  BranchF(&simple_convert, nullptr, lt, fd, scratch);
2140
2141  // First we subtract 2^63 from fd, then trunc it to rs
2142  // and add 2^63 to rs.
2143  sub_d(scratch, fd, scratch);
2144  trunc_l_d(scratch, scratch);
2145  dmfc1(rs, scratch);
2146  Or(rs, rs, Operand(1UL << 63));
2147  Branch(&done);
2148
2149  // Simple conversion.
2150  bind(&simple_convert);
2151  trunc_l_d(scratch, fd);
2152  dmfc1(rs, scratch);
2153
2154  bind(&done);
2155  if (result.is_valid()) {
2156    // Conversion is failed if the result is negative.
2157    addiu(at, zero_reg, -1);
2158    dsrl(at, at, 1);  // Load 2^62.
2159    dmfc1(result, scratch);
2160    xor_(result, result, at);
2161    Slt(result, zero_reg, result);
2162  }
2163
2164  bind(&fail);
2165}
2166
2167
2168void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
2169                                FPURegister scratch, Register result) {
2170  DCHECK(!fd.is(scratch));
2171  DCHECK(!AreAliased(rs, result, at));
2172
2173  Label simple_convert, done, fail;
2174  if (result.is_valid()) {
2175    mov(result, zero_reg);
2176    Move(scratch, -1.0f);
2177    // If fd =< -1 or unordered, then the conversion fails.
2178    BranchF32(&fail, &fail, le, fd, scratch);
2179  }
2180
2181  // Load 2^63 into scratch as its float representation.
2182  li(at, 0x5f000000);
2183  mtc1(at, scratch);
2184
2185  // Test if scratch > fd.
2186  // If fd < 2^63 we can convert it normally.
2187  BranchF32(&simple_convert, nullptr, lt, fd, scratch);
2188
2189  // First we subtract 2^63 from fd, then trunc it to rs
2190  // and add 2^63 to rs.
2191  sub_s(scratch, fd, scratch);
2192  trunc_l_s(scratch, scratch);
2193  dmfc1(rs, scratch);
2194  Or(rs, rs, Operand(1UL << 63));
2195  Branch(&done);
2196
2197  // Simple conversion.
2198  bind(&simple_convert);
2199  trunc_l_s(scratch, fd);
2200  dmfc1(rs, scratch);
2201
2202  bind(&done);
2203  if (result.is_valid()) {
2204    // Conversion is failed if the result is negative or unordered.
2205    addiu(at, zero_reg, -1);
2206    dsrl(at, at, 1);  // Load 2^62.
2207    dmfc1(result, scratch);
2208    xor_(result, result, at);
2209    Slt(result, zero_reg, result);
2210  }
2211
2212  bind(&fail);
2213}
2214
2215void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
2216                            FPURegister ft, FPURegister scratch) {
2217  if (kArchVariant == kMips64r2) {
2218    madd_s(fd, fr, fs, ft);
2219  } else {
2220    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2221    mul_s(scratch, fs, ft);
2222    add_s(fd, fr, scratch);
2223  }
2224}
2225
2226void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
2227    FPURegister ft, FPURegister scratch) {
2228  if (kArchVariant == kMips64r2) {
2229    madd_d(fd, fr, fs, ft);
2230  } else {
2231    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2232    mul_d(scratch, fs, ft);
2233    add_d(fd, fr, scratch);
2234  }
2235}
2236
2237void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
2238                            FPURegister ft, FPURegister scratch) {
2239  if (kArchVariant == kMips64r2) {
2240    msub_s(fd, fr, fs, ft);
2241  } else {
2242    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2243    mul_s(scratch, fs, ft);
2244    sub_s(fd, scratch, fr);
2245  }
2246}
2247
2248void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
2249                            FPURegister ft, FPURegister scratch) {
2250  if (kArchVariant == kMips64r2) {
2251    msub_d(fd, fr, fs, ft);
2252  } else {
2253    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
2254    mul_d(scratch, fs, ft);
2255    sub_d(fd, scratch, fr);
2256  }
2257}
2258
2259void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
2260                                   Label* nan, Condition cond, FPURegister cmp1,
2261                                   FPURegister cmp2, BranchDelaySlot bd) {
2262  BlockTrampolinePoolScope block_trampoline_pool(this);
2263  if (cond == al) {
2264    Branch(bd, target);
2265    return;
2266  }
2267
2268  if (kArchVariant == kMips64r6) {
2269    sizeField = sizeField == D ? L : W;
2270  }
2271
2272  DCHECK(nan || target);
2273  // Check for unordered (NaN) cases.
2274  if (nan) {
2275    bool long_branch =
2276        nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
2277    if (kArchVariant != kMips64r6) {
2278      if (long_branch) {
2279        Label skip;
2280        c(UN, sizeField, cmp1, cmp2);
2281        bc1f(&skip);
2282        nop();
2283        BranchLong(nan, bd);
2284        bind(&skip);
2285      } else {
2286        c(UN, sizeField, cmp1, cmp2);
2287        bc1t(nan);
2288        if (bd == PROTECT) {
2289          nop();
2290        }
2291      }
2292    } else {
2293      // Use kDoubleCompareReg for comparison result. It has to be unavailable
2294      // to lithium
2295      // register allocator.
2296      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2297      if (long_branch) {
2298        Label skip;
2299        cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2300        bc1eqz(&skip, kDoubleCompareReg);
2301        nop();
2302        BranchLong(nan, bd);
2303        bind(&skip);
2304      } else {
2305        cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2);
2306        bc1nez(nan, kDoubleCompareReg);
2307        if (bd == PROTECT) {
2308          nop();
2309        }
2310      }
2311    }
2312  }
2313
2314  if (target) {
2315    bool long_branch =
2316        target->is_bound() ? !is_near(target) : is_trampoline_emitted();
2317    if (long_branch) {
2318      Label skip;
2319      Condition neg_cond = NegateFpuCondition(cond);
2320      BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
2321      BranchLong(target, bd);
2322      bind(&skip);
2323    } else {
2324      BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
2325    }
2326  }
2327}
2328
2329
2330void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
2331                                  Condition cc, FPURegister cmp1,
2332                                  FPURegister cmp2, BranchDelaySlot bd) {
2333  if (kArchVariant != kMips64r6) {
2334    BlockTrampolinePoolScope block_trampoline_pool(this);
2335    if (target) {
2336      // Here NaN cases were either handled by this function or are assumed to
2337      // have been handled by the caller.
2338      switch (cc) {
2339        case lt:
2340          c(OLT, sizeField, cmp1, cmp2);
2341          bc1t(target);
2342          break;
2343        case ult:
2344          c(ULT, sizeField, cmp1, cmp2);
2345          bc1t(target);
2346          break;
2347        case gt:
2348          c(ULE, sizeField, cmp1, cmp2);
2349          bc1f(target);
2350          break;
2351        case ugt:
2352          c(OLE, sizeField, cmp1, cmp2);
2353          bc1f(target);
2354          break;
2355        case ge:
2356          c(ULT, sizeField, cmp1, cmp2);
2357          bc1f(target);
2358          break;
2359        case uge:
2360          c(OLT, sizeField, cmp1, cmp2);
2361          bc1f(target);
2362          break;
2363        case le:
2364          c(OLE, sizeField, cmp1, cmp2);
2365          bc1t(target);
2366          break;
2367        case ule:
2368          c(ULE, sizeField, cmp1, cmp2);
2369          bc1t(target);
2370          break;
2371        case eq:
2372          c(EQ, sizeField, cmp1, cmp2);
2373          bc1t(target);
2374          break;
2375        case ueq:
2376          c(UEQ, sizeField, cmp1, cmp2);
2377          bc1t(target);
2378          break;
2379        case ne:  // Unordered or not equal.
2380          c(EQ, sizeField, cmp1, cmp2);
2381          bc1f(target);
2382          break;
2383        case ogl:
2384          c(UEQ, sizeField, cmp1, cmp2);
2385          bc1f(target);
2386          break;
2387        default:
2388          CHECK(0);
2389      }
2390    }
2391  } else {
2392    BlockTrampolinePoolScope block_trampoline_pool(this);
2393    if (target) {
2394      // Here NaN cases were either handled by this function or are assumed to
2395      // have been handled by the caller.
2396      // Unsigned conditions are treated as their signed counterpart.
2397      // Use kDoubleCompareReg for comparison result, it is valid in fp64 (FR =
2398      // 1) mode.
2399      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
2400      switch (cc) {
2401        case lt:
2402          cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2403          bc1nez(target, kDoubleCompareReg);
2404          break;
2405        case ult:
2406          cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2407          bc1nez(target, kDoubleCompareReg);
2408          break;
2409        case gt:
2410          cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2411          bc1eqz(target, kDoubleCompareReg);
2412          break;
2413        case ugt:
2414          cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2415          bc1eqz(target, kDoubleCompareReg);
2416          break;
2417        case ge:
2418          cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2419          bc1eqz(target, kDoubleCompareReg);
2420          break;
2421        case uge:
2422          cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
2423          bc1eqz(target, kDoubleCompareReg);
2424          break;
2425        case le:
2426          cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2427          bc1nez(target, kDoubleCompareReg);
2428          break;
2429        case ule:
2430          cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
2431          bc1nez(target, kDoubleCompareReg);
2432          break;
2433        case eq:
2434          cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2435          bc1nez(target, kDoubleCompareReg);
2436          break;
2437        case ueq:
2438          cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2439          bc1nez(target, kDoubleCompareReg);
2440          break;
2441        case ne:
2442          cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2443          bc1eqz(target, kDoubleCompareReg);
2444          break;
2445        case ogl:
2446          cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
2447          bc1eqz(target, kDoubleCompareReg);
2448          break;
2449        default:
2450          CHECK(0);
2451      }
2452    }
2453  }
2454
2455  if (bd == PROTECT) {
2456    nop();
2457  }
2458}
2459
2460
2461void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
2462  DCHECK(!src_low.is(at));
2463  mfhc1(at, dst);
2464  mtc1(src_low, dst);
2465  mthc1(at, dst);
2466}
2467
2468
2469void MacroAssembler::Move(FPURegister dst, float imm) {
2470  li(at, Operand(bit_cast<int32_t>(imm)));
2471  mtc1(at, dst);
2472}
2473
2474
2475void MacroAssembler::Move(FPURegister dst, double imm) {
2476  int64_t imm_bits = bit_cast<int64_t>(imm);
2477  // Handle special values first.
2478  if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
2479    mov_d(dst, kDoubleRegZero);
2480  } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
2481    Neg_d(dst, kDoubleRegZero);
2482  } else {
2483    uint32_t lo, hi;
2484    DoubleAsTwoUInt32(imm, &lo, &hi);
2485    // Move the low part of the double into the lower bits of the corresponding
2486    // FPU register.
2487    if (lo != 0) {
2488      if (!(lo & kImm16Mask)) {
2489        lui(at, (lo >> kLuiShift) & kImm16Mask);
2490        mtc1(at, dst);
2491      } else if (!(lo & kHiMask)) {
2492        ori(at, zero_reg, lo & kImm16Mask);
2493        mtc1(at, dst);
2494      } else {
2495        lui(at, (lo >> kLuiShift) & kImm16Mask);
2496        ori(at, at, lo & kImm16Mask);
2497        mtc1(at, dst);
2498      }
2499    } else {
2500      mtc1(zero_reg, dst);
2501    }
2502    // Move the high part of the double into the high bits of the corresponding
2503    // FPU register.
2504    if (hi != 0) {
2505      if (!(hi & kImm16Mask)) {
2506        lui(at, (hi >> kLuiShift) & kImm16Mask);
2507        mthc1(at, dst);
2508      } else if (!(hi & kHiMask)) {
2509        ori(at, zero_reg, hi & kImm16Mask);
2510        mthc1(at, dst);
2511      } else {
2512        lui(at, (hi >> kLuiShift) & kImm16Mask);
2513        ori(at, at, hi & kImm16Mask);
2514        mthc1(at, dst);
2515      }
2516    } else {
2517      mthc1(zero_reg, dst);
2518    }
2519    if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
2520  }
2521}
2522
2523
2524void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
2525  if (kArchVariant == kMips64r6) {
2526    Label done;
2527    Branch(&done, ne, rt, Operand(zero_reg));
2528    mov(rd, rs);
2529    bind(&done);
2530  } else {
2531    movz(rd, rs, rt);
2532  }
2533}
2534
2535
2536void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
2537  if (kArchVariant == kMips64r6) {
2538    Label done;
2539    Branch(&done, eq, rt, Operand(zero_reg));
2540    mov(rd, rs);
2541    bind(&done);
2542  } else {
2543    movn(rd, rs, rt);
2544  }
2545}
2546
2547
2548void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
2549  movt(rd, rs, cc);
2550}
2551
2552
2553void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
2554  movf(rd, rs, cc);
2555}
2556
2557
2558void MacroAssembler::Clz(Register rd, Register rs) {
2559  clz(rd, rs);
2560}
2561
2562
2563void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
2564                                     Register result,
2565                                     DoubleRegister double_input,
2566                                     Register scratch,
2567                                     DoubleRegister double_scratch,
2568                                     Register except_flag,
2569                                     CheckForInexactConversion check_inexact) {
2570  DCHECK(!result.is(scratch));
2571  DCHECK(!double_input.is(double_scratch));
2572  DCHECK(!except_flag.is(scratch));
2573
2574  Label done;
2575
2576  // Clear the except flag (0 = no exception)
2577  mov(except_flag, zero_reg);
2578
2579  // Test for values that can be exactly represented as a signed 32-bit integer.
2580  cvt_w_d(double_scratch, double_input);
2581  mfc1(result, double_scratch);
2582  cvt_d_w(double_scratch, double_scratch);
2583  BranchF(&done, NULL, eq, double_input, double_scratch);
2584
2585  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
2586
2587  if (check_inexact == kDontCheckForInexactConversion) {
2588    // Ignore inexact exceptions.
2589    except_mask &= ~kFCSRInexactFlagMask;
2590  }
2591
2592  // Save FCSR.
2593  cfc1(scratch, FCSR);
2594  // Disable FPU exceptions.
2595  ctc1(zero_reg, FCSR);
2596
2597  // Do operation based on rounding mode.
2598  switch (rounding_mode) {
2599    case kRoundToNearest:
2600      Round_w_d(double_scratch, double_input);
2601      break;
2602    case kRoundToZero:
2603      Trunc_w_d(double_scratch, double_input);
2604      break;
2605    case kRoundToPlusInf:
2606      Ceil_w_d(double_scratch, double_input);
2607      break;
2608    case kRoundToMinusInf:
2609      Floor_w_d(double_scratch, double_input);
2610      break;
2611  }  // End of switch-statement.
2612
2613  // Retrieve FCSR.
2614  cfc1(except_flag, FCSR);
2615  // Restore FCSR.
2616  ctc1(scratch, FCSR);
2617  // Move the converted value into the result register.
2618  mfc1(result, double_scratch);
2619
2620  // Check for fpu exceptions.
2621  And(except_flag, except_flag, Operand(except_mask));
2622
2623  bind(&done);
2624}
2625
2626
2627void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2628                                                DoubleRegister double_input,
2629                                                Label* done) {
2630  DoubleRegister single_scratch = kLithiumScratchDouble.low();
2631  Register scratch = at;
2632  Register scratch2 = t9;
2633
2634  // Clear cumulative exception flags and save the FCSR.
2635  cfc1(scratch2, FCSR);
2636  ctc1(zero_reg, FCSR);
2637  // Try a conversion to a signed integer.
2638  trunc_w_d(single_scratch, double_input);
2639  mfc1(result, single_scratch);
2640  // Retrieve and restore the FCSR.
2641  cfc1(scratch, FCSR);
2642  ctc1(scratch2, FCSR);
2643  // Check for overflow and NaNs.
2644  And(scratch,
2645      scratch,
2646      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
2647  // If we had no exceptions we are done.
2648  Branch(done, eq, scratch, Operand(zero_reg));
2649}
2650
2651
2652void MacroAssembler::TruncateDoubleToI(Register result,
2653                                       DoubleRegister double_input) {
2654  Label done;
2655
2656  TryInlineTruncateDoubleToI(result, double_input, &done);
2657
2658  // If we fell through then inline version didn't succeed - call stub instead.
2659  push(ra);
2660  Dsubu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
2661  sdc1(double_input, MemOperand(sp, 0));
2662
2663  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2664  CallStub(&stub);
2665
2666  Daddu(sp, sp, Operand(kDoubleSize));
2667  pop(ra);
2668
2669  bind(&done);
2670}
2671
2672
2673void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2674  Label done;
2675  DoubleRegister double_scratch = f12;
2676  DCHECK(!result.is(object));
2677
2678  ldc1(double_scratch,
2679       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2680  TryInlineTruncateDoubleToI(result, double_scratch, &done);
2681
2682  // If we fell through then inline version didn't succeed - call stub instead.
2683  push(ra);
2684  DoubleToIStub stub(isolate(),
2685                     object,
2686                     result,
2687                     HeapNumber::kValueOffset - kHeapObjectTag,
2688                     true,
2689                     true);
2690  CallStub(&stub);
2691  pop(ra);
2692
2693  bind(&done);
2694}
2695
2696
2697void MacroAssembler::TruncateNumberToI(Register object,
2698                                       Register result,
2699                                       Register heap_number_map,
2700                                       Register scratch,
2701                                       Label* not_number) {
2702  Label done;
2703  DCHECK(!result.is(object));
2704
2705  UntagAndJumpIfSmi(result, object, &done);
2706  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
2707  TruncateHeapNumberToI(result, object);
2708
2709  bind(&done);
2710}
2711
2712
2713void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2714                                         Register src,
2715                                         int num_least_bits) {
2716  // Ext(dst, src, kSmiTagSize, num_least_bits);
2717  SmiUntag(dst, src);
2718  And(dst, dst, Operand((1 << num_least_bits) - 1));
2719}
2720
2721
2722void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2723                                           Register src,
2724                                           int num_least_bits) {
2725  DCHECK(!src.is(dst));
2726  And(dst, src, Operand((1 << num_least_bits) - 1));
2727}
2728
2729
2730// Emulated condtional branches do not emit a nop in the branch delay slot.
2731//
2732// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
2733#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
2734    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
2735    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
2736
2737
2738void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
2739  DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset));
2740  BranchShort(offset, bdslot);
2741}
2742
2743
2744void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
2745                            const Operand& rt, BranchDelaySlot bdslot) {
2746  bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
2747  DCHECK(is_near);
2748  USE(is_near);
2749}
2750
2751
2752void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
2753  if (L->is_bound()) {
2754    if (is_near_branch(L)) {
2755      BranchShort(L, bdslot);
2756    } else {
2757      BranchLong(L, bdslot);
2758    }
2759  } else {
2760    if (is_trampoline_emitted()) {
2761      BranchLong(L, bdslot);
2762    } else {
2763      BranchShort(L, bdslot);
2764    }
2765  }
2766}
2767
2768
2769void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
2770                            const Operand& rt,
2771                            BranchDelaySlot bdslot) {
2772  if (L->is_bound()) {
2773    if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
2774      if (cond != cc_always) {
2775        Label skip;
2776        Condition neg_cond = NegateCondition(cond);
2777        BranchShort(&skip, neg_cond, rs, rt);
2778        BranchLong(L, bdslot);
2779        bind(&skip);
2780      } else {
2781        BranchLong(L, bdslot);
2782      }
2783    }
2784  } else {
2785    if (is_trampoline_emitted()) {
2786      if (cond != cc_always) {
2787        Label skip;
2788        Condition neg_cond = NegateCondition(cond);
2789        BranchShort(&skip, neg_cond, rs, rt);
2790        BranchLong(L, bdslot);
2791        bind(&skip);
2792      } else {
2793        BranchLong(L, bdslot);
2794      }
2795    } else {
2796      BranchShort(L, cond, rs, rt, bdslot);
2797    }
2798  }
2799}
2800
2801
2802void MacroAssembler::Branch(Label* L,
2803                            Condition cond,
2804                            Register rs,
2805                            Heap::RootListIndex index,
2806                            BranchDelaySlot bdslot) {
2807  LoadRoot(at, index);
2808  Branch(L, cond, rs, Operand(at), bdslot);
2809}
2810
2811
2812void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
2813                                       BranchDelaySlot bdslot) {
2814  DCHECK(L == nullptr || offset == 0);
2815  offset = GetOffset(offset, L, OffsetSize::kOffset16);
2816  b(offset);
2817
2818  // Emit a nop in the branch delay slot if required.
2819  if (bdslot == PROTECT)
2820    nop();
2821}
2822
2823
2824void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
2825  DCHECK(L == nullptr || offset == 0);
2826  offset = GetOffset(offset, L, OffsetSize::kOffset26);
2827  bc(offset);
2828}
2829
2830
2831void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
2832  if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2833    DCHECK(is_int26(offset));
2834    BranchShortHelperR6(offset, nullptr);
2835  } else {
2836    DCHECK(is_int16(offset));
2837    BranchShortHelper(offset, nullptr, bdslot);
2838  }
2839}
2840
2841
2842void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
2843  if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
2844    BranchShortHelperR6(0, L);
2845  } else {
2846    BranchShortHelper(0, L, bdslot);
2847  }
2848}
2849
2850
2851static inline bool IsZero(const Operand& rt) {
2852  if (rt.is_reg()) {
2853    return rt.rm().is(zero_reg);
2854  } else {
2855    return rt.immediate() == 0;
2856  }
2857}
2858
2859
2860int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
2861  if (L) {
2862    offset = branch_offset_helper(L, bits) >> 2;
2863  } else {
2864    DCHECK(is_intn(offset, bits));
2865  }
2866  return offset;
2867}
2868
2869
2870Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
2871                                               Register scratch) {
2872  Register r2 = no_reg;
2873  if (rt.is_reg()) {
2874    r2 = rt.rm_;
2875  } else {
2876    r2 = scratch;
2877    li(r2, rt);
2878  }
2879
2880  return r2;
2881}
2882
2883
2884bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
2885                                         Condition cond, Register rs,
2886                                         const Operand& rt) {
2887  DCHECK(L == nullptr || offset == 0);
2888  Register scratch = rs.is(at) ? t8 : at;
2889  OffsetSize bits = OffsetSize::kOffset16;
2890
2891  // Be careful to always use shifted_branch_offset only just before the
2892  // branch instruction, as the location will be remember for patching the
2893  // target.
2894  {
2895    BlockTrampolinePoolScope block_trampoline_pool(this);
2896    switch (cond) {
2897      case cc_always:
2898        bits = OffsetSize::kOffset26;
2899        if (!is_near(L, bits)) return false;
2900        offset = GetOffset(offset, L, bits);
2901        bc(offset);
2902        break;
2903      case eq:
2904        if (rs.code() == rt.rm_.reg_code) {
2905          // Pre R6 beq is used here to make the code patchable. Otherwise bc
2906          // should be used which has no condition field so is not patchable.
2907          bits = OffsetSize::kOffset16;
2908          if (!is_near(L, bits)) return false;
2909          scratch = GetRtAsRegisterHelper(rt, scratch);
2910          offset = GetOffset(offset, L, bits);
2911          beq(rs, scratch, offset);
2912          nop();
2913        } else if (IsZero(rt)) {
2914          bits = OffsetSize::kOffset21;
2915          if (!is_near(L, bits)) return false;
2916          offset = GetOffset(offset, L, bits);
2917          beqzc(rs, offset);
2918        } else {
2919          // We don't want any other register but scratch clobbered.
2920          bits = OffsetSize::kOffset16;
2921          if (!is_near(L, bits)) return false;
2922          scratch = GetRtAsRegisterHelper(rt, scratch);
2923          offset = GetOffset(offset, L, bits);
2924          beqc(rs, scratch, offset);
2925        }
2926        break;
2927      case ne:
2928        if (rs.code() == rt.rm_.reg_code) {
2929          // Pre R6 bne is used here to make the code patchable. Otherwise we
2930          // should not generate any instruction.
2931          bits = OffsetSize::kOffset16;
2932          if (!is_near(L, bits)) return false;
2933          scratch = GetRtAsRegisterHelper(rt, scratch);
2934          offset = GetOffset(offset, L, bits);
2935          bne(rs, scratch, offset);
2936          nop();
2937        } else if (IsZero(rt)) {
2938          bits = OffsetSize::kOffset21;
2939          if (!is_near(L, bits)) return false;
2940          offset = GetOffset(offset, L, bits);
2941          bnezc(rs, offset);
2942        } else {
2943          // We don't want any other register but scratch clobbered.
2944          bits = OffsetSize::kOffset16;
2945          if (!is_near(L, bits)) return false;
2946          scratch = GetRtAsRegisterHelper(rt, scratch);
2947          offset = GetOffset(offset, L, bits);
2948          bnec(rs, scratch, offset);
2949        }
2950        break;
2951
2952      // Signed comparison.
2953      case greater:
2954        // rs > rt
2955        if (rs.code() == rt.rm_.reg_code) {
2956          break;  // No code needs to be emitted.
2957        } else if (rs.is(zero_reg)) {
2958          bits = OffsetSize::kOffset16;
2959          if (!is_near(L, bits)) return false;
2960          scratch = GetRtAsRegisterHelper(rt, scratch);
2961          offset = GetOffset(offset, L, bits);
2962          bltzc(scratch, offset);
2963        } else if (IsZero(rt)) {
2964          bits = OffsetSize::kOffset16;
2965          if (!is_near(L, bits)) return false;
2966          offset = GetOffset(offset, L, bits);
2967          bgtzc(rs, offset);
2968        } else {
2969          bits = OffsetSize::kOffset16;
2970          if (!is_near(L, bits)) return false;
2971          scratch = GetRtAsRegisterHelper(rt, scratch);
2972          DCHECK(!rs.is(scratch));
2973          offset = GetOffset(offset, L, bits);
2974          bltc(scratch, rs, offset);
2975        }
2976        break;
2977      case greater_equal:
2978        // rs >= rt
2979        if (rs.code() == rt.rm_.reg_code) {
2980          bits = OffsetSize::kOffset26;
2981          if (!is_near(L, bits)) return false;
2982          offset = GetOffset(offset, L, bits);
2983          bc(offset);
2984        } else if (rs.is(zero_reg)) {
2985          bits = OffsetSize::kOffset16;
2986          if (!is_near(L, bits)) return false;
2987          scratch = GetRtAsRegisterHelper(rt, scratch);
2988          offset = GetOffset(offset, L, bits);
2989          blezc(scratch, offset);
2990        } else if (IsZero(rt)) {
2991          bits = OffsetSize::kOffset16;
2992          if (!is_near(L, bits)) return false;
2993          offset = GetOffset(offset, L, bits);
2994          bgezc(rs, offset);
2995        } else {
2996          bits = OffsetSize::kOffset16;
2997          if (!is_near(L, bits)) return false;
2998          scratch = GetRtAsRegisterHelper(rt, scratch);
2999          DCHECK(!rs.is(scratch));
3000          offset = GetOffset(offset, L, bits);
3001          bgec(rs, scratch, offset);
3002        }
3003        break;
3004      case less:
3005        // rs < rt
3006        if (rs.code() == rt.rm_.reg_code) {
3007          break;  // No code needs to be emitted.
3008        } else if (rs.is(zero_reg)) {
3009          bits = OffsetSize::kOffset16;
3010          if (!is_near(L, bits)) return false;
3011          scratch = GetRtAsRegisterHelper(rt, scratch);
3012          offset = GetOffset(offset, L, bits);
3013          bgtzc(scratch, offset);
3014        } else if (IsZero(rt)) {
3015          bits = OffsetSize::kOffset16;
3016          if (!is_near(L, bits)) return false;
3017          offset = GetOffset(offset, L, bits);
3018          bltzc(rs, offset);
3019        } else {
3020          bits = OffsetSize::kOffset16;
3021          if (!is_near(L, bits)) return false;
3022          scratch = GetRtAsRegisterHelper(rt, scratch);
3023          DCHECK(!rs.is(scratch));
3024          offset = GetOffset(offset, L, bits);
3025          bltc(rs, scratch, offset);
3026        }
3027        break;
3028      case less_equal:
3029        // rs <= rt
3030        if (rs.code() == rt.rm_.reg_code) {
3031          bits = OffsetSize::kOffset26;
3032          if (!is_near(L, bits)) return false;
3033          offset = GetOffset(offset, L, bits);
3034          bc(offset);
3035        } else if (rs.is(zero_reg)) {
3036          bits = OffsetSize::kOffset16;
3037          if (!is_near(L, bits)) return false;
3038          scratch = GetRtAsRegisterHelper(rt, scratch);
3039          offset = GetOffset(offset, L, bits);
3040          bgezc(scratch, offset);
3041        } else if (IsZero(rt)) {
3042          bits = OffsetSize::kOffset16;
3043          if (!is_near(L, bits)) return false;
3044          offset = GetOffset(offset, L, bits);
3045          blezc(rs, offset);
3046        } else {
3047          bits = OffsetSize::kOffset16;
3048          if (!is_near(L, bits)) return false;
3049          scratch = GetRtAsRegisterHelper(rt, scratch);
3050          DCHECK(!rs.is(scratch));
3051          offset = GetOffset(offset, L, bits);
3052          bgec(scratch, rs, offset);
3053        }
3054        break;
3055
3056      // Unsigned comparison.
3057      case Ugreater:
3058        // rs > rt
3059        if (rs.code() == rt.rm_.reg_code) {
3060          break;  // No code needs to be emitted.
3061        } else if (rs.is(zero_reg)) {
3062          bits = OffsetSize::kOffset21;
3063          if (!is_near(L, bits)) return false;
3064          scratch = GetRtAsRegisterHelper(rt, scratch);
3065          offset = GetOffset(offset, L, bits);
3066          bnezc(scratch, offset);
3067        } else if (IsZero(rt)) {
3068          bits = OffsetSize::kOffset21;
3069          if (!is_near(L, bits)) return false;
3070          offset = GetOffset(offset, L, bits);
3071          bnezc(rs, offset);
3072        } else {
3073          bits = OffsetSize::kOffset16;
3074          if (!is_near(L, bits)) return false;
3075          scratch = GetRtAsRegisterHelper(rt, scratch);
3076          DCHECK(!rs.is(scratch));
3077          offset = GetOffset(offset, L, bits);
3078          bltuc(scratch, rs, offset);
3079        }
3080        break;
3081      case Ugreater_equal:
3082        // rs >= rt
3083        if (rs.code() == rt.rm_.reg_code) {
3084          bits = OffsetSize::kOffset26;
3085          if (!is_near(L, bits)) return false;
3086          offset = GetOffset(offset, L, bits);
3087          bc(offset);
3088        } else if (rs.is(zero_reg)) {
3089          bits = OffsetSize::kOffset21;
3090          if (!is_near(L, bits)) return false;
3091          scratch = GetRtAsRegisterHelper(rt, scratch);
3092          offset = GetOffset(offset, L, bits);
3093          beqzc(scratch, offset);
3094        } else if (IsZero(rt)) {
3095          bits = OffsetSize::kOffset26;
3096          if (!is_near(L, bits)) return false;
3097          offset = GetOffset(offset, L, bits);
3098          bc(offset);
3099        } else {
3100          bits = OffsetSize::kOffset16;
3101          if (!is_near(L, bits)) return false;
3102          scratch = GetRtAsRegisterHelper(rt, scratch);
3103          DCHECK(!rs.is(scratch));
3104          offset = GetOffset(offset, L, bits);
3105          bgeuc(rs, scratch, offset);
3106        }
3107        break;
3108      case Uless:
3109        // rs < rt
3110        if (rs.code() == rt.rm_.reg_code) {
3111          break;  // No code needs to be emitted.
3112        } else if (rs.is(zero_reg)) {
3113          bits = OffsetSize::kOffset21;
3114          if (!is_near(L, bits)) return false;
3115          scratch = GetRtAsRegisterHelper(rt, scratch);
3116          offset = GetOffset(offset, L, bits);
3117          bnezc(scratch, offset);
3118        } else if (IsZero(rt)) {
3119          break;  // No code needs to be emitted.
3120        } else {
3121          bits = OffsetSize::kOffset16;
3122          if (!is_near(L, bits)) return false;
3123          scratch = GetRtAsRegisterHelper(rt, scratch);
3124          DCHECK(!rs.is(scratch));
3125          offset = GetOffset(offset, L, bits);
3126          bltuc(rs, scratch, offset);
3127        }
3128        break;
3129      case Uless_equal:
3130        // rs <= rt
3131        if (rs.code() == rt.rm_.reg_code) {
3132          bits = OffsetSize::kOffset26;
3133          if (!is_near(L, bits)) return false;
3134          offset = GetOffset(offset, L, bits);
3135          bc(offset);
3136        } else if (rs.is(zero_reg)) {
3137          bits = OffsetSize::kOffset26;
3138          if (!is_near(L, bits)) return false;
3139          scratch = GetRtAsRegisterHelper(rt, scratch);
3140          offset = GetOffset(offset, L, bits);
3141          bc(offset);
3142        } else if (IsZero(rt)) {
3143          bits = OffsetSize::kOffset21;
3144          if (!is_near(L, bits)) return false;
3145          offset = GetOffset(offset, L, bits);
3146          beqzc(rs, offset);
3147        } else {
3148          bits = OffsetSize::kOffset16;
3149          if (!is_near(L, bits)) return false;
3150          scratch = GetRtAsRegisterHelper(rt, scratch);
3151          DCHECK(!rs.is(scratch));
3152          offset = GetOffset(offset, L, bits);
3153          bgeuc(scratch, rs, offset);
3154        }
3155        break;
3156      default:
3157        UNREACHABLE();
3158    }
3159  }
3160  CheckTrampolinePoolQuick(1);
3161  return true;
3162}
3163
3164
3165bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
3166                                       Register rs, const Operand& rt,
3167                                       BranchDelaySlot bdslot) {
3168  DCHECK(L == nullptr || offset == 0);
3169  if (!is_near(L, OffsetSize::kOffset16)) return false;
3170
3171  Register scratch = at;
3172  int32_t offset32;
3173
3174  // Be careful to always use shifted_branch_offset only just before the
3175  // branch instruction, as the location will be remember for patching the
3176  // target.
3177  {
3178    BlockTrampolinePoolScope block_trampoline_pool(this);
3179    switch (cond) {
3180      case cc_always:
3181        offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3182        b(offset32);
3183        break;
3184      case eq:
3185        if (IsZero(rt)) {
3186          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3187          beq(rs, zero_reg, offset32);
3188        } else {
3189          // We don't want any other register but scratch clobbered.
3190          scratch = GetRtAsRegisterHelper(rt, scratch);
3191          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3192          beq(rs, scratch, offset32);
3193        }
3194        break;
3195      case ne:
3196        if (IsZero(rt)) {
3197          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3198          bne(rs, zero_reg, offset32);
3199        } else {
3200          // We don't want any other register but scratch clobbered.
3201          scratch = GetRtAsRegisterHelper(rt, scratch);
3202          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3203          bne(rs, scratch, offset32);
3204        }
3205        break;
3206
3207      // Signed comparison.
3208      case greater:
3209        if (IsZero(rt)) {
3210          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3211          bgtz(rs, offset32);
3212        } else {
3213          Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3214          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3215          bne(scratch, zero_reg, offset32);
3216        }
3217        break;
3218      case greater_equal:
3219        if (IsZero(rt)) {
3220          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3221          bgez(rs, offset32);
3222        } else {
3223          Slt(scratch, rs, rt);
3224          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3225          beq(scratch, zero_reg, offset32);
3226        }
3227        break;
3228      case less:
3229        if (IsZero(rt)) {
3230          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3231          bltz(rs, offset32);
3232        } else {
3233          Slt(scratch, rs, rt);
3234          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3235          bne(scratch, zero_reg, offset32);
3236        }
3237        break;
3238      case less_equal:
3239        if (IsZero(rt)) {
3240          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3241          blez(rs, offset32);
3242        } else {
3243          Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3244          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3245          beq(scratch, zero_reg, offset32);
3246        }
3247        break;
3248
3249      // Unsigned comparison.
3250      case Ugreater:
3251        if (IsZero(rt)) {
3252          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3253          bne(rs, zero_reg, offset32);
3254        } else {
3255          Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3256          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3257          bne(scratch, zero_reg, offset32);
3258        }
3259        break;
3260      case Ugreater_equal:
3261        if (IsZero(rt)) {
3262          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3263          b(offset32);
3264        } else {
3265          Sltu(scratch, rs, rt);
3266          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3267          beq(scratch, zero_reg, offset32);
3268        }
3269        break;
3270      case Uless:
3271        if (IsZero(rt)) {
3272          return true;  // No code needs to be emitted.
3273        } else {
3274          Sltu(scratch, rs, rt);
3275          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3276          bne(scratch, zero_reg, offset32);
3277        }
3278        break;
3279      case Uless_equal:
3280        if (IsZero(rt)) {
3281          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3282          beq(rs, zero_reg, offset32);
3283        } else {
3284          Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3285          offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
3286          beq(scratch, zero_reg, offset32);
3287        }
3288        break;
3289      default:
3290        UNREACHABLE();
3291    }
3292  }
3293
3294  // Emit a nop in the branch delay slot if required.
3295  if (bdslot == PROTECT)
3296    nop();
3297
3298  return true;
3299}
3300
3301
3302bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
3303                                      Register rs, const Operand& rt,
3304                                      BranchDelaySlot bdslot) {
3305  BRANCH_ARGS_CHECK(cond, rs, rt);
3306
3307  if (!L) {
3308    if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3309      DCHECK(is_int26(offset));
3310      return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
3311    } else {
3312      DCHECK(is_int16(offset));
3313      return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3314    }
3315  } else {
3316    DCHECK(offset == 0);
3317    if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3318      return BranchShortHelperR6(0, L, cond, rs, rt);
3319    } else {
3320      return BranchShortHelper(0, L, cond, rs, rt, bdslot);
3321    }
3322  }
3323  return false;
3324}
3325
3326
3327void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
3328                                 const Operand& rt, BranchDelaySlot bdslot) {
3329  BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3330}
3331
3332
3333void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
3334                                 const Operand& rt, BranchDelaySlot bdslot) {
3335  BranchShortCheck(0, L, cond, rs, rt, bdslot);
3336}
3337
3338
3339void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
3340  BranchAndLinkShort(offset, bdslot);
3341}
3342
3343
3344void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
3345                                   const Operand& rt, BranchDelaySlot bdslot) {
3346  bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
3347  DCHECK(is_near);
3348  USE(is_near);
3349}
3350
3351
3352void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
3353  if (L->is_bound()) {
3354    if (is_near_branch(L)) {
3355      BranchAndLinkShort(L, bdslot);
3356    } else {
3357      BranchAndLinkLong(L, bdslot);
3358    }
3359  } else {
3360    if (is_trampoline_emitted()) {
3361      BranchAndLinkLong(L, bdslot);
3362    } else {
3363      BranchAndLinkShort(L, bdslot);
3364    }
3365  }
3366}
3367
3368
3369void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
3370                                   const Operand& rt,
3371                                   BranchDelaySlot bdslot) {
3372  if (L->is_bound()) {
3373    if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
3374      Label skip;
3375      Condition neg_cond = NegateCondition(cond);
3376      BranchShort(&skip, neg_cond, rs, rt);
3377      BranchAndLinkLong(L, bdslot);
3378      bind(&skip);
3379    }
3380  } else {
3381    if (is_trampoline_emitted()) {
3382      Label skip;
3383      Condition neg_cond = NegateCondition(cond);
3384      BranchShort(&skip, neg_cond, rs, rt);
3385      BranchAndLinkLong(L, bdslot);
3386      bind(&skip);
3387    } else {
3388      BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
3389    }
3390  }
3391}
3392
3393
3394void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3395                                              BranchDelaySlot bdslot) {
3396  DCHECK(L == nullptr || offset == 0);
3397  offset = GetOffset(offset, L, OffsetSize::kOffset16);
3398  bal(offset);
3399
3400  // Emit a nop in the branch delay slot if required.
3401  if (bdslot == PROTECT)
3402    nop();
3403}
3404
3405
3406void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
3407  DCHECK(L == nullptr || offset == 0);
3408  offset = GetOffset(offset, L, OffsetSize::kOffset26);
3409  balc(offset);
3410}
3411
3412
3413void MacroAssembler::BranchAndLinkShort(int32_t offset,
3414                                        BranchDelaySlot bdslot) {
3415  if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3416    DCHECK(is_int26(offset));
3417    BranchAndLinkShortHelperR6(offset, nullptr);
3418  } else {
3419    DCHECK(is_int16(offset));
3420    BranchAndLinkShortHelper(offset, nullptr, bdslot);
3421  }
3422}
3423
3424
3425void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
3426  if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3427    BranchAndLinkShortHelperR6(0, L);
3428  } else {
3429    BranchAndLinkShortHelper(0, L, bdslot);
3430  }
3431}
3432
3433
3434bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
3435                                                Condition cond, Register rs,
3436                                                const Operand& rt) {
3437  DCHECK(L == nullptr || offset == 0);
3438  Register scratch = rs.is(at) ? t8 : at;
3439  OffsetSize bits = OffsetSize::kOffset16;
3440
3441  BlockTrampolinePoolScope block_trampoline_pool(this);
3442  DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
3443  switch (cond) {
3444    case cc_always:
3445      bits = OffsetSize::kOffset26;
3446      if (!is_near(L, bits)) return false;
3447      offset = GetOffset(offset, L, bits);
3448      balc(offset);
3449      break;
3450    case eq:
3451      if (!is_near(L, bits)) return false;
3452      Subu(scratch, rs, rt);
3453      offset = GetOffset(offset, L, bits);
3454      beqzalc(scratch, offset);
3455      break;
3456    case ne:
3457      if (!is_near(L, bits)) return false;
3458      Subu(scratch, rs, rt);
3459      offset = GetOffset(offset, L, bits);
3460      bnezalc(scratch, offset);
3461      break;
3462
3463    // Signed comparison.
3464    case greater:
3465      // rs > rt
3466      if (rs.code() == rt.rm_.reg_code) {
3467        break;  // No code needs to be emitted.
3468      } else if (rs.is(zero_reg)) {
3469        if (!is_near(L, bits)) return false;
3470        scratch = GetRtAsRegisterHelper(rt, scratch);
3471        offset = GetOffset(offset, L, bits);
3472        bltzalc(scratch, offset);
3473      } else if (IsZero(rt)) {
3474        if (!is_near(L, bits)) return false;
3475        offset = GetOffset(offset, L, bits);
3476        bgtzalc(rs, offset);
3477      } else {
3478        if (!is_near(L, bits)) return false;
3479        Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3480        offset = GetOffset(offset, L, bits);
3481        bnezalc(scratch, offset);
3482      }
3483      break;
3484    case greater_equal:
3485      // rs >= rt
3486      if (rs.code() == rt.rm_.reg_code) {
3487        bits = OffsetSize::kOffset26;
3488        if (!is_near(L, bits)) return false;
3489        offset = GetOffset(offset, L, bits);
3490        balc(offset);
3491      } else if (rs.is(zero_reg)) {
3492        if (!is_near(L, bits)) return false;
3493        scratch = GetRtAsRegisterHelper(rt, scratch);
3494        offset = GetOffset(offset, L, bits);
3495        blezalc(scratch, offset);
3496      } else if (IsZero(rt)) {
3497        if (!is_near(L, bits)) return false;
3498        offset = GetOffset(offset, L, bits);
3499        bgezalc(rs, offset);
3500      } else {
3501        if (!is_near(L, bits)) return false;
3502        Slt(scratch, rs, rt);
3503        offset = GetOffset(offset, L, bits);
3504        beqzalc(scratch, offset);
3505      }
3506      break;
3507    case less:
3508      // rs < rt
3509      if (rs.code() == rt.rm_.reg_code) {
3510        break;  // No code needs to be emitted.
3511      } else if (rs.is(zero_reg)) {
3512        if (!is_near(L, bits)) return false;
3513        scratch = GetRtAsRegisterHelper(rt, scratch);
3514        offset = GetOffset(offset, L, bits);
3515        bgtzalc(scratch, offset);
3516      } else if (IsZero(rt)) {
3517        if (!is_near(L, bits)) return false;
3518        offset = GetOffset(offset, L, bits);
3519        bltzalc(rs, offset);
3520      } else {
3521        if (!is_near(L, bits)) return false;
3522        Slt(scratch, rs, rt);
3523        offset = GetOffset(offset, L, bits);
3524        bnezalc(scratch, offset);
3525      }
3526      break;
3527    case less_equal:
3528      // rs <= r2
3529      if (rs.code() == rt.rm_.reg_code) {
3530        bits = OffsetSize::kOffset26;
3531        if (!is_near(L, bits)) return false;
3532        offset = GetOffset(offset, L, bits);
3533        balc(offset);
3534      } else if (rs.is(zero_reg)) {
3535        if (!is_near(L, bits)) return false;
3536        scratch = GetRtAsRegisterHelper(rt, scratch);
3537        offset = GetOffset(offset, L, bits);
3538        bgezalc(scratch, offset);
3539      } else if (IsZero(rt)) {
3540        if (!is_near(L, bits)) return false;
3541        offset = GetOffset(offset, L, bits);
3542        blezalc(rs, offset);
3543      } else {
3544        if (!is_near(L, bits)) return false;
3545        Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3546        offset = GetOffset(offset, L, bits);
3547        beqzalc(scratch, offset);
3548      }
3549      break;
3550
3551
3552    // Unsigned comparison.
3553    case Ugreater:
3554      // rs > r2
3555      if (!is_near(L, bits)) return false;
3556      Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3557      offset = GetOffset(offset, L, bits);
3558      bnezalc(scratch, offset);
3559      break;
3560    case Ugreater_equal:
3561      // rs >= r2
3562      if (!is_near(L, bits)) return false;
3563      Sltu(scratch, rs, rt);
3564      offset = GetOffset(offset, L, bits);
3565      beqzalc(scratch, offset);
3566      break;
3567    case Uless:
3568      // rs < r2
3569      if (!is_near(L, bits)) return false;
3570      Sltu(scratch, rs, rt);
3571      offset = GetOffset(offset, L, bits);
3572      bnezalc(scratch, offset);
3573      break;
3574    case Uless_equal:
3575      // rs <= r2
3576      if (!is_near(L, bits)) return false;
3577      Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3578      offset = GetOffset(offset, L, bits);
3579      beqzalc(scratch, offset);
3580      break;
3581    default:
3582      UNREACHABLE();
3583  }
3584  return true;
3585}
3586
3587
3588// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
3589// with the slt instructions. We could use sub or add instead but we would miss
3590// overflow cases, so we keep slt and add an intermediate third instruction.
3591bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
3592                                              Condition cond, Register rs,
3593                                              const Operand& rt,
3594                                              BranchDelaySlot bdslot) {
3595  DCHECK(L == nullptr || offset == 0);
3596  if (!is_near(L, OffsetSize::kOffset16)) return false;
3597
3598  Register scratch = t8;
3599  BlockTrampolinePoolScope block_trampoline_pool(this);
3600
3601  switch (cond) {
3602    case cc_always:
3603      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3604      bal(offset);
3605      break;
3606    case eq:
3607      bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3608      nop();
3609      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3610      bal(offset);
3611      break;
3612    case ne:
3613      beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
3614      nop();
3615      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3616      bal(offset);
3617      break;
3618
3619    // Signed comparison.
3620    case greater:
3621      Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3622      addiu(scratch, scratch, -1);
3623      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3624      bgezal(scratch, offset);
3625      break;
3626    case greater_equal:
3627      Slt(scratch, rs, rt);
3628      addiu(scratch, scratch, -1);
3629      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3630      bltzal(scratch, offset);
3631      break;
3632    case less:
3633      Slt(scratch, rs, rt);
3634      addiu(scratch, scratch, -1);
3635      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3636      bgezal(scratch, offset);
3637      break;
3638    case less_equal:
3639      Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3640      addiu(scratch, scratch, -1);
3641      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3642      bltzal(scratch, offset);
3643      break;
3644
3645    // Unsigned comparison.
3646    case Ugreater:
3647      Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3648      addiu(scratch, scratch, -1);
3649      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3650      bgezal(scratch, offset);
3651      break;
3652    case Ugreater_equal:
3653      Sltu(scratch, rs, rt);
3654      addiu(scratch, scratch, -1);
3655      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3656      bltzal(scratch, offset);
3657      break;
3658    case Uless:
3659      Sltu(scratch, rs, rt);
3660      addiu(scratch, scratch, -1);
3661      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3662      bgezal(scratch, offset);
3663      break;
3664    case Uless_equal:
3665      Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
3666      addiu(scratch, scratch, -1);
3667      offset = GetOffset(offset, L, OffsetSize::kOffset16);
3668      bltzal(scratch, offset);
3669      break;
3670
3671    default:
3672      UNREACHABLE();
3673  }
3674
3675  // Emit a nop in the branch delay slot if required.
3676  if (bdslot == PROTECT)
3677    nop();
3678
3679  return true;
3680}
3681
3682
3683bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
3684                                             Condition cond, Register rs,
3685                                             const Operand& rt,
3686                                             BranchDelaySlot bdslot) {
3687  BRANCH_ARGS_CHECK(cond, rs, rt);
3688
3689  if (!L) {
3690    if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3691      DCHECK(is_int26(offset));
3692      return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
3693    } else {
3694      DCHECK(is_int16(offset));
3695      return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
3696    }
3697  } else {
3698    DCHECK(offset == 0);
3699    if (kArchVariant == kMips64r6 && bdslot == PROTECT) {
3700      return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
3701    } else {
3702      return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
3703    }
3704  }
3705  return false;
3706}
3707
3708
3709void MacroAssembler::Jump(Register target,
3710                          Condition cond,
3711                          Register rs,
3712                          const Operand& rt,
3713                          BranchDelaySlot bd) {
3714  BlockTrampolinePoolScope block_trampoline_pool(this);
3715  if (kArchVariant == kMips64r6 && bd == PROTECT) {
3716    if (cond == cc_always) {
3717      jic(target, 0);
3718    } else {
3719      BRANCH_ARGS_CHECK(cond, rs, rt);
3720      Branch(2, NegateCondition(cond), rs, rt);
3721      jic(target, 0);
3722    }
3723  } else {
3724    if (cond == cc_always) {
3725      jr(target);
3726    } else {
3727      BRANCH_ARGS_CHECK(cond, rs, rt);
3728      Branch(2, NegateCondition(cond), rs, rt);
3729      jr(target);
3730    }
3731    // Emit a nop in the branch delay slot if required.
3732    if (bd == PROTECT) nop();
3733  }
3734}
3735
3736
3737void MacroAssembler::Jump(intptr_t target,
3738                          RelocInfo::Mode rmode,
3739                          Condition cond,
3740                          Register rs,
3741                          const Operand& rt,
3742                          BranchDelaySlot bd) {
3743  Label skip;
3744  if (cond != cc_always) {
3745    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
3746  }
3747  // The first instruction of 'li' may be placed in the delay slot.
3748  // This is not an issue, t9 is expected to be clobbered anyway.
3749  li(t9, Operand(target, rmode));
3750  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
3751  bind(&skip);
3752}
3753
3754
3755void MacroAssembler::Jump(Address target,
3756                          RelocInfo::Mode rmode,
3757                          Condition cond,
3758                          Register rs,
3759                          const Operand& rt,
3760                          BranchDelaySlot bd) {
3761  DCHECK(!RelocInfo::IsCodeTarget(rmode));
3762  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
3763}
3764
3765
3766void MacroAssembler::Jump(Handle<Code> code,
3767                          RelocInfo::Mode rmode,
3768                          Condition cond,
3769                          Register rs,
3770                          const Operand& rt,
3771                          BranchDelaySlot bd) {
3772  DCHECK(RelocInfo::IsCodeTarget(rmode));
3773  AllowDeferredHandleDereference embedding_raw_address;
3774  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
3775}
3776
3777
3778int MacroAssembler::CallSize(Register target,
3779                             Condition cond,
3780                             Register rs,
3781                             const Operand& rt,
3782                             BranchDelaySlot bd) {
3783  int size = 0;
3784
3785  if (cond == cc_always) {
3786    size += 1;
3787  } else {
3788    size += 3;
3789  }
3790
3791  if (bd == PROTECT && kArchVariant != kMips64r6) size += 1;
3792
3793  return size * kInstrSize;
3794}
3795
3796
3797// Note: To call gcc-compiled C code on mips, you must call thru t9.
3798void MacroAssembler::Call(Register target,
3799                          Condition cond,
3800                          Register rs,
3801                          const Operand& rt,
3802                          BranchDelaySlot bd) {
3803#ifdef DEBUG
3804  int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
3805#endif
3806
3807  BlockTrampolinePoolScope block_trampoline_pool(this);
3808  Label start;
3809  bind(&start);
3810  if (kArchVariant == kMips64r6 && bd == PROTECT) {
3811    if (cond == cc_always) {
3812      jialc(target, 0);
3813    } else {
3814      BRANCH_ARGS_CHECK(cond, rs, rt);
3815      Branch(2, NegateCondition(cond), rs, rt);
3816      jialc(target, 0);
3817    }
3818  } else {
3819    if (cond == cc_always) {
3820      jalr(target);
3821    } else {
3822      BRANCH_ARGS_CHECK(cond, rs, rt);
3823      Branch(2, NegateCondition(cond), rs, rt);
3824      jalr(target);
3825    }
3826    // Emit a nop in the branch delay slot if required.
3827    if (bd == PROTECT) nop();
3828  }
3829
3830#ifdef DEBUG
3831  CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
3832           SizeOfCodeGeneratedSince(&start));
3833#endif
3834}
3835
3836
3837int MacroAssembler::CallSize(Address target,
3838                             RelocInfo::Mode rmode,
3839                             Condition cond,
3840                             Register rs,
3841                             const Operand& rt,
3842                             BranchDelaySlot bd) {
3843  int size = CallSize(t9, cond, rs, rt, bd);
3844  return size + 4 * kInstrSize;
3845}
3846
3847
3848void MacroAssembler::Call(Address target,
3849                          RelocInfo::Mode rmode,
3850                          Condition cond,
3851                          Register rs,
3852                          const Operand& rt,
3853                          BranchDelaySlot bd) {
3854  BlockTrampolinePoolScope block_trampoline_pool(this);
3855  Label start;
3856  bind(&start);
3857  int64_t target_int = reinterpret_cast<int64_t>(target);
3858  li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
3859  Call(t9, cond, rs, rt, bd);
3860  DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
3861            SizeOfCodeGeneratedSince(&start));
3862}
3863
3864
3865int MacroAssembler::CallSize(Handle<Code> code,
3866                             RelocInfo::Mode rmode,
3867                             TypeFeedbackId ast_id,
3868                             Condition cond,
3869                             Register rs,
3870                             const Operand& rt,
3871                             BranchDelaySlot bd) {
3872  AllowDeferredHandleDereference using_raw_address;
3873  return CallSize(reinterpret_cast<Address>(code.location()),
3874      rmode, cond, rs, rt, bd);
3875}
3876
3877
3878void MacroAssembler::Call(Handle<Code> code,
3879                          RelocInfo::Mode rmode,
3880                          TypeFeedbackId ast_id,
3881                          Condition cond,
3882                          Register rs,
3883                          const Operand& rt,
3884                          BranchDelaySlot bd) {
3885  BlockTrampolinePoolScope block_trampoline_pool(this);
3886  Label start;
3887  bind(&start);
3888  DCHECK(RelocInfo::IsCodeTarget(rmode));
3889  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
3890    SetRecordedAstId(ast_id);
3891    rmode = RelocInfo::CODE_TARGET_WITH_ID;
3892  }
3893  AllowDeferredHandleDereference embedding_raw_address;
3894  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
3895  DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
3896            SizeOfCodeGeneratedSince(&start));
3897}
3898
3899
3900void MacroAssembler::Ret(Condition cond,
3901                         Register rs,
3902                         const Operand& rt,
3903                         BranchDelaySlot bd) {
3904  Jump(ra, cond, rs, rt, bd);
3905}
3906
3907
3908void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
3909  if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3910      (!L->is_bound() || is_near_r6(L))) {
3911    BranchShortHelperR6(0, L);
3912  } else {
3913    EmitForbiddenSlotInstruction();
3914    BlockTrampolinePoolScope block_trampoline_pool(this);
3915    {
3916      BlockGrowBufferScope block_buf_growth(this);
3917      // Buffer growth (and relocation) must be blocked for internal references
3918      // until associated instructions are emitted and available to be patched.
3919      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3920      j(L);
3921    }
3922    // Emit a nop in the branch delay slot if required.
3923    if (bdslot == PROTECT) nop();
3924  }
3925}
3926
3927
3928void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
3929  if (kArchVariant == kMips64r6 && bdslot == PROTECT &&
3930      (!L->is_bound() || is_near_r6(L))) {
3931    BranchAndLinkShortHelperR6(0, L);
3932  } else {
3933    EmitForbiddenSlotInstruction();
3934    BlockTrampolinePoolScope block_trampoline_pool(this);
3935    {
3936      BlockGrowBufferScope block_buf_growth(this);
3937      // Buffer growth (and relocation) must be blocked for internal references
3938      // until associated instructions are emitted and available to be patched.
3939      RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
3940      jal(L);
3941    }
3942    // Emit a nop in the branch delay slot if required.
3943    if (bdslot == PROTECT) nop();
3944  }
3945}
3946
3947
3948void MacroAssembler::DropAndRet(int drop) {
3949  DCHECK(is_int16(drop * kPointerSize));
3950  Ret(USE_DELAY_SLOT);
3951  daddiu(sp, sp, drop * kPointerSize);
3952}
3953
3954void MacroAssembler::DropAndRet(int drop,
3955                                Condition cond,
3956                                Register r1,
3957                                const Operand& r2) {
3958  // Both Drop and Ret need to be conditional.
3959  Label skip;
3960  if (cond != cc_always) {
3961    Branch(&skip, NegateCondition(cond), r1, r2);
3962  }
3963
3964  Drop(drop);
3965  Ret();
3966
3967  if (cond != cc_always) {
3968    bind(&skip);
3969  }
3970}
3971
3972
3973void MacroAssembler::Drop(int count,
3974                          Condition cond,
3975                          Register reg,
3976                          const Operand& op) {
3977  if (count <= 0) {
3978    return;
3979  }
3980
3981  Label skip;
3982
3983  if (cond != al) {
3984     Branch(&skip, NegateCondition(cond), reg, op);
3985  }
3986
3987  Daddu(sp, sp, Operand(count * kPointerSize));
3988
3989  if (cond != al) {
3990    bind(&skip);
3991  }
3992}
3993
3994
3995
3996void MacroAssembler::Swap(Register reg1,
3997                          Register reg2,
3998                          Register scratch) {
3999  if (scratch.is(no_reg)) {
4000    Xor(reg1, reg1, Operand(reg2));
4001    Xor(reg2, reg2, Operand(reg1));
4002    Xor(reg1, reg1, Operand(reg2));
4003  } else {
4004    mov(scratch, reg1);
4005    mov(reg1, reg2);
4006    mov(reg2, scratch);
4007  }
4008}
4009
4010
4011void MacroAssembler::Call(Label* target) {
4012  BranchAndLink(target);
4013}
4014
4015
4016void MacroAssembler::Push(Handle<Object> handle) {
4017  li(at, Operand(handle));
4018  push(at);
4019}
4020
4021
4022void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
4023  DCHECK(!src.is(scratch));
4024  mov(scratch, src);
4025  dsrl32(src, src, 0);
4026  dsll32(src, src, 0);
4027  push(src);
4028  dsll32(scratch, scratch, 0);
4029  push(scratch);
4030}
4031
4032
4033void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
4034  DCHECK(!dst.is(scratch));
4035  pop(scratch);
4036  dsrl32(scratch, scratch, 0);
4037  pop(dst);
4038  dsrl32(dst, dst, 0);
4039  dsll32(dst, dst, 0);
4040  or_(dst, dst, scratch);
4041}
4042
4043void MacroAssembler::MaybeDropFrames() {
4044  // Check whether we need to drop frames to restart a function on the stack.
4045  ExternalReference restart_fp =
4046      ExternalReference::debug_restart_fp_address(isolate());
4047  li(a1, Operand(restart_fp));
4048  ld(a1, MemOperand(a1));
4049  Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
4050       ne, a1, Operand(zero_reg));
4051}
4052
4053// ---------------------------------------------------------------------------
4054// Exception handling.
4055
4056void MacroAssembler::PushStackHandler() {
4057  // Adjust this code if not the case.
4058  STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
4059  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
4060
4061  // Link the current handler as the next handler.
4062  li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4063  ld(a5, MemOperand(a6));
4064  push(a5);
4065
4066  // Set this new handler as the current one.
4067  sd(sp, MemOperand(a6));
4068}
4069
4070
4071void MacroAssembler::PopStackHandler() {
4072  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4073  pop(a1);
4074  Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
4075                                             kPointerSize)));
4076  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
4077  sd(a1, MemOperand(at));
4078}
4079
4080
4081void MacroAssembler::Allocate(int object_size,
4082                              Register result,
4083                              Register scratch1,
4084                              Register scratch2,
4085                              Label* gc_required,
4086                              AllocationFlags flags) {
4087  DCHECK(object_size <= kMaxRegularHeapObjectSize);
4088  if (!FLAG_inline_new) {
4089    if (emit_debug_code()) {
4090      // Trash the registers to simulate an allocation failure.
4091      li(result, 0x7091);
4092      li(scratch1, 0x7191);
4093      li(scratch2, 0x7291);
4094    }
4095    jmp(gc_required);
4096    return;
4097  }
4098
4099  DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
4100
4101  // Make object size into bytes.
4102  if ((flags & SIZE_IN_WORDS) != 0) {
4103    object_size *= kPointerSize;
4104  }
4105  DCHECK(0 == (object_size & kObjectAlignmentMask));
4106
4107  // Check relative positions of allocation top and limit addresses.
4108  // ARM adds additional checks to make sure the ldm instruction can be
4109  // used. On MIPS we don't have ldm so we don't need additional checks either.
4110  ExternalReference allocation_top =
4111      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4112  ExternalReference allocation_limit =
4113      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4114
4115  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4116  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4117  DCHECK((limit - top) == kPointerSize);
4118
4119  // Set up allocation top address and allocation limit registers.
4120  Register top_address = scratch1;
4121  // This code stores a temporary value in t9.
4122  Register alloc_limit = t9;
4123  Register result_end = scratch2;
4124  li(top_address, Operand(allocation_top));
4125
4126  if ((flags & RESULT_CONTAINS_TOP) == 0) {
4127    // Load allocation top into result and allocation limit into alloc_limit.
4128    ld(result, MemOperand(top_address));
4129    ld(alloc_limit, MemOperand(top_address, kPointerSize));
4130  } else {
4131    if (emit_debug_code()) {
4132      // Assert that result actually contains top on entry.
4133      ld(alloc_limit, MemOperand(top_address));
4134      Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4135    }
4136    // Load allocation limit. Result already contains allocation top.
4137    ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
4138  }
4139
4140  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4141  // the same alignment on ARM64.
4142  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4143
4144  if (emit_debug_code()) {
4145    And(at, result, Operand(kDoubleAlignmentMask));
4146    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4147  }
4148
4149  // Calculate new top and bail out if new space is exhausted. Use result
4150  // to calculate the new top.
4151  Daddu(result_end, result, Operand(object_size));
4152  Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4153
4154  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4155    // The top pointer is not updated for allocation folding dominators.
4156    sd(result_end, MemOperand(top_address));
4157  }
4158
4159  // Tag object.
4160  Daddu(result, result, Operand(kHeapObjectTag));
4161}
4162
4163
4164void MacroAssembler::Allocate(Register object_size, Register result,
4165                              Register result_end, Register scratch,
4166                              Label* gc_required, AllocationFlags flags) {
4167  if (!FLAG_inline_new) {
4168    if (emit_debug_code()) {
4169      // Trash the registers to simulate an allocation failure.
4170      li(result, 0x7091);
4171      li(scratch, 0x7191);
4172      li(result_end, 0x7291);
4173    }
4174    jmp(gc_required);
4175    return;
4176  }
4177
4178  // |object_size| and |result_end| may overlap, other registers must not.
4179  DCHECK(!AreAliased(object_size, result, scratch, t9, at));
4180  DCHECK(!AreAliased(result_end, result, scratch, t9, at));
4181
4182  // Check relative positions of allocation top and limit addresses.
4183  // ARM adds additional checks to make sure the ldm instruction can be
4184  // used. On MIPS we don't have ldm so we don't need additional checks either.
4185  ExternalReference allocation_top =
4186      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4187  ExternalReference allocation_limit =
4188      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4189  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
4190  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
4191  DCHECK((limit - top) == kPointerSize);
4192
4193  // Set up allocation top address and object size registers.
4194  Register top_address = scratch;
4195  // This code stores a temporary value in t9.
4196  Register alloc_limit = t9;
4197  li(top_address, Operand(allocation_top));
4198
4199  if ((flags & RESULT_CONTAINS_TOP) == 0) {
4200    // Load allocation top into result and allocation limit into alloc_limit.
4201    ld(result, MemOperand(top_address));
4202    ld(alloc_limit, MemOperand(top_address, kPointerSize));
4203  } else {
4204    if (emit_debug_code()) {
4205      // Assert that result actually contains top on entry.
4206      ld(alloc_limit, MemOperand(top_address));
4207      Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
4208    }
4209    // Load allocation limit. Result already contains allocation top.
4210    ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
4211  }
4212
4213  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4214  // the same alignment on ARM64.
4215  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4216
4217  if (emit_debug_code()) {
4218    And(at, result, Operand(kDoubleAlignmentMask));
4219    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4220  }
4221
4222  // Calculate new top and bail out if new space is exhausted. Use result
4223  // to calculate the new top. Object size may be in words so a shift is
4224  // required to get the number of bytes.
4225  if ((flags & SIZE_IN_WORDS) != 0) {
4226    Dlsa(result_end, result, object_size, kPointerSizeLog2);
4227  } else {
4228    Daddu(result_end, result, Operand(object_size));
4229  }
4230
4231  Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
4232
4233  // Update allocation top. result temporarily holds the new top.
4234  if (emit_debug_code()) {
4235    And(at, result_end, Operand(kObjectAlignmentMask));
4236    Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
4237  }
4238
4239  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
4240    // The top pointer is not updated for allocation folding dominators.
4241    sd(result_end, MemOperand(top_address));
4242  }
4243
4244  // Tag object if.
4245  Daddu(result, result, Operand(kHeapObjectTag));
4246}
4247
4248void MacroAssembler::FastAllocate(int object_size, Register result,
4249                                  Register scratch1, Register scratch2,
4250                                  AllocationFlags flags) {
4251  DCHECK(object_size <= kMaxRegularHeapObjectSize);
4252  DCHECK(!AreAliased(result, scratch1, scratch2, at));
4253
4254  // Make object size into bytes.
4255  if ((flags & SIZE_IN_WORDS) != 0) {
4256    object_size *= kPointerSize;
4257  }
4258  DCHECK(0 == (object_size & kObjectAlignmentMask));
4259
4260  ExternalReference allocation_top =
4261      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4262
4263  Register top_address = scratch1;
4264  Register result_end = scratch2;
4265  li(top_address, Operand(allocation_top));
4266  ld(result, MemOperand(top_address));
4267
4268  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4269  // the same alignment on MIPS64.
4270  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4271
4272  if (emit_debug_code()) {
4273    And(at, result, Operand(kDoubleAlignmentMask));
4274    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4275  }
4276
4277  // Calculate new top and write it back.
4278  Daddu(result_end, result, Operand(object_size));
4279  sd(result_end, MemOperand(top_address));
4280
4281  Daddu(result, result, Operand(kHeapObjectTag));
4282}
4283
4284void MacroAssembler::FastAllocate(Register object_size, Register result,
4285                                  Register result_end, Register scratch,
4286                                  AllocationFlags flags) {
4287  // |object_size| and |result_end| may overlap, other registers must not.
4288  DCHECK(!AreAliased(object_size, result, scratch, at));
4289  DCHECK(!AreAliased(result_end, result, scratch, at));
4290
4291  ExternalReference allocation_top =
4292      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4293
4294  // Set up allocation top address and object size registers.
4295  Register top_address = scratch;
4296  li(top_address, Operand(allocation_top));
4297  ld(result, MemOperand(top_address));
4298
4299  // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
4300  // the same alignment on MIPS64.
4301  STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
4302
4303  if (emit_debug_code()) {
4304    And(at, result, Operand(kDoubleAlignmentMask));
4305    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
4306  }
4307
4308  // Calculate new top and write it back
4309  if ((flags & SIZE_IN_WORDS) != 0) {
4310    Dlsa(result_end, result, object_size, kPointerSizeLog2);
4311  } else {
4312    Daddu(result_end, result, Operand(object_size));
4313  }
4314
4315  // Update allocation top. result temporarily holds the new top.
4316  if (emit_debug_code()) {
4317    And(at, result_end, Operand(kObjectAlignmentMask));
4318    Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg));
4319  }
4320
4321  Daddu(result, result, Operand(kHeapObjectTag));
4322}
4323
4324void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
4325                                                     Label* not_unique_name) {
4326  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4327  Label succeed;
4328  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4329  Branch(&succeed, eq, at, Operand(zero_reg));
4330  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
4331
4332  bind(&succeed);
4333}
4334
4335
4336// Allocates a heap number or jumps to the label if the young space is full and
4337// a scavenge is needed.
4338void MacroAssembler::AllocateHeapNumber(Register result,
4339                                        Register scratch1,
4340                                        Register scratch2,
4341                                        Register heap_number_map,
4342                                        Label* need_gc,
4343                                        MutableMode mode) {
4344  // Allocate an object in the heap for the heap number and tag it as a heap
4345  // object.
4346  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
4347           NO_ALLOCATION_FLAGS);
4348
4349  Heap::RootListIndex map_index = mode == MUTABLE
4350      ? Heap::kMutableHeapNumberMapRootIndex
4351      : Heap::kHeapNumberMapRootIndex;
4352  AssertIsRoot(heap_number_map, map_index);
4353
4354  // Store heap number map in the allocated object.
4355  sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
4356}
4357
4358
4359void MacroAssembler::AllocateHeapNumberWithValue(Register result,
4360                                                 FPURegister value,
4361                                                 Register scratch1,
4362                                                 Register scratch2,
4363                                                 Label* gc_required) {
4364  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
4365  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
4366  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
4367}
4368
4369
4370void MacroAssembler::AllocateJSValue(Register result, Register constructor,
4371                                     Register value, Register scratch1,
4372                                     Register scratch2, Label* gc_required) {
4373  DCHECK(!result.is(constructor));
4374  DCHECK(!result.is(scratch1));
4375  DCHECK(!result.is(scratch2));
4376  DCHECK(!result.is(value));
4377
4378  // Allocate JSValue in new space.
4379  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
4380           NO_ALLOCATION_FLAGS);
4381
4382  // Initialize the JSValue.
4383  LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
4384  sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
4385  LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
4386  sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
4387  sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
4388  sd(value, FieldMemOperand(result, JSValue::kValueOffset));
4389  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
4390}
4391
4392void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
4393                                                Register end_address,
4394                                                Register filler) {
4395  Label loop, entry;
4396  Branch(&entry);
4397  bind(&loop);
4398  sd(filler, MemOperand(current_address));
4399  Daddu(current_address, current_address, kPointerSize);
4400  bind(&entry);
4401  Branch(&loop, ult, current_address, Operand(end_address));
4402}
4403
4404void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
4405                                                    FPURegister fs,
4406                                                    FPURegister ft) {
4407  FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4408  Label check_nan, save_payload, done;
4409  Register scratch1 = t8;
4410  Register scratch2 = t9;
4411
4412  sub_s(dest, fs, ft);
4413  // Check if the result of subtraction is NaN.
4414  BranchF32(nullptr, &check_nan, eq, fs, ft);
4415  Branch(USE_DELAY_SLOT, &done);
4416  dest.is(fd) ? nop() : mov_s(fd, dest);
4417
4418  bind(&check_nan);
4419  // Check if first operand is a NaN.
4420  mfc1(scratch1, fs);
4421  BranchF32(nullptr, &save_payload, eq, fs, fs);
4422  // Second operand must be a NaN.
4423  mfc1(scratch1, ft);
4424
4425  bind(&save_payload);
4426  // Reserve payload.
4427  And(scratch1, scratch1,
4428      Operand(kSingleSignMask | ((1 << kSingleNaNShift) - 1)));
4429  mfc1(scratch2, dest);
4430  And(scratch2, scratch2, Operand(kSingleNaNMask));
4431  Or(scratch2, scratch2, scratch1);
4432  mtc1(scratch2, fd);
4433
4434  bind(&done);
4435}
4436
4437void MacroAssembler::SubNanPreservePayloadAndSign_d(FPURegister fd,
4438                                                    FPURegister fs,
4439                                                    FPURegister ft) {
4440  FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd;
4441  Label check_nan, save_payload, done;
4442  Register scratch1 = t8;
4443  Register scratch2 = t9;
4444
4445  sub_d(dest, fs, ft);
4446  // Check if the result of subtraction is NaN.
4447  BranchF64(nullptr, &check_nan, eq, fs, ft);
4448  Branch(USE_DELAY_SLOT, &done);
4449  dest.is(fd) ? nop() : mov_d(fd, dest);
4450
4451  bind(&check_nan);
4452  // Check if first operand is a NaN.
4453  dmfc1(scratch1, fs);
4454  BranchF64(nullptr, &save_payload, eq, fs, fs);
4455  // Second operand must be a NaN.
4456  dmfc1(scratch1, ft);
4457
4458  bind(&save_payload);
4459  // Reserve payload.
4460  li(at, Operand(kDoubleSignMask | (1L << kDoubleNaNShift)));
4461  Dsubu(at, at, Operand(1));
4462  And(scratch1, scratch1, at);
4463  dmfc1(scratch2, dest);
4464  And(scratch2, scratch2, Operand(kDoubleNaNMask));
4465  Or(scratch2, scratch2, scratch1);
4466  dmtc1(scratch2, fd);
4467
4468  bind(&done);
4469}
4470
4471void MacroAssembler::CompareMapAndBranch(Register obj,
4472                                         Register scratch,
4473                                         Handle<Map> map,
4474                                         Label* early_success,
4475                                         Condition cond,
4476                                         Label* branch_to) {
4477  ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4478  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
4479}
4480
4481
4482void MacroAssembler::CompareMapAndBranch(Register obj_map,
4483                                         Handle<Map> map,
4484                                         Label* early_success,
4485                                         Condition cond,
4486                                         Label* branch_to) {
4487  Branch(branch_to, cond, obj_map, Operand(map));
4488}
4489
4490
4491void MacroAssembler::CheckMap(Register obj,
4492                              Register scratch,
4493                              Handle<Map> map,
4494                              Label* fail,
4495                              SmiCheckType smi_check_type) {
4496  if (smi_check_type == DO_SMI_CHECK) {
4497    JumpIfSmi(obj, fail);
4498  }
4499  Label success;
4500  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
4501  bind(&success);
4502}
4503
4504
4505void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
4506                                     Register scratch2, Handle<WeakCell> cell,
4507                                     Handle<Code> success,
4508                                     SmiCheckType smi_check_type) {
4509  Label fail;
4510  if (smi_check_type == DO_SMI_CHECK) {
4511    JumpIfSmi(obj, &fail);
4512  }
4513  ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
4514  GetWeakValue(scratch2, cell);
4515  Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
4516  bind(&fail);
4517}
4518
4519
4520void MacroAssembler::CheckMap(Register obj,
4521                              Register scratch,
4522                              Heap::RootListIndex index,
4523                              Label* fail,
4524                              SmiCheckType smi_check_type) {
4525  if (smi_check_type == DO_SMI_CHECK) {
4526    JumpIfSmi(obj, fail);
4527  }
4528  ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
4529  LoadRoot(at, index);
4530  Branch(fail, ne, scratch, Operand(at));
4531}
4532
4533
4534void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
4535  li(value, Operand(cell));
4536  ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
4537}
4538
4539void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
4540                                        const DoubleRegister src) {
4541  sub_d(dst, src, kDoubleRegZero);
4542}
4543
4544void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
4545                                   Label* miss) {
4546  GetWeakValue(value, cell);
4547  JumpIfSmi(value, miss);
4548}
4549
4550
4551void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
4552  if (IsMipsSoftFloatABI) {
4553    if (kArchEndian == kLittle) {
4554      Move(dst, v0, v1);
4555    } else {
4556      Move(dst, v1, v0);
4557    }
4558  } else {
4559    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
4560  }
4561}
4562
4563
4564void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
4565  if (IsMipsSoftFloatABI) {
4566    if (kArchEndian == kLittle) {
4567      Move(dst, a0, a1);
4568    } else {
4569      Move(dst, a1, a0);
4570    }
4571  } else {
4572    Move(dst, f12);  // Reg f12 is n64 ABI FP first argument value.
4573  }
4574}
4575
4576
4577void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
4578  if (!IsMipsSoftFloatABI) {
4579    Move(f12, src);
4580  } else {
4581    if (kArchEndian == kLittle) {
4582      Move(a0, a1, src);
4583    } else {
4584      Move(a1, a0, src);
4585    }
4586  }
4587}
4588
4589
4590void MacroAssembler::MovToFloatResult(DoubleRegister src) {
4591  if (!IsMipsSoftFloatABI) {
4592    Move(f0, src);
4593  } else {
4594    if (kArchEndian == kLittle) {
4595      Move(v0, v1, src);
4596    } else {
4597      Move(v1, v0, src);
4598    }
4599  }
4600}
4601
4602
4603void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
4604                                          DoubleRegister src2) {
4605  if (!IsMipsSoftFloatABI) {
4606    const DoubleRegister fparg2 = f13;
4607    if (src2.is(f12)) {
4608      DCHECK(!src1.is(fparg2));
4609      Move(fparg2, src2);
4610      Move(f12, src1);
4611    } else {
4612      Move(f12, src1);
4613      Move(fparg2, src2);
4614    }
4615  } else {
4616    if (kArchEndian == kLittle) {
4617      Move(a0, a1, src1);
4618      Move(a2, a3, src2);
4619    } else {
4620      Move(a1, a0, src1);
4621      Move(a3, a2, src2);
4622    }
4623  }
4624}
4625
4626
4627// -----------------------------------------------------------------------------
4628// JavaScript invokes.
4629
4630void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
4631                                        Register caller_args_count_reg,
4632                                        Register scratch0, Register scratch1) {
4633#if DEBUG
4634  if (callee_args_count.is_reg()) {
4635    DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
4636                       scratch1));
4637  } else {
4638    DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
4639  }
4640#endif
4641
4642  // Calculate the end of destination area where we will put the arguments
4643  // after we drop current frame. We add kPointerSize to count the receiver
4644  // argument which is not included into formal parameters count.
4645  Register dst_reg = scratch0;
4646  Dlsa(dst_reg, fp, caller_args_count_reg, kPointerSizeLog2);
4647  Daddu(dst_reg, dst_reg,
4648        Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
4649
4650  Register src_reg = caller_args_count_reg;
4651  // Calculate the end of source area. +kPointerSize is for the receiver.
4652  if (callee_args_count.is_reg()) {
4653    Dlsa(src_reg, sp, callee_args_count.reg(), kPointerSizeLog2);
4654    Daddu(src_reg, src_reg, Operand(kPointerSize));
4655  } else {
4656    Daddu(src_reg, sp,
4657          Operand((callee_args_count.immediate() + 1) * kPointerSize));
4658  }
4659
4660  if (FLAG_debug_code) {
4661    Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg));
4662  }
4663
4664  // Restore caller's frame pointer and return address now as they will be
4665  // overwritten by the copying loop.
4666  ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
4667  ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4668
4669  // Now copy callee arguments to the caller frame going backwards to avoid
4670  // callee arguments corruption (source and destination areas could overlap).
4671
4672  // Both src_reg and dst_reg are pointing to the word after the one to copy,
4673  // so they must be pre-decremented in the loop.
4674  Register tmp_reg = scratch1;
4675  Label loop, entry;
4676  Branch(&entry);
4677  bind(&loop);
4678  Dsubu(src_reg, src_reg, Operand(kPointerSize));
4679  Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
4680  ld(tmp_reg, MemOperand(src_reg));
4681  sd(tmp_reg, MemOperand(dst_reg));
4682  bind(&entry);
4683  Branch(&loop, ne, sp, Operand(src_reg));
4684
4685  // Leave current frame.
4686  mov(sp, dst_reg);
4687}
4688
4689void MacroAssembler::InvokePrologue(const ParameterCount& expected,
4690                                    const ParameterCount& actual,
4691                                    Label* done,
4692                                    bool* definitely_mismatches,
4693                                    InvokeFlag flag,
4694                                    const CallWrapper& call_wrapper) {
4695  bool definitely_matches = false;
4696  *definitely_mismatches = false;
4697  Label regular_invoke;
4698
4699  // Check whether the expected and actual arguments count match. If not,
4700  // setup registers according to contract with ArgumentsAdaptorTrampoline:
4701  //  a0: actual arguments count
4702  //  a1: function (passed through to callee)
4703  //  a2: expected arguments count
4704
4705  // The code below is made a lot easier because the calling code already sets
4706  // up actual and expected registers according to the contract if values are
4707  // passed in registers.
4708  DCHECK(actual.is_immediate() || actual.reg().is(a0));
4709  DCHECK(expected.is_immediate() || expected.reg().is(a2));
4710
4711  if (expected.is_immediate()) {
4712    DCHECK(actual.is_immediate());
4713    li(a0, Operand(actual.immediate()));
4714    if (expected.immediate() == actual.immediate()) {
4715      definitely_matches = true;
4716    } else {
4717      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
4718      if (expected.immediate() == sentinel) {
4719        // Don't worry about adapting arguments for builtins that
4720        // don't want that done. Skip adaption code by making it look
4721        // like we have a match between expected and actual number of
4722        // arguments.
4723        definitely_matches = true;
4724      } else {
4725        *definitely_mismatches = true;
4726        li(a2, Operand(expected.immediate()));
4727      }
4728    }
4729  } else if (actual.is_immediate()) {
4730    li(a0, Operand(actual.immediate()));
4731    Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
4732  } else {
4733    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
4734  }
4735
4736  if (!definitely_matches) {
4737    Handle<Code> adaptor =
4738        isolate()->builtins()->ArgumentsAdaptorTrampoline();
4739    if (flag == CALL_FUNCTION) {
4740      call_wrapper.BeforeCall(CallSize(adaptor));
4741      Call(adaptor);
4742      call_wrapper.AfterCall();
4743      if (!*definitely_mismatches) {
4744        Branch(done);
4745      }
4746    } else {
4747      Jump(adaptor, RelocInfo::CODE_TARGET);
4748    }
4749    bind(&regular_invoke);
4750  }
4751}
4752
4753void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
4754                                    const ParameterCount& expected,
4755                                    const ParameterCount& actual) {
4756  Label skip_hook;
4757  ExternalReference debug_hook_active =
4758      ExternalReference::debug_hook_on_function_call_address(isolate());
4759  li(t0, Operand(debug_hook_active));
4760  lb(t0, MemOperand(t0));
4761  Branch(&skip_hook, eq, t0, Operand(zero_reg));
4762  {
4763    FrameScope frame(this,
4764                     has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
4765    if (expected.is_reg()) {
4766      SmiTag(expected.reg());
4767      Push(expected.reg());
4768    }
4769    if (actual.is_reg()) {
4770      SmiTag(actual.reg());
4771      Push(actual.reg());
4772    }
4773    if (new_target.is_valid()) {
4774      Push(new_target);
4775    }
4776    Push(fun);
4777    Push(fun);
4778    CallRuntime(Runtime::kDebugOnFunctionCall);
4779    Pop(fun);
4780    if (new_target.is_valid()) {
4781      Pop(new_target);
4782    }
4783    if (actual.is_reg()) {
4784      Pop(actual.reg());
4785      SmiUntag(actual.reg());
4786    }
4787    if (expected.is_reg()) {
4788      Pop(expected.reg());
4789      SmiUntag(expected.reg());
4790    }
4791  }
4792  bind(&skip_hook);
4793}
4794
4795
4796void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
4797                                        const ParameterCount& expected,
4798                                        const ParameterCount& actual,
4799                                        InvokeFlag flag,
4800                                        const CallWrapper& call_wrapper) {
4801  // You can't call a function without a valid frame.
4802  DCHECK(flag == JUMP_FUNCTION || has_frame());
4803  DCHECK(function.is(a1));
4804  DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
4805
4806  if (call_wrapper.NeedsDebugHookCheck()) {
4807    CheckDebugHook(function, new_target, expected, actual);
4808  }
4809
4810  // Clear the new.target register if not given.
4811  if (!new_target.is_valid()) {
4812    LoadRoot(a3, Heap::kUndefinedValueRootIndex);
4813  }
4814
4815  Label done;
4816  bool definitely_mismatches = false;
4817  InvokePrologue(expected, actual, &done, &definitely_mismatches, flag,
4818                 call_wrapper);
4819  if (!definitely_mismatches) {
4820    // We call indirectly through the code field in the function to
4821    // allow recompilation to take effect without changing any of the
4822    // call sites.
4823    Register code = t0;
4824    ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4825    if (flag == CALL_FUNCTION) {
4826      call_wrapper.BeforeCall(CallSize(code));
4827      Call(code);
4828      call_wrapper.AfterCall();
4829    } else {
4830      DCHECK(flag == JUMP_FUNCTION);
4831      Jump(code);
4832    }
4833    // Continue here if InvokePrologue does handle the invocation due to
4834    // mismatched parameter counts.
4835    bind(&done);
4836  }
4837}
4838
4839
4840void MacroAssembler::InvokeFunction(Register function,
4841                                    Register new_target,
4842                                    const ParameterCount& actual,
4843                                    InvokeFlag flag,
4844                                    const CallWrapper& call_wrapper) {
4845  // You can't call a function without a valid frame.
4846  DCHECK(flag == JUMP_FUNCTION || has_frame());
4847
4848  // Contract with called JS functions requires that function is passed in a1.
4849  DCHECK(function.is(a1));
4850  Register expected_reg = a2;
4851  Register temp_reg = t0;
4852  ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4853  ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4854  // The argument count is stored as int32_t on 64-bit platforms.
4855  // TODO(plind): Smi on 32-bit platforms.
4856  lw(expected_reg,
4857     FieldMemOperand(temp_reg,
4858                     SharedFunctionInfo::kFormalParameterCountOffset));
4859  ParameterCount expected(expected_reg);
4860  InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper);
4861}
4862
4863
4864void MacroAssembler::InvokeFunction(Register function,
4865                                    const ParameterCount& expected,
4866                                    const ParameterCount& actual,
4867                                    InvokeFlag flag,
4868                                    const CallWrapper& call_wrapper) {
4869  // You can't call a function without a valid frame.
4870  DCHECK(flag == JUMP_FUNCTION || has_frame());
4871
4872  // Contract with called JS functions requires that function is passed in a1.
4873  DCHECK(function.is(a1));
4874
4875  // Get the function and setup the context.
4876  ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4877
4878  InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
4879}
4880
4881
4882void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
4883                                    const ParameterCount& expected,
4884                                    const ParameterCount& actual,
4885                                    InvokeFlag flag,
4886                                    const CallWrapper& call_wrapper) {
4887  li(a1, function);
4888  InvokeFunction(a1, expected, actual, flag, call_wrapper);
4889}
4890
4891
4892void MacroAssembler::IsObjectJSStringType(Register object,
4893                                          Register scratch,
4894                                          Label* fail) {
4895  DCHECK(kNotStringTag != 0);
4896
4897  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4898  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4899  And(scratch, scratch, Operand(kIsNotStringMask));
4900  Branch(fail, ne, scratch, Operand(zero_reg));
4901}
4902
4903
4904void MacroAssembler::IsObjectNameType(Register object,
4905                                      Register scratch,
4906                                      Label* fail) {
4907  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4908  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4909  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
4910}
4911
4912
4913// ---------------------------------------------------------------------------
4914// Support functions.
4915
4916
4917void MacroAssembler::GetMapConstructor(Register result, Register map,
4918                                       Register temp, Register temp2) {
4919  Label done, loop;
4920  ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
4921  bind(&loop);
4922  JumpIfSmi(result, &done);
4923  GetObjectType(result, temp, temp2);
4924  Branch(&done, ne, temp2, Operand(MAP_TYPE));
4925  ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
4926  Branch(&loop);
4927  bind(&done);
4928}
4929
4930void MacroAssembler::GetObjectType(Register object,
4931                                   Register map,
4932                                   Register type_reg) {
4933  ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
4934  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
4935}
4936
4937
4938// -----------------------------------------------------------------------------
4939// Runtime calls.
4940
4941void MacroAssembler::CallStub(CodeStub* stub,
4942                              TypeFeedbackId ast_id,
4943                              Condition cond,
4944                              Register r1,
4945                              const Operand& r2,
4946                              BranchDelaySlot bd) {
4947  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
4948  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
4949       cond, r1, r2, bd);
4950}
4951
4952
4953void MacroAssembler::TailCallStub(CodeStub* stub,
4954                                  Condition cond,
4955                                  Register r1,
4956                                  const Operand& r2,
4957                                  BranchDelaySlot bd) {
4958  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
4959}
4960
4961
4962bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4963  return has_frame_ || !stub->SometimesSetsUpAFrame();
4964}
4965
4966void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4967                                               FPURegister result,
4968                                               Register scratch1,
4969                                               Register scratch2,
4970                                               Register heap_number_map,
4971                                               Label* not_number,
4972                                               ObjectToDoubleFlags flags) {
4973  Label done;
4974  if ((flags & OBJECT_NOT_SMI) == 0) {
4975    Label not_smi;
4976    JumpIfNotSmi(object, &not_smi);
4977    // Remove smi tag and convert to double.
4978    // dsra(scratch1, object, kSmiTagSize);
4979    dsra32(scratch1, object, 0);
4980    mtc1(scratch1, result);
4981    cvt_d_w(result, result);
4982    Branch(&done);
4983    bind(&not_smi);
4984  }
4985  // Check for heap number and load double value from it.
4986  ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4987  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4988
4989  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4990    // If exponent is all ones the number is either a NaN or +/-Infinity.
4991    Register exponent = scratch1;
4992    Register mask_reg = scratch2;
4993    lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4994    li(mask_reg, HeapNumber::kExponentMask);
4995
4996    And(exponent, exponent, mask_reg);
4997    Branch(not_number, eq, exponent, Operand(mask_reg));
4998  }
4999  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
5000  bind(&done);
5001}
5002
5003
5004void MacroAssembler::SmiToDoubleFPURegister(Register smi,
5005                                            FPURegister value,
5006                                            Register scratch1) {
5007  dsra32(scratch1, smi, 0);
5008  mtc1(scratch1, value);
5009  cvt_d_w(value, value);
5010}
5011
5012static inline void BranchOvfHelper(MacroAssembler* masm, Register overflow_dst,
5013                                   Label* overflow_label,
5014                                   Label* no_overflow_label) {
5015  DCHECK(overflow_label || no_overflow_label);
5016  if (!overflow_label) {
5017    DCHECK(no_overflow_label);
5018    masm->Branch(no_overflow_label, ge, overflow_dst, Operand(zero_reg));
5019  } else {
5020    masm->Branch(overflow_label, lt, overflow_dst, Operand(zero_reg));
5021    if (no_overflow_label) masm->Branch(no_overflow_label);
5022  }
5023}
5024
5025void MacroAssembler::AddBranchOvf(Register dst, Register left,
5026                                  const Operand& right, Label* overflow_label,
5027                                  Label* no_overflow_label, Register scratch) {
5028  if (right.is_reg()) {
5029    AddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5030                 scratch);
5031  } else {
5032    if (kArchVariant == kMips64r6) {
5033      Register right_reg = t9;
5034      DCHECK(!left.is(right_reg));
5035      li(right_reg, Operand(right));
5036      AddBranchOvf(dst, left, right_reg, overflow_label, no_overflow_label);
5037    } else {
5038      Register overflow_dst = t9;
5039      DCHECK(!dst.is(scratch));
5040      DCHECK(!dst.is(overflow_dst));
5041      DCHECK(!scratch.is(overflow_dst));
5042      DCHECK(!left.is(overflow_dst));
5043      if (dst.is(left)) {
5044        mov(scratch, left);  // Preserve left.
5045        // Left is overwritten.
5046        Addu(dst, left, static_cast<int32_t>(right.immediate()));
5047        xor_(scratch, dst, scratch);  // Original left.
5048        // Load right since xori takes uint16 as immediate.
5049        Addu(overflow_dst, zero_reg, right);
5050        xor_(overflow_dst, dst, overflow_dst);
5051        and_(overflow_dst, overflow_dst, scratch);
5052      } else {
5053        Addu(dst, left, static_cast<int32_t>(right.immediate()));
5054        xor_(overflow_dst, dst, left);
5055        // Load right since xori takes uint16 as immediate.
5056        Addu(scratch, zero_reg, right);
5057        xor_(scratch, dst, scratch);
5058        and_(overflow_dst, scratch, overflow_dst);
5059      }
5060      BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5061    }
5062  }
5063}
5064
5065void MacroAssembler::AddBranchOvf(Register dst, Register left, Register right,
5066                                  Label* overflow_label,
5067                                  Label* no_overflow_label, Register scratch) {
5068  if (kArchVariant == kMips64r6) {
5069    if (!overflow_label) {
5070      DCHECK(no_overflow_label);
5071      DCHECK(!dst.is(scratch));
5072      Register left_reg = left.is(dst) ? scratch : left;
5073      Register right_reg = right.is(dst) ? t9 : right;
5074      DCHECK(!dst.is(left_reg));
5075      DCHECK(!dst.is(right_reg));
5076      Move(left_reg, left);
5077      Move(right_reg, right);
5078      addu(dst, left, right);
5079      Bnvc(left_reg, right_reg, no_overflow_label);
5080    } else {
5081      Bovc(left, right, overflow_label);
5082      addu(dst, left, right);
5083      if (no_overflow_label) bc(no_overflow_label);
5084    }
5085  } else {
5086    Register overflow_dst = t9;
5087    DCHECK(!dst.is(scratch));
5088    DCHECK(!dst.is(overflow_dst));
5089    DCHECK(!scratch.is(overflow_dst));
5090    DCHECK(!left.is(overflow_dst));
5091    DCHECK(!right.is(overflow_dst));
5092    DCHECK(!left.is(scratch));
5093    DCHECK(!right.is(scratch));
5094
5095    if (left.is(right) && dst.is(left)) {
5096      mov(overflow_dst, right);
5097      right = overflow_dst;
5098    }
5099
5100    if (dst.is(left)) {
5101      mov(scratch, left);           // Preserve left.
5102      addu(dst, left, right);       // Left is overwritten.
5103      xor_(scratch, dst, scratch);  // Original left.
5104      xor_(overflow_dst, dst, right);
5105      and_(overflow_dst, overflow_dst, scratch);
5106    } else if (dst.is(right)) {
5107      mov(scratch, right);          // Preserve right.
5108      addu(dst, left, right);       // Right is overwritten.
5109      xor_(scratch, dst, scratch);  // Original right.
5110      xor_(overflow_dst, dst, left);
5111      and_(overflow_dst, overflow_dst, scratch);
5112    } else {
5113      addu(dst, left, right);
5114      xor_(overflow_dst, dst, left);
5115      xor_(scratch, dst, right);
5116      and_(overflow_dst, scratch, overflow_dst);
5117    }
5118    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5119  }
5120}
5121
5122void MacroAssembler::SubBranchOvf(Register dst, Register left,
5123                                  const Operand& right, Label* overflow_label,
5124                                  Label* no_overflow_label, Register scratch) {
5125  DCHECK(overflow_label || no_overflow_label);
5126  if (right.is_reg()) {
5127    SubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5128                 scratch);
5129  } else {
5130    Register overflow_dst = t9;
5131    DCHECK(!dst.is(scratch));
5132    DCHECK(!dst.is(overflow_dst));
5133    DCHECK(!scratch.is(overflow_dst));
5134    DCHECK(!left.is(overflow_dst));
5135    DCHECK(!left.is(scratch));
5136    if (dst.is(left)) {
5137      mov(scratch, left);  // Preserve left.
5138      // Left is overwritten.
5139      Subu(dst, left, static_cast<int32_t>(right.immediate()));
5140      // Load right since xori takes uint16 as immediate.
5141      Addu(overflow_dst, zero_reg, right);
5142      xor_(overflow_dst, scratch, overflow_dst);  // scratch is original left.
5143      xor_(scratch, dst, scratch);                // scratch is original left.
5144      and_(overflow_dst, scratch, overflow_dst);
5145    } else {
5146      Subu(dst, left, right);
5147      xor_(overflow_dst, dst, left);
5148      // Load right since xori takes uint16 as immediate.
5149      Addu(scratch, zero_reg, right);
5150      xor_(scratch, left, scratch);
5151      and_(overflow_dst, scratch, overflow_dst);
5152    }
5153    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5154  }
5155}
5156
5157void MacroAssembler::SubBranchOvf(Register dst, Register left, Register right,
5158                                  Label* overflow_label,
5159                                  Label* no_overflow_label, Register scratch) {
5160  DCHECK(overflow_label || no_overflow_label);
5161  Register overflow_dst = t9;
5162  DCHECK(!dst.is(scratch));
5163  DCHECK(!dst.is(overflow_dst));
5164  DCHECK(!scratch.is(overflow_dst));
5165  DCHECK(!overflow_dst.is(left));
5166  DCHECK(!overflow_dst.is(right));
5167  DCHECK(!scratch.is(left));
5168  DCHECK(!scratch.is(right));
5169
5170  // This happens with some crankshaft code. Since Subu works fine if
5171  // left == right, let's not make that restriction here.
5172  if (left.is(right)) {
5173    mov(dst, zero_reg);
5174    if (no_overflow_label) {
5175      Branch(no_overflow_label);
5176    }
5177  }
5178
5179  if (dst.is(left)) {
5180    mov(scratch, left);  // Preserve left.
5181    subu(dst, left, right);            // Left is overwritten.
5182    xor_(overflow_dst, dst, scratch);  // scratch is original left.
5183    xor_(scratch, scratch, right);     // scratch is original left.
5184    and_(overflow_dst, scratch, overflow_dst);
5185  } else if (dst.is(right)) {
5186    mov(scratch, right);  // Preserve right.
5187    subu(dst, left, right);  // Right is overwritten.
5188    xor_(overflow_dst, dst, left);
5189    xor_(scratch, left, scratch);  // Original right.
5190    and_(overflow_dst, scratch, overflow_dst);
5191  } else {
5192    subu(dst, left, right);
5193    xor_(overflow_dst, dst, left);
5194    xor_(scratch, left, right);
5195    and_(overflow_dst, scratch, overflow_dst);
5196  }
5197  BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5198}
5199
5200void MacroAssembler::DaddBranchOvf(Register dst, Register left,
5201                                   const Operand& right, Label* overflow_label,
5202                                   Label* no_overflow_label, Register scratch) {
5203  if (right.is_reg()) {
5204    DaddBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5205                  scratch);
5206  } else {
5207    Register overflow_dst = t9;
5208    DCHECK(!dst.is(scratch));
5209    DCHECK(!dst.is(overflow_dst));
5210    DCHECK(!scratch.is(overflow_dst));
5211    DCHECK(!left.is(overflow_dst));
5212    li(overflow_dst, right);  // Load right.
5213    if (dst.is(left)) {
5214      mov(scratch, left);              // Preserve left.
5215      Daddu(dst, left, overflow_dst);  // Left is overwritten.
5216      xor_(scratch, dst, scratch);     // Original left.
5217      xor_(overflow_dst, dst, overflow_dst);
5218      and_(overflow_dst, overflow_dst, scratch);
5219    } else {
5220      Daddu(dst, left, overflow_dst);
5221      xor_(scratch, dst, overflow_dst);
5222      xor_(overflow_dst, dst, left);
5223      and_(overflow_dst, scratch, overflow_dst);
5224    }
5225    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5226  }
5227}
5228
5229
5230void MacroAssembler::DaddBranchOvf(Register dst, Register left, Register right,
5231                                   Label* overflow_label,
5232                                   Label* no_overflow_label, Register scratch) {
5233  Register overflow_dst = t9;
5234  DCHECK(!dst.is(scratch));
5235  DCHECK(!dst.is(overflow_dst));
5236  DCHECK(!scratch.is(overflow_dst));
5237  DCHECK(!left.is(overflow_dst));
5238  DCHECK(!right.is(overflow_dst));
5239  DCHECK(!left.is(scratch));
5240  DCHECK(!right.is(scratch));
5241
5242  if (left.is(right) && dst.is(left)) {
5243    mov(overflow_dst, right);
5244    right = overflow_dst;
5245  }
5246
5247  if (dst.is(left)) {
5248    mov(scratch, left);           // Preserve left.
5249    daddu(dst, left, right);      // Left is overwritten.
5250    xor_(scratch, dst, scratch);  // Original left.
5251    xor_(overflow_dst, dst, right);
5252    and_(overflow_dst, overflow_dst, scratch);
5253  } else if (dst.is(right)) {
5254    mov(scratch, right);          // Preserve right.
5255    daddu(dst, left, right);      // Right is overwritten.
5256    xor_(scratch, dst, scratch);  // Original right.
5257    xor_(overflow_dst, dst, left);
5258    and_(overflow_dst, overflow_dst, scratch);
5259  } else {
5260    daddu(dst, left, right);
5261    xor_(overflow_dst, dst, left);
5262    xor_(scratch, dst, right);
5263    and_(overflow_dst, scratch, overflow_dst);
5264  }
5265  BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5266}
5267
5268
5269void MacroAssembler::DsubBranchOvf(Register dst, Register left,
5270                                   const Operand& right, Label* overflow_label,
5271                                   Label* no_overflow_label, Register scratch) {
5272  DCHECK(overflow_label || no_overflow_label);
5273  if (right.is_reg()) {
5274    DsubBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5275                  scratch);
5276  } else {
5277    Register overflow_dst = t9;
5278    DCHECK(!dst.is(scratch));
5279    DCHECK(!dst.is(overflow_dst));
5280    DCHECK(!scratch.is(overflow_dst));
5281    DCHECK(!left.is(overflow_dst));
5282    DCHECK(!left.is(scratch));
5283    li(overflow_dst, right);  // Load right.
5284    if (dst.is(left)) {
5285      mov(scratch, left);                         // Preserve left.
5286      Dsubu(dst, left, overflow_dst);             // Left is overwritten.
5287      xor_(overflow_dst, scratch, overflow_dst);  // scratch is original left.
5288      xor_(scratch, dst, scratch);                // scratch is original left.
5289      and_(overflow_dst, scratch, overflow_dst);
5290    } else {
5291      Dsubu(dst, left, overflow_dst);
5292      xor_(scratch, left, overflow_dst);
5293      xor_(overflow_dst, dst, left);
5294      and_(overflow_dst, scratch, overflow_dst);
5295    }
5296    BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5297  }
5298}
5299
5300
5301void MacroAssembler::DsubBranchOvf(Register dst, Register left, Register right,
5302                                   Label* overflow_label,
5303                                   Label* no_overflow_label, Register scratch) {
5304  DCHECK(overflow_label || no_overflow_label);
5305  Register overflow_dst = t9;
5306  DCHECK(!dst.is(scratch));
5307  DCHECK(!dst.is(overflow_dst));
5308  DCHECK(!scratch.is(overflow_dst));
5309  DCHECK(!overflow_dst.is(left));
5310  DCHECK(!overflow_dst.is(right));
5311  DCHECK(!scratch.is(left));
5312  DCHECK(!scratch.is(right));
5313
5314  // This happens with some crankshaft code. Since Subu works fine if
5315  // left == right, let's not make that restriction here.
5316  if (left.is(right)) {
5317    mov(dst, zero_reg);
5318    if (no_overflow_label) {
5319      Branch(no_overflow_label);
5320    }
5321  }
5322
5323  if (dst.is(left)) {
5324    mov(scratch, left);                // Preserve left.
5325    dsubu(dst, left, right);           // Left is overwritten.
5326    xor_(overflow_dst, dst, scratch);  // scratch is original left.
5327    xor_(scratch, scratch, right);     // scratch is original left.
5328    and_(overflow_dst, scratch, overflow_dst);
5329  } else if (dst.is(right)) {
5330    mov(scratch, right);      // Preserve right.
5331    dsubu(dst, left, right);  // Right is overwritten.
5332    xor_(overflow_dst, dst, left);
5333    xor_(scratch, left, scratch);  // Original right.
5334    and_(overflow_dst, scratch, overflow_dst);
5335  } else {
5336    dsubu(dst, left, right);
5337    xor_(overflow_dst, dst, left);
5338    xor_(scratch, left, right);
5339    and_(overflow_dst, scratch, overflow_dst);
5340  }
5341  BranchOvfHelper(this, overflow_dst, overflow_label, no_overflow_label);
5342}
5343
5344static inline void BranchOvfHelperMult(MacroAssembler* masm,
5345                                       Register overflow_dst,
5346                                       Label* overflow_label,
5347                                       Label* no_overflow_label) {
5348  DCHECK(overflow_label || no_overflow_label);
5349  if (!overflow_label) {
5350    DCHECK(no_overflow_label);
5351    masm->Branch(no_overflow_label, eq, overflow_dst, Operand(zero_reg));
5352  } else {
5353    masm->Branch(overflow_label, ne, overflow_dst, Operand(zero_reg));
5354    if (no_overflow_label) masm->Branch(no_overflow_label);
5355  }
5356}
5357
5358void MacroAssembler::MulBranchOvf(Register dst, Register left,
5359                                  const Operand& right, Label* overflow_label,
5360                                  Label* no_overflow_label, Register scratch) {
5361  DCHECK(overflow_label || no_overflow_label);
5362  if (right.is_reg()) {
5363    MulBranchOvf(dst, left, right.rm(), overflow_label, no_overflow_label,
5364                 scratch);
5365  } else {
5366    Register overflow_dst = t9;
5367    DCHECK(!dst.is(scratch));
5368    DCHECK(!dst.is(overflow_dst));
5369    DCHECK(!scratch.is(overflow_dst));
5370    DCHECK(!left.is(overflow_dst));
5371    DCHECK(!left.is(scratch));
5372
5373    if (dst.is(left)) {
5374      Mul(scratch, left, static_cast<int32_t>(right.immediate()));
5375      Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
5376      mov(dst, scratch);
5377    } else {
5378      Mul(dst, left, static_cast<int32_t>(right.immediate()));
5379      Mulh(overflow_dst, left, static_cast<int32_t>(right.immediate()));
5380    }
5381
5382    dsra32(scratch, dst, 0);
5383    xor_(overflow_dst, overflow_dst, scratch);
5384
5385    BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
5386  }
5387}
5388
5389void MacroAssembler::MulBranchOvf(Register dst, Register left, Register right,
5390                                  Label* overflow_label,
5391                                  Label* no_overflow_label, Register scratch) {
5392  DCHECK(overflow_label || no_overflow_label);
5393  Register overflow_dst = t9;
5394  DCHECK(!dst.is(scratch));
5395  DCHECK(!dst.is(overflow_dst));
5396  DCHECK(!scratch.is(overflow_dst));
5397  DCHECK(!overflow_dst.is(left));
5398  DCHECK(!overflow_dst.is(right));
5399  DCHECK(!scratch.is(left));
5400  DCHECK(!scratch.is(right));
5401
5402  if (dst.is(left) || dst.is(right)) {
5403    Mul(scratch, left, right);
5404    Mulh(overflow_dst, left, right);
5405    mov(dst, scratch);
5406  } else {
5407    Mul(dst, left, right);
5408    Mulh(overflow_dst, left, right);
5409  }
5410
5411  dsra32(scratch, dst, 0);
5412  xor_(overflow_dst, overflow_dst, scratch);
5413
5414  BranchOvfHelperMult(this, overflow_dst, overflow_label, no_overflow_label);
5415}
5416
5417void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
5418                                 SaveFPRegsMode save_doubles,
5419                                 BranchDelaySlot bd) {
5420  // All parameters are on the stack. v0 has the return value after call.
5421
5422  // If the expected number of arguments of the runtime function is
5423  // constant, we check that the actual number of arguments match the
5424  // expectation.
5425  CHECK(f->nargs < 0 || f->nargs == num_arguments);
5426
5427  // TODO(1236192): Most runtime routines don't need the number of
5428  // arguments passed in because it is constant. At some point we
5429  // should remove this need and make the runtime routine entry code
5430  // smarter.
5431  PrepareCEntryArgs(num_arguments);
5432  PrepareCEntryFunction(ExternalReference(f, isolate()));
5433  CEntryStub stub(isolate(), 1, save_doubles);
5434  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5435}
5436
5437
5438void MacroAssembler::CallExternalReference(const ExternalReference& ext,
5439                                           int num_arguments,
5440                                           BranchDelaySlot bd) {
5441  PrepareCEntryArgs(num_arguments);
5442  PrepareCEntryFunction(ext);
5443
5444  CEntryStub stub(isolate(), 1);
5445  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
5446}
5447
5448
5449void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
5450  const Runtime::Function* function = Runtime::FunctionForId(fid);
5451  DCHECK_EQ(1, function->result_size);
5452  if (function->nargs >= 0) {
5453    PrepareCEntryArgs(function->nargs);
5454  }
5455  JumpToExternalReference(ExternalReference(fid, isolate()));
5456}
5457
5458void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
5459                                             BranchDelaySlot bd,
5460                                             bool builtin_exit_frame) {
5461  PrepareCEntryFunction(builtin);
5462  CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
5463                  builtin_exit_frame);
5464  Jump(stub.GetCode(),
5465       RelocInfo::CODE_TARGET,
5466       al,
5467       zero_reg,
5468       Operand(zero_reg),
5469       bd);
5470}
5471
5472void MacroAssembler::SetCounter(StatsCounter* counter, int value,
5473                                Register scratch1, Register scratch2) {
5474  if (FLAG_native_code_counters && counter->Enabled()) {
5475    li(scratch1, Operand(value));
5476    li(scratch2, Operand(ExternalReference(counter)));
5477    sw(scratch1, MemOperand(scratch2));
5478  }
5479}
5480
5481
5482void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
5483                                      Register scratch1, Register scratch2) {
5484  DCHECK(value > 0);
5485  if (FLAG_native_code_counters && counter->Enabled()) {
5486    li(scratch2, Operand(ExternalReference(counter)));
5487    lw(scratch1, MemOperand(scratch2));
5488    Addu(scratch1, scratch1, Operand(value));
5489    sw(scratch1, MemOperand(scratch2));
5490  }
5491}
5492
5493
5494void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
5495                                      Register scratch1, Register scratch2) {
5496  DCHECK(value > 0);
5497  if (FLAG_native_code_counters && counter->Enabled()) {
5498    li(scratch2, Operand(ExternalReference(counter)));
5499    lw(scratch1, MemOperand(scratch2));
5500    Subu(scratch1, scratch1, Operand(value));
5501    sw(scratch1, MemOperand(scratch2));
5502  }
5503}
5504
5505
5506// -----------------------------------------------------------------------------
5507// Debugging.
5508
5509void MacroAssembler::Assert(Condition cc, BailoutReason reason,
5510                            Register rs, Operand rt) {
5511  if (emit_debug_code())
5512    Check(cc, reason, rs, rt);
5513}
5514
5515
5516void MacroAssembler::AssertFastElements(Register elements) {
5517  if (emit_debug_code()) {
5518    DCHECK(!elements.is(at));
5519    Label ok;
5520    push(elements);
5521    ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
5522    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
5523    Branch(&ok, eq, elements, Operand(at));
5524    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
5525    Branch(&ok, eq, elements, Operand(at));
5526    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
5527    Branch(&ok, eq, elements, Operand(at));
5528    Abort(kJSObjectWithFastElementsMapHasSlowElements);
5529    bind(&ok);
5530    pop(elements);
5531  }
5532}
5533
5534
5535void MacroAssembler::Check(Condition cc, BailoutReason reason,
5536                           Register rs, Operand rt) {
5537  Label L;
5538  Branch(&L, cc, rs, rt);
5539  Abort(reason);
5540  // Will not return here.
5541  bind(&L);
5542}
5543
5544
5545void MacroAssembler::Abort(BailoutReason reason) {
5546  Label abort_start;
5547  bind(&abort_start);
5548#ifdef DEBUG
5549  const char* msg = GetBailoutReason(reason);
5550  if (msg != NULL) {
5551    RecordComment("Abort message: ");
5552    RecordComment(msg);
5553  }
5554
5555  if (FLAG_trap_on_abort) {
5556    stop(msg);
5557    return;
5558  }
5559#endif
5560
5561  // Check if Abort() has already been initialized.
5562  DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
5563
5564  Move(a0, Smi::FromInt(static_cast<int>(reason)));
5565
5566  // Disable stub call restrictions to always allow calls to abort.
5567  if (!has_frame_) {
5568    // We don't actually want to generate a pile of code for this, so just
5569    // claim there is a stack frame, without generating one.
5570    FrameScope scope(this, StackFrame::NONE);
5571    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
5572  } else {
5573    Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
5574  }
5575  // Will not return here.
5576  if (is_trampoline_pool_blocked()) {
5577    // If the calling code cares about the exact number of
5578    // instructions generated, we insert padding here to keep the size
5579    // of the Abort macro constant.
5580    // Currently in debug mode with debug_code enabled the number of
5581    // generated instructions is 10, so we use this as a maximum value.
5582    static const int kExpectedAbortInstructions = 10;
5583    int abort_instructions = InstructionsGeneratedSince(&abort_start);
5584    DCHECK(abort_instructions <= kExpectedAbortInstructions);
5585    while (abort_instructions++ < kExpectedAbortInstructions) {
5586      nop();
5587    }
5588  }
5589}
5590
5591
5592void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
5593  if (context_chain_length > 0) {
5594    // Move up the chain of contexts to the context containing the slot.
5595    ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5596    for (int i = 1; i < context_chain_length; i++) {
5597      ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
5598    }
5599  } else {
5600    // Slot is in the current function context.  Move it into the
5601    // destination register in case we store into it (the write barrier
5602    // cannot be allowed to destroy the context in esi).
5603    Move(dst, cp);
5604  }
5605}
5606
5607void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
5608  ld(dst, NativeContextMemOperand());
5609  ld(dst, ContextMemOperand(dst, index));
5610}
5611
5612
5613void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
5614                                                  Register map,
5615                                                  Register scratch) {
5616  // Load the initial map. The global functions all have initial maps.
5617  ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
5618  if (emit_debug_code()) {
5619    Label ok, fail;
5620    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
5621    Branch(&ok);
5622    bind(&fail);
5623    Abort(kGlobalFunctionsMustHaveInitialMap);
5624    bind(&ok);
5625  }
5626}
5627
5628void MacroAssembler::StubPrologue(StackFrame::Type type) {
5629  li(at, Operand(StackFrame::TypeToMarker(type)));
5630  PushCommonFrame(at);
5631}
5632
5633
5634void MacroAssembler::Prologue(bool code_pre_aging) {
5635  PredictableCodeSizeScope predictible_code_size_scope(
5636      this, kNoCodeAgeSequenceLength);
5637  // The following three instructions must remain together and unmodified
5638  // for code aging to work properly.
5639  if (code_pre_aging) {
5640    // Pre-age the code.
5641    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
5642    nop(Assembler::CODE_AGE_MARKER_NOP);
5643    // Load the stub address to t9 and call it,
5644    // GetCodeAge() extracts the stub address from this instruction.
5645    li(t9,
5646       Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
5647       ADDRESS_LOAD);
5648    nop();  // Prevent jalr to jal optimization.
5649    jalr(t9, a0);
5650    nop();  // Branch delay slot nop.
5651    nop();  // Pad the empty space.
5652  } else {
5653    PushStandardFrame(a1);
5654    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5655    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5656    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
5657  }
5658}
5659
5660void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
5661  ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5662  ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
5663  ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
5664}
5665
5666
5667void MacroAssembler::EnterFrame(StackFrame::Type type,
5668                                bool load_constant_pool_pointer_reg) {
5669  // Out-of-line constant pool not implemented on mips64.
5670  UNREACHABLE();
5671}
5672
5673
5674void MacroAssembler::EnterFrame(StackFrame::Type type) {
5675  int stack_offset, fp_offset;
5676  if (type == StackFrame::INTERNAL) {
5677    stack_offset = -4 * kPointerSize;
5678    fp_offset = 2 * kPointerSize;
5679  } else {
5680    stack_offset = -3 * kPointerSize;
5681    fp_offset = 1 * kPointerSize;
5682  }
5683  daddiu(sp, sp, stack_offset);
5684  stack_offset = -stack_offset - kPointerSize;
5685  sd(ra, MemOperand(sp, stack_offset));
5686  stack_offset -= kPointerSize;
5687  sd(fp, MemOperand(sp, stack_offset));
5688  stack_offset -= kPointerSize;
5689  li(t9, Operand(StackFrame::TypeToMarker(type)));
5690  sd(t9, MemOperand(sp, stack_offset));
5691  if (type == StackFrame::INTERNAL) {
5692    DCHECK_EQ(stack_offset, kPointerSize);
5693    li(t9, Operand(CodeObject()));
5694    sd(t9, MemOperand(sp, 0));
5695  } else {
5696    DCHECK_EQ(stack_offset, 0);
5697  }
5698  // Adjust FP to point to saved FP.
5699  Daddu(fp, sp, Operand(fp_offset));
5700}
5701
5702
5703void MacroAssembler::LeaveFrame(StackFrame::Type type) {
5704  daddiu(sp, fp, 2 * kPointerSize);
5705  ld(ra, MemOperand(fp, 1 * kPointerSize));
5706  ld(fp, MemOperand(fp, 0 * kPointerSize));
5707}
5708
5709void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
5710                                       Register argc) {
5711  Push(ra, fp);
5712  Move(fp, sp);
5713  Push(context, target, argc);
5714}
5715
5716void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
5717                                       Register argc) {
5718  Pop(context, target, argc);
5719  Pop(ra, fp);
5720}
5721
5722void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
5723                                    StackFrame::Type frame_type) {
5724  DCHECK(frame_type == StackFrame::EXIT ||
5725         frame_type == StackFrame::BUILTIN_EXIT);
5726
5727  // Set up the frame structure on the stack.
5728  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
5729  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
5730  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
5731
5732  // This is how the stack will look:
5733  // fp + 2 (==kCallerSPDisplacement) - old stack's end
5734  // [fp + 1 (==kCallerPCOffset)] - saved old ra
5735  // [fp + 0 (==kCallerFPOffset)] - saved old fp
5736  // [fp - 1 StackFrame::EXIT Smi
5737  // [fp - 2 (==kSPOffset)] - sp of the called function
5738  // [fp - 3 (==kCodeOffset)] - CodeObject
5739  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
5740  //   new stack (will contain saved ra)
5741
5742  // Save registers and reserve room for saved entry sp and code object.
5743  daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
5744  sd(ra, MemOperand(sp, 4 * kPointerSize));
5745  sd(fp, MemOperand(sp, 3 * kPointerSize));
5746  li(at, Operand(StackFrame::TypeToMarker(frame_type)));
5747  sd(at, MemOperand(sp, 2 * kPointerSize));
5748  // Set up new frame pointer.
5749  daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
5750
5751  if (emit_debug_code()) {
5752    sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
5753  }
5754
5755  // Accessed from ExitFrame::code_slot.
5756  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
5757  sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
5758
5759  // Save the frame pointer and the context in top.
5760  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5761  sd(fp, MemOperand(t8));
5762  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5763  sd(cp, MemOperand(t8));
5764
5765  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
5766  if (save_doubles) {
5767    // The stack is already aligned to 0 modulo 8 for stores with sdc1.
5768    int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
5769    int space = kNumOfSavedRegisters * kDoubleSize;
5770    Dsubu(sp, sp, Operand(space));
5771    // Remember: we only need to save every 2nd double FPU value.
5772    for (int i = 0; i < kNumOfSavedRegisters; i++) {
5773      FPURegister reg = FPURegister::from_code(2 * i);
5774      sdc1(reg, MemOperand(sp, i * kDoubleSize));
5775    }
5776  }
5777
5778  // Reserve place for the return address, stack space and an optional slot
5779  // (used by the DirectCEntryStub to hold the return value if a struct is
5780  // returned) and align the frame preparing for calling the runtime function.
5781  DCHECK(stack_space >= 0);
5782  Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
5783  if (frame_alignment > 0) {
5784    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5785    And(sp, sp, Operand(-frame_alignment));  // Align stack.
5786  }
5787
5788  // Set the exit frame sp value to point just before the return address
5789  // location.
5790  daddiu(at, sp, kPointerSize);
5791  sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
5792}
5793
5794
5795void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
5796                                    bool restore_context, bool do_return,
5797                                    bool argument_count_is_length) {
5798  // Optionally restore all double registers.
5799  if (save_doubles) {
5800    // Remember: we only need to restore every 2nd double FPU value.
5801    int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
5802    Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
5803                          kNumOfSavedRegisters * kDoubleSize));
5804    for (int i = 0; i < kNumOfSavedRegisters; i++) {
5805      FPURegister reg = FPURegister::from_code(2 * i);
5806      ldc1(reg, MemOperand(t8, i  * kDoubleSize));
5807    }
5808  }
5809
5810  // Clear top frame.
5811  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
5812  sd(zero_reg, MemOperand(t8));
5813
5814  // Restore current context from top and clear it in debug mode.
5815  if (restore_context) {
5816    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5817    ld(cp, MemOperand(t8));
5818  }
5819#ifdef DEBUG
5820  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
5821  sd(a3, MemOperand(t8));
5822#endif
5823
5824  // Pop the arguments, restore registers, and return.
5825  mov(sp, fp);  // Respect ABI stack constraint.
5826  ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
5827  ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
5828
5829  if (argument_count.is_valid()) {
5830    if (argument_count_is_length) {
5831      daddu(sp, sp, argument_count);
5832    } else {
5833      Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8);
5834    }
5835  }
5836
5837  if (do_return) {
5838    Ret(USE_DELAY_SLOT);
5839    // If returning, the instruction in the delay slot will be the addiu below.
5840  }
5841  daddiu(sp, sp, 2 * kPointerSize);
5842}
5843
5844int MacroAssembler::ActivationFrameAlignment() {
5845#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5846  // Running on the real platform. Use the alignment as mandated by the local
5847  // environment.
5848  // Note: This will break if we ever start generating snapshots on one Mips
5849  // platform for another Mips platform with a different alignment.
5850  return base::OS::ActivationFrameAlignment();
5851#else  // V8_HOST_ARCH_MIPS
5852  // If we are using the simulator then we should always align to the expected
5853  // alignment. As the simulator is used to generate snapshots we do not know
5854  // if the target platform will need alignment, so this is controlled from a
5855  // flag.
5856  return FLAG_sim_stack_alignment;
5857#endif  // V8_HOST_ARCH_MIPS
5858}
5859
5860
5861void MacroAssembler::AssertStackIsAligned() {
5862  if (emit_debug_code()) {
5863      const int frame_alignment = ActivationFrameAlignment();
5864      const int frame_alignment_mask = frame_alignment - 1;
5865
5866      if (frame_alignment > kPointerSize) {
5867        Label alignment_as_expected;
5868        DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
5869        andi(at, sp, frame_alignment_mask);
5870        Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5871        // Don't use Check here, as it will call Runtime_Abort re-entering here.
5872        stop("Unexpected stack alignment");
5873        bind(&alignment_as_expected);
5874      }
5875    }
5876}
5877
5878
5879void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
5880    Register reg,
5881    Register scratch,
5882    Label* not_power_of_two_or_zero) {
5883  Dsubu(scratch, reg, Operand(1));
5884  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
5885         scratch, Operand(zero_reg));
5886  and_(at, scratch, reg);  // In the delay slot.
5887  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
5888}
5889
5890
5891void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
5892  DCHECK(!reg.is(overflow));
5893  mov(overflow, reg);  // Save original value.
5894  SmiTag(reg);
5895  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
5896}
5897
5898
5899void MacroAssembler::SmiTagCheckOverflow(Register dst,
5900                                         Register src,
5901                                         Register overflow) {
5902  if (dst.is(src)) {
5903    // Fall back to slower case.
5904    SmiTagCheckOverflow(dst, overflow);
5905  } else {
5906    DCHECK(!dst.is(src));
5907    DCHECK(!dst.is(overflow));
5908    DCHECK(!src.is(overflow));
5909    SmiTag(dst, src);
5910    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
5911  }
5912}
5913
5914
5915void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
5916  if (SmiValuesAre32Bits()) {
5917    lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5918  } else {
5919    lw(dst, src);
5920    SmiUntag(dst);
5921  }
5922}
5923
5924
5925void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
5926  if (SmiValuesAre32Bits()) {
5927    // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
5928    lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
5929    dsll(dst, dst, scale);
5930  } else {
5931    lw(dst, src);
5932    DCHECK(scale >= kSmiTagSize);
5933    sll(dst, dst, scale - kSmiTagSize);
5934  }
5935}
5936
5937
5938// Returns 2 values: the Smi and a scaled version of the int within the Smi.
5939void MacroAssembler::SmiLoadWithScale(Register d_smi,
5940                                      Register d_scaled,
5941                                      MemOperand src,
5942                                      int scale) {
5943  if (SmiValuesAre32Bits()) {
5944    ld(d_smi, src);
5945    dsra(d_scaled, d_smi, kSmiShift - scale);
5946  } else {
5947    lw(d_smi, src);
5948    DCHECK(scale >= kSmiTagSize);
5949    sll(d_scaled, d_smi, scale - kSmiTagSize);
5950  }
5951}
5952
5953
5954// Returns 2 values: the untagged Smi (int32) and scaled version of that int.
5955void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
5956                                           Register d_scaled,
5957                                           MemOperand src,
5958                                           int scale) {
5959  if (SmiValuesAre32Bits()) {
5960    lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
5961    dsll(d_scaled, d_int, scale);
5962  } else {
5963    lw(d_int, src);
5964    // Need both the int and the scaled in, so use two instructions.
5965    SmiUntag(d_int);
5966    sll(d_scaled, d_int, scale);
5967  }
5968}
5969
5970
5971void MacroAssembler::UntagAndJumpIfSmi(Register dst,
5972                                       Register src,
5973                                       Label* smi_case) {
5974  // DCHECK(!dst.is(src));
5975  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
5976  SmiUntag(dst, src);
5977}
5978
5979void MacroAssembler::JumpIfSmi(Register value,
5980                               Label* smi_label,
5981                               Register scratch,
5982                               BranchDelaySlot bd) {
5983  DCHECK_EQ(0, kSmiTag);
5984  andi(scratch, value, kSmiTagMask);
5985  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
5986}
5987
5988void MacroAssembler::JumpIfNotSmi(Register value,
5989                                  Label* not_smi_label,
5990                                  Register scratch,
5991                                  BranchDelaySlot bd) {
5992  DCHECK_EQ(0, kSmiTag);
5993  andi(scratch, value, kSmiTagMask);
5994  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
5995}
5996
5997
5998void MacroAssembler::JumpIfNotBothSmi(Register reg1,
5999                                      Register reg2,
6000                                      Label* on_not_both_smi) {
6001  STATIC_ASSERT(kSmiTag == 0);
6002  // TODO(plind): Find some better to fix this assert issue.
6003#if defined(__APPLE__)
6004  DCHECK_EQ(1, kSmiTagMask);
6005#else
6006  DCHECK_EQ((int64_t)1, kSmiTagMask);
6007#endif
6008  or_(at, reg1, reg2);
6009  JumpIfNotSmi(at, on_not_both_smi);
6010}
6011
6012
6013void MacroAssembler::JumpIfEitherSmi(Register reg1,
6014                                     Register reg2,
6015                                     Label* on_either_smi) {
6016  STATIC_ASSERT(kSmiTag == 0);
6017  // TODO(plind): Find some better to fix this assert issue.
6018#if defined(__APPLE__)
6019  DCHECK_EQ(1, kSmiTagMask);
6020#else
6021  DCHECK_EQ((int64_t)1, kSmiTagMask);
6022#endif
6023  // Both Smi tags must be 1 (not Smi).
6024  and_(at, reg1, reg2);
6025  JumpIfSmi(at, on_either_smi);
6026}
6027
6028void MacroAssembler::AssertNotNumber(Register object) {
6029  if (emit_debug_code()) {
6030    STATIC_ASSERT(kSmiTag == 0);
6031    andi(at, object, kSmiTagMask);
6032    Check(ne, kOperandIsANumber, at, Operand(zero_reg));
6033    GetObjectType(object, t8, t8);
6034    Check(ne, kOperandIsNotANumber, t8, Operand(HEAP_NUMBER_TYPE));
6035  }
6036}
6037
6038void MacroAssembler::AssertNotSmi(Register object) {
6039  if (emit_debug_code()) {
6040    STATIC_ASSERT(kSmiTag == 0);
6041    andi(at, object, kSmiTagMask);
6042    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
6043  }
6044}
6045
6046
6047void MacroAssembler::AssertSmi(Register object) {
6048  if (emit_debug_code()) {
6049    STATIC_ASSERT(kSmiTag == 0);
6050    andi(at, object, kSmiTagMask);
6051    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
6052  }
6053}
6054
6055
6056void MacroAssembler::AssertString(Register object) {
6057  if (emit_debug_code()) {
6058    STATIC_ASSERT(kSmiTag == 0);
6059    SmiTst(object, t8);
6060    Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
6061    GetObjectType(object, t8, t8);
6062    Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
6063  }
6064}
6065
6066
6067void MacroAssembler::AssertName(Register object) {
6068  if (emit_debug_code()) {
6069    STATIC_ASSERT(kSmiTag == 0);
6070    SmiTst(object, t8);
6071    Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
6072    GetObjectType(object, t8, t8);
6073    Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
6074  }
6075}
6076
6077
6078void MacroAssembler::AssertFunction(Register object) {
6079  if (emit_debug_code()) {
6080    STATIC_ASSERT(kSmiTag == 0);
6081    SmiTst(object, t8);
6082    Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
6083    GetObjectType(object, t8, t8);
6084    Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
6085  }
6086}
6087
6088
6089void MacroAssembler::AssertBoundFunction(Register object) {
6090  if (emit_debug_code()) {
6091    STATIC_ASSERT(kSmiTag == 0);
6092    SmiTst(object, t8);
6093    Check(ne, kOperandIsASmiAndNotABoundFunction, t8, Operand(zero_reg));
6094    GetObjectType(object, t8, t8);
6095    Check(eq, kOperandIsNotABoundFunction, t8, Operand(JS_BOUND_FUNCTION_TYPE));
6096  }
6097}
6098
6099void MacroAssembler::AssertGeneratorObject(Register object) {
6100  if (emit_debug_code()) {
6101    STATIC_ASSERT(kSmiTag == 0);
6102    SmiTst(object, t8);
6103    Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
6104    GetObjectType(object, t8, t8);
6105    Check(eq, kOperandIsNotAGeneratorObject, t8,
6106          Operand(JS_GENERATOR_OBJECT_TYPE));
6107  }
6108}
6109
6110void MacroAssembler::AssertReceiver(Register object) {
6111  if (emit_debug_code()) {
6112    STATIC_ASSERT(kSmiTag == 0);
6113    SmiTst(object, t8);
6114    Check(ne, kOperandIsASmiAndNotAReceiver, t8, Operand(zero_reg));
6115    GetObjectType(object, t8, t8);
6116    Check(ge, kOperandIsNotAReceiver, t8, Operand(FIRST_JS_RECEIVER_TYPE));
6117  }
6118}
6119
6120
6121void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
6122                                                     Register scratch) {
6123  if (emit_debug_code()) {
6124    Label done_checking;
6125    AssertNotSmi(object);
6126    LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
6127    Branch(&done_checking, eq, object, Operand(scratch));
6128    ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
6129    LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
6130    Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
6131    bind(&done_checking);
6132  }
6133}
6134
6135
6136void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
6137  if (emit_debug_code()) {
6138    DCHECK(!reg.is(at));
6139    LoadRoot(at, index);
6140    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
6141  }
6142}
6143
6144
6145void MacroAssembler::JumpIfNotHeapNumber(Register object,
6146                                         Register heap_number_map,
6147                                         Register scratch,
6148                                         Label* on_not_heap_number) {
6149  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
6150  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
6151  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
6152}
6153
6154
6155void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
6156    Register first, Register second, Register scratch1, Register scratch2,
6157    Label* failure) {
6158  // Test that both first and second are sequential one-byte strings.
6159  // Assume that they are non-smis.
6160  ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
6161  ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
6162  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
6163  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
6164
6165  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
6166                                                 scratch2, failure);
6167}
6168
6169
6170void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
6171                                                           Register second,
6172                                                           Register scratch1,
6173                                                           Register scratch2,
6174                                                           Label* failure) {
6175  // Check that neither is a smi.
6176  STATIC_ASSERT(kSmiTag == 0);
6177  And(scratch1, first, Operand(second));
6178  JumpIfSmi(scratch1, failure);
6179  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
6180                                               scratch2, failure);
6181}
6182
6183void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
6184                                FPURegister src2, Label* out_of_line) {
6185  if (src1.is(src2)) {
6186    Move_s(dst, src1);
6187    return;
6188  }
6189
6190  // Check if one of operands is NaN.
6191  BranchF32(nullptr, out_of_line, eq, src1, src2);
6192
6193  if (kArchVariant >= kMips64r6) {
6194    max_s(dst, src1, src2);
6195  } else {
6196    Label return_left, return_right, done;
6197
6198    BranchF32(&return_right, nullptr, lt, src1, src2);
6199    BranchF32(&return_left, nullptr, lt, src2, src1);
6200
6201    // Operands are equal, but check for +/-0.
6202    mfc1(t8, src1);
6203    dsll32(t8, t8, 0);
6204    Branch(&return_left, eq, t8, Operand(zero_reg));
6205    Branch(&return_right);
6206
6207    bind(&return_right);
6208    if (!src2.is(dst)) {
6209      Move_s(dst, src2);
6210    }
6211    Branch(&done);
6212
6213    bind(&return_left);
6214    if (!src1.is(dst)) {
6215      Move_s(dst, src1);
6216    }
6217
6218    bind(&done);
6219  }
6220}
6221
6222void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
6223                                         FPURegister src2) {
6224  add_s(dst, src1, src2);
6225}
6226
6227void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
6228                                FPURegister src2, Label* out_of_line) {
6229  if (src1.is(src2)) {
6230    Move_s(dst, src1);
6231    return;
6232  }
6233
6234  // Check if one of operands is NaN.
6235  BranchF32(nullptr, out_of_line, eq, src1, src2);
6236
6237  if (kArchVariant >= kMips64r6) {
6238    min_s(dst, src1, src2);
6239  } else {
6240    Label return_left, return_right, done;
6241
6242    BranchF32(&return_left, nullptr, lt, src1, src2);
6243    BranchF32(&return_right, nullptr, lt, src2, src1);
6244
6245    // Left equals right => check for -0.
6246    mfc1(t8, src1);
6247    dsll32(t8, t8, 0);
6248    Branch(&return_right, eq, t8, Operand(zero_reg));
6249    Branch(&return_left);
6250
6251    bind(&return_right);
6252    if (!src2.is(dst)) {
6253      Move_s(dst, src2);
6254    }
6255    Branch(&done);
6256
6257    bind(&return_left);
6258    if (!src1.is(dst)) {
6259      Move_s(dst, src1);
6260    }
6261
6262    bind(&done);
6263  }
6264}
6265
6266void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
6267                                         FPURegister src2) {
6268  add_s(dst, src1, src2);
6269}
6270
6271void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
6272                                FPURegister src2, Label* out_of_line) {
6273  if (src1.is(src2)) {
6274    Move_d(dst, src1);
6275    return;
6276  }
6277
6278  // Check if one of operands is NaN.
6279  BranchF64(nullptr, out_of_line, eq, src1, src2);
6280
6281  if (kArchVariant >= kMips64r6) {
6282    max_d(dst, src1, src2);
6283  } else {
6284    Label return_left, return_right, done;
6285
6286    BranchF64(&return_right, nullptr, lt, src1, src2);
6287    BranchF64(&return_left, nullptr, lt, src2, src1);
6288
6289    // Left equals right => check for -0.
6290    dmfc1(t8, src1);
6291    Branch(&return_left, eq, t8, Operand(zero_reg));
6292    Branch(&return_right);
6293
6294    bind(&return_right);
6295    if (!src2.is(dst)) {
6296      Move_d(dst, src2);
6297    }
6298    Branch(&done);
6299
6300    bind(&return_left);
6301    if (!src1.is(dst)) {
6302      Move_d(dst, src1);
6303    }
6304
6305    bind(&done);
6306  }
6307}
6308
6309void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
6310                                         FPURegister src2) {
6311  add_d(dst, src1, src2);
6312}
6313
6314void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
6315                                FPURegister src2, Label* out_of_line) {
6316  if (src1.is(src2)) {
6317    Move_d(dst, src1);
6318    return;
6319  }
6320
6321  // Check if one of operands is NaN.
6322  BranchF64(nullptr, out_of_line, eq, src1, src2);
6323
6324  if (kArchVariant >= kMips64r6) {
6325    min_d(dst, src1, src2);
6326  } else {
6327    Label return_left, return_right, done;
6328
6329    BranchF64(&return_left, nullptr, lt, src1, src2);
6330    BranchF64(&return_right, nullptr, lt, src2, src1);
6331
6332    // Left equals right => check for -0.
6333    dmfc1(t8, src1);
6334    Branch(&return_right, eq, t8, Operand(zero_reg));
6335    Branch(&return_left);
6336
6337    bind(&return_right);
6338    if (!src2.is(dst)) {
6339      Move_d(dst, src2);
6340    }
6341    Branch(&done);
6342
6343    bind(&return_left);
6344    if (!src1.is(dst)) {
6345      Move_d(dst, src1);
6346    }
6347
6348    bind(&done);
6349  }
6350}
6351
6352void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
6353                                         FPURegister src2) {
6354  add_d(dst, src1, src2);
6355}
6356
6357void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
6358    Register first, Register second, Register scratch1, Register scratch2,
6359    Label* failure) {
6360  const int kFlatOneByteStringMask =
6361      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
6362  const int kFlatOneByteStringTag =
6363      kStringTag | kOneByteStringTag | kSeqStringTag;
6364  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
6365  andi(scratch1, first, kFlatOneByteStringMask);
6366  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
6367  andi(scratch2, second, kFlatOneByteStringMask);
6368  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
6369}
6370
6371static const int kRegisterPassedArguments = 8;
6372
6373int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
6374                                              int num_double_arguments) {
6375  int stack_passed_words = 0;
6376  num_reg_arguments += 2 * num_double_arguments;
6377
6378  // O32: Up to four simple arguments are passed in registers a0..a3.
6379  // N64: Up to eight simple arguments are passed in registers a0..a7.
6380  if (num_reg_arguments > kRegisterPassedArguments) {
6381    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
6382  }
6383  stack_passed_words += kCArgSlotCount;
6384  return stack_passed_words;
6385}
6386
6387
6388void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
6389                                               Register index,
6390                                               Register value,
6391                                               Register scratch,
6392                                               uint32_t encoding_mask) {
6393  Label is_object;
6394  SmiTst(string, at);
6395  Check(ne, kNonObject, at, Operand(zero_reg));
6396
6397  ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
6398  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
6399
6400  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
6401  li(scratch, Operand(encoding_mask));
6402  Check(eq, kUnexpectedStringType, at, Operand(scratch));
6403
6404  // TODO(plind): requires Smi size check code for mips32.
6405
6406  ld(at, FieldMemOperand(string, String::kLengthOffset));
6407  Check(lt, kIndexIsTooLarge, index, Operand(at));
6408
6409  DCHECK(Smi::kZero == 0);
6410  Check(ge, kIndexIsNegative, index, Operand(zero_reg));
6411}
6412
6413
6414void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6415                                          int num_double_arguments,
6416                                          Register scratch) {
6417  int frame_alignment = ActivationFrameAlignment();
6418
6419  // n64: Up to eight simple arguments in a0..a3, a4..a7, No argument slots.
6420  // O32: Up to four simple arguments are passed in registers a0..a3.
6421  // Those four arguments must have reserved argument slots on the stack for
6422  // mips, even though those argument slots are not normally used.
6423  // Both ABIs: Remaining arguments are pushed on the stack, above (higher
6424  // address than) the (O32) argument slots. (arg slot calculation handled by
6425  // CalculateStackPassedWords()).
6426  int stack_passed_arguments = CalculateStackPassedWords(
6427      num_reg_arguments, num_double_arguments);
6428  if (frame_alignment > kPointerSize) {
6429    // Make stack end at alignment and make room for num_arguments - 4 words
6430    // and the original value of sp.
6431    mov(scratch, sp);
6432    Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
6433    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6434    And(sp, sp, Operand(-frame_alignment));
6435    sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
6436  } else {
6437    Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6438  }
6439}
6440
6441
6442void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
6443                                          Register scratch) {
6444  PrepareCallCFunction(num_reg_arguments, 0, scratch);
6445}
6446
6447
6448void MacroAssembler::CallCFunction(ExternalReference function,
6449                                   int num_reg_arguments,
6450                                   int num_double_arguments) {
6451  li(t8, Operand(function));
6452  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
6453}
6454
6455
6456void MacroAssembler::CallCFunction(Register function,
6457                                   int num_reg_arguments,
6458                                   int num_double_arguments) {
6459  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
6460}
6461
6462
6463void MacroAssembler::CallCFunction(ExternalReference function,
6464                                   int num_arguments) {
6465  CallCFunction(function, num_arguments, 0);
6466}
6467
6468
6469void MacroAssembler::CallCFunction(Register function,
6470                                   int num_arguments) {
6471  CallCFunction(function, num_arguments, 0);
6472}
6473
6474
6475void MacroAssembler::CallCFunctionHelper(Register function,
6476                                         int num_reg_arguments,
6477                                         int num_double_arguments) {
6478  DCHECK(has_frame());
6479  // Make sure that the stack is aligned before calling a C function unless
6480  // running in the simulator. The simulator has its own alignment check which
6481  // provides more information.
6482  // The argument stots are presumed to have been set up by
6483  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
6484
6485#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
6486  if (emit_debug_code()) {
6487    int frame_alignment = base::OS::ActivationFrameAlignment();
6488    int frame_alignment_mask = frame_alignment - 1;
6489    if (frame_alignment > kPointerSize) {
6490      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
6491      Label alignment_as_expected;
6492      And(at, sp, Operand(frame_alignment_mask));
6493      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
6494      // Don't use Check here, as it will call Runtime_Abort possibly
6495      // re-entering here.
6496      stop("Unexpected alignment in CallCFunction");
6497      bind(&alignment_as_expected);
6498    }
6499  }
6500#endif  // V8_HOST_ARCH_MIPS
6501
6502  // Just call directly. The function called cannot cause a GC, or
6503  // allow preemption, so the return address in the link register
6504  // stays correct.
6505
6506  if (!function.is(t9)) {
6507    mov(t9, function);
6508    function = t9;
6509  }
6510
6511  Call(function);
6512
6513  int stack_passed_arguments = CalculateStackPassedWords(
6514      num_reg_arguments, num_double_arguments);
6515
6516  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
6517    ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
6518  } else {
6519    Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
6520  }
6521}
6522
6523
6524#undef BRANCH_ARGS_CHECK
6525
6526
6527void MacroAssembler::CheckPageFlag(
6528    Register object,
6529    Register scratch,
6530    int mask,
6531    Condition cc,
6532    Label* condition_met) {
6533  And(scratch, object, Operand(~Page::kPageAlignmentMask));
6534  ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
6535  And(scratch, scratch, Operand(mask));
6536  Branch(condition_met, cc, scratch, Operand(zero_reg));
6537}
6538
6539
6540void MacroAssembler::JumpIfBlack(Register object,
6541                                 Register scratch0,
6542                                 Register scratch1,
6543                                 Label* on_black) {
6544  HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
6545  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6546}
6547
6548
6549void MacroAssembler::HasColor(Register object,
6550                              Register bitmap_scratch,
6551                              Register mask_scratch,
6552                              Label* has_color,
6553                              int first_bit,
6554                              int second_bit) {
6555  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
6556  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
6557
6558  GetMarkBits(object, bitmap_scratch, mask_scratch);
6559
6560  Label other_color;
6561  // Note that we are using two 4-byte aligned loads.
6562  LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6563  And(t8, t9, Operand(mask_scratch));
6564  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
6565  // Shift left 1 by adding.
6566  Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
6567  And(t8, t9, Operand(mask_scratch));
6568  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
6569
6570  bind(&other_color);
6571}
6572
6573
6574void MacroAssembler::GetMarkBits(Register addr_reg,
6575                                 Register bitmap_reg,
6576                                 Register mask_reg) {
6577  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
6578  // addr_reg is divided into fields:
6579  // |63        page base        20|19    high      8|7   shift   3|2  0|
6580  // 'high' gives the index of the cell holding color bits for the object.
6581  // 'shift' gives the offset in the cell for this object's color.
6582  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6583  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
6584  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
6585  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
6586  Dlsa(bitmap_reg, bitmap_reg, t8, Bitmap::kBytesPerCellLog2);
6587  li(t8, Operand(1));
6588  dsllv(mask_reg, t8, mask_reg);
6589}
6590
6591
6592void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
6593                                 Register mask_scratch, Register load_scratch,
6594                                 Label* value_is_white) {
6595  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
6596  GetMarkBits(value, bitmap_scratch, mask_scratch);
6597
6598  // If the value is black or grey we don't need to do anything.
6599  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
6600  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
6601  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
6602  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
6603
6604  // Since both black and grey have a 1 in the first position and white does
6605  // not have a 1 there we only need to check one bit.
6606  // Note that we are using a 4-byte aligned 8-byte load.
6607  if (emit_debug_code()) {
6608    LoadWordPair(load_scratch,
6609                 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6610  } else {
6611    lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
6612  }
6613  And(t8, mask_scratch, load_scratch);
6614  Branch(value_is_white, eq, t8, Operand(zero_reg));
6615}
6616
6617
6618void MacroAssembler::LoadInstanceDescriptors(Register map,
6619                                             Register descriptors) {
6620  ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
6621}
6622
6623
6624void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
6625  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6626  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
6627}
6628
6629
6630void MacroAssembler::EnumLength(Register dst, Register map) {
6631  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
6632  lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
6633  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6634  SmiTag(dst);
6635}
6636
6637
6638void MacroAssembler::LoadAccessor(Register dst, Register holder,
6639                                  int accessor_index,
6640                                  AccessorComponent accessor) {
6641  ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
6642  LoadInstanceDescriptors(dst, dst);
6643  ld(dst,
6644     FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
6645  int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
6646                                           : AccessorPair::kSetterOffset;
6647  ld(dst, FieldMemOperand(dst, offset));
6648}
6649
6650
6651void MacroAssembler::CheckEnumCache(Label* call_runtime) {
6652  Register null_value = a5;
6653  Register  empty_fixed_array_value = a6;
6654  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
6655  Label next, start;
6656  mov(a2, a0);
6657
6658  // Check if the enum length field is properly initialized, indicating that
6659  // there is an enum cache.
6660  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6661
6662  EnumLength(a3, a1);
6663  Branch(
6664      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
6665
6666  LoadRoot(null_value, Heap::kNullValueRootIndex);
6667  jmp(&start);
6668
6669  bind(&next);
6670  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
6671
6672  // For all objects but the receiver, check that the cache is empty.
6673  EnumLength(a3, a1);
6674  Branch(call_runtime, ne, a3, Operand(Smi::kZero));
6675
6676  bind(&start);
6677
6678  // Check that there are no elements. Register a2 contains the current JS
6679  // object we've reached through the prototype chain.
6680  Label no_elements;
6681  ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
6682  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
6683
6684  // Second chance, the object may be using the empty slow element dictionary.
6685  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
6686  Branch(call_runtime, ne, a2, Operand(at));
6687
6688  bind(&no_elements);
6689  ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
6690  Branch(&next, ne, a2, Operand(null_value));
6691}
6692
6693
6694void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
6695  DCHECK(!output_reg.is(input_reg));
6696  Label done;
6697  li(output_reg, Operand(255));
6698  // Normal branch: nop in delay slot.
6699  Branch(&done, gt, input_reg, Operand(output_reg));
6700  // Use delay slot in this branch.
6701  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
6702  mov(output_reg, zero_reg);  // In delay slot.
6703  mov(output_reg, input_reg);  // Value is in range 0..255.
6704  bind(&done);
6705}
6706
6707
6708void MacroAssembler::ClampDoubleToUint8(Register result_reg,
6709                                        DoubleRegister input_reg,
6710                                        DoubleRegister temp_double_reg) {
6711  Label above_zero;
6712  Label done;
6713  Label in_bounds;
6714
6715  Move(temp_double_reg, 0.0);
6716  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
6717
6718  // Double value is less than zero, NaN or Inf, return 0.
6719  mov(result_reg, zero_reg);
6720  Branch(&done);
6721
6722  // Double value is >= 255, return 255.
6723  bind(&above_zero);
6724  Move(temp_double_reg, 255.0);
6725  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
6726  li(result_reg, Operand(255));
6727  Branch(&done);
6728
6729  // In 0-255 range, round and truncate.
6730  bind(&in_bounds);
6731  cvt_w_d(temp_double_reg, input_reg);
6732  mfc1(result_reg, temp_double_reg);
6733  bind(&done);
6734}
6735
6736void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
6737                                                     Register scratch_reg,
6738                                                     Label* no_memento_found) {
6739  Label map_check;
6740  Label top_check;
6741  ExternalReference new_space_allocation_top_adr =
6742      ExternalReference::new_space_allocation_top_address(isolate());
6743  const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
6744  const int kMementoLastWordOffset =
6745      kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
6746
6747  // Bail out if the object is not in new space.
6748  JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found);
6749  // If the object is in new space, we need to check whether it is on the same
6750  // page as the current top.
6751  Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
6752  li(at, Operand(new_space_allocation_top_adr));
6753  ld(at, MemOperand(at));
6754  Xor(scratch_reg, scratch_reg, Operand(at));
6755  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6756  Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
6757  // The object is on a different page than allocation top. Bail out if the
6758  // object sits on the page boundary as no memento can follow and we cannot
6759  // touch the memory following it.
6760  Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
6761  Xor(scratch_reg, scratch_reg, Operand(receiver_reg));
6762  And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6763  Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg));
6764  // Continue with the actual map check.
6765  jmp(&map_check);
6766  // If top is on the same page as the current object, we need to check whether
6767  // we are below top.
6768  bind(&top_check);
6769  Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
6770  li(at, Operand(new_space_allocation_top_adr));
6771  ld(at, MemOperand(at));
6772  Branch(no_memento_found, ge, scratch_reg, Operand(at));
6773  // Memento map check.
6774  bind(&map_check);
6775  ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
6776  Branch(no_memento_found, ne, scratch_reg,
6777         Operand(isolate()->factory()->allocation_memento_map()));
6778}
6779
6780
6781Register GetRegisterThatIsNotOneOf(Register reg1,
6782                                   Register reg2,
6783                                   Register reg3,
6784                                   Register reg4,
6785                                   Register reg5,
6786                                   Register reg6) {
6787  RegList regs = 0;
6788  if (reg1.is_valid()) regs |= reg1.bit();
6789  if (reg2.is_valid()) regs |= reg2.bit();
6790  if (reg3.is_valid()) regs |= reg3.bit();
6791  if (reg4.is_valid()) regs |= reg4.bit();
6792  if (reg5.is_valid()) regs |= reg5.bit();
6793  if (reg6.is_valid()) regs |= reg6.bit();
6794
6795  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
6796  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
6797    int code = config->GetAllocatableGeneralCode(i);
6798    Register candidate = Register::from_code(code);
6799    if (regs & candidate.bit()) continue;
6800    return candidate;
6801  }
6802  UNREACHABLE();
6803  return no_reg;
6804}
6805
6806bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
6807                Register reg5, Register reg6, Register reg7, Register reg8,
6808                Register reg9, Register reg10) {
6809  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
6810                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
6811                        reg7.is_valid() + reg8.is_valid() + reg9.is_valid() +
6812                        reg10.is_valid();
6813
6814  RegList regs = 0;
6815  if (reg1.is_valid()) regs |= reg1.bit();
6816  if (reg2.is_valid()) regs |= reg2.bit();
6817  if (reg3.is_valid()) regs |= reg3.bit();
6818  if (reg4.is_valid()) regs |= reg4.bit();
6819  if (reg5.is_valid()) regs |= reg5.bit();
6820  if (reg6.is_valid()) regs |= reg6.bit();
6821  if (reg7.is_valid()) regs |= reg7.bit();
6822  if (reg8.is_valid()) regs |= reg8.bit();
6823  if (reg9.is_valid()) regs |= reg9.bit();
6824  if (reg10.is_valid()) regs |= reg10.bit();
6825  int n_of_non_aliasing_regs = NumRegs(regs);
6826
6827  return n_of_valid_regs != n_of_non_aliasing_regs;
6828}
6829
6830
6831CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
6832                         FlushICache flush_cache)
6833    : address_(address),
6834      size_(instructions * Assembler::kInstrSize),
6835      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
6836      flush_cache_(flush_cache) {
6837  // Create a new macro assembler pointing to the address of the code to patch.
6838  // The size is adjusted with kGap on order for the assembler to generate size
6839  // bytes of instructions without failing with buffer size constraints.
6840  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6841}
6842
6843
6844CodePatcher::~CodePatcher() {
6845  // Indicate that code has changed.
6846  if (flush_cache_ == FLUSH) {
6847    Assembler::FlushICache(masm_.isolate(), address_, size_);
6848  }
6849  // Check that the code was patched as expected.
6850  DCHECK(masm_.pc_ == address_ + size_);
6851  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
6852}
6853
6854
6855void CodePatcher::Emit(Instr instr) {
6856  masm()->emit(instr);
6857}
6858
6859
6860void CodePatcher::Emit(Address addr) {
6861  // masm()->emit(reinterpret_cast<Instr>(addr));
6862}
6863
6864
6865void CodePatcher::ChangeBranchCondition(Instr current_instr,
6866                                        uint32_t new_opcode) {
6867  current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
6868  masm_.emit(current_instr);
6869}
6870
6871
6872void MacroAssembler::TruncatingDiv(Register result,
6873                                   Register dividend,
6874                                   int32_t divisor) {
6875  DCHECK(!dividend.is(result));
6876  DCHECK(!dividend.is(at));
6877  DCHECK(!result.is(at));
6878  base::MagicNumbersForDivision<uint32_t> mag =
6879  base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
6880  li(at, Operand(static_cast<int32_t>(mag.multiplier)));
6881  Mulh(result, dividend, Operand(at));
6882  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
6883  if (divisor > 0 && neg) {
6884    Addu(result, result, Operand(dividend));
6885  }
6886  if (divisor < 0 && !neg && mag.multiplier > 0) {
6887    Subu(result, result, Operand(dividend));
6888  }
6889  if (mag.shift > 0) sra(result, result, mag.shift);
6890  srl(at, dividend, 31);
6891  Addu(result, result, Operand(at));
6892}
6893
6894
6895}  // namespace internal
6896}  // namespace v8
6897
6898#endif  // V8_TARGET_ARCH_MIPS64
6899