// code-stubs-mips64.cc, revision f91f0611dbaf29ca0f1d4aecb357ce243a19d2fa
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/code-stubs.h"
#include "src/api-arguments.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/mips64/code-stubs-mips64.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ dsll(t9, a0, kPointerSizeLog2);
  __ Daddu(t9, sp, t9);
  __ sd(a1, MemOperand(t9, 0));
  __ Push(a1);
  __ Push(a2);
  __ Daddu(a0, a0, 3);
  __ TailCallRuntime(Runtime::kNewArray);
}

void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

void FastFunctionBindStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
  descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}

static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK((param_count == 0) ||
           a0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments, adjust sp.
    __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
    for (int i = 0; i < param_count; ++i) {
      // Store argument to stack.
      __ sd(descriptor.GetRegisterParameter(i),
            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);
  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(scratch3, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(
        scratch, scratch,
        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions then set result_reg and we are done.
    Label error;
    __ Branch(&error, ne, scratch, Operand(zero_reg));
    __ Move(result_reg, scratch3);
    __ Branch(&done);
    __ bind(&error);
  }
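
  // The slow path below reads the double as two 32-bit words and rebuilds the
  // low 32 bits of the truncated integer manually: it shifts the mantissa
  // (with the implicit leading 1) into place according to the exponent and
  // then reapplies the sign. This covers values whose magnitude does not fit
  // the FPU's signed 32-bit conversion used on the fast path above.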

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low,
        MemOperand(input_reg, double_offset + Register::kMantissaOffset));
  __ lw(input_high,
        MemOperand(input_reg, double_offset + Register::kExponentOffset));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
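  // scratch now holds (32 - shift): if it is non-negative, the low word
  // contributes its top bits via a right shift; if it is negative, the low
  // word itself must be shifted left by (shift - 32).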
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t1;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // The two values are identical and not both Smis, so neither of them is a
  // Smi. If it's not a heap number, then return equal.
  __ GetObjectType(a0, t0, t0);
  if (cc == less || cc == greater) {
    // Call runtime on identical JSObjects.
    __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
  } else {
    __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t0, Operand(FIRST_JS_RECEIVER_TYPE));
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ Branch(slow, eq, t0, Operand(SYMBOL_TYPE));
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ Branch(slow, eq, t0, Operand(SIMD128_VALUE_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
        __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(a6));
        DCHECK(is_int16(GREATER) && is_int16(LESS));
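        // Ret(USE_DELAY_SLOT) puts the next emitted instruction into the
        // branch delay slot, so the li below executes before control returns.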
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(a7, a6, Operand(exp_mask_reg));
    // If not all exponent bits are set (ne condition), it is not a NaN, so
    // the objects are equal.
    __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, a7, Operand(a6));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      DCHECK(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t0, t0);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }
  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ SmiUntag(at, rhs);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t0, t0);
  if (strict) {
    // If rhs was not a number and lhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ SmiUntag(at, lhs);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
    // If either operand is a JS object or an oddball value, then they are
    // not equal since their pointers are different.
    // There is no test for undetectability in strict equality.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    Label first_non_object;
    // Get the type of the first operand into a2 and compare it with
    // FIRST_JS_RECEIVER_TYPE.
    __ GetObjectType(lhs, a2, a2);
    __ Branch(&first_non_object, less, a2, Operand(FIRST_JS_RECEIVER_TYPE));

    // Return non-zero.
    Label return_not_equal;
    __ bind(&return_not_equal);
    __ Ret(USE_DELAY_SLOT);
    __ li(v0, Operand(1));

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

    __ GetObjectType(rhs, a3, a3);
    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_JS_RECEIVER_TYPE));

    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

    // Now that we have the types we might as well check for
    // internalized-internalized.
    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    __ Or(a2, a2, Operand(a3));
    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs, Register rhs,
                                                     Label* possible_strings,
                                                     Label* runtime_call) {
  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of rhs.
  Label object_test, return_equal, return_unequal, undetectable;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(runtime_call, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized. We already checked they weren't the same pointer so
  // they are not equal. Return non-equal by returning the non-zero object
  // pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&object_test);
  __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ And(at, t0, Operand(1 << Map::kIsUndetectable));
  __ Branch(&undetectable, ne, at, Operand(zero_reg));
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, ne, at, Operand(zero_reg));

  __ GetInstanceType(a2, a2);
  __ Branch(runtime_call, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(runtime_call, lt, a3, Operand(FIRST_JS_RECEIVER_TYPE));

  __ bind(&return_unequal);
  // Return non-equal by returning the non-zero object pointer in v0.
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In delay slot.

  __ bind(&undetectable);
  __ And(at, t1, Operand(1 << Map::kIsUndetectable));
  __ Branch(&return_unequal, eq, at, Operand(zero_reg));

  // If both sides are JSReceivers, then the result is false according to
  // the HTML specification, which says that only comparisons with null or
  // undefined are affected by special casing for document.all.
  __ GetInstanceType(a2, a2);
  __ Branch(&return_equal, eq, a2, Operand(ODDBALL_TYPE));
  __ GetInstanceType(a3, a3);
  __ Branch(&return_unequal, ne, a3, Operand(ODDBALL_TYPE));

  __ bind(&return_equal);
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(EQUAL));  // In delay slot.
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ SmiUntag(a1);
  __ SmiUntag(a0);
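  // With both smis untagged, lhs - rhs is negative, zero or positive exactly
  // when the comparison result should be LESS, EQUAL or GREATER, so the
  // subtraction in the delay slot below is the complete answer.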

  __ Ret(USE_DELAY_SLOT);
  __ dsubu(v0, a1, a0);
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
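  // Since the smi tag is 0 in the low bit, the AND of the two values has its
  // low bit clear iff at least one operand is a smi; if the AND is not a smi,
  // both operands are heap objects.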
  __ And(a6, lhs, Operand(rhs));
  __ JumpIfNotSmi(a6, &not_smis, a4);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to rhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
  EmitSmiNonsmiComparison(masm, lhs, rhs,
                          &both_loaded_as_doubles, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // f12, f14 are the double representations of the left hand side
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
  // left hand side and a0, a1 represent right hand side.

  Label nan;
  __ li(a4, Operand(LESS));
  __ li(a5, Operand(GREATER));
  __ li(a6, Operand(EQUAL));

  // Check if either rhs or lhs is NaN.
  __ BranchF(NULL, &nan, eq, f12, f14);

  // Check if LESS condition is satisfied. If true, move conditionally
  // result to v0.
  if (kArchVariant != kMips64r6) {
    __ c(OLT, D, f12, f14);
    __ Movt(v0, a4);
    // Use the previous check to conditionally store the opposite result
    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by the
    // next check.
    __ Movf(v0, a5);
    // Check if EQUAL condition is satisfied. If true, move conditionally
    // result to v0.
    __ c(EQ, D, f12, f14);
    __ Movt(v0, a6);
  } else {
    Label skip;
    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
    __ mov(v0, a4);  // Return LESS as result.

    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
    __ mov(v0, a6);  // Return EQUAL as result.

    __ mov(v0, a5);  // Return GREATER as result.
    __ bind(&skip);
  }
  __ Ret();

  __ bind(&nan);
  // NaN comparisons always fail.
  // Load whatever we need in v0 to make the comparison fail.
  DCHECK(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == lt || cc == le) {
    __ li(v0, Operand(GREATER));
  } else {
    __ li(v0, Operand(LESS));
  }


  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case a2 will contain the type of lhs_.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two
    // detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that a2 is the type of lhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                      a3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
                                                    a5);
  }
  // Never falls through to here.

  __ bind(&slow);
  if (cc == eq) {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
    __ Ret(USE_DELAY_SLOT);
    __ subu(v0, v0, a0);  // In delay slot.
  } else {
    // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
    // a1 (rhs) second.
    __ Push(lhs, rhs);
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ li(a0, Operand(Smi::FromInt(ncr)));
    __ push(a0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PushSafepointRegisters();
  __ Jump(t9);
}


void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
  __ mov(t9, ra);
  __ pop(ra);
  __ PopSafepointRegisters();
  __ Jump(t9);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ MultiPush(kJSCallerSaved | ra.bit());
  if (save_doubles()) {
    __ MultiPushFPU(kCallerSavedFPU);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = a1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ MultiPopFPU(kCallerSavedFPU);
  }

  __ MultiPop(kJSCallerSaved | ra.bit());
  __ Ret();
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(a2));
  const DoubleRegister double_base = f2;
  const DoubleRegister double_exponent = f4;
  const DoubleRegister double_result = f0;
  const DoubleRegister double_scratch = f6;
  const FPURegister single_scratch = f8;
  const Register scratch = t1;
  const Register scratch2 = a7;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldc1(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ EmitFPUTruncate(kRoundToMinusInf,
                       scratch,
                       double_exponent,
                       at,
                       double_scratch,
                       scratch2,
                       kCheckForInexactConversion);
    // scratch2 == 0 means there was no conversion error.
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));

    __ push(ra);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch2);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(ra);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }

  __ mov_d(double_scratch, double_base);  // Back up base.
  __ Move(double_result, 1.0);

  // Get absolute value of exponent.
  Label positive_exponent, bail_out;
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
  __ Dsubu(scratch, zero_reg, scratch);
  // Check when Dsubu overflows and we get negative result
  // (happens only when input is MIN_INT).
  __ Branch(&bail_out, gt, zero_reg, Operand(scratch));
  __ bind(&positive_exponent);
  __ Assert(ge, kUnexpectedNegativeValue, scratch, Operand(zero_reg));
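
  // Compute double_base^|exponent| by square-and-multiply: whenever the low
  // bit of the exponent is set, multiply the result by the current base, then
  // square the base and shift the exponent right by one bit.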

  Label while_true, no_carry, loop_end;
  __ bind(&while_true);

  __ And(scratch2, scratch, 1);

  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
  __ mul_d(double_result, double_result, double_scratch);
  __ bind(&no_carry);

  __ dsra(scratch, scratch, 1);

  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
  __ mul_d(double_scratch, double_scratch, double_scratch);

  __ Branch(&while_true);

  __ bind(&loop_end);

  __ Branch(&done, ge, exponent, Operand(zero_reg));
  __ Move(double_scratch, 1.0);
  __ div_d(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);

  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ bind(&bail_out);
  __ mtc1(exponent, single_scratch);
  __ cvt_d_w(double_exponent, single_scratch);

  // Returning or bailing out.
  __ push(ra);
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ PrepareCallCFunction(0, 2, scratch);
    __ MovToFloatParameters(double_base, double_exponent);
    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
                     0, 2);
  }
  __ pop(ra);
  __ MovFromFloatResult(double_result);

  __ bind(&done);
  __ Ret();
}

bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
}


void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  StoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
  RestoreRegistersStateStub stub(isolate);
  stub.GetCode();
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // a0: number of arguments including receiver
  // a1: pointer to builtin function
  // fp: frame pointer    (restored after C call)
  // sp: stack pointer    (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // a2: pointer to the first argument

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(s1, a2);
  } else {
    // Compute the argv pointer in a callee-saved register.
    __ Dlsa(s1, sp, a0, kPointerSizeLog2);
    __ Dsubu(s1, s1, kPointerSize);
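    // This computes s1 = sp + (argc - 1) * kPointerSize; s1 is passed on as
    // the argv argument to the C function below.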
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles(), 0, is_builtin_exit()
                                           ? StackFrame::BUILTIN_EXIT
                                           : StackFrame::EXIT);

  // s0: number of arguments  including receiver (C callee-saved)
  // s1: pointer to first argument (C callee-saved)
  // s2: pointer to builtin function (C callee-saved)

  // Prepare arguments for C routine.
  // a0 = argc
  __ mov(s0, a0);
  __ mov(s2, a1);

  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
  // also need to reserve the 4 argument slots on the stack.

  __ AssertStackIsAligned();

  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  int result_stack_size;
  if (result_size() <= 2) {
    // a0 = argc, a1 = argv, a2 = isolate
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a1, s1);
    result_stack_size = 0;
  } else {
    DCHECK_EQ(3, result_size());
    // Allocate additional space for the result.
    result_stack_size =
        ((result_size() * kPointerSize) + frame_alignment_mask) &
        ~frame_alignment_mask;
    __ Dsubu(sp, sp, Operand(result_stack_size));

    // a0 = hidden result argument, a1 = argc, a2 = argv, a3 = isolate.
    __ li(a3, Operand(ExternalReference::isolate_address(isolate())));
    __ mov(a2, s1);
    __ mov(a1, a0);
    __ mov(a0, sp);
  }

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
    int kNumInstructionsToJump = 4;
    Label find_ra;
    // Adjust the value in ra to point to the correct return location, 2nd
    // instruction past the real call into C code (the jalr(t9)), and push it.
    // This is the return address of the exit frame.
    if (kArchVariant >= kMips64r6) {
      __ addiupc(ra, kNumInstructionsToJump + 1);
    } else {
      // This branch-and-link sequence is needed to find the current PC on mips
      // before r6, saved to the ra register.
      __ bal(&find_ra);  // bal exposes branch delay slot.
      __ Daddu(ra, ra, kNumInstructionsToJump * Instruction::kInstrSize);
    }
    __ bind(&find_ra);

    // This spot was reserved in EnterExitFrame.
    __ sd(ra, MemOperand(sp, result_stack_size));
    // Stack space reservation moved to the branch delay slot below.
    // Stack is still aligned.

    // Call the C routine.
    __ mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
    __ jalr(t9);
    // Set up sp in the delay slot.
    __ daddiu(sp, sp, -kCArgsSlotsSize);
    // Make sure the stored 'ra' points to this position.
    DCHECK_EQ(kNumInstructionsToJump,
              masm->InstructionsGeneratedSince(&find_ra));
  }
  if (result_size() > 2) {
    DCHECK_EQ(3, result_size());
    // Read result values stored on stack.
    __ ld(a0, MemOperand(v0, 2 * kPointerSize));
    __ ld(v1, MemOperand(v0, 1 * kPointerSize));
    __ ld(v0, MemOperand(v0, 0 * kPointerSize));
  }
  // Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!

  // Check result for exception sentinel.
  Label exception_returned;
  __ LoadRoot(a4, Heap::kExceptionRootIndex);
  __ Branch(&exception_returned, eq, a4, Operand(v0));

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ li(a2, Operand(pending_exception_address));
    __ ld(a2, MemOperand(a2));
    __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ Branch(&okay, eq, a4, Operand(a2));
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // v0:v1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // s0: still holds argc (callee-saved).
    argc = s0;
  }
  __ LeaveExitFrame(save_doubles(), argc, true, EMIT_RETURN);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set v0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, a0);
    __ mov(a0, zero_reg);
    __ mov(a1, zero_reg);
    __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ li(cp, Operand(pending_handler_context_address));
  __ ld(cp, MemOperand(cp));
  __ li(sp, Operand(pending_handler_sp_address));
  __ ld(sp, MemOperand(sp));
  __ li(fp, Operand(pending_handler_fp_address));
  __ ld(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  Label zero;
  __ Branch(&zero, eq, cp, Operand(zero_reg));
  __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ bind(&zero);

  // Compute the handler entry address and jump to it.
  __ li(a1, Operand(pending_handler_code_address));
  __ ld(a1, MemOperand(a1));
  __ li(a2, Operand(pending_handler_offset_address));
  __ ld(a2, MemOperand(a2));
  __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Daddu(t9, a1, a2);
  __ Jump(t9);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  Label invoke, handler_entry, exit;
  Isolate* isolate = masm->isolate();

  // TODO(plind): unify the ABI description here.
  // Registers:
  // a0: entry address
  // a1: function
  // a2: receiver
  // a3: argc
  // a4: argv (on mips64)

  // Stack:
  // 0 arg slots on mips64 (4 args slots on mips)
  // args -- passed in the a4 register on mips64, on the stack on mips

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Save callee saved registers on the stack.
  __ MultiPush(kCalleeSaved | ra.bit());

  // Save callee-saved FPU registers.
  __ MultiPushFPU(kCalleeSavedFPU);
  // Set up the reserved register for 0.0.
  __ Move(kDoubleRegZero, 0.0);

  // Load argv in s0 register.
  __ mov(s0, a4);  // argv is the 5th parameter, passed in a4 on mips64.

  __ InitializeRootRegister();

  // We build an EntryFrame.
  __ li(a7, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  int marker = type();
  __ li(a6, Operand(Smi::FromInt(marker)));
  __ li(a5, Operand(Smi::FromInt(marker)));
  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
  __ li(a4, Operand(c_entry_fp));
  __ ld(a4, MemOperand(a4));
  __ Push(a7, a6, a5, a4);
  // Set up frame pointer for the frame to be pushed.
  __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // caller fp          |
  // function slot      | entry frame
  // context slot       |
  // bad fp (0xff...f)  |
  // callee saved registers + ra
  // [ O32: 4 args slots]
  // args

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ li(a5, Operand(ExternalReference(js_entry_sp)));
  __ ld(a6, MemOperand(a5));
  __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
  __ sd(fp, MemOperand(a5));
  __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ nop();   // Branch delay slot nop.
  __ bind(&non_outermost_js);
  __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(a4);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushStackHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ sd(v0, MemOperand(a4));  // We come back from 'invoke'. result is in v0.
  __ LoadRoot(v0, Heap::kExceptionRootIndex);
  __ b(&exit);  // b exposes branch delay slot.
  __ nop();   // Branch delay slot nop.

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bal(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Registers:
  // a0: entry_address
  // a1: function
  // a2: receiver_pointer
  // a3: argc
  // s0: argv
  //
  // Stack:
  // handler frame
  // entry frame
  // callee saved registers + ra
  // [ O32: 4 args slots]
  // args

  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ li(a4, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
    __ li(a4, Operand(entry));
  }
  __ ld(t9, MemOperand(a4));  // Deref address.
  // Call JSEntryTrampoline.
  __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
  __ Call(t9);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // v0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(a5);
  __ Branch(&non_outermost_js_2,
            ne,
            a5,
            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ li(a5, Operand(ExternalReference(js_entry_sp)));
  __ sd(zero_reg, MemOperand(a5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(a5);
  __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
                                      isolate)));
  __ sd(a5, MemOperand(a4));

  // Reset the stack to the callee saved registers.
  __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);

  // Restore callee-saved fpu registers.
  __ MultiPopFPU(kCalleeSavedFPU);

  // Restore callee saved registers from the stack.
  __ MultiPop(kCalleeSaved | ra.bit());
  // Return.
  __ Jump(ra);
}


void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in ra.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = a5;
  Register result = v0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::VectorRegister(),
                     LoadWithVectorDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
                                                          a5, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to the runtime if native RegExp is not selected at
  // compile time, or if the regexp entry in generated code has been turned
  // off by a runtime switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]:  last_match_info (expected JSArray)
  //  sp[8]:  previous index
  //  sp[16]: subject string
  //  sp[24]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Label runtime;
  // Allocation of registers for this function. These are in callee save
  // registers and will be preserved by the call to the native RegExp code, as
  // this code is called using the normal C calling convention. When calling
  // directly from generated code the native RegExp code will not do a GC and
  // therefore the content of these registers are safe to use after the call.
  // MIPS - using s0..s2, since we are not using CEntry Stub.
  Register subject = s0;
  Register regexp_data = s1;
  Register last_match_info_elements = s2;

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(
          isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ li(a0, Operand(address_of_regexp_stack_memory_size));
  __ ld(a0, MemOperand(a0, 0));
  __ Branch(&runtime, eq, a0, Operand(zero_reg));

  // Check that the first argument is a JSRegExp object.
  __ ld(a0, MemOperand(sp, kJSRegExpOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(a0, &runtime);
  __ GetObjectType(a0, a1, a1);
  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ SmiTst(regexp_data, a4);
    __ Check(nz,
             kUnexpectedTypeForRegExpDataFixedArrayExpected,
             a4,
             Operand(zero_reg));
    __ GetObjectType(regexp_data, a0, a0);
    __ Check(eq,
             kUnexpectedTypeForRegExpDataFixedArrayExpected,
             a0,
             Operand(FIXED_ARRAY_TYPE));
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));

  // regexp_data: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ ld(a2,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or          number_of_captures * 2 <= offsets vector size - 2
  // Or          number_of_captures     <= offsets vector size / 2 - 1
  // Multiplying by 2 comes for free since a2 is smi-tagged.
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
  __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));

  // Reset offset for possibly sliced string.
  __ mov(t0, zero_reg);
  __ ld(subject, MemOperand(sp, kSubjectOffset));
  __ JumpIfSmi(subject, &runtime);
  __ mov(a3, subject);  // Make a copy of the original subject string.

  // subject: subject string
  // a3: subject string
  // regexp_data: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential string?  If yes, go to (4).
  // (2) Sequential or cons?  If not, go to (5).
  // (3) Cons string.  If the string is flat, replace subject with first string
  //     and go to (1). Otherwise bail out to runtime.
  // (4) Sequential string.  Load regexp code according to encoding.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (5) Long external string?  If not, go to (7).
  // (6) External string.  Make it, offset-wise, look like a sequential string.
  //     Go to (4).
  // (7) Short external string or not a string?  If yes, bail out to runtime.
  // (8) Sliced string.  Replace subject with parent.  Go to (1).

  Label check_underlying;   // (1)
  Label seq_string;         // (4)
  Label not_seq_nor_cons;   // (5)
  Label external_string;    // (6)
  Label not_long_external;  // (7)

  __ bind(&check_underlying);
  __ ld(a2, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a2, Map::kInstanceTypeOffset));

  // (1) Sequential string?  If yes, go to (4).
  __ And(a1,
         a0,
         Operand(kIsNotStringMask |
                 kStringRepresentationMask |
                 kShortExternalStringMask));
  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1450  __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (4).
1451
1452  // (2) Sequential or cons?  If not, go to (5).
1453  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1454  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1455  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1456  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1457  // Go to (5).
1458  __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
1459
1460  // (3) Cons string.  Check that it's flat.
1461  // Replace subject with first string and reload instance type.
1462  __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
1463  __ LoadRoot(a1, Heap::kempty_stringRootIndex);
1464  __ Branch(&runtime, ne, a0, Operand(a1));
1465  __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1466  __ jmp(&check_underlying);
1467
1468  // (4) Sequential string.  Load regexp code according to encoding.
1469  __ bind(&seq_string);
1470  // subject: sequential subject string (or look-alike, external string)
1471  // a3: original subject string
1472  // Load previous index and check range before a3 is overwritten.  We have to
1473  // use a3 instead of subject here because subject might have been only made
1474  // to look like a sequential string when it actually is an external string.
1475  __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
1476  __ JumpIfNotSmi(a1, &runtime);
1477  __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
1478  __ Branch(&runtime, ls, a3, Operand(a1));
1479  __ SmiUntag(a1);
1480
1481  STATIC_ASSERT(kStringEncodingMask == 4);
1482  STATIC_ASSERT(kOneByteStringTag == 4);
1483  STATIC_ASSERT(kTwoByteStringTag == 0);
1484  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one_byte.
1485  __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
1486  __ dsra(a3, a0, 2);  // a3 is 1 for one_byte, 0 for UC16 (used below).
1487  __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
1488  __ Movz(t9, a5, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code in a5.
1489
1490  // (E) Carry on.  String handling is done.
1491  // t9: irregexp code
1492  // Check that the irregexp code has been generated for the actual string
1493  // encoding. If it has, the field contains a code object; otherwise it
1494  // contains a smi (code flushing support).
1495  __ JumpIfSmi(t9, &runtime);
1496
1497  // a1: previous index
1498  // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
1499  // t9: code
1500  // subject: Subject string
1501  // regexp_data: RegExp data (FixedArray)
1502  // All checks done. Now push arguments for native regexp code.
1503  __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
1504                      1, a0, a2);
1505
1506  // Isolates: note we add an additional parameter here (isolate pointer).
1507  const int kRegExpExecuteArguments = 9;
1508  const int kParameterRegisters = 8;
1509  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1510
1511  // Stack pointer now points to cell where return address is to be written.
1512  // Arguments are before that on the stack or in registers, meaning we
1513  // treat the return address as argument 5. Thus every argument after that
1514  // needs to be shifted back by 1. Since DirectCEntryStub will handle
1515  // allocating space for the c argument slots, we don't need to calculate
1516  // that into the argument positions on the stack. This is how the stack will
1517  // look (sp meaning the value of sp at this moment):
1518  // Abi n64:
1519  //   [sp + 1] - Argument 9
1520  //   [sp + 0] - saved ra
1521  // Abi O32:
1522  //   [sp + 5] - Argument 9
1523  //   [sp + 4] - Argument 8
1524  //   [sp + 3] - Argument 7
1525  //   [sp + 2] - Argument 6
1526  //   [sp + 1] - Argument 5
1527  //   [sp + 0] - saved ra
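  // Under the n64 ABI the first eight arguments are passed in a0..a7, so the
  // code below places them as follows (only argument 9 goes on the stack):
  //   arg 1 -> a0 (subject string)      arg 2 -> a1 (previous index)
  //   arg 3 -> a2 (start of input data) arg 4 -> a3 (end of input data)
  //   arg 5 -> a4 (offsets vector)      arg 6 -> a5 (capture register count)
  //   arg 7 -> a6 (backtrack stack end) arg 8 -> a7 (direct call flag)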
1528
1529  // Argument 9: Pass current isolate address.
1530  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
1531  __ sd(a0, MemOperand(sp, 1 * kPointerSize));
1532
1533  // Argument 8: Indicate that this is a direct call from JavaScript.
1534  __ li(a7, Operand(1));
1535
1536  // Argument 7: Start (high end) of backtracking stack memory area.
1537  __ li(a0, Operand(address_of_regexp_stack_memory_address));
1538  __ ld(a0, MemOperand(a0, 0));
1539  __ li(a2, Operand(address_of_regexp_stack_memory_size));
1540  __ ld(a2, MemOperand(a2, 0));
1541  __ daddu(a6, a0, a2);
1542
1543  // Argument 6: Set the number of capture registers to zero to force global
1544  // regexps to behave as non-global. This does not affect non-global regexps.
1545  __ mov(a5, zero_reg);
1546
1547  // Argument 5: static offsets vector buffer.
1548  __ li(
1549      a4,
1550      Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
1551
1552  // For arguments 4 and 3 get string length, calculate start of string data
1553  // and calculate the shift of the index (0 for one_byte and 1 for two byte).
1554  __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1555  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
1556  // Load the length from the original subject string from the previous stack
1557  // frame. Therefore we have to use fp, which points exactly to two pointer
1558  // sizes below the previous sp. (Because creating a new stack frame pushes
1559  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
1560  __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1561  // If slice offset is not 0, load the length from the original sliced string.
1562  // Argument 4, a3: End of string data
1563  // Argument 3, a2: Start of string data
1564  // Prepare start and end index of the input.
1565  __ dsllv(t1, t0, a3);
1566  __ daddu(t0, t2, t1);
1567  __ dsllv(t1, a1, a3);
1568  __ daddu(a2, t0, t1);
1569
1570  __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
1571
1572  __ SmiUntag(t2);
1573  __ dsllv(t1, t2, a3);
1574  __ daddu(a3, t0, t1);
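  // In effect (char_size_log2 is 0 for one-byte, 1 for two-byte strings):
  //   data_start = subject + SeqString::kHeaderSize - kHeapObjectTag
  //                + (slice_offset << char_size_log2)
  //   a2 (arg 3) = data_start + (previous_index << char_size_log2)
  //   a3 (arg 4) = data_start + (original_subject_length << char_size_log2)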
1575  // Argument 2 (a1): Previous index.
1576  // Already there
1577
1578  // Argument 1 (a0): Subject string.
1579  __ mov(a0, subject);
1580
1581  // Locate the code entry and call it.
1582  __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
1583  DirectCEntryStub stub(isolate());
1584  stub.GenerateCall(masm, t9);
1585
1586  __ LeaveExitFrame(false, no_reg, true);
1587
1588  // v0: result
1589  // subject: subject string (callee saved)
1590  // regexp_data: RegExp data (callee saved)
1591  // last_match_info_elements: Last match info elements (callee saved)
1592  // Check the result.
1593  Label success;
1594  __ Branch(&success, eq, v0, Operand(1));
1595  // We expect exactly one result since we force the called regexp to behave
1596  // as non-global.
1597  Label failure;
1598  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
1599  // If not an exception, it can only be retry. Handle that in the runtime system.
1600  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1601  // Result must now be exception. If there is no pending exception already, a
1602  // stack overflow (on the backtrack stack) was detected in RegExp code, but
1603  // the exception has not been created yet. Handle that in the runtime system.
1604  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1605  __ li(a1, Operand(isolate()->factory()->the_hole_value()));
1606  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1607                                      isolate())));
1608  __ ld(v0, MemOperand(a2, 0));
1609  __ Branch(&runtime, eq, v0, Operand(a1));
1610
1611  // For exception, throw the exception again.
1612  __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1613
1614  __ bind(&failure);
1615  // For failure, return null.
1616  __ li(v0, Operand(isolate()->factory()->null_value()));
1617  __ DropAndRet(4);
1618
1619  // Process the result from the native regexp code.
1620  __ bind(&success);
1621
1622  __ lw(a1, UntagSmiFieldMemOperand(
1623      regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1624  // Calculate number of capture registers (number_of_captures + 1) * 2.
1625  __ Daddu(a1, a1, Operand(1));
1626  __ dsll(a1, a1, 1);  // Multiply by 2.
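  // Example: a pattern with two capture groups needs (2 + 1) * 2 = 6 capture
  // registers: one start/end offset pair for the full match plus one pair per
  // group.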
1627
1628  __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
1629  __ JumpIfSmi(a0, &runtime);
1630  __ GetObjectType(a0, a2, a2);
1631  __ Branch(&runtime, ne, a2, Operand(JS_OBJECT_TYPE));
1632  // Check that the object has fast elements.
1633  __ ld(last_match_info_elements,
1634        FieldMemOperand(a0, JSArray::kElementsOffset));
1635  __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1636  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
1637  __ Branch(&runtime, ne, a0, Operand(at));
1638  // Check that the last match info has space for the capture registers and the
1639  // additional information.
1640  __ ld(a0,
1641        FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1642  __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
1643
1644  __ SmiUntag(at, a0);
1645  __ Branch(&runtime, gt, a2, Operand(at));
1646
1647  // a1: number of capture registers
1648  // subject: subject string
1649  // Store the capture count.
1650  __ SmiTag(a2, a1);  // To smi.
1651  __ sd(a2, FieldMemOperand(last_match_info_elements,
1652                             RegExpImpl::kLastCaptureCountOffset));
1653  // Store last subject and last input.
1654  __ sd(subject,
1655         FieldMemOperand(last_match_info_elements,
1656                         RegExpImpl::kLastSubjectOffset));
1657  __ mov(a2, subject);
1658  __ RecordWriteField(last_match_info_elements,
1659                      RegExpImpl::kLastSubjectOffset,
1660                      subject,
1661                      a7,
1662                      kRAHasNotBeenSaved,
1663                      kDontSaveFPRegs);
1664  __ mov(subject, a2);
1665  __ sd(subject,
1666         FieldMemOperand(last_match_info_elements,
1667                         RegExpImpl::kLastInputOffset));
1668  __ RecordWriteField(last_match_info_elements,
1669                      RegExpImpl::kLastInputOffset,
1670                      subject,
1671                      a7,
1672                      kRAHasNotBeenSaved,
1673                      kDontSaveFPRegs);
1674
1675  // Get the static offsets vector filled by the native regexp code.
1676  ExternalReference address_of_static_offsets_vector =
1677      ExternalReference::address_of_static_offsets_vector(isolate());
1678  __ li(a2, Operand(address_of_static_offsets_vector));
1679
1680  // a1: number of capture registers
1681  // a2: offsets vector
1682  Label next_capture, done;
1683  // Capture register counter starts from number of capture registers and
1684  // counts down until it goes below zero.
1685  __ Daddu(a0,
1686           last_match_info_elements,
1687           Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
1688  __ bind(&next_capture);
1689  __ Dsubu(a1, a1, Operand(1));
1690  __ Branch(&done, lt, a1, Operand(zero_reg));
1691  // Read the value from the static offsets vector buffer.
1692  __ lw(a3, MemOperand(a2, 0));
1693  __ daddiu(a2, a2, kIntSize);
1694  // Store the smi value in the last match info.
1695  __ SmiTag(a3);
1696  __ sd(a3, MemOperand(a0, 0));
1697  __ Branch(&next_capture, USE_DELAY_SLOT);
1698  __ daddiu(a0, a0, kPointerSize);  // In branch delay slot.
1699
1700  __ bind(&done);
1701
1702  // Return last match info.
1703  __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
1704  __ DropAndRet(4);
1705
1706  // Do the runtime call to execute the regexp.
1707  __ bind(&runtime);
1708  __ TailCallRuntime(Runtime::kRegExpExec);
1709
1710  // Deferred code for string handling.
1711  // (5) Long external string?  If not, go to (7).
1712  __ bind(&not_seq_nor_cons);
1713  // Go to (7).
1714  __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
1715
1716  // (6) External string.  Make it, offset-wise, look like a sequential string.
1717  __ bind(&external_string);
1718  __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
1719  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
1720  if (FLAG_debug_code) {
1721    // Assert that we do not have a cons or slice (indirect strings) here.
1722    // Sequential strings have already been ruled out.
1723    __ And(at, a0, Operand(kIsIndirectStringMask));
1724    __ Assert(eq,
1725              kExternalStringExpectedButNotFound,
1726              at,
1727              Operand(zero_reg));
1728  }
1729  __ ld(subject,
1730        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1731  // Move the pointer so that offset-wise, it looks like a sequential string.
1732  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1733  __ Dsubu(subject,
1734           subject,
1735           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1736  __ jmp(&seq_string);  // Go to (4).
1737
1738  // (7) Short external string or not a string?  If yes, bail out to runtime.
1739  __ bind(&not_long_external);
1740  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1741  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
1742  __ Branch(&runtime, ne, at, Operand(zero_reg));
1743
1744  // (8) Sliced string.  Replace subject with parent.  Go to (4).
1745  // Load offset into t0 and replace subject string with parent.
1746  __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1747  __ SmiUntag(t0);
1748  __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1749  __ jmp(&check_underlying);  // Go to (1).
1750#endif  // V8_INTERPRETED_REGEXP
1751}
1752
1753
1754static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1755  // a0 : number of arguments to the construct function
1756  // a2 : feedback vector
1757  // a3 : slot in feedback vector (Smi)
1758  // a1 : the function to call
1759  FrameScope scope(masm, StackFrame::INTERNAL);
1760  const RegList kSavedRegs = 1 << 4 |  // a0
1761                             1 << 5 |  // a1
1762                             1 << 6 |  // a2
1763                             1 << 7 |  // a3
1764                             1 << cp.code();
1765
1766  // Number-of-arguments register must be smi-tagged to call out.
1767  __ SmiTag(a0);
1768  __ MultiPush(kSavedRegs);
1769
1770  __ CallStub(stub);
1771
1772  __ MultiPop(kSavedRegs);
1773  __ SmiUntag(a0);
1774}
1775
1776
1777static void GenerateRecordCallTarget(MacroAssembler* masm) {
1778  // Cache the called function in a feedback vector slot.  Cache states
1779  // are uninitialized, monomorphic (indicated by a JSFunction), and
1780  // megamorphic.
1781  // a0 : number of arguments to the construct function
1782  // a1 : the function to call
1783  // a2 : feedback vector
1784  // a3 : slot in feedback vector (Smi)
1785  Label initialize, done, miss, megamorphic, not_array_function;
1786  Label done_initialize_count, done_increment_count;
1787
1788  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
1789            masm->isolate()->heap()->megamorphic_symbol());
1790  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
1791            masm->isolate()->heap()->uninitialized_symbol());
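  // The slot essentially implements this state machine:
  //   uninitialized_symbol --> WeakCell(function)   (monomorphic call)
  //   uninitialized_symbol --> AllocationSite       (monomorphic Array() call)
  //   monomorphic miss     --> megamorphic_symbol
  // A cleared WeakCell lets the slot become monomorphic again.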
1792
1793  // Load the cache state into a5.
1794  __ dsrl(a5, a3, 32 - kPointerSizeLog2);
1795  __ Daddu(a5, a2, Operand(a5));
1796  __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
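  // a3 holds the slot as a Smi; on MIPS64 the payload sits in the upper 32
  // bits, so the dsrl above by (32 - kPointerSizeLog2) turns it directly into
  // slot_index * kPointerSize, i.e. the slot's byte offset within the vector.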
1797
1798  // A monomorphic cache hit or an already megamorphic state: invoke the
1799  // function without changing the state.
1800  // We don't know if a5 is a WeakCell or a Symbol, but it's harmless to read at
1801  // this position in a symbol (see static asserts in type-feedback-vector.h).
1802  Label check_allocation_site;
1803  Register feedback_map = a6;
1804  Register weak_value = t0;
1805  __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
1806  __ Branch(&done_increment_count, eq, a1, Operand(weak_value));
1807  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1808  __ Branch(&done, eq, a5, Operand(at));
1809  __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
1810  __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
1811  __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
1812
1813  // If the weak cell is cleared, we have a new chance to become monomorphic.
1814  __ JumpIfSmi(weak_value, &initialize);
1815  __ jmp(&megamorphic);
1816
1817  __ bind(&check_allocation_site);
1818  // If we came here, we need to see if we are the array function.
1819  // If we didn't have a matching function, and we didn't find the megamorph
1820  // sentinel, then we have in the slot either some other function or an
1821  // AllocationSite.
1822  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1823  __ Branch(&miss, ne, feedback_map, Operand(at));
1824
1825  // Make sure the function is the Array() function
1826  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
1827  __ Branch(&megamorphic, ne, a1, Operand(a5));
1828  __ jmp(&done_increment_count);
1829
1830  __ bind(&miss);
1831
1832  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
1833  // megamorphic.
1834  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
1835  __ Branch(&initialize, eq, a5, Operand(at));
1836  // MegamorphicSentinel is an immortal immovable object (undefined) so no
1837  // write-barrier is needed.
1838  __ bind(&megamorphic);
1839  __ dsrl(a5, a3, 32 - kPointerSizeLog2);
1840  __ Daddu(a5, a2, Operand(a5));
1841  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
1842  __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
1843  __ jmp(&done);
1844
1845  // An uninitialized cache is patched with the function.
1846  __ bind(&initialize);
1847  // Make sure the function is the Array() function.
1848  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a5);
1849  __ Branch(&not_array_function, ne, a1, Operand(a5));
1850
1851  // The target function is the Array constructor.
1852  // Create an AllocationSite if we don't already have one and store it in
1853  // the slot.
1854  CreateAllocationSiteStub create_stub(masm->isolate());
1855  CallStubInRecordCallTarget(masm, &create_stub);
1856  __ Branch(&done_initialize_count);
1857
1858  __ bind(&not_array_function);
1859
1860  CreateWeakCellStub weak_cell_stub(masm->isolate());
1861  CallStubInRecordCallTarget(masm, &weak_cell_stub);
1862
1863  __ bind(&done_initialize_count);
1864  // Initialize the call counter.
1865
1866  __ SmiScale(a4, a3, kPointerSizeLog2);
1867  __ Daddu(a4, a2, Operand(a4));
1868  __ li(a5, Operand(Smi::FromInt(1)));
1869  __ Branch(USE_DELAY_SLOT, &done);
1870  __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + kPointerSize));
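  // The call count lives in the vector entry right after the feedback slot
  // (hence the extra kPointerSize); it starts at Smi(1) here and is bumped by
  // one in the increment path below.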
1871
1872  __ bind(&done_increment_count);
1873
1874  // Increment the call count for monomorphic function calls.
1875  __ SmiScale(a4, a3, kPointerSizeLog2);
1876  __ Daddu(a5, a2, Operand(a4));
1877  __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
1878  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
1879  __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
1880
1881  __ bind(&done);
1882}
1883
1884
1885void CallConstructStub::Generate(MacroAssembler* masm) {
1886  // a0 : number of arguments
1887  // a1 : the function to call
1888  // a2 : feedback vector
1889  // a3 : slot in feedback vector (Smi, for RecordCallTarget)
1890
1891  Label non_function;
1892  // Check that the function is not a smi.
1893  __ JumpIfSmi(a1, &non_function);
1894  // Check that the function is a JSFunction.
1895  __ GetObjectType(a1, a5, a5);
1896  __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
1897
1898  GenerateRecordCallTarget(masm);
1899
1900  __ dsrl(at, a3, 32 - kPointerSizeLog2);
1901  __ Daddu(a5, a2, at);
1902  Label feedback_register_initialized;
1903  // Put the AllocationSite from the feedback vector into a2, or undefined.
1904  __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
1905  __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
1906  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
1907  __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
1908  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
1909  __ bind(&feedback_register_initialized);
1910
1911  __ AssertUndefinedOrAllocationSite(a2, a5);
1912
1913  // Pass function as new target.
1914  __ mov(a3, a1);
1915
1916  // Tail call to the function-specific construct stub (still in the caller
1917  // context at this point).
1918  __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
1919  __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
1920  __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
1921  __ Jump(at);
1922
1923  __ bind(&non_function);
1924  __ mov(a3, a1);
1925  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1926}
1927
1928
1929// StringCharCodeAtGenerator.
1930void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
1931  DCHECK(!a4.is(index_));
1932  DCHECK(!a4.is(result_));
1933  DCHECK(!a4.is(object_));
1934
1935  // If the receiver is a smi, trigger the non-string case.
1936  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
1937    __ JumpIfSmi(object_, receiver_not_string_);
1938
1939    // Fetch the instance type of the receiver into result register.
1940    __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1941    __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1942  // If the receiver is not a string, trigger the non-string case.
1943    __ And(a4, result_, Operand(kIsNotStringMask));
1944    __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
1945  }
1946
1947  // If the index is non-smi, trigger the non-smi case.
1948  __ JumpIfNotSmi(index_, &index_not_smi_);
1949
1950  __ bind(&got_smi_index_);
1951
1952  // Check for index out of range.
1953  __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
1954  __ Branch(index_out_of_range_, ls, a4, Operand(index_));
1955
1956  __ SmiUntag(index_);
1957
1958  StringCharLoadGenerator::Generate(masm,
1959                                    object_,
1960                                    index_,
1961                                    result_,
1962                                    &call_runtime_);
1963
1964  __ SmiTag(result_);
1965  __ bind(&exit_);
1966}
1967
1968
1969void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1970  // a1 - function
1971  // a3 - slot id
1972  // a2 - vector
1973  // a4 - allocation site (loaded from vector[slot])
1974  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, at);
1975  __ Branch(miss, ne, a1, Operand(at));
1976
1977  __ li(a0, Operand(arg_count()));
1978
1979  // Increment the call count for monomorphic function calls.
1980  __ dsrl(t0, a3, 32 - kPointerSizeLog2);
1981  __ Daddu(a3, a2, Operand(t0));
1982  __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
1983  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
1984  __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
1985
1986  __ mov(a2, a4);
1987  __ mov(a3, a1);
1988  ArrayConstructorStub stub(masm->isolate(), arg_count());
1989  __ TailCallStub(&stub);
1990}
1991
1992
1993void CallICStub::Generate(MacroAssembler* masm) {
1994  // a1 - function
1995  // a3 - slot id (Smi)
1996  // a2 - vector
1997  Label extra_checks_or_miss, call, call_function;
1998  int argc = arg_count();
1999  ParameterCount actual(argc);
2000
2001  // The checks. First, does a1 match the recorded monomorphic target?
2002  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2003  __ Daddu(a4, a2, Operand(a4));
2004  __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
2005
2006  // We don't know that we have a weak cell. We might have a private symbol
2007  // or an AllocationSite, but the memory is safe to examine.
2008  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2009  // FixedArray.
2010  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2011  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2012  // computed, meaning that it can't appear to be a pointer. If the low bit is
2013  // 0, then hash is computed, but the 0 bit prevents the field from appearing
2014  // to be a pointer.
2015  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2016  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2017                    WeakCell::kValueOffset &&
2018                WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2019
2020  __ ld(a5, FieldMemOperand(a4, WeakCell::kValueOffset));
2021  __ Branch(&extra_checks_or_miss, ne, a1, Operand(a5));
2022
2023  // The compare above could have been a SMI/SMI comparison. Guard against this
2024  // convincing us that we have a monomorphic JSFunction.
2025  __ JumpIfSmi(a1, &extra_checks_or_miss);
2026
2027  // Increment the call count for monomorphic function calls.
2028  __ dsrl(t0, a3, 32 - kPointerSizeLog2);
2029  __ Daddu(a3, a2, Operand(t0));
2030  __ ld(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
2031  __ Daddu(t0, t0, Operand(Smi::FromInt(1)));
2032  __ sd(t0, FieldMemOperand(a3, FixedArray::kHeaderSize + kPointerSize));
2033
2034  __ bind(&call_function);
2035  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
2036                                                    tail_call_mode()),
2037          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
2038          USE_DELAY_SLOT);
2039  __ li(a0, Operand(argc));  // In delay slot.
2040
2041  __ bind(&extra_checks_or_miss);
2042  Label uninitialized, miss, not_allocation_site;
2043
2044  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2045  __ Branch(&call, eq, a4, Operand(at));
2046
2047  // Verify that a4 contains an AllocationSite
2048  __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
2049  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2050  __ Branch(&not_allocation_site, ne, a5, Operand(at));
2051
2052  HandleArrayCase(masm, &miss);
2053
2054  __ bind(&not_allocation_site);
2055
2056  // The following cases attempt to handle MISS cases without going to the
2057  // runtime.
2058  if (FLAG_trace_ic) {
2059    __ Branch(&miss);
2060  }
2061
2062  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
2063  __ Branch(&uninitialized, eq, a4, Operand(at));
2064
2065  // We are going megamorphic. If the feedback is a JSFunction, it is fine
2066  // to handle it here. More complex cases are dealt with in the runtime.
2067  __ AssertNotSmi(a4);
2068  __ GetObjectType(a4, a5, a5);
2069  __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
2070  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
2071  __ Daddu(a4, a2, Operand(a4));
2072  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
2073  __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
2074
2075  __ bind(&call);
2076  __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
2077          RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg),
2078          USE_DELAY_SLOT);
2079  __ li(a0, Operand(argc));  // In delay slot.
2080
2081  __ bind(&uninitialized);
2082
2083  // We are going monomorphic, provided we actually have a JSFunction.
2084  __ JumpIfSmi(a1, &miss);
2085
2086  // Goto miss case if we do not have a function.
2087  __ GetObjectType(a1, a4, a4);
2088  __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
2089
2090  // Make sure the function is not the Array() function, which requires special
2091  // behavior on MISS.
2092  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, a4);
2093  __ Branch(&miss, eq, a1, Operand(a4));
2094
2095  // Make sure the function belongs to the same native context.
2096  __ ld(t0, FieldMemOperand(a1, JSFunction::kContextOffset));
2097  __ ld(t0, ContextMemOperand(t0, Context::NATIVE_CONTEXT_INDEX));
2098  __ ld(t1, NativeContextMemOperand());
2099  __ Branch(&miss, ne, t0, Operand(t1));
2100
2101  // Initialize the call counter.
2102  __ dsrl(at, a3, 32 - kPointerSizeLog2);
2103  __ Daddu(at, a2, Operand(at));
2104  __ li(t0, Operand(Smi::FromInt(1)));
2105  __ sd(t0, FieldMemOperand(at, FixedArray::kHeaderSize + kPointerSize));
2106
2107  // Store the function. Use a stub since we need a frame for allocation.
2108  // a2 - vector
2109  // a3 - slot
2110  // a1 - function
2111  {
2112    FrameScope scope(masm, StackFrame::INTERNAL);
2113    CreateWeakCellStub create_stub(masm->isolate());
2114    __ Push(cp, a1);
2115    __ CallStub(&create_stub);
2116    __ Pop(cp, a1);
2117  }
2118
2119  __ Branch(&call_function);
2120
2121  // We are here because tracing is on or we encountered a MISS case we can't
2122  // handle here.
2123  __ bind(&miss);
2124  GenerateMiss(masm);
2125
2126  __ Branch(&call);
2127}
2128
2129
2130void CallICStub::GenerateMiss(MacroAssembler* masm) {
2131  FrameScope scope(masm, StackFrame::INTERNAL);
2132
2133  // Push the receiver and the function and feedback info.
2134  __ Push(a1, a2, a3);
2135
2136  // Call the entry.
2137  __ CallRuntime(Runtime::kCallIC_Miss);
2138
2139  // Move result to a1 and exit the internal frame.
2140  __ mov(a1, v0);
2141}
2142
2143
2144void StringCharCodeAtGenerator::GenerateSlow(
2145    MacroAssembler* masm, EmbedMode embed_mode,
2146    const RuntimeCallHelper& call_helper) {
2147  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2148
2149  // Index is not a smi.
2150  __ bind(&index_not_smi_);
2151  // If index is a heap number, try converting it to an integer.
2152  __ CheckMap(index_,
2153              result_,
2154              Heap::kHeapNumberMapRootIndex,
2155              index_not_number_,
2156              DONT_DO_SMI_CHECK);
2157  call_helper.BeforeCall(masm);
2158  // Consumed by runtime conversion function:
2159  if (embed_mode == PART_OF_IC_HANDLER) {
2160    __ Push(LoadWithVectorDescriptor::VectorRegister(),
2161            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
2162  } else {
2163    __ Push(object_, index_);
2164  }
2165  __ CallRuntime(Runtime::kNumberToSmi);
2166
2167  // Save the conversion result before the pop instructions below
2168  // have a chance to overwrite it.
2169
2170  __ Move(index_, v0);
2171  if (embed_mode == PART_OF_IC_HANDLER) {
2172    __ Pop(LoadWithVectorDescriptor::VectorRegister(),
2173           LoadWithVectorDescriptor::SlotRegister(), object_);
2174  } else {
2175    __ pop(object_);
2176  }
2177  // Reload the instance type.
2178  __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
2179  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
2180  call_helper.AfterCall(masm);
2181  // If index is still not a smi, it must be out of range.
2182  __ JumpIfNotSmi(index_, index_out_of_range_);
2183  // Otherwise, return to the fast path.
2184  __ Branch(&got_smi_index_);
2185
2186  // Call runtime. We get here when the receiver is a string and the
2187  // index is a number, but the code for getting the actual character
2188  // is too complex (e.g., when the string needs to be flattened).
2189  __ bind(&call_runtime_);
2190  call_helper.BeforeCall(masm);
2191  __ SmiTag(index_);
2192  __ Push(object_, index_);
2193  __ CallRuntime(Runtime::kStringCharCodeAtRT);
2194
2195  __ Move(result_, v0);
2196
2197  call_helper.AfterCall(masm);
2198  __ jmp(&exit_);
2199
2200  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2201}
2202
2203
2204// -------------------------------------------------------------------------
2205// StringCharFromCodeGenerator
2206
2207void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2208  // Fast case of Heap::LookupSingleCharacterStringFromCode.
2209  __ JumpIfNotSmi(code_, &slow_case_);
2210  __ Branch(&slow_case_, hi, code_,
2211            Operand(Smi::FromInt(String::kMaxOneByteCharCode)));
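  // Char codes above String::kMaxOneByteCharCode (the Latin-1 range) take the
  // slow path; the single character string cache below only covers one-byte
  // codes.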
2212
2213  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2214  // At this point the code register contains a smi-tagged one_byte char code.
2215  __ SmiScale(at, code_, kPointerSizeLog2);
2216  __ Daddu(result_, result_, at);
2217  __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
2218  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2219  __ Branch(&slow_case_, eq, result_, Operand(at));
2220  __ bind(&exit_);
2221}
2222
2223
2224void StringCharFromCodeGenerator::GenerateSlow(
2225    MacroAssembler* masm,
2226    const RuntimeCallHelper& call_helper) {
2227  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2228
2229  __ bind(&slow_case_);
2230  call_helper.BeforeCall(masm);
2231  __ push(code_);
2232  __ CallRuntime(Runtime::kStringCharFromCode);
2233  __ Move(result_, v0);
2234
2235  call_helper.AfterCall(masm);
2236  __ Branch(&exit_);
2237
2238  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2239}
2240
2241
2242enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
2243
2244
2245void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2246                                          Register dest,
2247                                          Register src,
2248                                          Register count,
2249                                          Register scratch,
2250                                          String::Encoding encoding) {
2251  if (FLAG_debug_code) {
2252    // Check that destination is word aligned.
2253    __ And(scratch, dest, Operand(kPointerAlignmentMask));
2254    __ Check(eq,
2255             kDestinationOfCopyNotAligned,
2256             scratch,
2257             Operand(zero_reg));
2258  }
2259
2260  // Assumes word reads and writes are little endian.
2261  // Nothing to do for zero characters.
2262  Label done;
2263
2264  if (encoding == String::TWO_BYTE_ENCODING) {
2265    __ Daddu(count, count, count);
2266  }
2267
2268  Register limit = count;  // Read until dest equals this.
2269  __ Daddu(limit, dest, Operand(count));
2270
2271  Label loop_entry, loop;
2272  // Copy bytes from src to dest until dest hits limit.
2273  __ Branch(&loop_entry);
2274  __ bind(&loop);
2275  __ lbu(scratch, MemOperand(src));
2276  __ daddiu(src, src, 1);
2277  __ sb(scratch, MemOperand(dest));
2278  __ daddiu(dest, dest, 1);
2279  __ bind(&loop_entry);
2280  __ Branch(&loop, lt, dest, Operand(limit));
2281
2282  __ bind(&done);
2283}
2284
2285
2286void SubStringStub::Generate(MacroAssembler* masm) {
2287  Label runtime;
2288  // Stack frame on entry.
2289  //  ra: return address
2290  //  sp[0]: to
2291  //  sp[4]: from
2292  //  sp[8]: string
2293
2294  // This stub is called from the native-call %_SubString(...), so
2295  // nothing can be assumed about the arguments. It is tested that:
2296  //  "string" is a sequential string,
2297  //  both "from" and "to" are smis, and
2298  //  0 <= from <= to <= string.length.
2299  // If any of these assumptions fail, we call the runtime system.
2300
2301  const int kToOffset = 0 * kPointerSize;
2302  const int kFromOffset = 1 * kPointerSize;
2303  const int kStringOffset = 2 * kPointerSize;
2304
2305  __ ld(a2, MemOperand(sp, kToOffset));
2306  __ ld(a3, MemOperand(sp, kFromOffset));
2307
2308  STATIC_ASSERT(kSmiTag == 0);
2309
2310  // Utilize delay slots. SmiUntag doesn't emit a jump; everything else is
2311  // safe in this case.
2312  __ JumpIfNotSmi(a2, &runtime);
2313  __ JumpIfNotSmi(a3, &runtime);
2314  // Untag a2 and a3; from here on both hold untagged integers.
2315
2316  __ SmiUntag(a2, a2);
2317  __ SmiUntag(a3, a3);
2318  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
2319
2320  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
2321  __ Dsubu(a2, a2, a3);
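  // a2 now holds the substring length (to - from).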
2322
2323  // Make sure first argument is a string.
2324  __ ld(v0, MemOperand(sp, kStringOffset));
2325  __ JumpIfSmi(v0, &runtime);
2326  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
2327  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
2328  __ And(a4, a1, Operand(kIsNotStringMask));
2329
2330  __ Branch(&runtime, ne, a4, Operand(zero_reg));
2331
2332  Label single_char;
2333  __ Branch(&single_char, eq, a2, Operand(1));
2334
2335  // Short-cut for the case of trivial substring.
2336  Label return_v0;
2337  // v0: original string
2338  // a2: result string length
2339  __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
2340  __ SmiUntag(a4);
2341  // Return original string.
2342  __ Branch(&return_v0, eq, a2, Operand(a4));
2343  // Longer than original string's length or negative: unsafe arguments.
2344  __ Branch(&runtime, hi, a2, Operand(a4));
2345  // Shorter than original string's length: an actual substring.
2346
2347  // Deal with different string types: update the index if necessary
2348  // and put the underlying string into a5.
2349  // v0: original string
2350  // a1: instance type
2351  // a2: length
2352  // a3: from index (untagged)
2353  Label underlying_unpacked, sliced_string, seq_or_external_string;
2354  // If the string is not indirect, it can only be sequential or external.
2355  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
2356  STATIC_ASSERT(kIsIndirectStringMask != 0);
2357  __ And(a4, a1, Operand(kIsIndirectStringMask));
2358  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
2359  // a4 is used as a scratch register and can be overwritten in either case.
2360  __ And(a4, a1, Operand(kSlicedNotConsMask));
2361  __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
2362  // Cons string.  Check whether it is flat, then fetch first part.
2363  __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
2364  __ LoadRoot(a4, Heap::kempty_stringRootIndex);
2365  __ Branch(&runtime, ne, a5, Operand(a4));
2366  __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
2367  // Update instance type.
2368  __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
2369  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
2370  __ jmp(&underlying_unpacked);
2371
2372  __ bind(&sliced_string);
2373  // Sliced string.  Fetch parent and correct start index by offset.
2374  __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
2375  __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
2376  __ SmiUntag(a4);  // Add offset to index.
2377  __ Daddu(a3, a3, a4);
2378  // Update instance type.
2379  __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
2380  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
2381  __ jmp(&underlying_unpacked);
2382
2383  __ bind(&seq_or_external_string);
2384  // Sequential or external string.  Just move string to the expected register.
2385  __ mov(a5, v0);
2386
2387  __ bind(&underlying_unpacked);
2388
2389  if (FLAG_string_slices) {
2390    Label copy_routine;
2391    // a5: underlying subject string
2392    // a1: instance type of underlying subject string
2393    // a2: length
2394    // a3: adjusted start index (untagged)
2395    // Short slice.  Copy instead of slicing.
2396    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
2397    // Allocate new sliced string.  At this point we do not reload the instance
2398    // type including the string encoding because we simply rely on the info
2399    // provided by the original string.  It does not matter if the original
2400    // string's encoding is wrong because we always have to recheck encoding of
2401  // the newly created string's parent anyway due to externalized strings.
2402    Label two_byte_slice, set_slice_header;
2403    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
2404    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
2405    __ And(a4, a1, Operand(kStringEncodingMask));
2406    __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
2407    __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
2408    __ jmp(&set_slice_header);
2409    __ bind(&two_byte_slice);
2410    __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
2411    __ bind(&set_slice_header);
2412    __ SmiTag(a3);
2413    __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
2414    __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
2415    __ jmp(&return_v0);
2416
2417    __ bind(&copy_routine);
2418  }
2419
2420  // a5: underlying subject string
2421  // a1: instance type of underlying subject string
2422  // a2: length
2423  // a3: adjusted start index (untagged)
2424  Label two_byte_sequential, sequential_string, allocate_result;
2425  STATIC_ASSERT(kExternalStringTag != 0);
2426  STATIC_ASSERT(kSeqStringTag == 0);
2427  __ And(a4, a1, Operand(kExternalStringTag));
2428  __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
2429
2430  // Handle external string.
2431  // Rule out short external strings.
2432  STATIC_ASSERT(kShortExternalStringTag != 0);
2433  __ And(a4, a1, Operand(kShortExternalStringTag));
2434  __ Branch(&runtime, ne, a4, Operand(zero_reg));
2435  __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
2436  // a5 already points to the first character of the underlying string.
2437  __ jmp(&allocate_result);
2438
2439  __ bind(&sequential_string);
2440  // Locate first character of underlying subject string.
2441  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2442  __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2443
2444  __ bind(&allocate_result);
2445  // Sequential one-byte string.  Allocate the result.
2446  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
2447  __ And(a4, a1, Operand(kStringEncodingMask));
2448  __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
2449
2450  // Allocate and copy the resulting one_byte string.
2451  __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
2452
2453  // Locate first character of substring to copy.
2454  __ Daddu(a5, a5, a3);
2455
2456  // Locate first character of result.
2457  __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2458
2459  // v0: result string
2460  // a1: first character of result string
2461  // a2: result string length
2462  // a5: first character of substring to copy
2463  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2464  StringHelper::GenerateCopyCharacters(
2465      masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
2466  __ jmp(&return_v0);
2467
2468  // Allocate and copy the resulting two-byte string.
2469  __ bind(&two_byte_sequential);
2470  __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
2471
2472  // Locate first character of substring to copy.
2473  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
2474  __ Dlsa(a5, a5, a3, 1);
2475  // Locate first character of result.
2476  __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2477
2478  // v0: result string.
2479  // a1: first character of result.
2480  // a2: result length.
2481  // a5: first character of substring to copy.
2482  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
2483  StringHelper::GenerateCopyCharacters(
2484      masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
2485
2486  __ bind(&return_v0);
2487  Counters* counters = isolate()->counters();
2488  __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
2489  __ DropAndRet(3);
2490
2491  // Just jump to runtime to create the sub string.
2492  __ bind(&runtime);
2493  __ TailCallRuntime(Runtime::kSubString);
2494
2495  __ bind(&single_char);
2496  // v0: original string
2497  // a1: instance type
2498  // a2: length
2499  // a3: from index (untagged)
2500  __ SmiTag(a3);
2501  StringCharAtGenerator generator(v0, a3, a2, v0, &runtime, &runtime, &runtime,
2502                                  RECEIVER_IS_STRING);
2503  generator.GenerateFast(masm);
2504  __ DropAndRet(3);
2505  generator.SkipSlow(masm, &runtime);
2506}
2507
2508void ToStringStub::Generate(MacroAssembler* masm) {
2509  // The ToString stub takes one argument in a0.
2510  Label is_number;
2511  __ JumpIfSmi(a0, &is_number);
2512
2513  Label not_string;
2514  __ GetObjectType(a0, a1, a1);
2515  // a0: receiver
2516  // a1: receiver instance type
2517  __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
2518  __ Ret(USE_DELAY_SLOT);
2519  __ mov(v0, a0);
2520  __ bind(&not_string);
2521
2522  Label not_heap_number;
2523  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
2524  __ bind(&is_number);
2525  NumberToStringStub stub(isolate());
2526  __ TailCallStub(&stub);
2527  __ bind(&not_heap_number);
2528
2529  Label not_oddball;
2530  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
2531  __ Ret(USE_DELAY_SLOT);
2532  __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
2533  __ bind(&not_oddball);
2534
2535  __ push(a0);  // Push argument.
2536  __ TailCallRuntime(Runtime::kToString);
2537}
2538
2539
2540void ToNameStub::Generate(MacroAssembler* masm) {
2541  // The ToName stub takes one argument in a0.
2542  Label is_number;
2543  __ JumpIfSmi(a0, &is_number);
2544
2545  Label not_name;
2546  STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
2547  __ GetObjectType(a0, a1, a1);
2548  // a0: receiver
2549  // a1: receiver instance type
2550  __ Branch(&not_name, gt, a1, Operand(LAST_NAME_TYPE));
2551  __ Ret(USE_DELAY_SLOT);
2552  __ mov(v0, a0);
2553  __ bind(&not_name);
2554
2555  Label not_heap_number;
2556  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
2557  __ bind(&is_number);
2558  NumberToStringStub stub(isolate());
2559  __ TailCallStub(&stub);
2560  __ bind(&not_heap_number);
2561
2562  Label not_oddball;
2563  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
2564  __ Ret(USE_DELAY_SLOT);
2565  __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
2566  __ bind(&not_oddball);
2567
2568  __ push(a0);  // Push argument.
2569  __ TailCallRuntime(Runtime::kToName);
2570}
2571
2572
2573void StringHelper::GenerateFlatOneByteStringEquals(
2574    MacroAssembler* masm, Register left, Register right, Register scratch1,
2575    Register scratch2, Register scratch3) {
2576  Register length = scratch1;
2577
2578  // Compare lengths.
2579  Label strings_not_equal, check_zero_length;
2580  __ ld(length, FieldMemOperand(left, String::kLengthOffset));
2581  __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
2582  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
2583  __ bind(&strings_not_equal);
2584  // Cannot put li in the delay slot; it expands to multiple instructions.
2585  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
2586  __ Ret();
2587
2588  // Check if the length is zero.
2589  Label compare_chars;
2590  __ bind(&check_zero_length);
2591  STATIC_ASSERT(kSmiTag == 0);
2592  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
2593  DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
2594  __ Ret(USE_DELAY_SLOT);
2595  __ li(v0, Operand(Smi::FromInt(EQUAL)));
2596
2597  // Compare characters.
2598  __ bind(&compare_chars);
2599
2600  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
2601                                  v0, &strings_not_equal);
2602
2603  // Characters are equal.
2604  __ Ret(USE_DELAY_SLOT);
2605  __ li(v0, Operand(Smi::FromInt(EQUAL)));
2606}
2607
2608
2609void StringHelper::GenerateCompareFlatOneByteStrings(
2610    MacroAssembler* masm, Register left, Register right, Register scratch1,
2611    Register scratch2, Register scratch3, Register scratch4) {
2612  Label result_not_equal, compare_lengths;
2613  // Find minimum length and length difference.
2614  __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
2615  __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
2616  __ Dsubu(scratch3, scratch1, Operand(scratch2));
2617  Register length_delta = scratch3;
2618  __ slt(scratch4, scratch2, scratch1);
2619  __ Movn(scratch1, scratch2, scratch4);
2620  Register min_length = scratch1;
2621  STATIC_ASSERT(kSmiTag == 0);
2622  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
2623
2624  // Compare loop.
2625  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2626                                  scratch4, v0, &result_not_equal);
2627
2628  // Compare lengths - strings up to min-length are equal.
2629  __ bind(&compare_lengths);
2630  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2631  // Use length_delta as result if it's zero.
2632  __ mov(scratch2, length_delta);
2633  __ mov(scratch4, zero_reg);
2634  __ mov(v0, zero_reg);
2635
2636  __ bind(&result_not_equal);
2637  // Conditionally update the result based on either length_delta or
2638  // the last comparison performed in the loop above.
2639  Label ret;
2640  __ Branch(&ret, eq, scratch2, Operand(scratch4));
2641  __ li(v0, Operand(Smi::FromInt(GREATER)));
2642  __ Branch(&ret, gt, scratch2, Operand(scratch4));
2643  __ li(v0, Operand(Smi::FromInt(LESS)));
2644  __ bind(&ret);
2645  __ Ret();
2646}
2647
2648
2649void StringHelper::GenerateOneByteCharsCompareLoop(
2650    MacroAssembler* masm, Register left, Register right, Register length,
2651    Register scratch1, Register scratch2, Register scratch3,
2652    Label* chars_not_equal) {
2653  // Change index to run from -length to -1 by adding length to string
2654  // start. This means that loop ends when index reaches zero, which
2655  // doesn't need an additional compare.
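  // Example: for a length of 3, left and right are advanced to one past their
  // last character and index runs -3, -2, -1; the loop exits once the
  // increment brings index to zero.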
2656  __ SmiUntag(length);
2657  __ Daddu(scratch1, length,
2658           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
2659  __ Daddu(left, left, Operand(scratch1));
2660  __ Daddu(right, right, Operand(scratch1));
2661  __ Dsubu(length, zero_reg, length);
2662  Register index = length;  // index = -length;
2663
2664
2665  // Compare loop.
2666  Label loop;
2667  __ bind(&loop);
2668  __ Daddu(scratch3, left, index);
2669  __ lbu(scratch1, MemOperand(scratch3));
2670  __ Daddu(scratch3, right, index);
2671  __ lbu(scratch2, MemOperand(scratch3));
2672  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
2673  __ Daddu(index, index, 1);
2674  __ Branch(&loop, ne, index, Operand(zero_reg));
2675}
2676
2677
2678void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2679  // ----------- S t a t e -------------
2680  //  -- a1    : left
2681  //  -- a0    : right
2682  //  -- ra    : return address
2683  // -----------------------------------
2684
2685  // Load a2 with the allocation site. We stick an undefined dummy value here
2686  // and replace it with the real allocation site later when we instantiate this
2687  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2688  __ li(a2, isolate()->factory()->undefined_value());
2689
2690  // Make sure that we actually patched the allocation site.
2691  if (FLAG_debug_code) {
2692    __ And(at, a2, Operand(kSmiTagMask));
2693    __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
2694    __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
2695    __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
2696    __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
2697  }
2698
2699  // Tail call into the stub that handles binary operations with allocation
2700  // sites.
2701  BinaryOpWithAllocationSiteStub stub(isolate(), state());
2702  __ TailCallStub(&stub);
2703}
2704
2705
2706void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2707  DCHECK_EQ(CompareICState::BOOLEAN, state());
2708  Label miss;
2709
2710  __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2711  __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2712  if (!Token::IsEqualityOp(op())) {
2713    __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
2714    __ AssertSmi(a1);
2715    __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
2716    __ AssertSmi(a0);
2717  }
2718  __ Ret(USE_DELAY_SLOT);
2719  __ Dsubu(v0, a1, a0);
2720
2721  __ bind(&miss);
2722  GenerateMiss(masm);
2723}
2724
2725
2726void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2727  DCHECK(state() == CompareICState::SMI);
2728  Label miss;
2729  __ Or(a2, a1, a0);
2730  __ JumpIfNotSmi(a2, &miss);
2731
2732  if (GetCondition() == eq) {
2733    // For equality we do not care about the sign of the result.
2734    __ Ret(USE_DELAY_SLOT);
2735    __ Dsubu(v0, a0, a1);
2736  } else {
2737    // Untag before subtracting to avoid handling overflow.
2738    __ SmiUntag(a1);
2739    __ SmiUntag(a0);
2740    __ Ret(USE_DELAY_SLOT);
2741    __ Dsubu(v0, a1, a0);
2742  }
2743
2744  __ bind(&miss);
2745  GenerateMiss(masm);
2746}
2747
2748
2749void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2750  DCHECK(state() == CompareICState::NUMBER);
2751
2752  Label generic_stub;
2753  Label unordered, maybe_undefined1, maybe_undefined2;
2754  Label miss;
2755
2756  if (left() == CompareICState::SMI) {
2757    __ JumpIfNotSmi(a1, &miss);
2758  }
2759  if (right() == CompareICState::SMI) {
2760    __ JumpIfNotSmi(a0, &miss);
2761  }
2762
2763  // Inlining the double comparison and falling back to the general compare
2764  // stub if NaN is involved.
2765  // Load left and right operand.
2766  Label done, left, left_smi, right_smi;
2767  __ JumpIfSmi(a0, &right_smi);
2768  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2769              DONT_DO_SMI_CHECK);
2770  __ Dsubu(a2, a0, Operand(kHeapObjectTag));
2771  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
2772  __ Branch(&left);
2773  __ bind(&right_smi);
2774  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
2775  FPURegister single_scratch = f6;
2776  __ mtc1(a2, single_scratch);
2777  __ cvt_d_w(f2, single_scratch);
2778
2779  __ bind(&left);
2780  __ JumpIfSmi(a1, &left_smi);
2781  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2782              DONT_DO_SMI_CHECK);
2783  __ Dsubu(a2, a1, Operand(kHeapObjectTag));
2784  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
2785  __ Branch(&done);
2786  __ bind(&left_smi);
2787  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
2788  single_scratch = f8;
2789  __ mtc1(a2, single_scratch);
2790  __ cvt_d_w(f0, single_scratch);
2791
2792  __ bind(&done);
2793
2794  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
2795  Label fpu_eq, fpu_lt;
2796  // Test if equal, and also handle the unordered/NaN case.
2797  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
2798
2799  // Test if less (unordered case is already handled).
2800  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
2801
2802  // Otherwise it's greater, so just fall through, and return.
2803  DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
2804  __ Ret(USE_DELAY_SLOT);
2805  __ li(v0, Operand(GREATER));
2806
2807  __ bind(&fpu_eq);
2808  __ Ret(USE_DELAY_SLOT);
2809  __ li(v0, Operand(EQUAL));
2810
2811  __ bind(&fpu_lt);
2812  __ Ret(USE_DELAY_SLOT);
2813  __ li(v0, Operand(LESS));
2814
2815  __ bind(&unordered);
2816  __ bind(&generic_stub);
2817  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2818                     CompareICState::GENERIC, CompareICState::GENERIC);
2819  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2820
2821  __ bind(&maybe_undefined1);
2822  if (Token::IsOrderedRelationalCompareOp(op())) {
2823    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2824    __ Branch(&miss, ne, a0, Operand(at));
2825    __ JumpIfSmi(a1, &unordered);
2826    __ GetObjectType(a1, a2, a2);
2827    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
2828    __ jmp(&unordered);
2829  }
2830
2831  __ bind(&maybe_undefined2);
2832  if (Token::IsOrderedRelationalCompareOp(op())) {
2833    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2834    __ Branch(&unordered, eq, a1, Operand(at));
2835  }
2836
2837  __ bind(&miss);
2838  GenerateMiss(masm);
2839}
2840
2841
2842void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2843  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2844  Label miss;
2845
2846  // Registers containing left and right operands respectively.
2847  Register left = a1;
2848  Register right = a0;
2849  Register tmp1 = a2;
2850  Register tmp2 = a3;
2851
2852  // Check that both operands are heap objects.
2853  __ JumpIfEitherSmi(left, right, &miss);
2854
2855  // Check that both operands are internalized strings.
2856  __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2857  __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2858  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2859  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2860  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2861  __ Or(tmp1, tmp1, Operand(tmp2));
2862  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2863  __ Branch(&miss, ne, at, Operand(zero_reg));
2864
2865  // Make sure a0 is non-zero. At this point input operands are
2866  // guaranteed to be non-zero.
2867  DCHECK(right.is(a0));
2868  STATIC_ASSERT(EQUAL == 0);
2869  STATIC_ASSERT(kSmiTag == 0);
2870  __ mov(v0, right);
2871  // Internalized strings are compared by identity.
2872  __ Ret(ne, left, Operand(right));
2873  DCHECK(is_int16(EQUAL));
2874  __ Ret(USE_DELAY_SLOT);
2875  __ li(v0, Operand(Smi::FromInt(EQUAL)));
2876
2877  __ bind(&miss);
2878  GenerateMiss(masm);
2879}
2880
2881
2882void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2883  DCHECK(state() == CompareICState::UNIQUE_NAME);
2884  DCHECK(GetCondition() == eq);
2885  Label miss;
2886
2887  // Registers containing left and right operands respectively.
2888  Register left = a1;
2889  Register right = a0;
2890  Register tmp1 = a2;
2891  Register tmp2 = a3;
2892
2893  // Check that both operands are heap objects.
2894  __ JumpIfEitherSmi(left, right, &miss);
2895
2896  // Check that both operands are unique names. This leaves the instance
2897  // types loaded in tmp1 and tmp2.
2898  __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2899  __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2900  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2901  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2902
2903  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2904  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2905
2906  // Use a0 as result
2907  __ mov(v0, a0);
2908
2909  // Unique names are compared by identity.
2910  Label done;
2911  __ Branch(&done, ne, left, Operand(right));
2912  // Make sure a0 is non-zero. At this point input operands are
2913  // guaranteed to be non-zero.
2914  DCHECK(right.is(a0));
2915  STATIC_ASSERT(EQUAL == 0);
2916  STATIC_ASSERT(kSmiTag == 0);
2917  __ li(v0, Operand(Smi::FromInt(EQUAL)));
2918  __ bind(&done);
2919  __ Ret();
2920
2921  __ bind(&miss);
2922  GenerateMiss(masm);
2923}
2924
2925
2926void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2927  DCHECK(state() == CompareICState::STRING);
2928  Label miss;
2929
2930  bool equality = Token::IsEqualityOp(op());
2931
2932  // Registers containing left and right operands respectively.
2933  Register left = a1;
2934  Register right = a0;
2935  Register tmp1 = a2;
2936  Register tmp2 = a3;
2937  Register tmp3 = a4;
2938  Register tmp4 = a5;
2939  Register tmp5 = a6;
2940
2941  // Check that both operands are heap objects.
2942  __ JumpIfEitherSmi(left, right, &miss);
2943
2944  // Check that both operands are strings. This leaves the instance
2945  // types loaded in tmp1 and tmp2.
2946  __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2947  __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2948  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2949  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2950  STATIC_ASSERT(kNotStringTag != 0);
2951  __ Or(tmp3, tmp1, tmp2);
2952  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
2953  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
2954
2955  // Fast check for identical strings.
2956  Label left_ne_right;
2957  STATIC_ASSERT(EQUAL == 0);
2958  STATIC_ASSERT(kSmiTag == 0);
2959  __ Branch(&left_ne_right, ne, left, Operand(right));
2960  __ Ret(USE_DELAY_SLOT);
2961  __ mov(v0, zero_reg);  // In the delay slot.
2962  __ bind(&left_ne_right);
2963
2964  // Handle not identical strings.
2965
2966  // Check that both strings are internalized strings. If they are, we're done
2967  // because we already know they are not identical. We know they are both
2968  // strings.
2969  if (equality) {
2970    DCHECK(GetCondition() == eq);
2971    STATIC_ASSERT(kInternalizedTag == 0);
2972    __ Or(tmp3, tmp1, Operand(tmp2));
2973    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
2974    Label is_symbol;
2975    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
2976    // Make sure a0 is non-zero. At this point input operands are
2977    // guaranteed to be non-zero.
2978    DCHECK(right.is(a0));
2979    __ Ret(USE_DELAY_SLOT);
2980    __ mov(v0, a0);  // In the delay slot.
2981    __ bind(&is_symbol);
2982  }
2983
2984  // Check that both strings are sequential one_byte.
2985  Label runtime;
2986  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2987                                                    &runtime);
2988
2989  // Compare flat one_byte strings. Returns when done.
2990  if (equality) {
2991    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
2992                                                  tmp3);
2993  } else {
2994    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2995                                                    tmp2, tmp3, tmp4);
2996  }
2997
2998  // Handle more complex cases in runtime.
2999  __ bind(&runtime);
3000  if (equality) {
3001    {
3002      FrameScope scope(masm, StackFrame::INTERNAL);
3003      __ Push(left, right);
3004      __ CallRuntime(Runtime::kStringEqual);
3005    }
3006    __ LoadRoot(a0, Heap::kTrueValueRootIndex);
3007    __ Ret(USE_DELAY_SLOT);
3008    __ Subu(v0, v0, a0);  // In delay slot.
3009  } else {
3010    __ Push(left, right);
3011    __ TailCallRuntime(Runtime::kStringCompare);
3012  }
3013
3014  __ bind(&miss);
3015  GenerateMiss(masm);
3016}
3017
3018
3019void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
3020  DCHECK_EQ(CompareICState::RECEIVER, state());
3021  Label miss;
3022  __ And(a2, a1, Operand(a0));
3023  __ JumpIfSmi(a2, &miss);
3024
3025  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3026  __ GetObjectType(a0, a2, a2);
3027  __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
3028  __ GetObjectType(a1, a2, a2);
3029  __ Branch(&miss, lt, a2, Operand(FIRST_JS_RECEIVER_TYPE));
3030
3031  DCHECK_EQ(eq, GetCondition());
3032  __ Ret(USE_DELAY_SLOT);
3033  __ dsubu(v0, a0, a1);
3034
3035  __ bind(&miss);
3036  GenerateMiss(masm);
3037}
3038
3039
3040void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
3041  Label miss;
3042  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3043  __ And(a2, a1, a0);
3044  __ JumpIfSmi(a2, &miss);
3045  __ GetWeakValue(a4, cell);
3046  __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
3047  __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
3048  __ Branch(&miss, ne, a2, Operand(a4));
3049  __ Branch(&miss, ne, a3, Operand(a4));
3050
3051  if (Token::IsEqualityOp(op())) {
3052    __ Ret(USE_DELAY_SLOT);
3053    __ dsubu(v0, a0, a1);
3054  } else {
3055    if (op() == Token::LT || op() == Token::LTE) {
3056      __ li(a2, Operand(Smi::FromInt(GREATER)));
3057    } else {
3058      __ li(a2, Operand(Smi::FromInt(LESS)));
3059    }
3060    __ Push(a1, a0, a2);
3061    __ TailCallRuntime(Runtime::kCompare);
3062  }
3063
3064  __ bind(&miss);
3065  GenerateMiss(masm);
3066}
3067
3068
3069void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3070  {
3071    // Call the runtime system in a fresh internal frame.
3072    FrameScope scope(masm, StackFrame::INTERNAL);
3073    __ Push(a1, a0);
3074    __ Push(ra, a1, a0);
3075    __ li(a4, Operand(Smi::FromInt(op())));
3076    __ daddiu(sp, sp, -kPointerSize);
3077    __ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
3078                   USE_DELAY_SLOT);
3079    __ sd(a4, MemOperand(sp));  // In the delay slot.
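    // This completes the three arguments expected by kCompareIC_Miss: the two
    // operands pushed above plus the Smi-encoded comparison op, which is
    // stored from the delay slot of the runtime call into the slot reserved
    // by the daddiu.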
3080    // Compute the entry point of the rewritten stub.
3081    __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
3082    // Restore registers.
3083    __ Pop(a1, a0, ra);
3084  }
3085  __ Jump(a2);
3086}
3087
3088
3089void DirectCEntryStub::Generate(MacroAssembler* masm) {
3090  // Make room for the arguments required by the C calling convention. Most
3091  // callers of DirectCEntryStub::GenerateCall use EnterExitFrame and
3092  // LeaveExitFrame, so they restore the stack and we don't have to do it here.
3093  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
3094  // kCArgsSlotsSize stack space after the call.
3095  __ daddiu(sp, sp, -kCArgsSlotsSize);
3096  // Place the return address on the stack, making the call
3097  // GC safe. The RegExp backend also relies on this.
3098  __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
3099  __ Call(t9);  // Call the C++ function.
3100  __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
3101
3102  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
3103    // In case of an error the return address may point to a memory area
3104    // filled with kZapValue by the GC.
3105    // Dereference the address and check for this.
3106    __ Uld(a4, MemOperand(t9));
3107    __ Assert(ne, kReceivedInvalidReturnAddress, a4,
3108        Operand(reinterpret_cast<uint64_t>(kZapValue)));
3109  }
3110  __ Jump(t9);
3111}
3112
3113
3114void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
3115                                    Register target) {
3116  intptr_t loc =
3117      reinterpret_cast<intptr_t>(GetCode().location());
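  // The call is routed through this stub (whose code address is |loc|) so
  // that Generate() can place the return address on the stack and keep the
  // call GC safe; the actual C target travels in t9.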
3118  __ Move(t9, target);
3119  __ li(at, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
3120  __ Call(at);
3121}
3122
3123
3124void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3125                                                      Label* miss,
3126                                                      Label* done,
3127                                                      Register receiver,
3128                                                      Register properties,
3129                                                      Handle<Name> name,
3130                                                      Register scratch0) {
3131  DCHECK(name->IsUniqueName());
3132  // If the names in slots 1 through kProbes - 1 probed for this hash value
3133  // are all different from the name, and the kProbes-th slot is unused (its
3134  // name is the undefined value), then the hash table is guaranteed not to
3135  // contain the property. This holds even if some slots hold deleted
3136  // properties (their names are the hole value).
3137  for (int i = 0; i < kInlinedProbes; i++) {
3138    // scratch0 points to properties hash.
3139    // Compute the masked index: (hash + i + i * i) & mask.
3140    Register index = scratch0;
3141    // Capacity is smi 2^n.
3142    __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
3143    __ Dsubu(index, index, Operand(1));
3144    __ And(index, index,
3145           Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
3146
3147    // Scale the index by multiplying by the entry size.
3148    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3149    __ Dlsa(index, index, index, 1);  // index *= 3.
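    // Dlsa(rd, rs, rt, sa) computes rs + (rt << sa), so this is
    // index + (index << 1) == 3 * index: each dictionary entry occupies
    // kEntrySize == 3 consecutive array slots.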
3150
3151    Register entity_name = scratch0;
3152    // Having undefined at this place means the name is not contained.
3153    STATIC_ASSERT(kSmiTagSize == 1);
3154    Register tmp = properties;
3155
3156    __ Dlsa(tmp, properties, index, kPointerSizeLog2);
3157    __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
3158
3159    DCHECK(!tmp.is(entity_name));
3160    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
3161    __ Branch(done, eq, entity_name, Operand(tmp));
3162
3163    // Load the hole ready for use below:
3164    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
3165
3166    // Stop if found the property.
3167    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
3168
3169    Label good;
3170    __ Branch(&good, eq, entity_name, Operand(tmp));
3171
3172    // Check if the entry name is not a unique name.
3173    __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
3174    __ lbu(entity_name,
3175           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
3176    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
3177    __ bind(&good);
3178
3179    // Restore the properties.
3180    __ ld(properties,
3181          FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3182  }
3183
3184  const int spill_mask =
3185      (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
3186       a2.bit() | a1.bit() | a0.bit() | v0.bit());
3187
3188  __ MultiPush(spill_mask);
3189  __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
3190  __ li(a1, Operand(Handle<Name>(name)));
3191  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
3192  __ CallStub(&stub);
3193  __ mov(at, v0);
3194  __ MultiPop(spill_mask);
3195
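  // The stub's result was copied from v0 into at before the spilled registers
  // were restored: zero means the name is not in the dictionary, which is
  // exactly what a successful negative lookup requires.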
3196  __ Branch(done, eq, at, Operand(zero_reg));
3197  __ Branch(miss, ne, at, Operand(zero_reg));
3198}
3199
3200
3201// Probe the name dictionary in the |elements| register. Jump to the
3202// |done| label if a property with the given name is found. Jump to
3203// the |miss| label otherwise.
3204// If lookup was successful |scratch2| will be equal to elements + 4 * index.
3205void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3206                                                      Label* miss,
3207                                                      Label* done,
3208                                                      Register elements,
3209                                                      Register name,
3210                                                      Register scratch1,
3211                                                      Register scratch2) {
3212  DCHECK(!elements.is(scratch1));
3213  DCHECK(!elements.is(scratch2));
3214  DCHECK(!name.is(scratch1));
3215  DCHECK(!name.is(scratch2));
3216
3217  __ AssertName(name);
3218
3219  // Compute the capacity mask.
3220  __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
3221  __ SmiUntag(scratch1);
3222  __ Dsubu(scratch1, scratch1, Operand(1));
3223
3224  // Generate an unrolled loop that performs a few probes before
3225  // giving up. Measurements done on Gmail indicate that 2 probes
3226  // cover ~93% of loads from dictionaries.
3227  for (int i = 0; i < kInlinedProbes; i++) {
3228    // Compute the masked index: (hash + i + i * i) & mask.
3229    __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
3230    if (i > 0) {
3231      // Add the probe offset (i + i * i), pre-shifted left by kHashShift,
3232      // so the hash does not need a separate right shift first; the combined
3233      // value hash + i + i * i is right shifted by the dsrl below.
3234      DCHECK(NameDictionary::GetProbeOffset(i) <
3235             1 << (32 - Name::kHashFieldOffset));
3236      __ Daddu(scratch2, scratch2, Operand(
3237          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3238    }
3239    __ dsrl(scratch2, scratch2, Name::kHashShift);
3240    __ And(scratch2, scratch1, scratch2);
3241
3242    // Scale the index by multiplying by the entry size.
3243    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3244    // scratch2 = scratch2 * 3.
3245    __ Dlsa(scratch2, scratch2, scratch2, 1);
3246
3247    // Check if the key is identical to the name.
3248    __ Dlsa(scratch2, elements, scratch2, kPointerSizeLog2);
3249    __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
3250    __ Branch(done, eq, name, Operand(at));
3251  }
3252
3253  const int spill_mask =
3254      (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
3255       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
3256      ~(scratch1.bit() | scratch2.bit());
3257
3258  __ MultiPush(spill_mask);
3259  if (name.is(a0)) {
3260    DCHECK(!elements.is(a1));
3261    __ Move(a1, name);
3262    __ Move(a0, elements);
3263  } else {
3264    __ Move(a0, elements);
3265    __ Move(a1, name);
3266  }
3267  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
3268  __ CallStub(&stub);
3269  __ mov(scratch2, a2);
3270  __ mov(at, v0);
3271  __ MultiPop(spill_mask);
3272
3273  __ Branch(done, ne, at, Operand(zero_reg));
3274  __ Branch(miss, eq, at, Operand(zero_reg));
3275}
3276
3277
3278void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3279  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
3280  // we cannot call anything that could cause a GC from this stub.
3281  // Registers:
3282  //  result: will hold the result of the lookup.
3283  //  key: the name to look up (a1).
3284  //  dictionary: NameDictionary to probe (a0).
3285  //  index: will hold the index of the entry if the lookup is successful;
3286  //         might alias with result.
3287  // Returns:
3288  //  result is zero if the lookup failed, non-zero otherwise.
3289
3290  Register result = v0;
3291  Register dictionary = a0;
3292  Register key = a1;
3293  Register index = a2;
3294  Register mask = a3;
3295  Register hash = a4;
3296  Register undefined = a5;
3297  Register entry_key = a6;
3298
3299  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3300
3301  __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
3302  __ SmiUntag(mask);
3303  __ Dsubu(mask, mask, Operand(1));
3304
3305  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
3306
3307  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3308
3309  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3310    // Compute the masked index: (hash + i + i * i) & mask.
3311    // Capacity is smi 2^n.
3312    if (i > 0) {
3313      // Add the probe offset (i + i * i), pre-shifted left by kHashShift,
3314      // so the hash does not need a separate right shift first; the combined
3315      // value hash + i + i * i is right shifted by the dsrl below.
3316      DCHECK(NameDictionary::GetProbeOffset(i) <
3317             1 << (32 - Name::kHashFieldOffset));
3318      __ Daddu(index, hash, Operand(
3319          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
3320    } else {
3321      __ mov(index, hash);
3322    }
3323    __ dsrl(index, index, Name::kHashShift);
3324    __ And(index, mask, index);
3325
3326    // Scale the index by multiplying by the entry size.
3327    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3328    // index *= 3.
3329    __ Dlsa(index, index, index, 1);
3330
3331    STATIC_ASSERT(kSmiTagSize == 1);
3332    __ Dlsa(index, dictionary, index, kPointerSizeLog2);
3333    __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
3334
3335    // Having undefined at this place means the name is not contained.
3336    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
3337
3338    // Stop if found the property.
3339    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
3340
3341    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3342      // Check if the entry name is not a unique name.
3343      __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
3344      __ lbu(entry_key,
3345             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
3346      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
3347    }
3348  }
3349
3350  __ bind(&maybe_in_dictionary);
3351  // If we are doing negative lookup then probing failure should be
3352  // treated as a lookup success. For positive lookup probing failure
3353  // should be treated as lookup failure.
3354  if (mode() == POSITIVE_LOOKUP) {
3355    __ Ret(USE_DELAY_SLOT);
3356    __ mov(result, zero_reg);
3357  }
3358
3359  __ bind(&in_dictionary);
3360  __ Ret(USE_DELAY_SLOT);
3361  __ li(result, 1);
3362
3363  __ bind(&not_in_dictionary);
3364  __ Ret(USE_DELAY_SLOT);
3365  __ mov(result, zero_reg);
3366}
3367
3368
3369void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3370    Isolate* isolate) {
3371  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3372  stub1.GetCode();
3373  // Hydrogen code stubs need stub2 at snapshot time.
3374  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3375  stub2.GetCode();
3376}
3377
3378
3379// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
3380// the value has just been written into the object, now this stub makes sure
3381// we keep the GC informed.  The word in the object where the value has been
3382// written is in the address register.
3383void RecordWriteStub::Generate(MacroAssembler* masm) {
3384  Label skip_to_incremental_noncompacting;
3385  Label skip_to_incremental_compacting;
3386
3387  // The first two branch+nop instructions are generated with labels so as to
3388  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
3389  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
3390  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
3391  // incremental heap marking.
3392  // See RecordWriteStub::Patch for details.
3393  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
3394  __ nop();
3395  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
3396  __ nop();
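  // While incremental marking is off, the two branches above are patched into
  // nops (see the PatchBranchIntoNop calls at the end of this function) and
  // only the remembered set code below runs; re-patching either branch into a
  // taken beq routes the write through GenerateIncremental with INCREMENTAL
  // or INCREMENTAL_COMPACTION mode.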
3397
3398  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3399    __ RememberedSetHelper(object(),
3400                           address(),
3401                           value(),
3402                           save_fp_regs_mode(),
3403                           MacroAssembler::kReturnAtEnd);
3404  }
3405  __ Ret();
3406
3407  __ bind(&skip_to_incremental_noncompacting);
3408  GenerateIncremental(masm, INCREMENTAL);
3409
3410  __ bind(&skip_to_incremental_compacting);
3411  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3412
3413  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3414  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
3415
3416  PatchBranchIntoNop(masm, 0);
3417  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
3418}
3419
3420
3421void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3422  regs_.Save(masm);
3423
3424  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3425    Label dont_need_remembered_set;
3426
3427    __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
3428    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
3429                           regs_.scratch0(),
3430                           &dont_need_remembered_set);
3431
3432    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
3433                        &dont_need_remembered_set);
3434
3435    // First notify the incremental marker if necessary, then update the
3436    // remembered set.
3437    CheckNeedsToInformIncrementalMarker(
3438        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3439    InformIncrementalMarker(masm);
3440    regs_.Restore(masm);
3441    __ RememberedSetHelper(object(),
3442                           address(),
3443                           value(),
3444                           save_fp_regs_mode(),
3445                           MacroAssembler::kReturnAtEnd);
3446
3447    __ bind(&dont_need_remembered_set);
3448  }
3449
3450  CheckNeedsToInformIncrementalMarker(
3451      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3452  InformIncrementalMarker(masm);
3453  regs_.Restore(masm);
3454  __ Ret();
3455}
3456
3457
3458void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3459  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3460  int argument_count = 3;
3461  __ PrepareCallCFunction(argument_count, regs_.scratch0());
3462  Register address =
3463      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
3464  DCHECK(!address.is(regs_.object()));
3465  DCHECK(!address.is(a0));
3466  __ Move(address, regs_.address());
3467  __ Move(a0, regs_.object());
3468  __ Move(a1, address);
3469  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
3470
3471  AllowExternalCallThatCantCauseGC scope(masm);
3472  __ CallCFunction(
3473      ExternalReference::incremental_marking_record_write_function(isolate()),
3474      argument_count);
3475  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
3476}
3477
3478
3479void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
3480    MacroAssembler* masm,
3481    OnNoNeedToInformIncrementalMarker on_no_need,
3482    Mode mode) {
3483  Label on_black;
3484  Label need_incremental;
3485  Label need_incremental_pop_scratch;
3486
3487  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
3488  __ ld(regs_.scratch1(),
3489        MemOperand(regs_.scratch0(),
3490                   MemoryChunk::kWriteBarrierCounterOffset));
3491  __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
3492  __ sd(regs_.scratch1(),
3493         MemOperand(regs_.scratch0(),
3494                    MemoryChunk::kWriteBarrierCounterOffset));
3495  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
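  // Every page keeps a write barrier counter in its MemoryChunk header; it is
  // decremented once per recorded write and, once it goes negative, the stub
  // falls through to inform the incremental marker without inspecting the
  // object's mark bits.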
3496
3497  // Let's look at the color of the object:  If it is not black we don't have
3498  // to inform the incremental marker.
3499  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
3500
3501  regs_.Restore(masm);
3502  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3503    __ RememberedSetHelper(object(),
3504                           address(),
3505                           value(),
3506                           save_fp_regs_mode(),
3507                           MacroAssembler::kReturnAtEnd);
3508  } else {
3509    __ Ret();
3510  }
3511
3512  __ bind(&on_black);
3513
3514  // Get the value from the slot.
3515  __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
3516
3517  if (mode == INCREMENTAL_COMPACTION) {
3518    Label ensure_not_white;
3519
3520    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
3521                     regs_.scratch1(),  // Scratch.
3522                     MemoryChunk::kEvacuationCandidateMask,
3523                     eq,
3524                     &ensure_not_white);
3525
3526    __ CheckPageFlag(regs_.object(),
3527                     regs_.scratch1(),  // Scratch.
3528                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
3529                     eq,
3530                     &need_incremental);
3531
3532    __ bind(&ensure_not_white);
3533  }
3534
3535  // We need extra registers for this, so we push the object and the address
3536  // register temporarily.
3537  __ Push(regs_.object(), regs_.address());
3538  __ JumpIfWhite(regs_.scratch0(),  // The value.
3539                 regs_.scratch1(),  // Scratch.
3540                 regs_.object(),    // Scratch.
3541                 regs_.address(),   // Scratch.
3542                 &need_incremental_pop_scratch);
3543  __ Pop(regs_.object(), regs_.address());
3544
3545  regs_.Restore(masm);
3546  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3547    __ RememberedSetHelper(object(),
3548                           address(),
3549                           value(),
3550                           save_fp_regs_mode(),
3551                           MacroAssembler::kReturnAtEnd);
3552  } else {
3553    __ Ret();
3554  }
3555
3556  __ bind(&need_incremental_pop_scratch);
3557  __ Pop(regs_.object(), regs_.address());
3558
3559  __ bind(&need_incremental);
3560
3561  // Fall through when we need to inform the incremental marker.
3562}
3563
3564
3565void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
3566  CEntryStub ces(isolate(), 1, kSaveFPRegs);
3567  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
3568  int parameter_count_offset =
3569      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
3570  __ ld(a1, MemOperand(fp, parameter_count_offset));
3571  if (function_mode() == JS_FUNCTION_STUB_MODE) {
3572    __ Daddu(a1, a1, Operand(1));
3573  }
3574  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
3575  __ dsll(a1, a1, kPointerSizeLog2);
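  // a1 now holds the size of the argument area in bytes; the Daddu in the
  // delay slot of the return below drops those arguments from the stack as
  // part of the return.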
3576  __ Ret(USE_DELAY_SLOT);
3577  __ Daddu(sp, sp, a1);
3578}
3579
3580
3581void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
3582  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
3583  LoadICStub stub(isolate());
3584  stub.GenerateForTrampoline(masm);
3585}
3586
3587
3588void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
3589  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
3590  KeyedLoadICStub stub(isolate());
3591  stub.GenerateForTrampoline(masm);
3592}
3593
3594
3595void CallICTrampolineStub::Generate(MacroAssembler* masm) {
3596  __ EmitLoadTypeFeedbackVector(a2);
3597  CallICStub stub(isolate(), state());
3598  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3599}
3600
3601
3602void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
3603
3604
3605void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
3606  GenerateImpl(masm, true);
3607}
3608
3609
3610static void HandleArrayCases(MacroAssembler* masm, Register feedback,
3611                             Register receiver_map, Register scratch1,
3612                             Register scratch2, bool is_polymorphic,
3613                             Label* miss) {
3614  // feedback initially contains the feedback array
3615  Label next_loop, prepare_next;
3616  Label start_polymorphic;
3617
3618  Register cached_map = scratch1;
3619
3620  __ ld(cached_map,
3621        FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
3622  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3623  __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
3624  // found, now call handler.
3625  Register handler = feedback;
3626  __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
3627  __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3628  __ Jump(t9);
3629
3630  Register length = scratch2;
3631  __ bind(&start_polymorphic);
3632  __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3633  if (!is_polymorphic) {
3634    // If the IC could be monomorphic we have to make sure we don't go past the
3635    // end of the feedback array.
3636    __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
3637  }
3638
3639  Register too_far = length;
3640  Register pointer_reg = feedback;
3641
3642  // +-----+------+------+-----+-----+ ... ----+
3643  // | map | len  | wm0  | h0  | wm1 |      hN |
3644  // +-----+------+------+-----+-----+ ... ----+
3645  //                 0      1     2        len-1
3646  //                              ^              ^
3647  //                              |              |
3648  //                         pointer_reg      too_far
3649  //                         aka feedback     scratch2
3650  // also need receiver_map
3651  // use cached_map (scratch1) to look in the weak map values.
3652  __ SmiScale(too_far, length, kPointerSizeLog2);
3653  __ Daddu(too_far, feedback, Operand(too_far));
3654  __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3655  __ Daddu(pointer_reg, feedback,
3656           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
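  // too_far now points just past the last element of the feedback array and
  // pointer_reg at the first cached map; every iteration below advances by
  // one (weak map, handler) pair, i.e. 2 * kPointerSize.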
3657
3658  __ bind(&next_loop);
3659  __ ld(cached_map, MemOperand(pointer_reg));
3660  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3661  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
3662  __ ld(handler, MemOperand(pointer_reg, kPointerSize));
3663  __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
3664  __ Jump(t9);
3665
3666  __ bind(&prepare_next);
3667  __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
3668  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
3669
3670  // We exhausted our array of map handler pairs.
3671  __ Branch(miss);
3672}
3673
3674
3675static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
3676                                  Register receiver_map, Register feedback,
3677                                  Register vector, Register slot,
3678                                  Register scratch, Label* compare_map,
3679                                  Label* load_smi_map, Label* try_array) {
3680  __ JumpIfSmi(receiver, load_smi_map);
3681  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
3682  __ bind(compare_map);
3683  Register cached_map = scratch;
3684  // Move the weak map into the weak_cell register.
3685  __ ld(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
3686  __ Branch(try_array, ne, cached_map, Operand(receiver_map));
3687  Register handler = feedback;
3688  __ SmiScale(handler, slot, kPointerSizeLog2);
3689  __ Daddu(handler, vector, Operand(handler));
3690  __ ld(handler,
3691        FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
3692  __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
3693  __ Jump(t9);
3694}
3695
3696
3697void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3698  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
3699  Register name = LoadWithVectorDescriptor::NameRegister();          // a2
3700  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
3701  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
3702  Register feedback = a4;
3703  Register receiver_map = a5;
3704  Register scratch1 = a6;
3705
3706  __ SmiScale(feedback, slot, kPointerSizeLog2);
3707  __ Daddu(feedback, vector, Operand(feedback));
3708  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
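  // The slot register holds a Smi index into the feedback vector; SmiScale
  // turns it into a byte offset (index * kPointerSize), so the load above is
  // effectively feedback = vector[slot].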
3709
3710  // Try to quickly handle the monomorphic case without knowing for sure
3711  // if we have a weak cell in feedback. We do know it's safe to look
3712  // at WeakCell::kValueOffset.
3713  Label try_array, load_smi_map, compare_map;
3714  Label not_array, miss;
3715  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3716                        scratch1, &compare_map, &load_smi_map, &try_array);
3717
3718  // Is it a fixed array?
3719  __ bind(&try_array);
3720  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3721  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
3722  __ Branch(&not_array, ne, scratch1, Operand(at));
3723  HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
3724
3725  __ bind(&not_array);
3726  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
3727  __ Branch(&miss, ne, feedback, Operand(at));
3728  masm->isolate()->load_stub_cache()->GenerateProbe(
3729      masm, receiver, name, feedback, receiver_map, scratch1, a7);
3730
3731  __ bind(&miss);
3732  LoadIC::GenerateMiss(masm);
3733
3734  __ bind(&load_smi_map);
3735  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3736  __ Branch(&compare_map);
3737}
3738
3739
3740void KeyedLoadICStub::Generate(MacroAssembler* masm) {
3741  GenerateImpl(masm, false);
3742}
3743
3744
3745void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
3746  GenerateImpl(masm, true);
3747}
3748
3749
3750void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3751  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // a1
3752  Register key = LoadWithVectorDescriptor::NameRegister();           // a2
3753  Register vector = LoadWithVectorDescriptor::VectorRegister();      // a3
3754  Register slot = LoadWithVectorDescriptor::SlotRegister();          // a0
3755  Register feedback = a4;
3756  Register receiver_map = a5;
3757  Register scratch1 = a6;
3758
3759  __ SmiScale(feedback, slot, kPointerSizeLog2);
3760  __ Daddu(feedback, vector, Operand(feedback));
3761  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3762
3763  // Try to quickly handle the monomorphic case without knowing for sure
3764  // if we have a weak cell in feedback. We do know it's safe to look
3765  // at WeakCell::kValueOffset.
3766  Label try_array, load_smi_map, compare_map;
3767  Label not_array, miss;
3768  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3769                        scratch1, &compare_map, &load_smi_map, &try_array);
3770
3771  __ bind(&try_array);
3772  // Is it a fixed array?
3773  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3774  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
3775  __ Branch(&not_array, ne, scratch1, Operand(at));
3776  // We have a polymorphic element handler.
3777  __ JumpIfNotSmi(key, &miss);
3778
3779  Label polymorphic, try_poly_name;
3780  __ bind(&polymorphic);
3781  HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
3782
3783  __ bind(&not_array);
3784  // Is it generic?
3785  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
3786  __ Branch(&try_poly_name, ne, feedback, Operand(at));
3787  Handle<Code> megamorphic_stub =
3788      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3789  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3790
3791  __ bind(&try_poly_name);
3792  // We might have a name in feedback, and a fixed array in the next slot.
3793  __ Branch(&miss, ne, key, Operand(feedback));
3794  // If the name comparison succeeded, we know we have a fixed array with
3795  // at least one map/handler pair.
3796  __ SmiScale(feedback, slot, kPointerSizeLog2);
3797  __ Daddu(feedback, vector, Operand(feedback));
3798  __ ld(feedback,
3799        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3800  HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, false, &miss);
3801
3802  __ bind(&miss);
3803  KeyedLoadIC::GenerateMiss(masm);
3804
3805  __ bind(&load_smi_map);
3806  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
3807  __ Branch(&compare_map);
3808}
3809
3810void StoreICTrampolineStub::Generate(MacroAssembler* masm) {
3811  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
3812  StoreICStub stub(isolate(), state());
3813  stub.GenerateForTrampoline(masm);
3814}
3815
3816void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
3817  __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
3818  KeyedStoreICStub stub(isolate(), state());
3819  stub.GenerateForTrampoline(masm);
3820}
3821
3822void StoreICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
3823
3824void StoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3825  GenerateImpl(masm, true);
3826}
3827
3828void StoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3829  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
3830  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
3831  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
3832  Register slot = StoreWithVectorDescriptor::SlotRegister();          // a4
3833  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
3834  Register feedback = a5;
3835  Register receiver_map = a6;
3836  Register scratch1 = a7;
3837
3838  __ SmiScale(scratch1, slot, kPointerSizeLog2);
3839  __ Daddu(feedback, vector, Operand(scratch1));
3840  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3841
3842  // Try to quickly handle the monomorphic case without knowing for sure
3843  // if we have a weak cell in feedback. We do know it's safe to look
3844  // at WeakCell::kValueOffset.
3845  Label try_array, load_smi_map, compare_map;
3846  Label not_array, miss;
3847  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3848                        scratch1, &compare_map, &load_smi_map, &try_array);
3849
3850  // Is it a fixed array?
3851  __ bind(&try_array);
3852  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3853  __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
3854
3855  Register scratch2 = t0;
3856  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
3857                   &miss);
3858
3859  __ bind(&not_array);
3860  __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
3861  masm->isolate()->store_stub_cache()->GenerateProbe(
3862      masm, receiver, key, feedback, receiver_map, scratch1, scratch2);
3863
3864  __ bind(&miss);
3865  StoreIC::GenerateMiss(masm);
3866
3867  __ bind(&load_smi_map);
3868  __ Branch(USE_DELAY_SLOT, &compare_map);
3869  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
3870}
3871
3872void KeyedStoreICStub::Generate(MacroAssembler* masm) {
3873  GenerateImpl(masm, false);
3874}
3875
3876void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
3877  GenerateImpl(masm, true);
3878}
3879
3880
3881static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
3882                                       Register receiver_map, Register scratch1,
3883                                       Register scratch2, Label* miss) {
3884  // feedback initially contains the feedback array
3885  Label next_loop, prepare_next;
3886  Label start_polymorphic;
3887  Label transition_call;
3888
3889  Register cached_map = scratch1;
3890  Register too_far = scratch2;
3891  Register pointer_reg = feedback;
3892
3893  __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
3894
3895  // +-----+------+------+-----+-----+-----+ ... ----+
3896  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
3897  // +-----+------+------+-----+-----+ ----+ ... ----+
3898  //                 0      1     2              len-1
3899  //                 ^                                 ^
3900  //                 |                                 |
3901  //             pointer_reg                        too_far
3902  //             aka feedback                       scratch2
3903  // also need receiver_map
3904  // use cached_map (scratch1) to look in the weak map values.
3905  __ SmiScale(too_far, too_far, kPointerSizeLog2);
3906  __ Daddu(too_far, feedback, Operand(too_far));
3907  __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3908  __ Daddu(pointer_reg, feedback,
3909           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
3910
3911  __ bind(&next_loop);
3912  __ ld(cached_map, MemOperand(pointer_reg));
3913  __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
3914  __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
3915  // Is it a transitioning store?
3916  __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
3917  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3918  __ Branch(&transition_call, ne, too_far, Operand(at));
3919
3920  __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
3921  __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3922  __ Jump(t9);
3923
3924  __ bind(&transition_call);
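  // The second word of the triple is a WeakCell holding the transition map;
  // if the cell has been cleared, the load below yields a Smi and we bail out
  // to the miss handler.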
3925  __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
3926  __ JumpIfSmi(too_far, miss);
3927
3928  __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
3929  // Load the map into the correct register.
3930  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
3931  __ Move(feedback, too_far);
3932  __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
3933  __ Jump(t9);
3934
3935  __ bind(&prepare_next);
3936  __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
3937  __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
3938
3939  // We exhausted our array of map handler pairs.
3940  __ Branch(miss);
3941}
3942
3943void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
3944  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();  // a1
3945  Register key = StoreWithVectorDescriptor::NameRegister();           // a2
3946  Register vector = StoreWithVectorDescriptor::VectorRegister();      // a3
3947  Register slot = StoreWithVectorDescriptor::SlotRegister();          // a4
3948  DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0));          // a0
3949  Register feedback = a5;
3950  Register receiver_map = a6;
3951  Register scratch1 = a7;
3952
3953  __ SmiScale(scratch1, slot, kPointerSizeLog2);
3954  __ Daddu(feedback, vector, Operand(scratch1));
3955  __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
3956
3957  // Try to quickly handle the monomorphic case without knowing for sure
3958  // if we have a weak cell in feedback. We do know it's safe to look
3959  // at WeakCell::kValueOffset.
3960  Label try_array, load_smi_map, compare_map;
3961  Label not_array, miss;
3962  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
3963                        scratch1, &compare_map, &load_smi_map, &try_array);
3964
3965  __ bind(&try_array);
3966  // Is it a fixed array?
3967  __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
3968  __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
3969
3970  // We have a polymorphic element handler.
3971  Label try_poly_name;
3972
3973  Register scratch2 = t0;
3974
3975  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
3976                             &miss);
3977
3978  __ bind(&not_array);
3979  // Is it generic?
3980  __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
3981  Handle<Code> megamorphic_stub =
3982      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
3983  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
3984
3985  __ bind(&try_poly_name);
3986  // We might have a name in feedback, and a fixed array in the next slot.
3987  __ Branch(&miss, ne, key, Operand(feedback));
3988  // If the name comparison succeeded, we know we have a fixed array with
3989  // at least one map/handler pair.
3990  __ SmiScale(scratch1, slot, kPointerSizeLog2);
3991  __ Daddu(feedback, vector, Operand(scratch1));
3992  __ ld(feedback,
3993        FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
3994  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
3995                   &miss);
3996
3997  __ bind(&miss);
3998  KeyedStoreIC::GenerateMiss(masm);
3999
4000  __ bind(&load_smi_map);
4001  __ Branch(USE_DELAY_SLOT, &compare_map);
4002  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);  // In delay slot.
4003}
4004
4005
4006void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4007  if (masm->isolate()->function_entry_hook() != NULL) {
4008    ProfileEntryHookStub stub(masm->isolate());
4009    __ push(ra);
4010    __ CallStub(&stub);
4011    __ pop(ra);
4012  }
4013}
4014
4015
4016void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4017  // The entry hook is a "push ra" instruction, followed by a call.
4018  // Note: on MIPS a "push" is 2 instructions.
4019  const int32_t kReturnAddressDistanceFromFunctionStart =
4020      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
4021
4022  // This should contain all kJSCallerSaved registers.
4023  const RegList kSavedRegs =
4024     kJSCallerSaved |  // Caller saved registers.
4025     s5.bit();         // Saved stack pointer.
4026
4027  // We also save ra, so the count here is one higher than the mask indicates.
4028  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4029
4030  // Save all caller-save registers as this may be called from anywhere.
4031  __ MultiPush(kSavedRegs | ra.bit());
4032
4033  // Compute the function's address for the first argument.
4034  __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
4035
4036  // The caller's return address is above the saved temporaries.
4037  // Grab that for the second argument to the hook.
4038  __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
4039
4040  // Align the stack if necessary.
4041  int frame_alignment = masm->ActivationFrameAlignment();
4042  if (frame_alignment > kPointerSize) {
4043    __ mov(s5, sp);
4044    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4045    __ And(sp, sp, Operand(-frame_alignment));
4046  }
4047
4048  __ Dsubu(sp, sp, kCArgsSlotsSize);
4049#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
4050  int64_t entry_hook =
4051      reinterpret_cast<int64_t>(isolate()->function_entry_hook());
4052  __ li(t9, Operand(entry_hook));
4053#else
4054  // Under the simulator we need to indirect the entry hook through a
4055  // trampoline function at a known address.
4056  // It additionally takes an isolate as a third parameter.
4057  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
4058
4059  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4060  __ li(t9, Operand(ExternalReference(&dispatcher,
4061                                      ExternalReference::BUILTIN_CALL,
4062                                      isolate())));
4063#endif
4064  // Call C function through t9 to conform ABI for PIC.
4065  __ Call(t9);
4066
4067  // Restore the stack pointer if needed.
4068  if (frame_alignment > kPointerSize) {
4069    __ mov(sp, s5);
4070  } else {
4071    __ Daddu(sp, sp, kCArgsSlotsSize);
4072  }
4073
4074  // Also pop ra to get Ret(0).
4075  __ MultiPop(kSavedRegs | ra.bit());
4076  __ Ret();
4077}
4078
4079
4080template<class T>
4081static void CreateArrayDispatch(MacroAssembler* masm,
4082                                AllocationSiteOverrideMode mode) {
4083  if (mode == DISABLE_ALLOCATION_SITES) {
4084    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4085    __ TailCallStub(&stub);
4086  } else if (mode == DONT_OVERRIDE) {
4087    int last_index = GetSequenceIndexFromFastElementsKind(
4088        TERMINAL_FAST_ELEMENTS_KIND);
4089    for (int i = 0; i <= last_index; ++i) {
4090      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4091      T stub(masm->isolate(), kind);
4092      __ TailCallStub(&stub, eq, a3, Operand(kind));
4093    }
4094
4095    // If we reached this point there is a problem.
4096    __ Abort(kUnexpectedElementsKindInArrayConstructor);
4097  } else {
4098    UNREACHABLE();
4099  }
4100}
4101
4102
4103static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4104                                           AllocationSiteOverrideMode mode) {
4105  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4106  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
4107  // a0 - number of arguments
4108  // a1 - constructor?
4109  // sp[0] - last argument
4110  Label normal_sequence;
4111  if (mode == DONT_OVERRIDE) {
4112    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4113    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4114    STATIC_ASSERT(FAST_ELEMENTS == 2);
4115    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4116    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4117    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4118
4119    // is the low bit set? If so, we are holey and that is good.
4120    __ And(at, a3, Operand(1));
4121    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
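    // Per the STATIC_ASSERTs above, packed elements kinds are even and their
    // holey counterparts are the next odd value, so a set low bit means the
    // site already tracks a holey kind and no transition is needed.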
4122  }
4123  // look at the first argument
4124  __ ld(a5, MemOperand(sp, 0));
4125  __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
4126
4127  if (mode == DISABLE_ALLOCATION_SITES) {
4128    ElementsKind initial = GetInitialFastElementsKind();
4129    ElementsKind holey_initial = GetHoleyElementsKind(initial);
4130
4131    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4132                                                  holey_initial,
4133                                                  DISABLE_ALLOCATION_SITES);
4134    __ TailCallStub(&stub_holey);
4135
4136    __ bind(&normal_sequence);
4137    ArraySingleArgumentConstructorStub stub(masm->isolate(),
4138                                            initial,
4139                                            DISABLE_ALLOCATION_SITES);
4140    __ TailCallStub(&stub);
4141  } else if (mode == DONT_OVERRIDE) {
4142    // We are going to create a holey array, but our kind is non-holey.
4143    // Fix kind and retry (only if we have an allocation site in the slot).
4144    __ Daddu(a3, a3, Operand(1));
4145
4146    if (FLAG_debug_code) {
4147      __ ld(a5, FieldMemOperand(a2, 0));
4148      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
4149      __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
4150    }
4151
4152    // Save the resulting elements kind in type info. We can't just store a3
4153    // in the AllocationSite::transition_info field because elements kind is
4154    // restricted to a portion of the field...upper bits need to be left alone.
4155    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4156    __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4157    __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
4158    __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4159
4160
4161    __ bind(&normal_sequence);
4162    int last_index = GetSequenceIndexFromFastElementsKind(
4163        TERMINAL_FAST_ELEMENTS_KIND);
4164    for (int i = 0; i <= last_index; ++i) {
4165      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4166      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4167      __ TailCallStub(&stub, eq, a3, Operand(kind));
4168    }
4169
4170    // If we reached this point there is a problem.
4171    __ Abort(kUnexpectedElementsKindInArrayConstructor);
4172  } else {
4173    UNREACHABLE();
4174  }
4175}
4176
4177
4178template<class T>
4179static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4180  int to_index = GetSequenceIndexFromFastElementsKind(
4181      TERMINAL_FAST_ELEMENTS_KIND);
4182  for (int i = 0; i <= to_index; ++i) {
4183    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4184    T stub(isolate, kind);
4185    stub.GetCode();
4186    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4187      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4188      stub1.GetCode();
4189    }
4190  }
4191}
4192
4193void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
4194  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4195      isolate);
4196  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4197      isolate);
4198  ArrayNArgumentsConstructorStub stub(isolate);
4199  stub.GetCode();
4200  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4201  for (int i = 0; i < 2; i++) {
4202    // For internal arrays we only need a few things.
4203    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4204    stubh1.GetCode();
4205    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4206    stubh2.GetCode();
4207  }
4208}
4209
4210
4211void ArrayConstructorStub::GenerateDispatchToArrayStub(
4212    MacroAssembler* masm,
4213    AllocationSiteOverrideMode mode) {
4214  if (argument_count() == ANY) {
4215    Label not_zero_case, not_one_case;
4216    __ And(at, a0, a0);
4217    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
4218    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4219
4220    __ bind(&not_zero_case);
4221    __ Branch(&not_one_case, gt, a0, Operand(1));
4222    CreateArrayDispatchOneArgument(masm, mode);
4223
4224    __ bind(&not_one_case);
4225    ArrayNArgumentsConstructorStub stub(masm->isolate());
4226    __ TailCallStub(&stub);
4227  } else if (argument_count() == NONE) {
4228    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4229  } else if (argument_count() == ONE) {
4230    CreateArrayDispatchOneArgument(masm, mode);
4231  } else if (argument_count() == MORE_THAN_ONE) {
4232    ArrayNArgumentsConstructorStub stub(masm->isolate());
4233    __ TailCallStub(&stub);
4234  } else {
4235    UNREACHABLE();
4236  }
4237}
4238
4239
4240void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4241  // ----------- S t a t e -------------
4242  //  -- a0 : argc (only if argument_count() == ANY)
4243  //  -- a1 : constructor
4244  //  -- a2 : AllocationSite or undefined
4245  //  -- a3 : new target
4246  //  -- sp[0] : last argument
4247  // -----------------------------------
4248
4249  if (FLAG_debug_code) {
4250    // The array construct code is only set for the global and natives
4251    // builtin Array functions which always have maps.
4252
4253    // Initial map for the builtin Array function should be a map.
4254    __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4255    // The following Smi test catches both a NULL pointer and a Smi.
4256    __ SmiTst(a4, at);
4257    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4258        at, Operand(zero_reg));
4259    __ GetObjectType(a4, a4, a5);
4260    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4261        a5, Operand(MAP_TYPE));
4262
4263    // We should either have undefined in a2 or a valid AllocationSite
4264    __ AssertUndefinedOrAllocationSite(a2, a4);
4265  }
4266
4267  // Enter the context of the Array function.
4268  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
4269
4270  Label subclassing;
4271  __ Branch(&subclassing, ne, a1, Operand(a3));
4272
4273  Label no_info;
4274  // Get the elements kind and case on that.
4275  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4276  __ Branch(&no_info, eq, a2, Operand(at));
4277
4278  __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
4279  __ SmiUntag(a3);
4280  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4281  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
4282  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4283
4284  __ bind(&no_info);
4285  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4286
4287  // Subclassing.
4288  __ bind(&subclassing);
4289  switch (argument_count()) {
4290    case ANY:
4291    case MORE_THAN_ONE:
4292      __ Dlsa(at, sp, a0, kPointerSizeLog2);
4293      __ sd(a1, MemOperand(at));
4294      __ li(at, Operand(3));
4295      __ Daddu(a0, a0, at);
4296      break;
4297    case NONE:
4298      __ sd(a1, MemOperand(sp, 0 * kPointerSize));
4299      __ li(a0, Operand(3));
4300      break;
4301    case ONE:
4302      __ sd(a1, MemOperand(sp, 1 * kPointerSize));
4303      __ li(a0, Operand(4));
4304      break;
4305  }
4306  __ Push(a3, a2);
4307  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
4308}
4309
4310
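    // Emits the argc-based dispatch for one elements kind of the internal
    // Array constructor: fewer than one argument tail-calls the no-argument
    // stub, more than one tail-calls the generic N-arguments stub, and
    // exactly one argument uses the single-argument stub (switching to the
    // holey variant of the kind when the length argument at sp[0] is
    // non-zero).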
4311void InternalArrayConstructorStub::GenerateCase(
4312    MacroAssembler* masm, ElementsKind kind) {
4313
4314  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4315  __ TailCallStub(&stub0, lo, a0, Operand(1));
4316
4317  ArrayNArgumentsConstructorStub stubN(isolate());
4318  __ TailCallStub(&stubN, hi, a0, Operand(1));
4319
4320  if (IsFastPackedElementsKind(kind)) {
4321    // We might need to create a holey array;
4322    // look at the first argument.
4323    __ ld(at, MemOperand(sp, 0));
4324
4325    InternalArraySingleArgumentConstructorStub
4326        stub1_holey(isolate(), GetHoleyElementsKind(kind));
4327    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
4328  }
4329
4330  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4331  __ TailCallStub(&stub1);
4332}
4333
4334
4335void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4336  // ----------- S t a t e -------------
4337  //  -- a0 : argc
4338  //  -- a1 : constructor
4339  //  -- sp[0] : return address
4340  //  -- sp[8] : last argument
4341  // -----------------------------------
4342
4343  if (FLAG_debug_code) {
4344    // The array construct code is only set for the global and natives
4345    // builtin Array functions which always have maps.
4346
4347    // Initial map for the builtin Array function should be a map.
4348    __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4349    // A zero value here indicates either a NULL or a Smi.
4350    __ SmiTst(a3, at);
4351    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
4352        at, Operand(zero_reg));
4353    __ GetObjectType(a3, a3, a4);
4354    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
4355        a4, Operand(MAP_TYPE));
4356  }
4357
4358  // Figure out the right elements kind.
4359  __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
4360
4361  // Load the map's "bit field 2" into a3. We only need the first byte,
4362  // but the following bit field extraction takes care of that anyway.
4363  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
4364  // Retrieve elements_kind from bit field 2.
4365  __ DecodeField<Map::ElementsKindBits>(a3);
4366
4367  if (FLAG_debug_code) {
4368    Label done;
4369    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
4370    __ Assert(
4371        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
4372        a3, Operand(FAST_HOLEY_ELEMENTS));
4373    __ bind(&done);
4374  }
4375
4376  Label fast_elements_case;
4377  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
4378  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4379
4380  __ bind(&fast_elements_case);
4381  GenerateCase(masm, FAST_ELEMENTS);
4382}
4383
4384
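    // Fast path for JSObject allocation: loads the new target's initial map,
    // allocates the instance in new space, and initializes the in-object
    // fields, taking in-object slack tracking into account. Falls back to
    // %AllocateInNewSpace when new space is exhausted, and to %NewObject
    // whenever the new target is not a JSFunction with an initial map whose
    // constructor is the target function.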
4385void FastNewObjectStub::Generate(MacroAssembler* masm) {
4386  // ----------- S t a t e -------------
4387  //  -- a1 : target
4388  //  -- a3 : new target
4389  //  -- cp : context
4390  //  -- ra : return address
4391  // -----------------------------------
4392  __ AssertFunction(a1);
4393  __ AssertReceiver(a3);
4394
4395  // Verify that the new target is a JSFunction.
4396  Label new_object;
4397  __ GetObjectType(a3, a2, a2);
4398  __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
4399
4400  // Load the initial map and verify that it's in fact a map.
4401  __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
4402  __ JumpIfSmi(a2, &new_object);
4403  __ GetObjectType(a2, a0, a0);
4404  __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
4405
4406  // Fall back to runtime if the target differs from the new target's
4407  // initial map constructor.
4408  __ ld(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
4409  __ Branch(&new_object, ne, a0, Operand(a1));
4410
4411  // Allocate the JSObject on the heap.
4412  Label allocate, done_allocate;
4413  __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
4414  __ Allocate(a4, v0, a5, a0, &allocate, SIZE_IN_WORDS);
4415  __ bind(&done_allocate);
4416
4417  // Initialize the JSObject fields.
4418  __ sd(a2, FieldMemOperand(v0, JSObject::kMapOffset));
4419  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
4420  __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4421  __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
4422  STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
4423  __ Daddu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
4424
4425  // ----------- S t a t e -------------
4426  //  -- v0 : result (tagged)
4427  //  -- a1 : result fields (untagged)
4428  //  -- a5 : result end (untagged)
4429  //  -- a2 : initial map
4430  //  -- cp : context
4431  //  -- ra : return address
4432  // -----------------------------------
4433
4434  // Perform in-object slack tracking if requested.
4435  Label slack_tracking;
4436  STATIC_ASSERT(Map::kNoSlackTracking == 0);
4437  __ lwu(a3, FieldMemOperand(a2, Map::kBitField3Offset));
4438  __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
4439  __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(zero_reg));
4440  __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);  // In delay slot.
4441  {
4442    // Initialize all in-object fields with undefined.
4443    __ InitializeFieldsWithFiller(a1, a5, a0);
4444    __ Ret();
4445  }
4446  __ bind(&slack_tracking);
4447  {
4448    // Decrease generous allocation count.
4449    STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
4450    __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
4451    __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
4452
4453    // Initialize the in-object fields with undefined.
4454    __ lbu(a4, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
4455    __ dsll(a4, a4, kPointerSizeLog2);
4456    __ Dsubu(a4, a5, a4);
4457    __ InitializeFieldsWithFiller(a1, a4, a0);
4458
4459    // Initialize the remaining (reserved) fields with one pointer filler map.
4460    __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
4461    __ InitializeFieldsWithFiller(a1, a5, a0);
4462
4463    // Check if we can finalize the instance size.
4464    Label finalize;
4465    STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
4466    __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
4467    __ Branch(&finalize, eq, a3, Operand(zero_reg));
4468    __ Ret();
4469
4470    // Finalize the instance size.
4471    __ bind(&finalize);
4472    {
4473      FrameScope scope(masm, StackFrame::INTERNAL);
4474      __ Push(v0, a2);
4475      __ CallRuntime(Runtime::kFinalizeInstanceSize);
4476      __ Pop(v0);
4477    }
4478    __ Ret();
4479  }
4480
4481  // Fall back to %AllocateInNewSpace.
4482  __ bind(&allocate);
4483  {
4484    FrameScope scope(masm, StackFrame::INTERNAL);
4485    STATIC_ASSERT(kSmiTag == 0);
4486    STATIC_ASSERT(kSmiTagSize == 1);
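        // Note: on this port Smi values are expected to live in the upper 32
        // bits of the word (kSmiShiftSize == 31), so the single shift below
        // both scales the instance size from words to bytes and Smi-tags the
        // result for the runtime call; e.g. an instance size of 8 words
        // becomes the Smi 64 (bytes).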
4487    __ dsll(a4, a4, kPointerSizeLog2 + kSmiShiftSize + kSmiTagSize);
4489    __ Push(a2, a4);
4490    __ CallRuntime(Runtime::kAllocateInNewSpace);
4491    __ Pop(a2);
4492  }
4493  __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
4494  __ Dlsa(a5, v0, a5, kPointerSizeLog2);
4495  STATIC_ASSERT(kHeapObjectTag == 1);
4496  __ Dsubu(a5, a5, Operand(kHeapObjectTag));
4497  __ jmp(&done_allocate);
4498
4499  // Fall back to %NewObject.
4500  __ bind(&new_object);
4501  __ Push(a1, a3);
4502  __ TailCallRuntime(Runtime::kNewObject);
4503}
4504
4505
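    // Materializes the rest parameter array for the current function. Rest
    // parameters can only be present when an arguments adaptor frame sits
    // below the function's frame; in that case the stub counts how many
    // actual arguments exceed the formal parameter count and copies exactly
    // those trailing arguments into a freshly allocated JSArray backed by a
    // FixedArray. Otherwise it returns an empty array.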
4506void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
4507  // ----------- S t a t e -------------
4508  //  -- a1 : function
4509  //  -- cp : context
4510  //  -- fp : frame pointer
4511  //  -- ra : return address
4512  // -----------------------------------
4513  __ AssertFunction(a1);
4514
4515  // Make a2 point to the JavaScript frame.
4516  __ mov(a2, fp);
4517  if (skip_stub_frame()) {
4518    // For Ignition we need to skip the handler/stub frame to reach the
4519    // JavaScript frame for the function.
4520    __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4521  }
4522  if (FLAG_debug_code) {
4523    Label ok;
4524    __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
4525    __ Branch(&ok, eq, a1, Operand(a3));
4526    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4527    __ bind(&ok);
4528  }
4529
4530  // Check if we have rest parameters (only possible if we have an
4531  // arguments adaptor frame below the function frame).
4532  Label no_rest_parameters;
4533  __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4534  __ ld(a3, MemOperand(a2, CommonFrameConstants::kContextOrFrameTypeOffset));
4535  __ Branch(&no_rest_parameters, ne, a3,
4536            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4537
4538  // Check if the arguments adaptor frame contains more arguments than
4539  // specified by the function's internal formal parameter count.
4540  Label rest_parameters;
4541  __ SmiLoadUntag(
4542      a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4543  __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4544  __ lw(a3,
4545        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
4546  __ Dsubu(a0, a0, Operand(a3));
4547  __ Branch(&rest_parameters, gt, a0, Operand(zero_reg));
4548
4549  // Return an empty rest parameter array.
4550  __ bind(&no_rest_parameters);
4551  {
4552    // ----------- S t a t e -------------
4553    //  -- cp : context
4554    //  -- ra : return address
4555    // -----------------------------------
4556
4557    // Allocate an empty rest parameter array.
4558    Label allocate, done_allocate;
4559    __ Allocate(JSArray::kSize, v0, a0, a1, &allocate, NO_ALLOCATION_FLAGS);
4560    __ bind(&done_allocate);
4561
4562    // Setup the rest parameter array in v0.
4563    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, a1);
4564    __ sd(a1, FieldMemOperand(v0, JSArray::kMapOffset));
4565    __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
4566    __ sd(a1, FieldMemOperand(v0, JSArray::kPropertiesOffset));
4567    __ sd(a1, FieldMemOperand(v0, JSArray::kElementsOffset));
4568    __ Move(a1, Smi::FromInt(0));
4569    __ Ret(USE_DELAY_SLOT);
4570    __ sd(a1, FieldMemOperand(v0, JSArray::kLengthOffset));  // In delay slot
4571    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4572
4573    // Fall back to %AllocateInNewSpace.
4574    __ bind(&allocate);
4575    {
4576      FrameScope scope(masm, StackFrame::INTERNAL);
4577      __ Push(Smi::FromInt(JSArray::kSize));
4578      __ CallRuntime(Runtime::kAllocateInNewSpace);
4579    }
4580    __ jmp(&done_allocate);
4581  }
4582
4583  __ bind(&rest_parameters);
4584  {
4585    // Compute the pointer to the first rest parameter (skipping the receiver).
4586    __ Dlsa(a2, a2, a0, kPointerSizeLog2);
4587    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4588                             1 * kPointerSize));
4589
4590    // ----------- S t a t e -------------
4591    //  -- cp : context
4592    //  -- a0 : number of rest parameters
4593    //  -- a1 : function
4594    //  -- a2 : pointer to first rest parameters
4595    //  -- ra : return address
4596    // -----------------------------------
4597
4598    // Allocate space for the rest parameter array plus the backing store.
4599    Label allocate, done_allocate;
4600    __ li(a5, Operand(JSArray::kSize + FixedArray::kHeaderSize));
4601    __ Dlsa(a5, a5, a0, kPointerSizeLog2);
4602    __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
4603    __ bind(&done_allocate);
4604
4605    // Compute arguments.length in a4.
4606    __ SmiTag(a4, a0);
4607
4608    // Setup the elements array in v0.
4609    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4610    __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
4611    __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
4612    __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
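        // Copy the rest parameters from the caller's frame into the backing
        // store: a2 starts at the first rest parameter (the highest of the
        // copied stack slots) and walks down the stack, while a3 walks up
        // through the FixedArray, so the elements end up in source order.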
4613    {
4614      Label loop, done_loop;
4615      __ Dlsa(a1, a3, a0, kPointerSizeLog2);
4616      __ bind(&loop);
4617      __ Branch(&done_loop, eq, a1, Operand(a3));
4618      __ ld(at, MemOperand(a2, 0 * kPointerSize));
4619      __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
4620      __ Dsubu(a2, a2, Operand(1 * kPointerSize));
4621      __ Daddu(a3, a3, Operand(1 * kPointerSize));
4622      __ Branch(&loop);
4623      __ bind(&done_loop);
4624    }
4625
4626    // Setup the rest parameter array in a3.
4627    __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, at);
4628    __ sd(at, FieldMemOperand(a3, JSArray::kMapOffset));
4629    __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
4630    __ sd(at, FieldMemOperand(a3, JSArray::kPropertiesOffset));
4631    __ sd(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
4632    __ sd(a4, FieldMemOperand(a3, JSArray::kLengthOffset));
4633    STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
4634    __ Ret(USE_DELAY_SLOT);
4635    __ mov(v0, a3);  // In delay slot
4636
4637    // Fall back to %AllocateInNewSpace (if not too big).
4638    Label too_big_for_new_space;
4639    __ bind(&allocate);
4640    __ Branch(&too_big_for_new_space, gt, a5,
4641              Operand(Page::kMaxRegularHeapObjectSize));
4642    {
4643      FrameScope scope(masm, StackFrame::INTERNAL);
4644      __ SmiTag(a0);
4645      __ SmiTag(a5);
4646      __ Push(a0, a2, a5);
4647      __ CallRuntime(Runtime::kAllocateInNewSpace);
4648      __ Pop(a0, a2);
4649      __ SmiUntag(a0);
4650    }
4651    __ jmp(&done_allocate);
4652
4653    // Fall back to %NewStrictArguments.
4654    __ bind(&too_big_for_new_space);
4655    __ Push(a1);
4656    __ TailCallRuntime(Runtime::kNewStrictArguments);
4657  }
4658}
4659
4660
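    // Builds a sloppy-mode arguments object. When the function has mapped
    // (aliased) parameters, the elements array is a parameter map: a
    // FixedArray whose first two slots hold the context and the backing
    // store, followed by one slot per mapped parameter recording which
    // context slot that parameter lives in; the argument values themselves,
    // plus any extra arguments, go into the backing store. With no mapped
    // parameters the elements array is just a plain FixedArray of arguments.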
4661void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
4662  // ----------- S t a t e -------------
4663  //  -- a1 : function
4664  //  -- cp : context
4665  //  -- fp : frame pointer
4666  //  -- ra : return address
4667  // -----------------------------------
4668  __ AssertFunction(a1);
4669
4670  // Make t0 point to the JavaScript frame.
4671  __ mov(t0, fp);
4672  if (skip_stub_frame()) {
4673    // For Ignition we need to skip the handler/stub frame to reach the
4674    // JavaScript frame for the function.
4675    __ ld(t0, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
4676  }
4677  if (FLAG_debug_code) {
4678    Label ok;
4679    __ ld(a3, MemOperand(t0, StandardFrameConstants::kFunctionOffset));
4680    __ Branch(&ok, eq, a1, Operand(a3));
4681    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4682    __ bind(&ok);
4683  }
4684
4685  // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
4686  __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4687  __ lw(a2,
4688         FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
4689  __ Dlsa(a3, t0, a2, kPointerSizeLog2);
4690  __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4691  __ SmiTag(a2);
4692
4693  // a1 : function
4694  // a2 : number of parameters (tagged)
4695  // a3 : parameters pointer
4696  // t0 : Javascript frame pointer
4697  // Registers used over whole function:
4698  //  a5 : arguments count (tagged)
4699  //  a6 : mapped parameter count (tagged)
4700
4701  // Check if the calling frame is an arguments adaptor frame.
4702  Label adaptor_frame, try_allocate, runtime;
4703  __ ld(a4, MemOperand(t0, StandardFrameConstants::kCallerFPOffset));
4704  __ ld(a0, MemOperand(a4, CommonFrameConstants::kContextOrFrameTypeOffset));
4705  __ Branch(&adaptor_frame, eq, a0,
4706            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4707
4708  // No adaptor, parameter count = argument count.
4709  __ mov(a5, a2);
4710  __ Branch(USE_DELAY_SLOT, &try_allocate);
4711  __ mov(a6, a2);  // In delay slot.
4712
4713  // We have an adaptor frame. Patch the parameters pointer.
4714  __ bind(&adaptor_frame);
4715  __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
4716  __ SmiScale(t2, a5, kPointerSizeLog2);
4717  __ Daddu(a4, a4, Operand(t2));
4718  __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
4719
4720  // a5 = argument count (tagged)
4721  // a6 = parameter count (tagged)
4722  // Compute the mapped parameter count = min(a6, a5) in a6.
4723  __ mov(a6, a2);
4724  __ Branch(&try_allocate, le, a6, Operand(a5));
4725  __ mov(a6, a5);
4726
4727  __ bind(&try_allocate);
4728
4729  // Compute the sizes of backing store, parameter map, and arguments object.
4730  // 1. Parameter map, has 2 extra words containing context and backing store.
4731  const int kParameterMapHeaderSize =
4732      FixedArray::kHeaderSize + 2 * kPointerSize;
4733  // If there are no mapped parameters, we do not need the parameter_map.
4734  Label param_map_size;
4735  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
4736  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
4737  __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a6 == 0.
4738  __ SmiScale(t1, a6, kPointerSizeLog2);
4739  __ daddiu(t1, t1, kParameterMapHeaderSize);
4740  __ bind(&param_map_size);
4741
4742  // 2. Backing store.
4743  __ SmiScale(t2, a5, kPointerSizeLog2);
4744  __ Daddu(t1, t1, Operand(t2));
4745  __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
4746
4747  // 3. Arguments object.
4748  __ Daddu(t1, t1, Operand(JSSloppyArgumentsObject::kSize));
4749
4750  // Do the allocation of all three objects in one go.
4751  __ Allocate(t1, v0, t1, a4, &runtime, NO_ALLOCATION_FLAGS);
4752
4753  // v0 = address of new object(s) (tagged)
4754  // a2 = argument count (smi-tagged)
4755  // Get the arguments boilerplate from the current native context into a4.
4756  const int kNormalOffset =
4757      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
4758  const int kAliasedOffset =
4759      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
4760
4761  __ ld(a4, NativeContextMemOperand());
4762  Label skip2_ne, skip2_eq;
4763  __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
4764  __ ld(a4, MemOperand(a4, kNormalOffset));
4765  __ bind(&skip2_ne);
4766
4767  __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
4768  __ ld(a4, MemOperand(a4, kAliasedOffset));
4769  __ bind(&skip2_eq);
4770
4771  // v0 = address of new object (tagged)
4772  // a2 = argument count (smi-tagged)
4773  // a4 = address of arguments map (tagged)
4774  // a6 = mapped parameter count (tagged)
4775  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
4776  __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
4777  __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4778  __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
4779
4780  // Set up the callee in-object property.
4781  __ AssertNotSmi(a1);
4782  __ sd(a1, FieldMemOperand(v0, JSSloppyArgumentsObject::kCalleeOffset));
4783
4784  // Use the length (smi tagged) and set that as an in-object property too.
4785  __ AssertSmi(a5);
4786  __ sd(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
4787
4788  // Set up the elements pointer in the allocated arguments object.
4789  // If we allocated a parameter map, a4 will point there, otherwise
4790  // it will point to the backing store.
4791  __ Daddu(a4, v0, Operand(JSSloppyArgumentsObject::kSize));
4792  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
4793
4794  // v0 = address of new object (tagged)
4795  // a2 = argument count (tagged)
4796  // a4 = address of parameter map or backing store (tagged)
4797  // a6 = mapped parameter count (tagged)
4798  // Initialize parameter map. If there are no mapped arguments, we're done.
4799  Label skip_parameter_map;
4800  Label skip3;
4801  __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
4802  // Move backing store address to a1, because it is
4803  // expected there when filling in the unmapped arguments.
4804  __ mov(a1, a4);
4805  __ bind(&skip3);
4806
4807  __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
4808
4809  __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
4810  __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
4811  __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
4812  __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
4813  __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
4814  __ SmiScale(t2, a6, kPointerSizeLog2);
4815  __ Daddu(a5, a4, Operand(t2));
4816  __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
4817  __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
4818
4819  // Copy the parameter slots and the holes in the arguments.
4820  // We need to fill in mapped_parameter_count slots. They index the context,
4821  // where parameters are stored in reverse order, at
4822  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4823  // The mapped parameters thus need to get indices
4824  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
4825  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4826  // We loop from right to left.
4827  Label parameters_loop, parameters_test;
4828  __ mov(a5, a6);
4829  __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4830  __ Dsubu(t1, t1, Operand(a6));
4831  __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
4832  __ SmiScale(t2, a5, kPointerSizeLog2);
4833  __ Daddu(a1, a4, Operand(t2));
4834  __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
4835
4836  // a1 = address of backing store (tagged)
4837  // a4 = address of parameter map (tagged)
4838  // a0 = temporary scratch (among others, for address calculation)
4839  // t1 = loop variable (tagged)
4840  // a7 = the hole value
4841  __ jmp(&parameters_test);
4842
4843  __ bind(&parameters_loop);
4844  __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
4845  __ SmiScale(a0, a5, kPointerSizeLog2);
4846  __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4847  __ Daddu(t2, a4, a0);
4848  __ sd(t1, MemOperand(t2));
4849  __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4850  __ Daddu(t2, a1, a0);
4851  __ sd(a7, MemOperand(t2));
4852  __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
4853  __ bind(&parameters_test);
4854  __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
4855
4856  // Restore a5 = argument count (tagged).
4857  __ ld(a5, FieldMemOperand(v0, JSSloppyArgumentsObject::kLengthOffset));
4858
4859  __ bind(&skip_parameter_map);
4860  // v0 = address of new object (tagged)
4861  // a1 = address of backing store (tagged)
4862  // a5 = argument count (tagged)
4863  // a6 = mapped parameter count (tagged)
4864  // t1 = scratch
4865  // Copy arguments header and remaining slots (if there are any).
4866  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4867  __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
4868  __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
4869
4870  Label arguments_loop, arguments_test;
4871  __ SmiScale(t2, a6, kPointerSizeLog2);
4872  __ Dsubu(a3, a3, Operand(t2));
4873  __ jmp(&arguments_test);
4874
4875  __ bind(&arguments_loop);
4876  __ Dsubu(a3, a3, Operand(kPointerSize));
4877  __ ld(a4, MemOperand(a3, 0));
4878  __ SmiScale(t2, a6, kPointerSizeLog2);
4879  __ Daddu(t1, a1, Operand(t2));
4880  __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
4881  __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
4882
4883  __ bind(&arguments_test);
4884  __ Branch(&arguments_loop, lt, a6, Operand(a5));
4885
4886  // Return.
4887  __ Ret();
4888
4889  // Do the runtime call to allocate the arguments object.
4890  // a5 = argument count (tagged)
4891  __ bind(&runtime);
4892  __ Push(a1, a3, a5);
4893  __ TailCallRuntime(Runtime::kNewSloppyArguments);
4894}
4895
4896
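    // Builds a strict-mode (unmapped) arguments object: determines the actual
    // argument count from the arguments adaptor frame when one is present
    // (otherwise from the formal parameter count), copies the arguments into
    // a FixedArray backing store, and wraps it in a JSStrictArgumentsObject.
    // Falls back to the runtime when the allocation is too large for new
    // space.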
4897void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
4898  // ----------- S t a t e -------------
4899  //  -- a1 : function
4900  //  -- cp : context
4901  //  -- fp : frame pointer
4902  //  -- ra : return address
4903  // -----------------------------------
4904  __ AssertFunction(a1);
4905
4906  // Make a2 point to the JavaScript frame.
4907  __ mov(a2, fp);
4908  if (skip_stub_frame()) {
4909    // For Ignition we need to skip the handler/stub frame to reach the
4910    // JavaScript frame for the function.
4911    __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4912  }
4913  if (FLAG_debug_code) {
4914    Label ok;
4915    __ ld(a3, MemOperand(a2, StandardFrameConstants::kFunctionOffset));
4916    __ Branch(&ok, eq, a1, Operand(a3));
4917    __ Abort(kInvalidFrameForFastNewRestArgumentsStub);
4918    __ bind(&ok);
4919  }
4920
4921  // Check if we have an arguments adaptor frame below the function frame.
4922  Label arguments_adaptor, arguments_done;
4923  __ ld(a3, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
4924  __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
4925  __ Branch(&arguments_adaptor, eq, a0,
4926            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4927  {
4928    __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
4929    __ lw(a0,
4930          FieldMemOperand(a4, SharedFunctionInfo::kFormalParameterCountOffset));
4931    __ Dlsa(a2, a2, a0, kPointerSizeLog2);
4932    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4933                             1 * kPointerSize));
4934  }
4935  __ Branch(&arguments_done);
4936  __ bind(&arguments_adaptor);
4937  {
4938    __ SmiLoadUntag(
4939        a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4940    __ Dlsa(a2, a3, a0, kPointerSizeLog2);
4941    __ Daddu(a2, a2, Operand(StandardFrameConstants::kCallerSPOffset -
4942                             1 * kPointerSize));
4943  }
4944  __ bind(&arguments_done);
4945
4946  // ----------- S t a t e -------------
4947  //  -- cp : context
4948  //  -- a0 : number of rest parameters
4949  //  -- a1 : function
4950  //  -- a2 : pointer to first rest parameters
4951  //  -- ra : return address
4952  // -----------------------------------
4953
4954  // Allocate space for the rest parameter array plus the backing store.
4955  Label allocate, done_allocate;
4956  __ li(a5, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
4957  __ Dlsa(a5, a5, a0, kPointerSizeLog2);
4958  __ Allocate(a5, v0, a3, a4, &allocate, NO_ALLOCATION_FLAGS);
4959  __ bind(&done_allocate);
4960
4961  // Compute arguments.length in a4.
4962  __ SmiTag(a4, a0);
4963
4964  // Setup the elements array in v0.
4965  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4966  __ sd(at, FieldMemOperand(v0, FixedArray::kMapOffset));
4967  __ sd(a4, FieldMemOperand(v0, FixedArray::kLengthOffset));
4968  __ Daddu(a3, v0, Operand(FixedArray::kHeaderSize));
4969  {
4970    Label loop, done_loop;
4971    __ Dlsa(a1, a3, a0, kPointerSizeLog2);
4972    __ bind(&loop);
4973    __ Branch(&done_loop, eq, a1, Operand(a3));
4974    __ ld(at, MemOperand(a2, 0 * kPointerSize));
4975    __ sd(at, FieldMemOperand(a3, 0 * kPointerSize));
4976    __ Dsubu(a2, a2, Operand(1 * kPointerSize));
4977    __ Daddu(a3, a3, Operand(1 * kPointerSize));
4978    __ Branch(&loop);
4979    __ bind(&done_loop);
4980  }
4981
4982  // Setup the strict arguments object in a3.
4983  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, at);
4984  __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kMapOffset));
4985  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
4986  __ sd(at, FieldMemOperand(a3, JSStrictArgumentsObject::kPropertiesOffset));
4987  __ sd(v0, FieldMemOperand(a3, JSStrictArgumentsObject::kElementsOffset));
4988  __ sd(a4, FieldMemOperand(a3, JSStrictArgumentsObject::kLengthOffset));
4989  STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
4990  __ Ret(USE_DELAY_SLOT);
4991  __ mov(v0, a3);  // In delay slot
4992
4993  // Fall back to %AllocateInNewSpace (if not too big).
4994  Label too_big_for_new_space;
4995  __ bind(&allocate);
4996  __ Branch(&too_big_for_new_space, gt, a5,
4997            Operand(Page::kMaxRegularHeapObjectSize));
4998  {
4999    FrameScope scope(masm, StackFrame::INTERNAL);
5000    __ SmiTag(a0);
5001    __ SmiTag(a5);
5002    __ Push(a0, a2, a5);
5003    __ CallRuntime(Runtime::kAllocateInNewSpace);
5004    __ Pop(a0, a2);
5005    __ SmiUntag(a0);
5006  }
5007  __ jmp(&done_allocate);
5008
5009  // Fall back to %NewStrictArguments.
5010  __ bind(&too_big_for_new_space);
5011  __ Push(a1);
5012  __ TailCallRuntime(Runtime::kNewStrictArguments);
5013}
5014
5015
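    // Stores a value into a global that is accessed through a script context
    // slot. The stub walks up the context chain to the script context, loads
    // the PropertyCell held in the requested slot, and handles mutable cells
    // and matching constant-type cells inline (with a write barrier where
    // needed); storing the same value again is a no-op, while read-only cells
    // and every other cell state are punted to the runtime.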
5016void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
5017  Register context_reg = cp;
5018  Register slot_reg = a2;
5019  Register value_reg = a0;
5020  Register cell_reg = a4;
5021  Register cell_value_reg = a5;
5022  Register cell_details_reg = a6;
5023  Label fast_heapobject_case, fast_smi_case, slow_case;
5024
5025  if (FLAG_debug_code) {
5026    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5027    __ Check(ne, kUnexpectedValue, value_reg, Operand(at));
5028  }
5029
5030  // Go up context chain to the script context.
5031  for (int i = 0; i < depth(); ++i) {
5032    __ ld(cell_reg, ContextMemOperand(context_reg, Context::PREVIOUS_INDEX));
5033    context_reg = cell_reg;
5034  }
5035
5036  // Load the PropertyCell at the specified slot.
5037  __ Dlsa(at, context_reg, slot_reg, kPointerSizeLog2);
5038  __ ld(cell_reg, ContextMemOperand(at, 0));
5039
5040  // Load PropertyDetails for the cell (actually only the cell_type and kind).
5041  __ ld(cell_details_reg,
5042        FieldMemOperand(cell_reg, PropertyCell::kDetailsOffset));
5043  __ SmiUntag(cell_details_reg);
5044  __ And(cell_details_reg, cell_details_reg,
5045         PropertyDetails::PropertyCellTypeField::kMask |
5046             PropertyDetails::KindField::kMask |
5047             PropertyDetails::kAttributesReadOnlyMask);
5048
5049  // Check if PropertyCell holds mutable data.
5050  Label not_mutable_data;
5051  __ Branch(&not_mutable_data, ne, cell_details_reg,
5052            Operand(PropertyDetails::PropertyCellTypeField::encode(
5053                        PropertyCellType::kMutable) |
5054                    PropertyDetails::KindField::encode(kData)));
5055  __ JumpIfSmi(value_reg, &fast_smi_case);
5056  __ bind(&fast_heapobject_case);
5057  __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5058  __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
5059                      cell_details_reg, kRAHasNotBeenSaved, kDontSaveFPRegs,
5060                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
5061  // RecordWriteField clobbers the value register, so we need to reload.
5062  __ Ret(USE_DELAY_SLOT);
5063  __ ld(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5064  __ bind(&not_mutable_data);
5065
5066  // Check if PropertyCell value matches the new value (relevant for Constant,
5067  // ConstantType and Undefined cells).
5068  Label not_same_value;
5069  __ ld(cell_value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5070  __ Branch(&not_same_value, ne, value_reg, Operand(cell_value_reg));
5071  // Make sure the PropertyCell is not marked READ_ONLY.
5072  __ And(at, cell_details_reg, PropertyDetails::kAttributesReadOnlyMask);
5073  __ Branch(&slow_case, ne, at, Operand(zero_reg));
5074  if (FLAG_debug_code) {
5075    Label done;
5076    // This can only be true for Constant, ConstantType and Undefined cells,
5077    // because we never store the_hole via this stub.
5078    __ Branch(&done, eq, cell_details_reg,
5079              Operand(PropertyDetails::PropertyCellTypeField::encode(
5080                          PropertyCellType::kConstant) |
5081                      PropertyDetails::KindField::encode(kData)));
5082    __ Branch(&done, eq, cell_details_reg,
5083              Operand(PropertyDetails::PropertyCellTypeField::encode(
5084                          PropertyCellType::kConstantType) |
5085                      PropertyDetails::KindField::encode(kData)));
5086    __ Check(eq, kUnexpectedValue, cell_details_reg,
5087             Operand(PropertyDetails::PropertyCellTypeField::encode(
5088                         PropertyCellType::kUndefined) |
5089                     PropertyDetails::KindField::encode(kData)));
5090    __ bind(&done);
5091  }
5092  __ Ret();
5093  __ bind(&not_same_value);
5094
5095  // Check if PropertyCell contains data with constant type (and is not
5096  // READ_ONLY).
5097  __ Branch(&slow_case, ne, cell_details_reg,
5098            Operand(PropertyDetails::PropertyCellTypeField::encode(
5099                        PropertyCellType::kConstantType) |
5100                    PropertyDetails::KindField::encode(kData)));
5101
5102  // Now either both the old and the new values must be Smis, or both must
5103  // be heap objects with the same map.
5104  Label value_is_heap_object;
5105  __ JumpIfNotSmi(value_reg, &value_is_heap_object);
5106  __ JumpIfNotSmi(cell_value_reg, &slow_case);
5107  // Old and new values are SMIs, no need for a write barrier here.
5108  __ bind(&fast_smi_case);
5109  __ Ret(USE_DELAY_SLOT);
5110  __ sd(value_reg, FieldMemOperand(cell_reg, PropertyCell::kValueOffset));
5111  __ bind(&value_is_heap_object);
5112  __ JumpIfSmi(cell_value_reg, &slow_case);
5113  Register cell_value_map_reg = cell_value_reg;
5114  __ ld(cell_value_map_reg,
5115        FieldMemOperand(cell_value_reg, HeapObject::kMapOffset));
5116  __ Branch(&fast_heapobject_case, eq, cell_value_map_reg,
5117            FieldMemOperand(value_reg, HeapObject::kMapOffset));
5118
5119  // Fallback to the runtime.
5120  __ bind(&slow_case);
5121  __ SmiTag(slot_reg);
5122  __ Push(slot_reg, value_reg);
5123  __ TailCallRuntime(is_strict(language_mode())
5124                         ? Runtime::kStoreGlobalViaContext_Strict
5125                         : Runtime::kStoreGlobalViaContext_Sloppy);
5126}
5127
5128
5129static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
5130  int64_t offset = (ref0.address() - ref1.address());
5131  DCHECK(static_cast<int>(offset) == offset);
5132  return static_cast<int>(offset);
5133}
5134
5135
5136// Calls an API function. Allocates a HandleScope, extracts the returned
5137// value from the handle, and propagates exceptions. Restores the context.
5138// stack_space is the space to be unwound on exit; it includes the call JS
5139// arguments space and the additional space allocated for the fast call.
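    // When the profiler is active the call is routed through a profiling
    // thunk (thunk_ref) so the profiler can observe the external callback;
    // otherwise the callback is invoked directly. The HandleScope bookkeeping
    // (next, limit, level) is kept in callee-saved registers across the call
    // so it can be restored and verified afterwards.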
5140static void CallApiFunctionAndReturn(
5141    MacroAssembler* masm, Register function_address,
5142    ExternalReference thunk_ref, int stack_space, int32_t stack_space_offset,
5143    MemOperand return_value_operand, MemOperand* context_restore_operand) {
5144  Isolate* isolate = masm->isolate();
5145  ExternalReference next_address =
5146      ExternalReference::handle_scope_next_address(isolate);
5147  const int kNextOffset = 0;
5148  const int kLimitOffset = AddressOffset(
5149      ExternalReference::handle_scope_limit_address(isolate), next_address);
5150  const int kLevelOffset = AddressOffset(
5151      ExternalReference::handle_scope_level_address(isolate), next_address);
5152
5153  DCHECK(function_address.is(a1) || function_address.is(a2));
5154
5155  Label profiler_disabled;
5156  Label end_profiler_check;
5157  __ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
5158  __ lb(t9, MemOperand(t9, 0));
5159  __ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
5160
5161  // Additional parameter is the address of the actual callback.
5162  __ li(t9, Operand(thunk_ref));
5163  __ jmp(&end_profiler_check);
5164
5165  __ bind(&profiler_disabled);
5166  __ mov(t9, function_address);
5167  __ bind(&end_profiler_check);
5168
5169  // Allocate HandleScope in callee-save registers.
5170  __ li(s3, Operand(next_address));
5171  __ ld(s0, MemOperand(s3, kNextOffset));
5172  __ ld(s1, MemOperand(s3, kLimitOffset));
5173  __ lw(s2, MemOperand(s3, kLevelOffset));
5174  __ Addu(s2, s2, Operand(1));
5175  __ sw(s2, MemOperand(s3, kLevelOffset));
5176
5177  if (FLAG_log_timer_events) {
5178    FrameScope frame(masm, StackFrame::MANUAL);
5179    __ PushSafepointRegisters();
5180    __ PrepareCallCFunction(1, a0);
5181    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5182    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
5183                     1);
5184    __ PopSafepointRegisters();
5185  }
5186
5187  // Native call returns to the DirectCEntry stub which redirects to the
5188  // return address pushed on stack (could have moved after GC).
5189  // DirectCEntry stub itself is generated early and never moves.
5190  DirectCEntryStub stub(isolate);
5191  stub.GenerateCall(masm, t9);
5192
5193  if (FLAG_log_timer_events) {
5194    FrameScope frame(masm, StackFrame::MANUAL);
5195    __ PushSafepointRegisters();
5196    __ PrepareCallCFunction(1, a0);
5197    __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5198    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5199                     1);
5200    __ PopSafepointRegisters();
5201  }
5202
5203  Label promote_scheduled_exception;
5204  Label delete_allocated_handles;
5205  Label leave_exit_frame;
5206  Label return_value_loaded;
5207
5208  // Load value from ReturnValue.
5209  __ ld(v0, return_value_operand);
5210  __ bind(&return_value_loaded);
5211
5212  // No more valid handles (the result handle was the last one). Restore
5213  // previous handle scope.
5214  __ sd(s0, MemOperand(s3, kNextOffset));
5215  if (__ emit_debug_code()) {
5216    __ lw(a1, MemOperand(s3, kLevelOffset));
5217    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
5218  }
5219  __ Subu(s2, s2, Operand(1));
5220  __ sw(s2, MemOperand(s3, kLevelOffset));
5221  __ ld(at, MemOperand(s3, kLimitOffset));
5222  __ Branch(&delete_allocated_handles, ne, s1, Operand(at));
5223
5224  // Leave the API exit frame.
5225  __ bind(&leave_exit_frame);
5226
5227  bool restore_context = context_restore_operand != NULL;
5228  if (restore_context) {
5229    __ ld(cp, *context_restore_operand);
5230  }
5231  if (stack_space_offset != kInvalidStackOffset) {
5232    DCHECK(kCArgsSlotsSize == 0);
5233    __ ld(s0, MemOperand(sp, stack_space_offset));
5234  } else {
5235    __ li(s0, Operand(stack_space));
5236  }
5237  __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
5238                    stack_space_offset != kInvalidStackOffset);
5239
5240  // Check if the function scheduled an exception.
5241  __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
5242  __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
5243  __ ld(a5, MemOperand(at));
5244  __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
5245
5246  __ Ret();
5247
5248  // Re-throw by promoting a scheduled exception.
5249  __ bind(&promote_scheduled_exception);
5250  __ TailCallRuntime(Runtime::kPromoteScheduledException);
5251
5252  // HandleScope limit has changed. Delete allocated extensions.
5253  __ bind(&delete_allocated_handles);
5254  __ sd(s1, MemOperand(s3, kLimitOffset));
5255  __ mov(s0, v0);
5256  __ mov(a0, v0);
5257  __ PrepareCallCFunction(1, s1);
5258  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
5259  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
5260                   1);
5261  __ mov(v0, s0);
5262  __ jmp(&leave_exit_frame);
5263}
5264
5265void CallApiCallbackStub::Generate(MacroAssembler* masm) {
5266  // ----------- S t a t e -------------
5267  //  -- a0                  : callee
5268  //  -- a4                  : call_data
5269  //  -- a2                  : holder
5270  //  -- a1                  : api_function_address
5271  //  -- cp                  : context
5272  //  --
5273  //  -- sp[0]               : last argument
5274  //  -- ...
5275  //  -- sp[(argc - 1)* 8]   : first argument
5276  //  -- sp[argc * 8]        : receiver
5277  // -----------------------------------
5278
5279  Register callee = a0;
5280  Register call_data = a4;
5281  Register holder = a2;
5282  Register api_function_address = a1;
5283  Register context = cp;
5284
5285  typedef FunctionCallbackArguments FCA;
5286
5287  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5288  STATIC_ASSERT(FCA::kCalleeIndex == 5);
5289  STATIC_ASSERT(FCA::kDataIndex == 4);
5290  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5291  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5292  STATIC_ASSERT(FCA::kIsolateIndex == 1);
5293  STATIC_ASSERT(FCA::kHolderIndex == 0);
5294  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
5295  STATIC_ASSERT(FCA::kArgsLength == 8);
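      // The pushes below build the FunctionCallbackArguments::args_ block on
      // the stack in reverse index order (new target first, holder last), so
      // that the holder ends up at the lowest address and sp points at
      // args_[0]. The FunctionCallbackInfo set up in the exit frame then
      // records this block as implicit_args_ and the address of the first JS
      // argument as values_.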
5296
5297  // new target
5298  __ PushRoot(Heap::kUndefinedValueRootIndex);
5299
5300  // Save context, callee and call data.
5301  __ Push(context, callee, call_data);
5302  if (!is_lazy()) {
5303    // Load context from callee.
5304    __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5305  }
5306
5307  Register scratch = call_data;
5308  if (!call_data_undefined()) {
5309    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5310  }
5311  // Push return value and default return value.
5312  __ Push(scratch, scratch);
5313  __ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
5314  // Push isolate and holder.
5315  __ Push(scratch, holder);
5316
5317  // Prepare arguments.
5318  __ mov(scratch, sp);
5319
5320  // Allocate the v8::Arguments structure in the arguments' space since
5321  // it's not controlled by GC.
5322  const int kApiStackSpace = 3;
5323
5324  FrameScope frame_scope(masm, StackFrame::MANUAL);
5325  __ EnterExitFrame(false, kApiStackSpace);
5326
5327  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
5328  // a0 = FunctionCallbackInfo&
5329  // Arguments is after the return address.
5330  __ Daddu(a0, sp, Operand(1 * kPointerSize));
5331  // FunctionCallbackInfo::implicit_args_
5332  __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
5333  // FunctionCallbackInfo::values_
5334  __ Daddu(at, scratch,
5335           Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
5336  __ sd(at, MemOperand(a0, 1 * kPointerSize));
5337  // FunctionCallbackInfo::length_ = argc
5338  // Stored as an int field; 32-bit integers within a struct on the stack are
5339  // always left-justified by the n64 ABI.
5340  __ li(at, Operand(argc()));
5341  __ sw(at, MemOperand(a0, 2 * kPointerSize));
5342
5343  ExternalReference thunk_ref =
5344      ExternalReference::invoke_function_callback(masm->isolate());
5345
5346  AllowExternalCallThatCantCauseGC scope(masm);
5347  MemOperand context_restore_operand(
5348      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5349  // Stores return the first JS argument.
5350  int return_value_offset = 0;
5351  if (is_store()) {
5352    return_value_offset = 2 + FCA::kArgsLength;
5353  } else {
5354    return_value_offset = 2 + FCA::kReturnValueOffset;
5355  }
5356  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5357  int stack_space = 0;
5358  int32_t stack_space_offset = 3 * kPointerSize;
5359  stack_space = argc() + FCA::kArgsLength + 1;
5360  // TODO(adamk): Why are we clobbering this immediately?
5361  stack_space_offset = kInvalidStackOffset;
5362  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5363                           stack_space_offset, return_value_operand,
5364                           &context_restore_operand);
5365}
5366
5367
5368void CallApiGetterStub::Generate(MacroAssembler* masm) {
5369  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
5370  // name below the exit frame to make GC aware of them.
5371  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
5372  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
5373  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
5374  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
5375  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
5376  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
5377  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
5378  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
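      // The PropertyCallbackArguments::args_ array is written directly into
      // the stack slots reserved below: entry i goes to
      // sp[(i + 1) * kPointerSize] and the property name is stored at sp[0].
      // a0 then serves as the Handle<Name> and a1 as the address of the args_
      // array backing the v8::PropertyCallbackInfo passed to the getter.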
5379
5380  Register receiver = ApiGetterDescriptor::ReceiverRegister();
5381  Register holder = ApiGetterDescriptor::HolderRegister();
5382  Register callback = ApiGetterDescriptor::CallbackRegister();
5383  Register scratch = a4;
5384  DCHECK(!AreAliased(receiver, holder, callback, scratch));
5385
5386  Register api_function_address = a2;
5387
5388  // Here and below +1 is for name() pushed after the args_ array.
5389  typedef PropertyCallbackArguments PCA;
5390  __ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
5391  __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
5392  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
5393  __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
5394  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5395  __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
5396  __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
5397                                    kPointerSize));
5398  __ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
5399  __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
5400  __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
5401  // should_throw_on_error -> false
5402  DCHECK(Smi::FromInt(0) == nullptr);
5403  __ sd(zero_reg,
5404        MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
5405  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
5406  __ sd(scratch, MemOperand(sp, 0 * kPointerSize));
5407
5408  // v8::PropertyCallbackInfo::args_ array and name handle.
5409  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5410
5411  // Load address of v8::PropertyCallbackInfo::args_ array and name handle.
5412  __ mov(a0, sp);                               // a0 = Handle<Name>
5413  __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = v8::PCI::args_
5414
5415  const int kApiStackSpace = 1;
5416  FrameScope frame_scope(masm, StackFrame::MANUAL);
5417  __ EnterExitFrame(false, kApiStackSpace);
5418
5419  // Create v8::PropertyCallbackInfo object on the stack and initialize
5420  // its args_ field.
5421  __ sd(a1, MemOperand(sp, 1 * kPointerSize));
5422  __ Daddu(a1, sp, Operand(1 * kPointerSize));
5423  // a1 = v8::PropertyCallbackInfo&
5424
5425  ExternalReference thunk_ref =
5426      ExternalReference::invoke_accessor_getter_callback(isolate());
5427
5428  __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
5429  __ ld(api_function_address,
5430        FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
5431
5432  // +3 is to skip prolog, return address and name handle.
5433  MemOperand return_value_operand(
5434      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
5435  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5436                           kStackUnwindSpace, kInvalidStackOffset,
5437                           return_value_operand, NULL);
5438}
5439
5440#undef __
5441
5442}  // namespace internal
5443}  // namespace v8
5444
5445#endif  // V8_TARGET_ARCH_MIPS64
5446