// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"

#include "src/arm/code-stubs-arm.h"

namespace v8 {
namespace internal {


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond, Strength strength);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           r0.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor.GetRegisterParameter(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();
  DCHECK(is_truncating());

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  LowDwVfpRegister double_scratch = kScratchDoubleReg;

  __ Push(scratch_high, scratch_low, scratch);

  if (!skip_fastpath()) {
    // Load double input.
    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
    __ vmov(scratch_low, scratch_high, double_scratch);

    // Do fast-path convert from double to int.
    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
    __ vmov(result_reg, double_scratch.low());

    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
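    // (Checked below: result - 1 is signed-less-than 0x7ffffffe exactly when
    // result is neither 0x7fffffff nor 0x80000000.)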
    __ sub(scratch, result_reg, Operand(1));
    __ cmp(scratch, Operand(0x7ffffffe));
    __ b(lt, &done);
  } else {
    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
    // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
    if (double_offset == 0) {
      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
    } else {
      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
    }
  }
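
  // At this point scratch_high:scratch_low hold the raw bits of the input
  // double: scratch_high has the sign bit, the 11 exponent bits and the top
  // 20 mantissa bits, while scratch_low has the low 32 mantissa bits.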
  __ Ubfx(scratch, scratch_high,
         HeapNumber::kExponentShift, HeapNumber::kExponentBits);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If exponent is greater than or equal to 84, the 32 least significant
  // bits of the value are all zero (the 53-bit mantissa is scaled by
  // 2^(exponent - 52) >= 2^32), so the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ cmp(scratch, Operand(83));
  __ b(ge, &out_of_range);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ rsb(scratch, scratch, Operand(51), SetCC);
  __ b(ls, &only_low);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ rsb(scratch, scratch, Operand(32));
  __ Ubfx(result_reg, scratch_high,
          0, HeapNumber::kMantissaBitsInTopWord);
  // Set the implicit 1 before the mantissa part in scratch_high.
  __ orr(result_reg, result_reg,
         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
  __ b(&negate);
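  // Worked example (illustrative): for an input of 2^40 * 1.m the biased
  // exponent field is 1063, so scratch = 1063 - 1024 = 39 after the sub
  // above.  Then 51 - 39 = 12 = 52 - 40, so scratch_low is shifted right by
  // 12, and 32 - 12 = 20 = 40 - 20, so the top mantissa bits are shifted
  // left by 20, lining both halves up to form the 32-bit result.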

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ rsb(scratch, scratch, Operand::Zero());
  __ mov(result_reg, Operand(scratch_low, LSL, scratch));

  __ bind(&negate);
  // If input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));

  __ bind(&done);

  __ Pop(scratch_high, scratch_low, scratch);
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond, Strength strength) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are equal and they are not both Smis, so neither of them is a Smi.
  // If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    // Call runtime on identical JSObjects.
    __ CompareObjectType(r0, r4, r4, FIRST_JS_RECEIVER_TYPE);
    __ b(ge, slow);
    // Call runtime on identical symbols since we need to throw a TypeError.
    __ cmp(r4, Operand(SYMBOL_TYPE));
    __ b(eq, slow);
    // Call runtime on identical SIMD values since we must throw a TypeError.
    __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
    __ b(eq, slow);
    if (is_strong(strength)) {
      // Call the runtime on anything that is converted in the semantics, since
      // we need to throw a TypeError. Smis have already been ruled out.
      __ cmp(r4, Operand(HEAP_NUMBER_TYPE));
      __ b(eq, &return_equal);
      __ tst(r4, Operand(kIsNotStringMask));
      __ b(ne, slow);
    }
  } else {
    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
    __ b(eq, &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
      __ b(ge, slow);
      // Call runtime on identical symbols since we need to throw a TypeError.
      __ cmp(r4, Operand(SYMBOL_TYPE));
      __ b(eq, slow);
      // Call runtime on identical SIMD values since we must throw a TypeError.
      __ cmp(r4, Operand(SIMD128_VALUE_TYPE));
      __ b(eq, slow);
      if (is_strong(strength)) {
        // Call the runtime on anything that is converted in the semantics,
        // since we need to throw a TypeError. Smis and heap numbers have
        // already been ruled out.
        __ tst(r4, Operand(kIsNotStringMask));
        __ b(ne, slow);
      }
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ cmp(r4, Operand(ODDBALL_TYPE));
        __ b(ne, &return_equal);
        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
        __ cmp(r0, r2);
        __ b(ne, &return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ mov(r0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ mov(r0, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
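    // For example, the quiet NaN 0x7ff8000000000000 has top word 0x7ff80000:
    // all exponent bits set and a non-zero mantissa.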
    // Read top bits of double representation (second word of value).
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    // NaNs have all-one exponents so they sign extend to -1.
    __ cmp(r3, Operand(-1));
    __ b(ne, &return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
    __ orr(r0, r3, Operand(r2), SetCC);
    // For equal we already have the right value in r0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load r0 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq);
      if (cond == le) {
        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi.  Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal.
    // If rhs is r0 then there is already a non-zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r0, to d6.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed.  Return non-equal.
    // If lhs is r0 then there is already a non-zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r1, to d7.
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_JS_RECEIVER_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_JS_RECEIVER_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ orr(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ b(eq, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.

  // Both are heap numbers.  Load them up then jump to the code we have
  // for that.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);

  // Both are internalized.  We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(NOT_EQUAL));
  __ Ret();

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_JS_RECEIVER_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(lhs, r2, r3, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
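  // Below, r0 ends up 0 (EQUAL) exactly when the Map::kIsUndetectable bit is
  // set in both maps' bit fields, and non-zero (not equal) otherwise.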
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ Ret();
}


static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                         Register scratch,
                                         CompareICState::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareICState::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareICState::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long
  // as hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry r1 and r0 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r1;
  Register rhs = r0;
  Condition cc = GetCondition();

  Label miss;
  CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
  CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  Label not_two_smis;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &not_two_smis);
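  // Both operands are smis: untag each with an arithmetic shift and
  // subtract.  Smi values fit in 31 bits, so the subtraction cannot
  // overflow, and the sign of the difference is the comparison result.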
  __ mov(r1, Operand(r1, ASR, 1));
  __ sub(r0, r1, Operand(r0, ASR, 1));
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the
  // answer or goes to slow.  Only falls through if the objects were not
  // identical.
  EmitIdenticalObjectComparison(masm, &slow, cc, strength());

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
  __ and_(r2, lhs, Operand(rhs));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison.  If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  __ bind(&lhs_not_nan);
  // ARMv7 VFP3 instructions to implement double precision comparison.
  __ VFPCompareAndSetFlags(d7, d6);
  Label nan;
  __ b(vs, &nan);
  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
  __ mov(r0, Operand(LESS), LeaveCC, lt);
  __ mov(r0, Operand(GREATER), LeaveCC, gt);
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the v flag is set.  Load r0 with
  // whatever it takes to make the comparison fail, since comparisons with NaN
  // always fail.
  if (cc == lt || cc == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison.  Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case.  If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r2 will contain the type of rhs_.  Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable
    // objects.  Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential one-byte strings,
  // and inline if that is the case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);

  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
                      r3);
  if (cc == eq) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
                                                    r5);
  }
  // Never falls through to here.

  __ bind(&slow);

  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  if (cc == eq) {
    __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
  } else {
    int ncr;  // NaN compare result
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
    __ mov(r0, Operand(Smi::FromInt(ncr)));
    __ push(r0);

    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
    // tagged as a small integer.
    __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
                                             : Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ stm(db_w, sp, kCallerSaved | lr.bit());

  const Register scratch = r1;

  if (save_doubles()) {
    __ SaveFPRegs(sp, scratch);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  if (save_doubles()) {
    __ RestoreFPRegs(sp, scratch);
  }
  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = r1;
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(r2));
  const Register heapnumbermap = r5;
  const Register heapnumber = r0;
  const DwVfpRegister double_base = d0;
  const DwVfpRegister double_exponent = d1;
  const DwVfpRegister double_result = d2;
  const DwVfpRegister double_scratch = d3;
  const SwVfpRegister single_scratch = s6;
  const Register scratch = r9;
  const Register scratch2 = r4;

  Label call_runtime, done, int_exponent;
  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ ldr(base, MemOperand(sp, 1 * kPointerSize));
    __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);

    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ vmov(single_scratch, scratch);
    __ vcvt_f64_s32(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);
    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ vcvt_u32_f64(single_scratch, double_exponent);
    // We do not check for NaN or Infinity here because comparing numbers on
    // ARM correctly distinguishes NaNs.  We end up calling the built-in.
    __ vcvt_f64_u32(double_scratch, single_scratch);
    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
    __ b(eq, &int_exponent_convert);

    if (exponent_type() == ON_STACK) {
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead.  We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ vmov(double_scratch, 0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &not_plus_half);

      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vneg(double_result, double_scratch, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vsqrt(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ vmov(double_scratch, -0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &call_runtime);

      // Calculates reciprocal of square root of base.  Check for the special
      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vmov(double_result, kDoubleRegZero, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vmov(double_result, 1.0, scratch);
      __ vsqrt(double_scratch, double_scratch);
      __ vdiv(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(lr);
    __ MovFromFloatResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
    __ vcvt_u32_f64(single_scratch, double_exponent);
    __ vmov(scratch, single_scratch);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type() == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }
  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, 1.0, scratch2);

  // Get absolute value of exponent.
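  // If the exponent is negative (mi), the conditionally executed mov/sub
  // below compute scratch = 0 - scratch; otherwise they are skipped.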
  __ cmp(scratch, Operand::Zero());
  __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
  __ sub(scratch, scratch2, scratch, LeaveCC, mi);

  Label while_true;
  __ bind(&while_true);
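  // Square-and-multiply: each iteration shifts the exponent right by one;
  // if the bit shifted out is set (carry set), the current power of the base
  // is multiplied into the result.  E.g. |exponent| = 5 (binary 101)
  // computes base^1 * base^4.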
  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);

  __ cmp(exponent, Operand::Zero());
  __ b(ge, &done);
  __ vmov(double_scratch, 1.0, scratch);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ vstr(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    DCHECK(heapnumber.is(r0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret(2);
  } else {
    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ MovToFloatParameters(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()),
          0, 2);
    }
    __ pop(lr);
    __ MovFromFloatResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  CreateWeakCellStub::GenerateAheadOfTime(isolate);
  BinaryOpICStub::GenerateAheadOfTime(isolate);
  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
  StoreFastElementStub::GenerateAheadOfTime(isolate);
  TypeofStub::GenerateAheadOfTime(isolate);
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  // Generate if not already in cache.
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
  stub.GetCode();
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function.
  // r0: number of arguments including receiver
  // r1: pointer to builtin function
  // fp: frame pointer  (restored after C call)
  // sp: stack pointer  (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)
  //
  // If argv_in_register():
  // r2: pointer to the first argument
  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  __ mov(r5, Operand(r1));

  if (argv_in_register()) {
    // Move argv into the correct register.
    __ mov(r1, Operand(r2));
  } else {
    // Compute the argv pointer in a callee-saved register.
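    // argv = sp + argc * kPointerSize - kPointerSize, i.e. the address of
    // the argument deepest on the stack.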
    __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2));
    __ sub(r1, r1, Operand(kPointerSize));
  }

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles());

  // Store a copy of argc in callee-saved registers for later.
  __ mov(r4, Operand(r0));

  // r0, r4: number of arguments including receiver  (C callee-saved)
  // r1: pointer to the first argument (C callee-saved)
  // r5: pointer to builtin function  (C callee-saved)

  // Result returned in r0 or r0+r1 by default.

#if V8_HOST_ARCH_ARM
  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (FLAG_debug_code) {
    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
      __ tst(sp, Operand(frame_alignment_mask));
      __ b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      __ stop("Unexpected alignment");
      __ bind(&alignment_as_expected);
    }
  }
#endif

  // Call C built-in.
  // r0 = argc, r1 = argv
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  // Compute the return address in lr to return to after the jump below. Pc is
  // already at '+ 8' from the current instruction but return is after three
  // instructions so add another 4 to pc to get the return address.
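  // (The pc reads as the address of the add below plus 8; adding 4 more makes
  // lr point just past the str and the Call that follow.)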
  {
    // Prevent literal pool emission before return address.
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ add(lr, pc, Operand(4));
    __ str(lr, MemOperand(sp, 0));
    __ Call(r5);
  }

  __ VFPEnsureFPSCRState(r2);

  // Check result for exception sentinel.
  Label exception_returned;
  __ CompareRoot(r0, Heap::kExceptionRootIndex);
  __ b(eq, &exception_returned);

  // Check that there is no pending exception, otherwise we
  // should have returned the exception sentinel.
  if (FLAG_debug_code) {
    Label okay;
    ExternalReference pending_exception_address(
        Isolate::kPendingExceptionAddress, isolate());
    __ mov(r2, Operand(pending_exception_address));
    __ ldr(r2, MemOperand(r2));
    __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
    // Cannot use check here as it attempts to generate call into runtime.
    __ b(eq, &okay);
    __ stop("Unexpected pending exception");
    __ bind(&okay);
  }

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  Register argc;
  if (argv_in_register()) {
    // We don't want to pop arguments so set argc to no_reg.
    argc = no_reg;
  } else {
    // Callee-saved register r4 still holds argc.
    argc = r4;
  }
  __ LeaveExitFrame(save_doubles(), argc, true);
  __ mov(pc, lr);

  // Handling of exception.
  __ bind(&exception_returned);

  ExternalReference pending_handler_context_address(
      Isolate::kPendingHandlerContextAddress, isolate());
  ExternalReference pending_handler_code_address(
      Isolate::kPendingHandlerCodeAddress, isolate());
  ExternalReference pending_handler_offset_address(
      Isolate::kPendingHandlerOffsetAddress, isolate());
  ExternalReference pending_handler_fp_address(
      Isolate::kPendingHandlerFPAddress, isolate());
  ExternalReference pending_handler_sp_address(
      Isolate::kPendingHandlerSPAddress, isolate());

  // Ask the runtime for help to determine the handler. This will set r0 to
  // contain the current pending exception, don't clobber it.
  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
                                 isolate());
  {
    FrameScope scope(masm, StackFrame::MANUAL);
    __ PrepareCallCFunction(3, 0, r0);
    __ mov(r0, Operand(0));
    __ mov(r1, Operand(0));
    __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
    __ CallCFunction(find_handler, 3);
  }

  // Retrieve the handler context, SP and FP.
  __ mov(cp, Operand(pending_handler_context_address));
  __ ldr(cp, MemOperand(cp));
  __ mov(sp, Operand(pending_handler_sp_address));
  __ ldr(sp, MemOperand(sp));
  __ mov(fp, Operand(pending_handler_fp_address));
  __ ldr(fp, MemOperand(fp));

  // If the handler is a JS frame, restore the context to the frame. Note that
  // the context will be set to (cp == 0) for non-JS frames.
  __ cmp(cp, Operand(0));
  __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);

  // Compute the handler entry address and jump to it.
  ConstantPoolUnavailableScope constant_pool_unavailable(masm);
  __ mov(r1, Operand(pending_handler_code_address));
  __ ldr(r1, MemOperand(r1));
  __ mov(r2, Operand(pending_handler_offset_address));
  __ ldr(r2, MemOperand(r2));
  __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
  if (FLAG_enable_embedded_constant_pool) {
    __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
  }
  __ add(pc, r1, r2);
}


void JSEntryStub::Generate(MacroAssembler* masm) {
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // [sp+0]: argv

  Label invoke, handler_entry, exit;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Called from C, so do not pop argc and args on exit (preserve sp)
  // No need to save register-passed args
  // Save callee-saved registers (incl. cp and fp), sp, and lr
  __ stm(db_w, sp, kCalleeSaved | lr.bit());

  // Save callee-saved vfp registers.
  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
  // Set up the reserved register for 0.0.
  __ vmov(kDoubleRegZero, 0.0);
  __ VFPEnsureFPSCRState(r4);

  // Get address of argv, see stm above.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc

  // Set up argv in r4.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
  __ ldr(r4, MemOperand(sp, offset_to_argv));

  // Push a frame with special values setup to mark it as an entry frame.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  int marker = type();
  if (FLAG_enable_embedded_constant_pool) {
    __ mov(r8, Operand::Zero());
  }
  __ mov(r7, Operand(Smi::FromInt(marker)));
  __ mov(r6, Operand(Smi::FromInt(marker)));
  __ mov(r5,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ ldr(r5, MemOperand(r5));
  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
                       (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
                       ip.bit());

  // Set up frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ ldr(r6, MemOperand(r5));
  __ cmp(r6, Operand::Zero());
  __ b(ne, &non_outermost_js);
  __ str(fp, MemOperand(r5));
  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(ip);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);

  // Block literal pool emission whilst taking the position of the handler
  // entry. This avoids making the assumption that literal pools are always
  // emitted after an instruction is emitted, rather than before.
  {
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ bind(&handler_entry);
    handler_offset_ = handler_entry.pos();
    // Caught exception: Store result (exception) in the pending exception
    // field in the JSEnv and return a failure sentinel.  Coming in here the
    // fp will be invalid because the PushStackHandler below sets it to 0 to
    // signal the existence of the JSEntry frame.
    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                         isolate())));
  }
  __ str(r0, MemOperand(ip));
  __ LoadRoot(r0, Heap::kExceptionRootIndex);
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  // Must preserve r0-r4, r5-r6 are available.
  __ PushStackHandler();
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bl(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ mov(r5, Operand(isolate()->factory()->the_hole_value()));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));
  __ str(r5, MemOperand(ip));

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  if (type() == StackFrame::ENTRY_CONSTRUCT) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate());
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
    __ mov(ip, Operand(entry));
  }
  __ ldr(ip, MemOperand(ip));  // deref address
  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Branch and link to JSEntryTrampoline.
  __ Call(ip);

  // Unlink this frame from the handler chain.
  __ PopStackHandler();

  __ bind(&exit);  // r0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r5);
  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ b(ne, &non_outermost_js_2);
  __ mov(r6, Operand::Zero());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ str(r6, MemOperand(r5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ mov(ip,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ str(r3, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved registers and return.
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif

  // Restore callee-saved vfp registers.
  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);

  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}


void InstanceOfStub::Generate(MacroAssembler* masm) {
  Register const object = r1;              // Object (lhs).
  Register const function = r0;            // Function (rhs).
  Register const object_map = r2;          // Map of {object}.
  Register const function_map = r3;        // Map of {function}.
  Register const function_prototype = r4;  // Prototype of {function}.
  Register const scratch = r5;

  DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
  DCHECK(function.is(InstanceOfDescriptor::RightRegister()));

  // Check if {object} is a smi.
  Label object_is_smi;
  __ JumpIfSmi(object, &object_is_smi);

  // Lookup the {function} and the {object} map in the global instanceof cache.
  // Note: This is safe because we clear the global instanceof cache whenever
  // we change the prototype of any object.
  Label fast_case, slow_case;
  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
  __ b(ne, &fast_case);
  __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
  __ b(ne, &fast_case);
  __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
  __ Ret();

  // If {object} is a smi we can safely return false if {function} is a JS
  // function, otherwise we have to miss to the runtime and throw an exception.
  __ bind(&object_is_smi);
  __ JumpIfSmi(function, &slow_case);
  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
  __ b(ne, &slow_case);
  __ LoadRoot(r0, Heap::kFalseValueRootIndex);
  __ Ret();

  // Fast-case: The {function} must be a valid JSFunction.
  __ bind(&fast_case);
  __ JumpIfSmi(function, &slow_case);
  __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
  __ b(ne, &slow_case);

  // Ensure that {function} has an instance prototype.
  __ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &slow_case);

  // Get the "prototype" (or initial map) of the {function}.
  __ ldr(function_prototype,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  __ AssertNotSmi(function_prototype);

  // Resolve the prototype if the {function} has an initial map.  Afterwards
  // the {function_prototype} will be either the JSReceiver prototype object
  // or the hole value, which means that no instances of the {function} were
  // created so far and hence we should return false.
  Label function_prototype_valid;
  __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
  __ b(ne, &function_prototype_valid);
  __ ldr(function_prototype,
         FieldMemOperand(function_prototype, Map::kPrototypeOffset));
  __ bind(&function_prototype_valid);
  __ AssertNotSmi(function_prototype);

  // Update the global instanceof cache with the current {object} map and
  // {function}.  The cached answer will be set when it is known below.
  __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
  __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);

  // Loop through the prototype chain looking for the {function} prototype.
  // Assume true, and change to false if not found.
  Register const object_instance_type = function_map;
  Register const map_bit_field = function_map;
  Register const null = scratch;
  Register const result = r0;

  Label done, loop, fast_runtime_fallback;
  __ LoadRoot(result, Heap::kTrueValueRootIndex);
  __ LoadRoot(null, Heap::kNullValueRootIndex);
  __ bind(&loop);

  // Check if the object needs to be access checked.
  __ ldrb(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset));
  __ tst(map_bit_field, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &fast_runtime_fallback);
  // Check if the current object is a Proxy.
  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
  __ b(eq, &fast_runtime_fallback);

  __ ldr(object, FieldMemOperand(object_map, Map::kPrototypeOffset));
  __ cmp(object, function_prototype);
  __ b(eq, &done);
  __ cmp(object, null);
  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  __ b(ne, &loop);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);
  __ bind(&done);
  __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
  __ Ret();

  // Found Proxy or access check needed: Call the runtime.
  __ bind(&fast_runtime_fallback);
  __ Push(object, function_prototype);
  // Invalidate the instanceof cache.
  __ Move(scratch, Smi::FromInt(0));
  __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex);
  __ TailCallRuntime(Runtime::kHasInPrototypeChain);

  // Slow-case: Call the %InstanceOf runtime function.
  __ bind(&slow_case);
  __ Push(object, function);
  __ TailCallRuntime(Runtime::kInstanceOf);
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
                     LoadWithVectorDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
                                                          r5, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}


void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = r5;
  Register result = r0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
         result.is(LoadWithVectorDescriptor::SlotRegister()));

  // StringCharAtGenerator doesn't use the result register until it's passed
  // the different miss possibilities. If it did, we would have a conflict
  // when FLAG_vector_ics is true.
  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX,
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
1462
1463
1464void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1465  // The displacement is the offset of the last parameter (if any)
1466  // relative to the frame pointer.
1467  const int kDisplacement =
1468      StandardFrameConstants::kCallerSPOffset - kPointerSize;
1469  DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
1470  DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
1471
1472  // Check that the key is a smi.
1473  Label slow;
1474  __ JumpIfNotSmi(r1, &slow);
1475
1476  // Check if the calling frame is an arguments adaptor frame.
1477  Label adaptor;
1478  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1479  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
1480  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1481  __ b(eq, &adaptor);
1482
1483  // Check index against formal parameters count limit passed in
1484  // through register r0. Use unsigned comparison to get negative
1485  // check for free.
1486  __ cmp(r1, r0);
1487  __ b(hs, &slow);
1488
1489  // Read the argument from the stack and return it.
1490  __ sub(r3, r0, r1);
1491  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
1492  __ ldr(r0, MemOperand(r3, kDisplacement));
1493  __ Jump(lr);
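  // Worked example (illustrative, assuming 4-byte pointers): with a
  // smi-tagged parameter_count of 3 in r0 and index of 1 in r1, the code
  // above computes r3 = fp + (3 - 1) * kPointerSize, and the load at
  // kDisplacement then reads fp + kCallerSPOffset + (3 - 1 - 1) *
  // kPointerSize, i.e. the caller's stack slot for argument 1.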

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get the negative check for free.
  __ bind(&adaptor);
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(r1, r0);
  __ b(cs, &slow);

  // Read the argument from the adaptor frame and return it.
  __ sub(r3, r0, r1);
  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ push(r1);
  __ TailCallRuntime(Runtime::kArguments);
}


void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
  // r1 : function
  // r2 : number of parameters (tagged)
  // r3 : parameters pointer

  DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
  DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
  DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &runtime);

  // Patch the arguments.length and the parameters pointer in the current frame.
  __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ add(r4, r4, Operand(r2, LSL, 1));
  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));

  __ bind(&runtime);
  __ Push(r1, r3, r2);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}


void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
  // r1 : function
  // r2 : number of parameters (tagged)
  // r3 : parameters pointer
  // Registers used over whole function:
  //  r5 : arguments count (tagged)
  //  r6 : mapped parameter count (tagged)

  DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
  DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
  DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ mov(r5, r2);
  __ mov(r6, r2);
  __ b(&try_allocate);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ add(r4, r4, Operand(r5, LSL, 1));
  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));

  // r5 = argument count (tagged)
  // r6 = parameter count (tagged)
  // Compute the mapped parameter count = min(r6, r5) in r6.
  __ mov(r6, r2);
  __ cmp(r6, Operand(r5));
  __ mov(r6, Operand(r5), LeaveCC, gt);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  // If there are no mapped parameters, we do not need the parameter_map.
  __ cmp(r6, Operand(Smi::FromInt(0)));
  __ mov(r9, Operand::Zero(), LeaveCC, eq);
  __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);

  // 2. Backing store.
  __ add(r9, r9, Operand(r5, LSL, 1));
  __ add(r9, r9, Operand(FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
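  // Worked example (illustrative, assuming kPointerSize == 4): for a mapped
  // parameter count of 2 and an argument count of 3, r9 at this point is
  //   (2 * 4 + kParameterMapHeaderSize)     // parameter map
  //   + (3 * 4 + FixedArray::kHeaderSize)   // backing store
  //   + Heap::kSloppyArgumentsObjectSize,   // the arguments object itself
  // since shifting a smi left by one multiplies its value by kPointerSize.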

  // Do the allocation of all three objects in one go.
  __ Allocate(r9, r0, r9, r4, &runtime, TAG_OBJECT);

  // r0 = address of new object(s) (tagged)
  // r2 = argument count (smi-tagged)
  // Get the arguments boilerplate from the current native context into r4.
  const int kNormalOffset =
      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
  const int kAliasedOffset =
      Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);

  __ ldr(r4, NativeContextMemOperand());
  __ cmp(r6, Operand::Zero());
  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);

  // r0 = address of new object (tagged)
  // r2 = argument count (smi-tagged)
  // r4 = address of arguments map (tagged)
  // r6 = mapped parameter count (tagged)
  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
  __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
  __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ AssertNotSmi(r1);
  const int kCalleeOffset = JSObject::kHeaderSize +
      Heap::kArgumentsCalleeIndex * kPointerSize;
  __ str(r1, FieldMemOperand(r0, kCalleeOffset));

  // Use the length (smi tagged) and set that as an in-object property too.
  __ AssertSmi(r5);
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  const int kLengthOffset = JSObject::kHeaderSize +
      Heap::kArgumentsLengthIndex * kPointerSize;
  __ str(r5, FieldMemOperand(r0, kLengthOffset));

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, r4 will point there, otherwise
  // it will point to the backing store.
  __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize));
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));

  // r0 = address of new object (tagged)
  // r2 = argument count (tagged)
  // r4 = address of parameter map or backing store (tagged)
  // r6 = mapped parameter count (tagged)
  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ cmp(r6, Operand(Smi::FromInt(0)));
  // Move backing store address to r1, because it is
  // expected there when filling in the unmapped arguments.
  __ mov(r1, r4, LeaveCC, eq);
  __ b(eq, &skip_parameter_map);

  __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
  __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
  __ add(r5, r6, Operand(Smi::FromInt(2)));
  __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
  __ add(r5, r4, Operand(r6, LSL, 1));
  __ add(r5, r5, Operand(kParameterMapHeaderSize));
  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
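  // Concrete example (illustrative): with parameter_count == 3 and
  // mapped_parameter_count == 2, the loop below writes the context indices
  // MIN_CONTEXT_SLOTS + 1 and MIN_CONTEXT_SLOTS + 2 into the parameter map,
  // filling from right to left.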
  Label parameters_loop, parameters_test;
  __ mov(r5, r6);
  __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ sub(r9, r9, Operand(r6));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ add(r1, r4, Operand(r5, LSL, 1));
  __ add(r1, r1, Operand(kParameterMapHeaderSize));

  // r1 = address of backing store (tagged)
  // r4 = address of parameter map (tagged), which is also the address of new
  //      object + Heap::kSloppyArgumentsObjectSize (tagged)
  // r0 = temporary scratch (among other things, for address calculation)
  // r5 = loop variable (tagged)
  // ip = the hole value
  __ jmp(&parameters_test);

  __ bind(&parameters_loop);
  __ sub(r5, r5, Operand(Smi::FromInt(1)));
  __ mov(r0, Operand(r5, LSL, 1));
  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
  __ str(r9, MemOperand(r4, r0));
  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
  __ str(ip, MemOperand(r1, r0));
  __ add(r9, r9, Operand(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ cmp(r5, Operand(Smi::FromInt(0)));
  __ b(ne, &parameters_loop);

  // Restore r0 = new object (tagged) and r5 = argument count (tagged).
  __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
  __ ldr(r5, FieldMemOperand(r0, kLengthOffset));

  __ bind(&skip_parameter_map);
  // r0 = address of new object (tagged)
  // r1 = address of backing store (tagged)
  // r5 = argument count (tagged)
  // r6 = mapped parameter count (tagged)
  // r9 = scratch
  // Copy arguments header and remaining slots (if there are any).
  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
  __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
  __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));

  Label arguments_loop, arguments_test;
  __ sub(r3, r3, Operand(r6, LSL, 1));
  __ jmp(&arguments_test);

  __ bind(&arguments_loop);
  __ sub(r3, r3, Operand(kPointerSize));
  __ ldr(r4, MemOperand(r3, 0));
  __ add(r9, r1, Operand(r6, LSL, 1));
  __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
  __ add(r6, r6, Operand(Smi::FromInt(1)));

  __ bind(&arguments_test);
  __ cmp(r6, Operand(r5));
  __ b(lt, &arguments_loop);

  // Return.
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  // r0 = address of new object (tagged)
  // r5 = argument count (tagged)
  __ bind(&runtime);
  __ Push(r1, r3, r5);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}


void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
  // Return address is in lr.
  Label slow;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register key = LoadDescriptor::NameRegister();

  // Check that the key is an array index, that is, a Uint32.
  __ NonNegativeSmiTst(key);
  __ b(ne, &slow);

  // Everything is fine, call runtime.
  __ Push(receiver, key);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);

  __ bind(&slow);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // r1 : function
  // r2 : number of parameters (tagged)
  // r3 : parameters pointer

  DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
  DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
  DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));

  // Check if the calling frame is an arguments adaptor frame.
  Label try_allocate, runtime;
  __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ add(r4, r4, Operand::PointerOffsetFromSmiKey(r2));
  __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));

  // Try the new space allocation. Start out with computing the size
  // of the arguments object and the elements array in words.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ SmiUntag(r9, r2, SetCC);
  __ b(eq, &add_arguments_object);
  __ add(r9, r9, Operand(FixedArray::kHeaderSize / kPointerSize));
  __ bind(&add_arguments_object);
  __ add(r9, r9, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));

  // Do the allocation of both objects in one go.
  __ Allocate(r9, r0, r4, r5, &runtime,
              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

  // Get the arguments boilerplate from the current native context.
  __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4);

  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
  __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
  __ str(r5, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r5, FieldMemOperand(r0, JSObject::kElementsOffset));

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ AssertSmi(r2);
  __ str(r2,
         FieldMemOperand(r0, JSObject::kHeaderSize +
                                 Heap::kArgumentsLengthIndex * kPointerSize));

  // If there are no actual arguments, we're done.
  Label done;
  __ cmp(r2, Operand::Zero());
  __ b(eq, &done);

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
  __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
  __ str(r2, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ SmiUntag(r2);

  // Copy the fixed array slots.
  Label loop;
  // Set up r4 to point to the first array slot.
  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ bind(&loop);
  // Pre-decrement r3 with kPointerSize on each iteration.
  // Pre-decrement in order to skip receiver.
  __ ldr(r5, MemOperand(r3, kPointerSize, NegPreIndex));
  // Post-increment r4 with kPointerSize on each iteration.
  __ str(r5, MemOperand(r4, kPointerSize, PostIndex));
  __ sub(r2, r2, Operand(1));
  __ cmp(r2, Operand::Zero());
  __ b(ne, &loop);

  // Return.
  __ bind(&done);
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ Push(r1, r3, r2);
  __ TailCallRuntime(Runtime::kNewStrictArguments);
}


void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
  // r2 : number of parameters (tagged)
  // r3 : parameters pointer
  // r4 : rest parameter index (tagged)

  Label runtime;
  __ ldr(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r0, MemOperand(r5, StandardFrameConstants::kContextOffset));
  __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &runtime);

  // Patch the arguments.length and the parameters pointer.
  __ ldr(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ add(r3, r5, Operand::PointerOffsetFromSmiKey(r2));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));

  __ bind(&runtime);
  __ Push(r2, r3, r4);
  __ TailCallRuntime(Runtime::kNewRestParam);
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Jump straight to the runtime if native RegExp support was not selected
  // at compile time, or if the regexp entry in generated code has been
  // turned off (by a runtime switch or at compilation).
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]: last_match_info (expected JSArray)
  //  sp[4]: previous index
  //  sp[8]: subject string
  //  sp[12]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Label runtime;
  // Allocation of registers for this function. These are in callee-saved
  // registers and will be preserved by the call to the native RegExp code,
  // as this code is called using the normal C calling convention. When
  // calling directly from generated code the native RegExp code will not do
  // a GC and therefore the contents of these registers are safe to use after
  // the call.
  Register subject = r4;
  Register regexp_data = r5;
  Register last_match_info_elements = no_reg;  // will be r6;

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r0, MemOperand(r0, 0));
  __ cmp(r0, Operand::Zero());
  __ b(eq, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
  __ JumpIfSmi(r0, &runtime);
  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
  __ b(ne, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ SmiTst(regexp_data);
    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
  __ b(ne, &runtime);

  // regexp_data: RegExp data (FixedArray)
  // Check that the number of captures fits in the static offsets vector
  // buffer.
  __ ldr(r2,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or          number_of_captures * 2 <= offsets vector size - 2
  // Multiplying by 2 comes for free since r2 is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
  __ b(hi, &runtime);
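  // Illustrative arithmetic: a smi n is encoded as n << 1, i.e. as the
  // machine word 2 * n, so comparing the tagged capture count directly
  // against (vector size - 2) tests number_of_captures * 2 <= size - 2
  // without untagging first.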

  // Reset offset for possibly sliced string.
  __ mov(r9, Operand::Zero());
  __ ldr(subject, MemOperand(sp, kSubjectOffset));
  __ JumpIfSmi(subject, &runtime);
  __ mov(r3, subject);  // Make a copy of the original subject string.
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  // subject: subject string
  // r3: subject string
  // r0: subject string instance type
  // regexp_data: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential string?  If yes, go to (5).
  // (2) Anything but sequential or cons?  If yes, go to (6).
  // (3) Cons string.  If the string is flat, replace subject with first string.
  //     Otherwise bailout.
  // (4) Is subject external?  If yes, go to (7).
  // (5) Sequential string.  Load regexp code according to encoding.
  // (E) Carry on.
  // [...]

  // Deferred code at the end of the stub:
  // (6) Not a long external string?  If yes, go to (8).
  // (7) External string.  Make it, offset-wise, look like a sequential string.
  //     Go to (5).
  // (8) Short external string or not a string?  If yes, bail out to runtime.
  // (9) Sliced string.  Replace subject with parent.  Go to (4).

  Label seq_string /* 5 */, external_string /* 7 */,
        check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
        not_long_external /* 8 */;

  // (1) Sequential string?  If yes, go to (5).
  __ and_(r1,
          r0,
          Operand(kIsNotStringMask |
                  kStringRepresentationMask |
                  kShortExternalStringMask),
          SetCC);
  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
  __ b(eq, &seq_string);  // Go to (5).

  // (2) Anything but sequential or cons?  If yes, go to (6).
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmp(r1, Operand(kExternalStringTag));
  __ b(ge, &not_seq_nor_cons);  // Go to (6).

  // (3) Cons string.  Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
  __ CompareRoot(r0, Heap::kempty_stringRootIndex);
  __ b(ne, &runtime);
  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));

  // (4) Is subject external?  If yes, go to (7).
  __ bind(&check_underlying);
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r0, Operand(kStringRepresentationMask));
  // The underlying external string is never a short external string.
  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
  __ b(ne, &external_string);  // Go to (7).

  // (5) Sequential string.  Load regexp code according to encoding.
  __ bind(&seq_string);
  // subject: sequential subject string (or look-alike, external string)
  // r3: original subject string
  // Load previous index and check range before r3 is overwritten.  We have to
  // use r3 instead of subject here because subject might have been only made
  // to look like a sequential string when it actually is an external string.
  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
  __ JumpIfNotSmi(r1, &runtime);
  __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
  __ cmp(r3, Operand(r1));
  __ b(ls, &runtime);
  __ SmiUntag(r1);

  STATIC_ASSERT(4 == kOneByteStringTag);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ and_(r0, r0, Operand(kStringEncodingMask));
  __ mov(r3, Operand(r0, ASR, 2), SetCC);
  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
         ne);
  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);

  // (E) Carry on.  String handling is done.
  // r6: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r6, &runtime);

  // r1: previous index
  // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
  // r6: code
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // All checks done. Now push arguments for native regexp code.
  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2);

  // Isolates: note we add an additional parameter here (isolate pointer).
  const int kRegExpExecuteArguments = 9;
  const int kParameterRegisters = 4;
  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);

  // Stack pointer now points to cell where return address is to be written.
  // Arguments are before that on the stack or in registers.
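  // For reference, the nine arguments assembled below (r0-r3 plus five stack
  // slots) are, in order: subject string, previous index, start of input
  // data, end of input data, static offsets vector, number of capture
  // registers, top of the backtracking stack, a direct-call flag, and the
  // isolate. This summary just mirrors the stores below; the authoritative
  // signature is the native regexp entry point's.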

  // Argument 9 (sp[20]): Pass current isolate address.
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
  __ str(r0, MemOperand(sp, 5 * kPointerSize));

  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
  __ mov(r0, Operand(1));
  __ str(r0, MemOperand(sp, 4 * kPointerSize));

  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
  __ ldr(r0, MemOperand(r0, 0));
  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r2, MemOperand(r2, 0));
  __ add(r0, r0, Operand(r2));
  __ str(r0, MemOperand(sp, 3 * kPointerSize));

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global.  This does not affect non-global regexps.
  __ mov(r0, Operand::Zero());
  __ str(r0, MemOperand(sp, 2 * kPointerSize));

  // Argument 5 (sp[4]): static offsets vector buffer.
  __ mov(r0,
         Operand(ExternalReference::address_of_static_offsets_vector(
             isolate())));
  __ str(r0, MemOperand(sp, 1 * kPointerSize));

  // For arguments 4 and 3, get the string length, calculate the start of the
  // string data, and calculate the shift of the index (0 for one-byte, 1 for
  // two-byte).
  __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
  __ eor(r3, r3, Operand(1));
  // Load the length from the original subject string from the previous stack
  // frame. Therefore we have to use fp, which points exactly to two pointer
  // sizes below the previous sp. (Because creating a new stack frame pushes
  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
  // If slice offset is not 0, load the length from the original sliced string.
  // Argument 4, r3: End of string data
  // Argument 3, r2: Start of string data
  // Prepare start and end index of the input.
  __ add(r9, r7, Operand(r9, LSL, r3));
  __ add(r2, r9, Operand(r1, LSL, r3));

  __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
  __ SmiUntag(r7);
  __ add(r3, r9, Operand(r7, LSL, r3));

  // Argument 2 (r1): Previous index.
  // Already there

  // Argument 1 (r0): Subject string.
  __ mov(r0, subject);

  // Locate the code entry and call it.
  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
  DirectCEntryStub stub(isolate());
  stub.GenerateCall(masm, r6);

  __ LeaveExitFrame(false, no_reg, true);

  last_match_info_elements = r6;

  // r0: result
  // subject: subject string (callee saved)
  // regexp_data: RegExp data (callee saved)
  // last_match_info_elements: Last match info elements (callee saved)
  // Check the result.
  Label success;
  __ cmp(r0, Operand(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ b(eq, &success);
  Label failure;
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
  __ b(eq, &failure);
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
  // If it is not an exception, it can only be retry. Handle that in the
  // runtime system.
  __ b(ne, &runtime);
  // The result must now be an exception. If there is no pending exception
  // already, a stack overflow (on the backtrack stack) was detected in the
  // RegExp code, but the exception has not been created yet. Handle that in
  // the runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  __ mov(r1, Operand(isolate()->factory()->the_hole_value()));
  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate())));
  __ ldr(r0, MemOperand(r2, 0));
  __ cmp(r0, r1);
  __ b(eq, &runtime);

  // For exception, throw the exception again.
  __ TailCallRuntime(Runtime::kRegExpExecReThrow);

  __ bind(&failure);
  // For failure and exception return null.
  __ mov(r0, Operand(isolate()->factory()->null_value()));
  __ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();

  // Process the result from the native regexp code.
  __ bind(&success);
  __ ldr(r1,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  // Multiplying by 2 comes for free since r1 is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(r1, r1, Operand(2));  // r1 was a smi.

  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
  __ JumpIfSmi(r0, &runtime);
  __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
  __ b(ne, &runtime);
  // Check that the JSArray is in fast case.
  __ ldr(last_match_info_elements,
         FieldMemOperand(r0, JSArray::kElementsOffset));
  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information.
  __ ldr(r0,
         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
  __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
  __ cmp(r2, Operand::SmiUntag(r0));
  __ b(gt, &runtime);

  // r1: number of capture registers
  // r4: subject string
  // Store the capture count.
  __ SmiTag(r2, r1);
  __ str(r2, FieldMemOperand(last_match_info_elements,
                             RegExpImpl::kLastCaptureCountOffset));
  // Store last subject and last input.
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastSubjectOffset));
  __ mov(r2, subject);
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastSubjectOffset,
                      subject,
                      r3,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);
  __ mov(subject, r2);
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastInputOffset));
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastInputOffset,
                      subject,
                      r3,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(isolate());
  __ mov(r2, Operand(address_of_static_offsets_vector));

  // r1: number of capture registers
  // r2: offsets vector
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ add(r0,
         last_match_info_elements,
         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
  __ bind(&next_capture);
  __ sub(r1, r1, Operand(1), SetCC);
  __ b(mi, &done);
  // Read the value from the static offsets vector buffer.
  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
  // Store the smi value in the last match info.
  __ SmiTag(r3);
  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
  __ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec);

  // Deferred code for string handling.
  // (6) Not a long external string?  If yes, go to (8).
  __ bind(&not_seq_nor_cons);
  // Compare flags are still set.
  __ b(gt, &not_long_external);  // Go to (8).

  // (7) External string.  Make it, offset-wise, look like a sequential string.
  __ bind(&external_string);
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(r0, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  __ ldr(subject,
         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ sub(subject,
         subject,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&seq_string);    // Go to (5).

  // (8) Short external string or not a string?  If yes, bail out to runtime.
  __ bind(&not_long_external);
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
  __ b(ne, &runtime);

  // (9) Sliced string.  Replace subject with parent.  Go to (4).
  // Load offset into r9 and replace subject string with parent.
  __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
  __ SmiUntag(r9);
  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
  __ jmp(&check_underlying);  // Go to (4).
#endif  // V8_INTERPRETED_REGEXP
}


static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
  // r0 : number of arguments to the construct function
  // r1 : the function to call
  // r2 : feedback vector
  // r3 : slot in feedback vector (Smi)
  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

  // Number-of-arguments register must be smi-tagged to call out.
  __ SmiTag(r0);
  __ Push(r3, r2, r1, r0);

  __ CallStub(stub);

  __ Pop(r3, r2, r1, r0);
  __ SmiUntag(r0);
}


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a feedback vector slot.  Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // r0 : number of arguments to the construct function
  // r1 : the function to call
  // r2 : feedback vector
  // r3 : slot in feedback vector (Smi)
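  // Rough state diagram for the slot (informal summary of the code below):
  //   uninitialized --(seen function F)---------> monomorphic: WeakCell(F)
  //   uninitialized --(seen the Array function)--> AllocationSite
  //   monomorphic   --(seen a different target)--> megamorphic
  // A cleared WeakCell gets a fresh chance to become monomorphic again.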
  Label initialize, done, miss, megamorphic, not_array_function;

  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
            masm->isolate()->heap()->megamorphic_symbol());
  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
            masm->isolate()->heap()->uninitialized_symbol());

  // Load the cache state into r5.
  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ ldr(r5, FieldMemOperand(r5, FixedArray::kHeaderSize));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  // We don't know if r5 is a WeakCell or a Symbol, but it's harmless to read at
  // this position in a symbol (see static asserts in type-feedback-vector.h).
  Label check_allocation_site;
  Register feedback_map = r6;
  Register weak_value = r9;
  __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
  __ cmp(r1, weak_value);
  __ b(eq, &done);
  __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
  __ b(eq, &done);
  __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
  __ b(ne, &check_allocation_site);

  // If the weak cell is cleared, we have a new chance to become monomorphic.
  __ JumpIfSmi(weak_value, &initialize);
  __ jmp(&megamorphic);

  __ bind(&check_allocation_site);
  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorphic
  // sentinel, then we have in the slot either some other function or an
  // AllocationSite.
  __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
  __ b(ne, &miss);

  // Make sure the function is the Array() function.
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
  __ cmp(r1, r5);
  __ b(ne, &megamorphic);
  __ jmp(&done);

  __ bind(&miss);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
  __ CompareRoot(r5, Heap::kuninitialized_symbolRootIndex);
  __ b(eq, &initialize);
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ bind(&megamorphic);
  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
  __ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
  __ jmp(&done);

  // An uninitialized cache is patched with the function.
  __ bind(&initialize);

  // Make sure the function is the Array() function.
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
  __ cmp(r1, r5);
  __ b(ne, &not_array_function);

  // The target function is the Array constructor.
  // Create an AllocationSite if we don't already have it; store it in the
  // slot.
  CreateAllocationSiteStub create_stub(masm->isolate());
  CallStubInRecordCallTarget(masm, &create_stub);
  __ b(&done);

  __ bind(&not_array_function);
  CreateWeakCellStub weak_cell_stub(masm->isolate());
  CallStubInRecordCallTarget(masm, &weak_cell_stub);
  __ bind(&done);
}


void CallConstructStub::Generate(MacroAssembler* masm) {
  // r0 : number of arguments
  // r1 : the function to call
  // r2 : feedback vector
  // r3 : slot in feedback vector (Smi, for RecordCallTarget)

  Label non_function;
  // Check that the function is not a smi.
  __ JumpIfSmi(r1, &non_function);
  // Check that the function is a JSFunction.
  __ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
  __ b(ne, &non_function);

  GenerateRecordCallTarget(masm);

  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
  Label feedback_register_initialized;
  // Put the AllocationSite from the feedback vector into r2, or undefined.
  __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
  __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
  __ b(eq, &feedback_register_initialized);
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  __ bind(&feedback_register_initialized);

  __ AssertUndefinedOrAllocationSite(r2, r5);

  // Pass function as new target.
  __ mov(r3, r1);

  // Tail call to the function-specific construct stub (still in the caller
  // context at this point).
  __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
  __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&non_function);
  __ mov(r3, r1);
  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}


void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
  // r1 - function
  // r3 - slot id
  // r2 - vector
  // r4 - allocation site (loaded from vector[slot])
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
  __ cmp(r1, r5);
  __ b(ne, miss);

  __ mov(r0, Operand(arg_count()));

  // Increment the call count for monomorphic function calls.
  __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
  __ ldr(r3, FieldMemOperand(r2, 0));
  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ str(r3, FieldMemOperand(r2, 0));

  __ mov(r2, r4);
  __ mov(r3, r1);
  ArrayConstructorStub stub(masm->isolate(), arg_count());
  __ TailCallStub(&stub);
}


void CallICStub::Generate(MacroAssembler* masm) {
  // r1 - function
  // r3 - slot id (Smi)
  // r2 - vector
  Label extra_checks_or_miss, call, call_function;
  int argc = arg_count();
  ParameterCount actual(argc);

  // The checks. First, does r1 match the recorded monomorphic target?
  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));

  // We don't know that we have a weak cell. We might have a private symbol
  // or an AllocationSite, but the memory is safe to examine.
  // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
  // FixedArray.
  // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
  // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
  // computed, meaning that it can't appear to be a pointer. If the low bit is
  // 0, then hash is computed, but the 0 bit prevents the field from appearing
  // to be a pointer.
  STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
  STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
                    WeakCell::kValueOffset &&
                WeakCell::kValueOffset == Symbol::kHashFieldSlot);

  __ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
  __ cmp(r1, r5);
  __ b(ne, &extra_checks_or_miss);

  // The compare above could have been a SMI/SMI comparison. Guard against this
  // convincing us that we have a monomorphic JSFunction.
  __ JumpIfSmi(r1, &extra_checks_or_miss);

  // Increment the call count for monomorphic function calls.
  __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
  __ ldr(r3, FieldMemOperand(r2, 0));
  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ str(r3, FieldMemOperand(r2, 0));

  __ bind(&call_function);
  __ mov(r0, Operand(argc));
  __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
          RelocInfo::CODE_TARGET);

  __ bind(&extra_checks_or_miss);
  Label uninitialized, miss, not_allocation_site;

  __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
  __ b(eq, &call);

  // Verify that r4 contains an AllocationSite.
  __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
  __ b(ne, &not_allocation_site);

  // We have an allocation site.
  HandleArrayCase(masm, &miss);

  __ bind(&not_allocation_site);

  // The following cases attempt to handle MISS cases without going to the
  // runtime.
  if (FLAG_trace_ic) {
    __ jmp(&miss);
  }

  __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
  __ b(eq, &uninitialized);

  // We are going megamorphic. If the feedback is a JSFunction, it is fine
  // to handle it here. More complex cases are dealt with in the runtime.
  __ AssertNotSmi(r4);
  __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
  __ b(ne, &miss);
  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
  __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));

  __ bind(&call);
  __ mov(r0, Operand(argc));
  __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
          RelocInfo::CODE_TARGET);

  __ bind(&uninitialized);

  // We are going monomorphic, provided we actually have a JSFunction.
  __ JumpIfSmi(r1, &miss);

  // Go to the miss case if we do not have a function.
  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
  __ b(ne, &miss);

  // Make sure the function is not the Array() function, which requires special
  // behavior on MISS.
  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
  __ cmp(r1, r4);
  __ b(eq, &miss);

  // Make sure the function belongs to the same native context.
  __ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
  __ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
  __ ldr(ip, NativeContextMemOperand());
  __ cmp(r4, ip);
  __ b(ne, &miss);

  // Initialize the call counter.
  __ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));

  // Store the function. Use a stub since we need a frame for allocation.
  // r2 - vector
  // r3 - slot
  // r1 - function
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    CreateWeakCellStub create_stub(masm->isolate());
    __ Push(r1);
    __ CallStub(&create_stub);
    __ Pop(r1);
  }

  __ jmp(&call_function);

  // We are here because tracing is on or we encountered a MISS case we can't
  // handle here.
  __ bind(&miss);
  GenerateMiss(masm);

  __ jmp(&call);
}


void CallICStub::GenerateMiss(MacroAssembler* masm) {
  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);

  // Push the receiver and the function and feedback info.
  __ Push(r1, r2, r3);

  // Call the entry.
  __ CallRuntime(Runtime::kCallIC_Miss);

  // Move the result to r1 and exit the internal frame.
  __ mov(r1, r0);
}


// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  // If the receiver is a smi trigger the non-string case.
  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
    __ JumpIfSmi(object_, receiver_not_string_);

    // Fetch the instance type of the receiver into result register.
    __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
    __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
    // If the receiver is not a string trigger the non-string case.
    __ tst(result_, Operand(kIsNotStringMask));
    __ b(ne, receiver_not_string_);
  }

  // If the index is non-smi trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);
  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
  __ cmp(ip, Operand(index_));
  __ b(ls, index_out_of_range_);
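  // Note: both length and index are smis here, and the unsigned 'ls'
  // comparison also catches negative indices, since a negative smi
  // reinterpreted as an unsigned word exceeds any valid string length.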

  __ SmiUntag(index_);

  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_,
                                    result_,
                                    &call_runtime_);

  __ SmiTag(result_);
  __ bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm, EmbedMode embed_mode,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              result_,
              Heap::kHeapNumberMapRootIndex,
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Push(LoadWithVectorDescriptor::VectorRegister(),
            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
  } else {
    // index_ is consumed by runtime conversion function.
    __ Push(object_, index_);
  }
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
  } else {
    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi);
  }
  // Save the conversion result before the pop instructions below
  // have a chance to overwrite it.
  __ Move(index_, r0);
  if (embed_mode == PART_OF_IC_HANDLER) {
    __ Pop(LoadWithVectorDescriptor::VectorRegister(),
           LoadWithVectorDescriptor::SlotRegister(), object_);
  } else {
    __ pop(object_);
  }
  // Reload the instance type.
  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ jmp(&got_smi_index_);

  // Call the runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ SmiTag(index_);
  __ Push(object_, index_);
  __ CallRuntime(Runtime::kStringCharCodeAtRT);
  __ Move(result_, r0);
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}


// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
  __ tst(code_, Operand(kSmiTagMask |
                        ((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
  __ b(ne, &slow_case_);
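  // Illustrative arithmetic (assuming String::kMaxOneByteCharCodeU == 0xFF,
  // its current value): the mask above is kSmiTagMask | (~0xFF << 1), so the
  // tst is zero exactly when code_ is a smi whose value fits in [0, 0xFF].
  // This folds the smi check and the range check into a single test.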
2724
  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
  // At this point code register contains smi tagged one-byte char code.
  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
  __ b(eq, &slow_case_);
  __ bind(&exit_);
}


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kStringCharFromCode);
  __ Move(result_, r0);
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}


enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };


void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          String::Encoding encoding) {
  if (FLAG_debug_code) {
    // Check that destination is word aligned.
    __ tst(dest, Operand(kPointerAlignmentMask));
    __ Check(eq, kDestinationOfCopyNotAligned);
  }

  // Assumes word reads and writes are little endian.
  // Nothing to do for zero characters.
  Label done;
  if (encoding == String::TWO_BYTE_ENCODING) {
    __ add(count, count, Operand(count), SetCC);
  }
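  // For two-byte strings the add above doubles count, so from here on count
  // is a byte count regardless of encoding.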

  Register limit = count;  // Read until dest equals this.
  __ add(limit, dest, Operand(count));

  Label loop_entry, loop;
  // Copy bytes from src to dest until dest hits limit.
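  // The lt condition on the ldrb below is always satisfied: the loop body is
  // only ever entered through the b(lt) at the bottom.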
  __ b(&loop_entry);
  __ bind(&loop);
  __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
  __ strb(scratch, MemOperand(dest, 1, PostIndex));
  __ bind(&loop_entry);
  __ cmp(dest, Operand(limit));
  __ b(lt, &loop);

  __ bind(&done);
}


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  lr: return address
  //  sp[0]: to
  //  sp[4]: from
  //  sp[8]: string

  // This stub is called from the native call %_SubString(...), so
  // nothing can be assumed about the arguments. We check that:
  //  "string" is a sequential string,
  //  both "from" and "to" are smis, and
  //  0 <= from <= to <= string.length.
  // If any of these checks fail, we call the runtime system.

  const int kToOffset = 0 * kPointerSize;
  const int kFromOffset = 1 * kPointerSize;
  const int kStringOffset = 2 * kPointerSize;

  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
  STATIC_ASSERT(kFromOffset == kToOffset + 4);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);

  // Arithmetic shift right by one un-smi-tags. In this case we rotate right
  // instead because we bail out on non-smi values: ROR and ASR are equivalent
  // for smis but they set the flags in a way that's easier to optimize.
  __ mov(r2, Operand(r2, ROR, 1), SetCC);
  __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
  // If either to or from had the smi tag bit set, then C is set now, and N
  // has the same value: we rotated by 1, so the bottom bit is now the top bit.
  // We want to bail out to the runtime here if "from" is negative.  In that
  // case, the next instruction is not executed and we fall through to the
  // runtime bailout.
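  // Illustration: smi-tagged 6 is 0b1100, and ROR #1 yields 6 with C and N
  // clear; a heap pointer has its low bit set, so ROR #1 rotates that bit
  // into the sign position, setting both C and N.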
  // Executed if both r2 and r3 are untagged integers.
  __ sub(r2, r2, Operand(r3), SetCC, cc);
  // One of the above un-smis or the above SUB could have set N==1.
  __ b(mi, &runtime);  // Either "from" or "to" is not a smi, or from > to.

  // Make sure first argument is a string.
  __ ldr(r0, MemOperand(sp, kStringOffset));
  __ JumpIfSmi(r0, &runtime);
  Condition is_string = masm->IsObjectStringType(r0, r1);
  __ b(NegateCondition(is_string), &runtime);

  Label single_char;
  __ cmp(r2, Operand(1));
  __ b(eq, &single_char);

  // Short-cut for the case of trivial substring.
  Label return_r0;
  // r0: original string
  // r2: result string length
  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
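  // r4 holds the smi-tagged original length; ASR #1 untags it for the
  // comparison against the untagged result length in r2.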
  __ cmp(r2, Operand(r4, ASR, 1));
  // Return original string.
  __ b(eq, &return_r0);
  // Longer than original string's length or negative: unsafe arguments.
  __ b(hi, &runtime);
  // Shorter than original string's length: an actual substring.

  // Deal with different string types: update the index if necessary
  // and put the underlying string into r5.
  // r0: original string
  // r1: instance type
  // r2: length
  // r3: from index (untagged)
  Label underlying_unpacked, sliced_string, seq_or_external_string;
  // If the string is not indirect, it can only be sequential or external.
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);
  __ tst(r1, Operand(kIsIndirectStringMask));
  __ b(eq, &seq_or_external_string);

  __ tst(r1, Operand(kSlicedNotConsMask));
  __ b(ne, &sliced_string);
  // Cons string.  Check whether it is flat, then fetch first part.
  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
  __ CompareRoot(r5, Heap::kempty_stringRootIndex);
  __ b(ne, &runtime);
  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
  // Update instance type.
  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&sliced_string);
  // Sliced string.  Fetch parent and correct start index by offset.
  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
  // Update instance type.
  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&seq_or_external_string);
  // Sequential or external string.  Just move string to the expected register.
  __ mov(r5, r0);

  __ bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    // r5: underlying subject string
    // r1: instance type of underlying subject string
    // r2: length
    // r3: adjusted start index (untagged)
    __ cmp(r2, Operand(SlicedString::kMinLength));
    // Short slice.  Copy instead of slicing.
    __ b(lt, &copy_routine);
    // Allocate new sliced string.  At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string.  It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyways due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ tst(r1, Operand(kStringEncodingMask));
    __ b(eq, &two_byte_slice);
    __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
    __ jmp(&set_slice_header);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
    __ bind(&set_slice_header);
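    // Re-smi-tag the adjusted start index; SlicedString stores its offset
    // as a smi.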
    __ mov(r3, Operand(r3, LSL, 1));
    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
    __ jmp(&return_r0);

    __ bind(&copy_routine);
  }

  // r5: underlying subject string
  // r1: instance type of underlying subject string
  // r2: length
  // r3: adjusted start index (untagged)
  Label two_byte_sequential, sequential_string, allocate_result;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r1, Operand(kExternalStringTag));
  __ b(eq, &sequential_string);

  // Handle external string.
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(r1, Operand(kShortExternalStringTag));
  __ b(ne, &runtime);
  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
  // r5 already points to the first character of the underlying string.
  __ jmp(&allocate_result);

  __ bind(&sequential_string);
  // Locate first character of underlying subject string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

  __ bind(&allocate_result);
  // Allocate the result. The encoding check below selects the one-byte or
  // two-byte case.
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ tst(r1, Operand(kStringEncodingMask));
  __ b(eq, &two_byte_sequential);

  // Allocate and copy the resulting one-byte string.
  __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);

  // Locate first character of substring to copy.
  __ add(r5, r5, r3);
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

  // r0: result string
  // r1: first character of result string
  // r2: result string length
  // r5: first character of substring to copy
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharacters(
      masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING);
  __ jmp(&return_r0);

  // Allocate and copy the resulting two-byte string.
  __ bind(&two_byte_sequential);
  __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);

  // Locate first character of substring to copy.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ add(r5, r5, Operand(r3, LSL, 1));
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r0: result string.
  // r1: first character of result.
  // r2: result length.
  // r5: first character of substring to copy.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharacters(
      masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING);

  __ bind(&return_r0);
  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ Drop(3);
  __ Ret();

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString);

  __ bind(&single_char);
  // r0: original string
  // r1: instance type
  // r2: length
  // r3: from index (untagged)
  __ SmiTag(r3, r3);
  StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
  generator.GenerateFast(masm);
  __ Drop(3);
  __ Ret();
  generator.SkipSlow(masm, &runtime);
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
  Label not_smi;
  __ JumpIfNotSmi(r0, &not_smi);
  __ Ret();
  __ bind(&not_smi);

  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
  // r0: receiver
  // r1: receiver instance type
  __ Ret(eq);

  Label not_string, slow_string;
  __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
  __ b(hs, &not_string);
  // Check if string has a cached array index.
  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
  __ b(ne, &slow_string);
  __ IndexFromHash(r2, r0);
  __ Ret();
  __ bind(&slow_string);
  __ push(r0);  // Push argument.
  __ TailCallRuntime(Runtime::kStringToNumber);
  __ bind(&not_string);

  Label not_oddball;
  __ cmp(r1, Operand(ODDBALL_TYPE));
  __ b(ne, &not_oddball);
  __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
  __ Ret();
  __ bind(&not_oddball);

  __ push(r0);  // Push argument.
  __ TailCallRuntime(Runtime::kToNumber);
}


void ToLengthStub::Generate(MacroAssembler* masm) {
  // The ToLength stub takes one argument in r0.
  Label not_smi;
  __ JumpIfNotSmi(r0, &not_smi);
  STATIC_ASSERT(kSmiTag == 0);
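  // For smi input, ToLength only needs to clamp negative values to zero.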
  __ tst(r0, r0);
  __ mov(r0, Operand(0), LeaveCC, lt);
  __ Ret();
  __ bind(&not_smi);

  __ push(r0);  // Push argument.
  __ TailCallRuntime(Runtime::kToLength);
}


void ToStringStub::Generate(MacroAssembler* masm) {
  // The ToString stub takes one argument in r0.
  Label is_number;
  __ JumpIfSmi(r0, &is_number);

  __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
  // r0: receiver
  // r1: receiver instance type
  __ Ret(lo);

  Label not_heap_number;
  __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
  __ b(ne, &not_heap_number);
  __ bind(&is_number);
  NumberToStringStub stub(isolate());
  __ TailCallStub(&stub);
  __ bind(&not_heap_number);

  Label not_oddball;
  __ cmp(r1, Operand(ODDBALL_TYPE));
  __ b(ne, &not_oddball);
  __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
  __ Ret();
  __ bind(&not_oddball);

  __ push(r0);  // Push argument.
  __ TailCallRuntime(Runtime::kToString);
}


void StringHelper::GenerateFlatOneByteStringEquals(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ cmp(length, scratch2);
  __ b(eq, &check_zero_length);
  __ bind(&strings_not_equal);
  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
  __ Ret();

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(length, Operand::Zero());
  __ b(ne, &compare_chars);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();

  // Compare characters.
  __ bind(&compare_chars);
  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
                                  &strings_not_equal);

  // Characters are equal.
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();
}


void StringHelper::GenerateCompareFlatOneByteStrings(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3, Register scratch4) {
  Label result_not_equal, compare_lengths;
  // Find minimum length and length difference.
  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
  Register length_delta = scratch3;
  __ mov(scratch1, scratch2, LeaveCC, gt);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(min_length, Operand::Zero());
  __ b(eq, &compare_lengths);

  // Compare loop.
  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
                                  scratch4, &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use length_delta as result if it's zero.
  __ mov(r0, Operand(length_delta), SetCC);
  __ bind(&result_not_equal);
  // Conditionally update the result based either on length_delta or
  // the last comparison performed in the loop above.
  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
  __ Ret();
}


void StringHelper::GenerateOneByteCharsCompareLoop(
    MacroAssembler* masm, Register left, Register right, Register length,
    Register scratch1, Register scratch2, Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ add(scratch1, length,
         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ add(left, left, Operand(scratch1));
  __ add(right, right, Operand(scratch1));
  __ rsb(length, length, Operand::Zero());
  Register index = length;  // index = -length;
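  // E.g. for length 3 the loads below use offsets -3, -2 and -1; the SetCC
  // add makes the final increment produce zero, ending the loop without an
  // extra compare.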

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ ldrb(scratch1, MemOperand(left, index));
  __ ldrb(scratch2, MemOperand(right, index));
  __ cmp(scratch1, scratch2);
  __ b(ne, chars_not_equal);
  __ add(index, index, Operand(1), SetCC);
  __ b(ne, &loop);
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1    : left
  //  -- r0    : right
  //  -- lr    : return address
  // -----------------------------------
  __ AssertString(r1);
  __ AssertString(r0);

  Label not_same;
  __ cmp(r0, r1);
  __ b(ne, &not_same);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r1,
                      r2);
  __ Ret();

  __ bind(&not_same);

  // Check that both objects are sequential one-byte strings.
  Label runtime;
  __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);

  // Compare flat one-byte strings natively.
  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
                      r3);
  StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ Push(r1, r0);
  __ TailCallRuntime(Runtime::kStringCompare);
}


void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1    : left
  //  -- r0    : right
  //  -- lr    : return address
  // -----------------------------------

  // Load r2 with the allocation site.  We stick an undefined dummy value here
  // and replace it with the real allocation site later when we instantiate this
  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
  __ Move(r2, handle(isolate()->heap()->undefined_value()));

  // Make sure that we actually patched the allocation site.
  if (FLAG_debug_code) {
    __ tst(r2, Operand(kSmiTagMask));
    __ Assert(ne, kExpectedAllocationSite);
    __ push(r2);
    __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
    __ cmp(r2, ip);
    __ pop(r2);
    __ Assert(eq, kExpectedAllocationSite);
  }

  // Tail call into the stub that handles binary operations with allocation
  // sites.
  BinaryOpWithAllocationSiteStub stub(isolate(), state());
  __ TailCallStub(&stub);
}


void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
  DCHECK_EQ(CompareICState::BOOLEAN, state());
  Label miss;

  __ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
  __ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
  if (op() != Token::EQ_STRICT && is_strong(strength())) {
    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
  } else {
    if (!Token::IsEqualityOp(op())) {
      __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
      __ AssertSmi(r1);
      __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
      __ AssertSmi(r0);
    }
    __ sub(r0, r1, r0);
    __ Ret();
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateSmis(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::SMI);
  Label miss;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ sub(r0, r0, r1, SetCC);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(r1);
    __ sub(r0, r1, Operand::SmiUntag(r0));
  }
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::NUMBER);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;

  if (left() == CompareICState::SMI) {
    __ JumpIfNotSmi(r1, &miss);
  }
  if (right() == CompareICState::SMI) {
    __ JumpIfNotSmi(r0, &miss);
  }

  // Inline the double comparison, falling back to the general compare stub
  // if NaN is involved.
  // Load the left and right operands.
  Label done, left, left_smi, right_smi;
  __ JumpIfSmi(r0, &right_smi);
  __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
              DONT_DO_SMI_CHECK);
  __ sub(r2, r0, Operand(kHeapObjectTag));
  __ vldr(d1, r2, HeapNumber::kValueOffset);
  __ b(&left);
  __ bind(&right_smi);
  __ SmiToDouble(d1, r0);

  __ bind(&left);
  __ JumpIfSmi(r1, &left_smi);
  __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
              DONT_DO_SMI_CHECK);
  __ sub(r2, r1, Operand(kHeapObjectTag));
  __ vldr(d0, r2, HeapNumber::kValueOffset);
  __ b(&done);
  __ bind(&left_smi);
  __ SmiToDouble(d0, r1);

  __ bind(&done);
  // Compare operands.
  __ VFPCompareAndSetFlags(d0, d1);

  // Don't base result on status bits when a NaN is involved.
  __ b(vs, &unordered);

  // Return a result of -1, 0, or 1, based on status bits.
  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
  __ mov(r0, Operand(LESS), LeaveCC, lt);
  __ mov(r0, Operand(GREATER), LeaveCC, gt);
  __ Ret();

  __ bind(&unordered);
  __ bind(&generic_stub);
  CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
                     CompareICState::GENERIC, CompareICState::GENERIC);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
    __ b(ne, &miss);
    __ JumpIfSmi(r1, &unordered);
    __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
    __ b(ne, &maybe_undefined2);
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op())) {
    __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
    __ b(eq, &unordered);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are internalized strings.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ orr(tmp1, tmp1, Operand(tmp2));
  __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ b(ne, &miss);

  // Internalized strings are compared by identity.
  __ cmp(left, right);
  // Make sure r0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(r0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::UNIQUE_NAME);
  DCHECK(GetCondition() == eq);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));

  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);

  // Unique names are compared by identity.
  __ cmp(left, right);
  // Make sure r0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  DCHECK(right.is(r0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateStrings(MacroAssembler* masm) {
  DCHECK(state() == CompareICState::STRING);
  Label miss;

  bool equality = Token::IsEqualityOp(op());

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;
  Register tmp3 = r4;
  Register tmp4 = r5;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ orr(tmp3, tmp1, tmp2);
  __ tst(tmp3, Operand(kIsNotStringMask));
  __ b(ne, &miss);

  // Fast check for identical strings.
  __ cmp(left, right);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret(eq);

  // Handle not identical strings.

  // Check that both strings are internalized strings. If they are, we're done
  // because we already know they are not identical. We know they are both
  // strings.
  if (equality) {
    DCHECK(GetCondition() == eq);
    STATIC_ASSERT(kInternalizedTag == 0);
    __ orr(tmp3, tmp1, Operand(tmp2));
    __ tst(tmp3, Operand(kIsNotInternalizedMask));
    // Make sure r0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    DCHECK(right.is(r0));
    __ Ret(eq);
  }

  // Check that both strings are sequential one-byte.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
                                                    &runtime);

  // Compare flat one-byte strings. Returns when done.
  if (equality) {
    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
                                                  tmp3);
  } else {
    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
                                                    tmp2, tmp3, tmp4);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ Push(left, right);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
  DCHECK_EQ(CompareICState::RECEIVER, state());
  Label miss;
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &miss);

  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
  __ CompareObjectType(r0, r2, r2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, &miss);
  __ CompareObjectType(r1, r2, r2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, &miss);

  DCHECK(GetCondition() == eq);
  __ sub(r0, r0, Operand(r1));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
  Label miss;
  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &miss);
  __ GetWeakValue(r4, cell);
  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ cmp(r2, r4);
  __ b(ne, &miss);
  __ cmp(r3, r4);
  __ b(ne, &miss);

  if (Token::IsEqualityOp(op())) {
    __ sub(r0, r0, Operand(r1));
    __ Ret();
  } else if (is_strong(strength())) {
    __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
  } else {
    if (op() == Token::LT || op() == Token::LTE) {
      __ mov(r2, Operand(Smi::FromInt(GREATER)));
    } else {
      __ mov(r2, Operand(Smi::FromInt(LESS)));
    }
    __ Push(r1, r0, r2);
    __ TailCallRuntime(Runtime::kCompare);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void CompareICStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
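    // The operands are pushed twice: the lower pair is preserved across the
    // call and restored below, while the upper pair plus the op smi are the
    // arguments to the miss handler. lr is saved in between because the call
    // clobbers it.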
    __ Push(r1, r0);
    __ Push(lr, r1, r0);
    __ mov(ip, Operand(Smi::FromInt(op())));
    __ push(ip);
    __ CallRuntime(Runtime::kCompareIC_Miss);
    // Compute the entry point of the rewritten stub.
    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore registers.
    __ pop(lr);
    __ Pop(r1, r0);
  }

  __ Jump(r2);
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // Place the return address on the stack, making the call
  // GC safe. The RegExp backend also relies on this.
  __ str(lr, MemOperand(sp, 0));
  __ blx(ip);  // Call the C++ function.
  __ VFPEnsureFPSCRState(r2);
  __ ldr(pc, MemOperand(sp, 0));
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  intptr_t code =
      reinterpret_cast<intptr_t>(GetCode().location());
  __ Move(ip, target);
  __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
  __ blx(lr);  // Call the stub.
}


void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register receiver,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register scratch0) {
  DCHECK(name->IsUniqueName());
  // If none of the slots probed for the hash value holds the name, and one of
  // the probed slots is unused (its name is the undefined value), the hash
  // table is guaranteed not to contain the property. This holds even if some
  // probed slots hold deleted properties (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
    __ sub(index, index, Operand(1));
    __ and_(index, index, Operand(
        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
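    // Each entry occupies kEntrySize (3) array slots: key, value and
    // property details, hence the tripling above.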

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    STATIC_ASSERT(kSmiTagSize == 1);
    Register tmp = properties;
    __ add(tmp, properties, Operand(index, LSL, 1));
    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    DCHECK(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ cmp(entity_name, tmp);
    __ b(eq, done);

    // Load the hole ready for use below:
    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

    // Stop if found the property.
    __ cmp(entity_name, Operand(Handle<Name>(name)));
    __ b(eq, miss);

    Label good;
    __ cmp(entity_name, tmp);
    __ b(eq, &good);

    // Check if the entry name is not a unique name.
    __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
    __ ldrb(entity_name,
            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
    __ bind(&good);

    // Restore the properties.
    __ ldr(properties,
           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  }

  const int spill_mask =
      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
       r2.bit() | r1.bit() | r0.bit());

  __ stm(db_w, sp, spill_mask);
  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ mov(r1, Operand(Handle<Name>(name)));
  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ cmp(r0, Operand::Zero());
  __ ldm(ia_w, sp, spill_mask);

  __ b(eq, done);
  __ b(ne, miss);
}


// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register elements,
                                                      Register name,
                                                      Register scratch1,
                                                      Register scratch2) {
  DCHECK(!elements.is(scratch1));
  DCHECK(!elements.is(scratch2));
  DCHECK(!name.is(scratch1));
  DCHECK(!name.is(scratch2));

  __ AssertName(name);

  // Compute the capacity mask.
  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ SmiUntag(scratch1);
  __ sub(scratch1, scratch1, Operand(1));
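  // Capacity is a power of two, so capacity - 1 serves as the probe mask.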

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ add(scratch2, scratch2, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    }
    __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
    __ cmp(name, Operand(ip));
    __ b(eq, done);
  }

  const int spill_mask =
      (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
       r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
      ~(scratch1.bit() | scratch2.bit());

  __ stm(db_w, sp, spill_mask);
  if (name.is(r0)) {
    DCHECK(!elements.is(r1));
    __ Move(r1, name);
    __ Move(r0, elements);
  } else {
    __ Move(r0, elements);
    __ Move(r1, name);
  }
  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ cmp(r0, Operand::Zero());
  __ mov(scratch2, Operand(r2));
  __ ldm(ia_w, sp, spill_mask);

  __ b(ne, done);
  __ b(eq, miss);
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  dictionary: NameDictionary to probe (r0).
  //  key: the name to look up (r1).
  //  index: used to hold the probed entry's position (r2).
  // Returns:
  //  result (r0, aliasing dictionary) is zero if the lookup failed,
  //  non-zero otherwise.

  Register result = r0;
  Register dictionary = r0;
  Register key = r1;
  Register index = r2;
  Register mask = r3;
  Register hash = r4;
  Register undefined = r5;
  Register entry_key = r6;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ SmiUntag(mask);
  __ sub(mask, mask, Operand(1));

  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the following and instruction.
      DCHECK(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ add(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, Operand(hash));
    }
    __ and_(index, mask, Operand(index, LSR, Name::kHashShift));

    // Scale the index by multiplying by the entry size.
    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.

    STATIC_ASSERT(kSmiTagSize == 1);
    __ add(index, dictionary, Operand(index, LSL, 2));
    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ cmp(entry_key, Operand(undefined));
    __ b(eq, &not_in_dictionary);

    // Stop if found the property.
    __ cmp(entry_key, Operand(key));
    __ b(eq, &in_dictionary);

    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ ldrb(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup, then probing failure should be
  // treated as a lookup success. For a positive lookup, probing failure
  // should be treated as lookup failure.
  if (mode() == POSITIVE_LOOKUP) {
    __ mov(result, Operand::Zero());
    __ Ret();
  }

  __ bind(&in_dictionary);
  __ mov(result, Operand(1));
  __ Ret();

  __ bind(&not_in_dictionary);
  __ mov(result, Operand::Zero());
  __ Ret();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
  stub1.GetCode();
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
  stub2.GetCode();
}


// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
// the value has just been written into the object; this stub now makes sure
// the GC is kept informed.  The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call.  We patch them back
  // and forth between a compare instruction (a nop in this position) and the
  // real branch when we start and stop incremental heap marking.
  // See RecordWriteStub::Patch for details.
  {
    // Block literal pool emission, as the position of these two instructions
    // is assumed by the patching code.
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ b(&skip_to_incremental_noncompacting);
    __ b(&skip_to_incremental_compacting);
  }

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
  DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, Assembler::kInstrSize);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  DCHECK(!address.is(regs_.object()));
  DCHECK(!address.is(r0));
  __ Move(address, regs_.address());
  __ Move(r0, regs_.object());
  __ Move(r1, address);
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::incremental_marking_record_write_function(isolate()),
      argument_count);
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

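  // Decrement the page's write barrier counter; once it goes negative (the
  // mi branch below) we stop counting and inform the incremental marker.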
  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
  __ ldr(regs_.scratch1(),
         MemOperand(regs_.scratch0(),
                    MemoryChunk::kWriteBarrierCounterOffset));
  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
  __ str(regs_.scratch1(),
         MemOperand(regs_.scratch0(),
                    MemoryChunk::kWriteBarrierCounterOffset));
  __ b(mi, &need_incremental);

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ JumpIfWhite(regs_.scratch0(),  // The value.
                 regs_.scratch1(),  // Scratch.
                 regs_.object(),    // Scratch.
                 regs_.address(),   // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(isolate(), 1, kSaveFPRegs);
  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ ldr(r1, MemOperand(fp, parameter_count_offset));
  if (function_mode() == JS_FUNCTION_STUB_MODE) {
    __ add(r1, r1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
  __ add(sp, sp, r1);
  __ Ret();
}


void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
  LoadICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
  KeyedLoadICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(r2);
  CallICStub stub(isolate(), state());
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}


void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }


void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandleArrayCases(MacroAssembler* masm, Register feedback,
                             Register receiver_map, Register scratch1,
                             Register scratch2, bool is_polymorphic,
                             Label* miss) {
  // feedback initially contains the feedback array
  Label next_loop, prepare_next;
  Label start_polymorphic;

  Register cached_map = scratch1;

  __ ldr(cached_map,
         FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &start_polymorphic);
  // Found, now call the handler.
  Register handler = feedback;
  __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));

  Register length = scratch2;
  __ bind(&start_polymorphic);
  __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
  if (!is_polymorphic) {
    // If the IC could be monomorphic we have to make sure we don't go past the
    // end of the feedback array.
    __ cmp(length, Operand(Smi::FromInt(2)));
    __ b(eq, miss);
  }

  Register too_far = length;
  Register pointer_reg = feedback;

  // +-----+------+------+-----+-----+ ... ----+
  // | map | len  | wm0  | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+ ... ----+
  //                 0      1     2        len-1
  //                              ^              ^
  //                              |              |
  //                         pointer_reg      too_far
  //                         aka feedback     scratch2
  // also need receiver_map
  // use cached_map (scratch1) to look in the weak map values.
  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(pointer_reg, feedback,
         Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ldr(cached_map, MemOperand(pointer_reg));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &prepare_next);
  __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&prepare_next);
  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
  __ cmp(pointer_reg, too_far);
  __ b(lt, &next_loop);

  // We exhausted our array of map handler pairs.
  __ jmp(miss);
}


static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
                                  Register receiver_map, Register feedback,
                                  Register vector, Register slot,
                                  Register scratch, Label* compare_map,
                                  Label* load_smi_map, Label* try_array) {
  __ JumpIfSmi(receiver, load_smi_map);
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(compare_map);
  Register cached_map = scratch;
  // Move the weak map into the weak_cell register.
  __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
  __ cmp(cached_map, receiver_map);
  __ b(ne, try_array);
  Register handler = feedback;
  __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(handler,
         FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
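  // Jump by loading pc directly: Code::kHeaderSize - kHeapObjectTag turns
  // the tagged Code pointer into the address of its first instruction.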
  __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}


void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
  Register name = LoadWithVectorDescriptor::NameRegister();          // r2
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
  Register feedback = r4;
  Register receiver_map = r5;
  Register scratch1 = r6;

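  // slot is a smi; PointerOffsetFromSmiKey scales it into a byte offset, so
  // the load below fetches the feedback vector element for this slot.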
  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  // Is it a fixed array?
  __ bind(&try_array);
  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &not_array);
  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);

  __ bind(&not_array);
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ b(ne, &miss);
  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
                                               receiver, name, feedback,
                                               receiver_map, scratch1, r9);

  __ bind(&miss);
  LoadIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void KeyedLoadICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r1
  Register key = LoadWithVectorDescriptor::NameRegister();           // r2
  Register vector = LoadWithVectorDescriptor::VectorRegister();      // r3
  Register slot = LoadWithVectorDescriptor::SlotRegister();          // r0
  Register feedback = r4;
  Register receiver_map = r5;
  Register scratch1 = r6;

  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);
  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ b(ne, &try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, feedback);
  __ b(ne, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);

  __ bind(&miss);
  KeyedLoadIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
  VectorStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
  __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
  VectorKeyedStoreICStub stub(isolate(), state());
  stub.GenerateForTrampoline(masm);
}


void VectorStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r1
  Register key = VectorStoreICDescriptor::NameRegister();           // r2
  Register vector = VectorStoreICDescriptor::VectorRegister();      // r3
  Register slot = VectorStoreICDescriptor::SlotRegister();          // r4
  DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0));          // r0
  Register feedback = r5;
  Register receiver_map = r6;
  Register scratch1 = r9;

  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  // Is it a fixed array?
  __ bind(&try_array);
  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &not_array);

  // We are using register r8, which is used for the embedded constant pool
  // when FLAG_enable_embedded_constant_pool is true.
  DCHECK(!FLAG_enable_embedded_constant_pool);
  Register scratch2 = r8;
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
                   &miss);

  __ bind(&not_array);
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ b(ne, &miss);
  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
      scratch1, scratch2);

  __ bind(&miss);
  StoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
  GenerateImpl(masm, false);
}


void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
  GenerateImpl(masm, true);
}


static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
                                       Register receiver_map, Register scratch1,
                                       Register scratch2, Label* miss) {
  // feedback initially contains the feedback array.
  Label next_loop, prepare_next;
  Label start_polymorphic;
  Label transition_call;

  Register cached_map = scratch1;
  Register too_far = scratch2;
  Register pointer_reg = feedback;
  __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));

  // +-----+------+------+-----+-----+-----+ ... ----+
  // | map | len  | wm0  | wt0 | h0  | wm1 |      hN |
  // +-----+------+------+-----+-----+-----+ ... ----+
  //                 0      1     2              len-1
  //                 ^                                 ^
  //                 |                                 |
  //             pointer_reg                        too_far
  //             aka feedback                       scratch2
  // We also need receiver_map, and use cached_map (scratch1) to look at the
  // weak map values.
  __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
  __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(pointer_reg, feedback,
         Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));

  __ bind(&next_loop);
  __ ldr(cached_map, MemOperand(pointer_reg));
  __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
  __ cmp(receiver_map, cached_map);
  __ b(ne, &prepare_next);
  // Is it a transitioning store?
  __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
  __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
  __ b(ne, &transition_call);
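  // Not a transition: the third element of the triple is the handler code
  // object; tail-call it.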
  __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
  __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

  __ bind(&transition_call);
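  // The transition map is held in a weak cell; if the cell has been cleared
  // (its value is a Smi), the target map is dead and we must miss.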
  __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
  __ JumpIfSmi(too_far, miss);

  __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));

  // Load the transition map into the register the handler expects.
  DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
  __ mov(feedback, too_far);

  __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));

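  // Advance to the next (map, transition, handler) triple.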
  __ bind(&prepare_next);
  __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
  __ cmp(pointer_reg, too_far);
  __ b(lt, &next_loop);

  // We exhausted our array of (map, transition, handler) triples.
  __ jmp(miss);
}


void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm,
                                          bool in_frame) {
  Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r1
  Register key = VectorStoreICDescriptor::NameRegister();           // r2
  Register vector = VectorStoreICDescriptor::VectorRegister();      // r3
  Register slot = VectorStoreICDescriptor::SlotRegister();          // r4
  DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0));          // r0
  Register feedback = r5;
  Register receiver_map = r6;
  Register scratch1 = r9;

  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));

  // Try to quickly handle the monomorphic case without knowing for sure
  // if we have a weak cell in feedback. We do know it's safe to look
  // at WeakCell::kValueOffset.
  Label try_array, load_smi_map, compare_map;
  Label not_array, miss;
  HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
                        scratch1, &compare_map, &load_smi_map, &try_array);

  __ bind(&try_array);
  // Is it a fixed array?
  __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &not_array);

  // We have a polymorphic element handler.
  Label polymorphic, try_poly_name;
  __ bind(&polymorphic);

  // We are using register r8, which is used for the embedded constant pool
  // when FLAG_enable_embedded_constant_pool is true.
  DCHECK(!FLAG_enable_embedded_constant_pool);
  Register scratch2 = r8;

  HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
                             &miss);

  __ bind(&not_array);
  // Is it generic?
  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
  __ b(ne, &try_poly_name);
  Handle<Code> megamorphic_stub =
      KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
  __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);

  __ bind(&try_poly_name);
  // We might have a name in feedback, and a fixed array in the next slot.
  __ cmp(key, feedback);
  __ b(ne, &miss);
  // If the name comparison succeeded, we know we have a fixed array with
  // at least one map/handler pair.
  __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
  __ ldr(feedback,
         FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
  HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
                   &miss);

  __ bind(&miss);
  KeyedStoreIC::GenerateMiss(masm);

  __ bind(&load_smi_map);
  __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
  __ jmp(&compare_map);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
    PredictableCodeSizeScope predictable(masm);
    predictable.ExpectSize(masm->CallStubSize(&stub) +
                           2 * Assembler::kInstrSize);
    __ push(lr);
    __ CallStub(&stub);
    __ pop(lr);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push lr" instruction, followed by a call.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      3 * Assembler::kInstrSize;
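  // That is one instruction for the "push lr" plus the two-instruction call
  // sequence whose size is pinned down by the PredictableCodeSizeScope in
  // MaybeCallEntryHook above.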

  // This should contain all kCallerSaved registers.
  const RegList kSavedRegs =
      1 <<  0 |  // r0
      1 <<  1 |  // r1
      1 <<  2 |  // r2
      1 <<  3 |  // r3
      1 <<  5 |  // r5
      1 <<  9;   // r9
  // We also save lr, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = 7;

  DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);

  // Save all caller-save registers as this may be called from anywhere.
  __ stm(db_w, sp, kSavedRegs | lr.bit());

  // Compute the function's address for the first argument.
  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(r5, sp);
    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
    __ and_(sp, sp, Operand(-frame_alignment));
  }

#if V8_HOST_ARCH_ARM
  int32_t entry_hook =
      reinterpret_cast<int32_t>(isolate()->function_entry_hook());
  __ mov(ip, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address. The trampoline additionally
  // takes an isolate as a third parameter.
  __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ mov(ip, Operand(ExternalReference(&dispatcher,
                                       ExternalReference::BUILTIN_CALL,
                                       isolate())));
#endif
  __ Call(ip);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, r5);
  }

  // Also pop pc, which returns to the caller (the effect of Ret(0)).
  __ ldm(ia_w, sp, kSavedRegs | pc.bit());
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      T stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // r0 - number of arguments
  // r1 - constructor
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    STATIC_ASSERT(FAST_ELEMENTS == 2);
    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
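    // The asserts above guarantee that, within the fast range, each holey
    // kind is its packed kind plus one, so testing the low bit is enough.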

    // Is the low bit set? If so, the kind is already holey, which is what
    // we want.
    __ tst(r3, Operand(1));
    __ b(ne, &normal_sequence);
  }

  // Look at the first argument.
  __ ldr(r5, MemOperand(sp, 0));
  __ cmp(r5, Operand::Zero());
  __ b(eq, &normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
                                                  holey_initial,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(masm->isolate(),
                                            initial,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the slot).
    __ add(r3, r3, Operand(1));

    if (FLAG_debug_code) {
      __ ldr(r5, FieldMemOperand(r2, 0));
      __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSite);
    }

    // Save the resulting elements kind in type info. We can't just store r3
    // in the AllocationSite::transition_info field, because the elements kind
    // is restricted to a portion of the field; the upper bits need to be
    // left alone.
    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
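    // Because the kind occupies the low bits of the field and the
    // packed-to-holey step is +1, adding the Smi-tagged delta below bumps
    // only the kind while leaving the upper bits untouched.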
    __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
    __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
    __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
      __ TailCallStub(&stub, eq);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(isolate, kind);
    stub.GetCode();
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
      stub1.GetCode();
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need stubs for these two kinds.
    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
    stubh1.GetCode();
    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
    stubh2.GetCode();
    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
    stubh3.GetCode();
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  if (argument_count() == ANY) {
    Label not_zero_case, not_one_case;
    __ tst(r0, r0);
    __ b(ne, &not_zero_case);
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

    __ bind(&not_zero_case);
    __ cmp(r0, Operand(1));
    __ b(gt, &not_one_case);
    CreateArrayDispatchOneArgument(masm, mode);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else if (argument_count() == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
  } else if (argument_count() == ONE) {
    CreateArrayDispatchOneArgument(masm, mode);
  } else if (argument_count() == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else {
    UNREACHABLE();
  }
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argc (only if argument_count() == ANY)
  //  -- r1 : constructor
  //  -- r2 : AllocationSite or undefined
  //  -- r3 : new target
  //  -- sp[0] : last argument (the return address is in lr)
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions, which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // A NULL pointer and a Smi both have the Smi tag bit clear, so the
    // following test catches both cases.
    __ tst(r4, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r4, r4, r5, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in r2 or a valid AllocationSite.
    __ AssertUndefinedOrAllocationSite(r2, r4);
  }

  // Enter the context of the Array function.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  Label subclassing;
  __ cmp(r3, r1);
  __ b(ne, &subclassing);

  Label no_info;
  // Get the elements kind and case on that.
  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
  __ b(eq, &no_info);

  __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r3);
  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
  __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);

  __ bind(&subclassing);
  switch (argument_count()) {
    case ANY:
    case MORE_THAN_ONE:
      __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
      __ add(r0, r0, Operand(3));
      break;
    case NONE:
      __ str(r1, MemOperand(sp, 0 * kPointerSize));
      __ mov(r0, Operand(3));
      break;
    case ONE:
      __ str(r1, MemOperand(sp, 1 * kPointerSize));
      __ mov(r0, Operand(4));
      break;
  }
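  // Pass the new target and the allocation site. Together with the
  // constructor stored over the receiver slot above, these are the three
  // extra arguments included in the argument count in r0.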
  __ Push(r3, r2);
  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  __ cmp(r0, Operand(1));

  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
  __ TailCallStub(&stub0, lo);

  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
  __ TailCallStub(&stubN, hi);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array; look at the first argument.
    __ ldr(r3, MemOperand(sp, 0));
    __ cmp(r3, Operand::Zero());

    InternalArraySingleArgumentConstructorStub
        stub1_holey(isolate(), GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey, ne);
  }

  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
  __ TailCallStub(&stub1);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0 : argc
  //  -- r1 : constructor
  //  -- sp[0] : last argument (the return address is in lr)
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions, which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // A NULL pointer and a Smi both have the Smi tag bit clear, so the
    // following test catches both cases.
    __ tst(r3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into r3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ DecodeField<Map::ElementsKindBits>(r3);

  if (FLAG_debug_code) {
    Label done;
    __ cmp(r3, Operand(FAST_ELEMENTS));
    __ b(eq, &done);
    __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
    __ Assert(eq,
              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmp(r3, Operand(FAST_ELEMENTS));
  __ b(eq, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register context = cp;
  Register result = r0;
  Register slot = r2;

  // Go up the context chain to the script context.
  for (int i = 0; i < depth(); ++i) {
    __ ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
    context = result;
  }

  // Load the PropertyCell value at the specified slot.
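  // The slot index arrives untagged (it is Smi-tagged only on the runtime
  // fallback path below), so scale it by the pointer size to form the offset.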
  __ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
  __ ldr(result, ContextMemOperand(result));
  __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));

  // If the result is not the_hole, return. Otherwise, handle in the runtime.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  __ Ret(ne);

  // Fallback to runtime.
  __ SmiTag(slot);
  __ push(slot);
  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
}


void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
  Register value = r0;
  Register slot = r2;

  Register cell = r1;
  Register cell_details = r4;
  Register cell_value = r5;
  Register cell_value_map = r6;
  Register scratch = r9;

  Register context = cp;
  Register context_temp = cell;

  Label fast_heapobject_case, fast_smi_case, slow_case;

  if (FLAG_debug_code) {
    __ CompareRoot(value, Heap::kTheHoleValueRootIndex);
    __ Check(ne, kUnexpectedValue);
  }

  // Go up the context chain to the script context.
  for (int i = 0; i < depth(); i++) {
    __ ldr(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX));
    context = context_temp;
  }

  // Load the PropertyCell at the specified slot.
  __ add(cell, context, Operand(slot, LSL, kPointerSizeLog2));
  __ ldr(cell, ContextMemOperand(cell));

  // Load PropertyDetails for the cell (actually only the cell_type and kind).
  __ ldr(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset));
  __ SmiUntag(cell_details);
  __ and_(cell_details, cell_details,
          Operand(PropertyDetails::PropertyCellTypeField::kMask |
                  PropertyDetails::KindField::kMask |
                  PropertyDetails::kAttributesReadOnlyMask));

  // Check if PropertyCell holds mutable data.
  Label not_mutable_data;
  __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
                                   PropertyCellType::kMutable) |
                               PropertyDetails::KindField::encode(kData)));
  __ b(ne, &not_mutable_data);
  __ JumpIfSmi(value, &fast_smi_case);

  __ bind(&fast_heapobject_case);
  __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  // RecordWriteField clobbers the value register, so we copy it before the
  // call.
  __ mov(r4, Operand(value));
  __ RecordWriteField(cell, PropertyCell::kValueOffset, r4, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Ret();

  __ bind(&not_mutable_data);
  // Check if PropertyCell value matches the new value (relevant for Constant,
  // ConstantType and Undefined cells).
  Label not_same_value;
  __ ldr(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  __ cmp(cell_value, value);
  __ b(ne, &not_same_value);

  // Make sure the PropertyCell is not marked READ_ONLY.
  __ tst(cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask));
  __ b(ne, &slow_case);

  if (FLAG_debug_code) {
    Label done;
    // This can only be true for Constant, ConstantType and Undefined cells,
    // because we never store the_hole via this stub.
    __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
                                     PropertyCellType::kConstant) |
                                 PropertyDetails::KindField::encode(kData)));
    __ b(eq, &done);
    __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
                                     PropertyCellType::kConstantType) |
                                 PropertyDetails::KindField::encode(kData)));
    __ b(eq, &done);
    __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
                                     PropertyCellType::kUndefined) |
                                 PropertyDetails::KindField::encode(kData)));
    __ Check(eq, kUnexpectedValue);
    __ bind(&done);
  }
  __ Ret();
  __ bind(&not_same_value);

  // Check if PropertyCell contains data with constant type (and is not
  // READ_ONLY).
  __ cmp(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode(
                                   PropertyCellType::kConstantType) |
                               PropertyDetails::KindField::encode(kData)));
  __ b(ne, &slow_case);

  // Now either both old and new values must be smis or both must be heap
  // objects with the same map.
  Label value_is_heap_object;
  __ JumpIfNotSmi(value, &value_is_heap_object);
  __ JumpIfNotSmi(cell_value, &slow_case);
  // Old and new values are smis, no need for a write barrier here.
  __ bind(&fast_smi_case);
  __ str(value, FieldMemOperand(cell, PropertyCell::kValueOffset));
  __ Ret();

  __ bind(&value_is_heap_object);
  __ JumpIfSmi(cell_value, &slow_case);

  __ ldr(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset));
  __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ cmp(cell_value_map, scratch);
  __ b(eq, &fast_heapobject_case);

  // Fallback to runtime.
  __ bind(&slow_case);
  __ SmiTag(slot);
  __ Push(slot, value);
  __ TailCallRuntime(is_strict(language_mode())
                         ? Runtime::kStoreGlobalViaContext_Strict
                         : Runtime::kStoreGlobalViaContext_Sloppy);
}


static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
  return ref0.address() - ref1.address();
}


// Calls an API function. Allocates HandleScope, extracts the returned value
// from the handle, and propagates exceptions. Restores context. stack_space
// is the space to be unwound on exit (includes the call JS arguments space
// and the additional space allocated for the fast call).
static void CallApiFunctionAndReturn(MacroAssembler* masm,
                                     Register function_address,
                                     ExternalReference thunk_ref,
                                     int stack_space,
                                     MemOperand* stack_space_operand,
                                     MemOperand return_value_operand,
                                     MemOperand* context_restore_operand) {
  Isolate* isolate = masm->isolate();
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address(isolate);
  const int kNextOffset = 0;
  const int kLimitOffset = AddressOffset(
      ExternalReference::handle_scope_limit_address(isolate), next_address);
  const int kLevelOffset = AddressOffset(
      ExternalReference::handle_scope_level_address(isolate), next_address);
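  // The limit and level addresses are expressed as fixed offsets from
  // next_address, so a single base register (r9 below) reaches all three
  // handle scope fields.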

  DCHECK(function_address.is(r1) || function_address.is(r2));

  Label profiler_disabled;
  Label end_profiler_check;
  __ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
  __ ldrb(r9, MemOperand(r9, 0));
  __ cmp(r9, Operand(0));
  __ b(eq, &profiler_disabled);

  // Additional parameter is the address of the actual callback.
  __ mov(r3, Operand(thunk_ref));
  __ jmp(&end_profiler_check);

  __ bind(&profiler_disabled);
  __ Move(r3, function_address);
  __ bind(&end_profiler_check);

  // Allocate HandleScope in callee-save registers.
  __ mov(r9, Operand(next_address));
  __ ldr(r4, MemOperand(r9, kNextOffset));
  __ ldr(r5, MemOperand(r9, kLimitOffset));
  __ ldr(r6, MemOperand(r9, kLevelOffset));
  __ add(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r0);
    __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  // Native call returns to the DirectCEntry stub which redirects to the
  // return address pushed on the stack (it could have moved after GC).
  // The DirectCEntry stub itself is generated early and never moves.
  DirectCEntryStub stub(isolate);
  stub.GenerateCall(masm, r3);

  if (FLAG_log_timer_events) {
    FrameScope frame(masm, StackFrame::MANUAL);
    __ PushSafepointRegisters();
    __ PrepareCallCFunction(1, r0);
    __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
                     1);
    __ PopSafepointRegisters();
  }

  Label promote_scheduled_exception;
  Label delete_allocated_handles;
  Label leave_exit_frame;
  Label return_value_loaded;

  // Load the value from ReturnValue.
  __ ldr(r0, return_value_operand);
  __ bind(&return_value_loaded);
  // No more valid handles (the result handle was the last one). Restore
  // previous handle scope.
  __ str(r4, MemOperand(r9, kNextOffset));
  if (__ emit_debug_code()) {
    __ ldr(r1, MemOperand(r9, kLevelOffset));
    __ cmp(r1, r6);
    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ sub(r6, r6, Operand(1));
  __ str(r6, MemOperand(r9, kLevelOffset));
  __ ldr(ip, MemOperand(r9, kLimitOffset));
  __ cmp(r5, ip);
  __ b(ne, &delete_allocated_handles);

  // Leave the API exit frame.
  __ bind(&leave_exit_frame);
  bool restore_context = context_restore_operand != NULL;
  if (restore_context) {
    __ ldr(cp, *context_restore_operand);
  }
  // LeaveExitFrame expects unwind space to be in a register.
  if (stack_space_operand != NULL) {
    __ ldr(r4, *stack_space_operand);
  } else {
    __ mov(r4, Operand(stack_space));
  }
  __ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);

  // Check if the function scheduled an exception.
  __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
  __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
  __ ldr(r5, MemOperand(ip));
  __ cmp(r4, r5);
  __ b(ne, &promote_scheduled_exception);

  __ mov(pc, lr);

  // Re-throw by promoting a scheduled exception.
  __ bind(&promote_scheduled_exception);
  __ TailCallRuntime(Runtime::kPromoteScheduledException);

  // HandleScope limit has changed. Delete allocated extensions.
  __ bind(&delete_allocated_handles);
  __ str(r5, MemOperand(r9, kLimitOffset));
  __ mov(r4, r0);
  __ PrepareCallCFunction(1, r5);
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
                   1);
  __ mov(r0, r4);
  __ jmp(&leave_exit_frame);
}


static void CallApiFunctionStubHelper(MacroAssembler* masm,
                                      const ParameterCount& argc,
                                      bool return_first_arg,
                                      bool call_data_undefined) {
  // ----------- S t a t e -------------
  //  -- r0                  : callee
  //  -- r4                  : call_data
  //  -- r2                  : holder
  //  -- r1                  : api_function_address
  //  -- r3                  : number of arguments if argc is a register
  //  -- cp                  : context
  //  --
  //  -- sp[0]               : last argument
  //  -- ...
  //  -- sp[(argc - 1) * 4]  : first argument
  //  -- sp[argc * 4]        : receiver
  // -----------------------------------

  Register callee = r0;
  Register call_data = r4;
  Register holder = r2;
  Register api_function_address = r1;
  Register context = cp;

  typedef FunctionCallbackArguments FCA;

  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
  STATIC_ASSERT(FCA::kCalleeIndex == 5);
  STATIC_ASSERT(FCA::kDataIndex == 4);
  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
  STATIC_ASSERT(FCA::kIsolateIndex == 1);
  STATIC_ASSERT(FCA::kHolderIndex == 0);
  STATIC_ASSERT(FCA::kArgsLength == 7);
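  // The pushes below build the implicit FunctionCallbackArguments block in
  // reverse index order, leaving the holder (index 0) at sp[0].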

  DCHECK(argc.is_immediate() || r3.is(argc.reg()));

  // context save
  __ push(context);
  // load context from callee
  __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));

  // callee
  __ push(callee);

  // call data
  __ push(call_data);

  Register scratch = call_data;
  if (!call_data_undefined) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  }
  // return value
  __ push(scratch);
  // return value default
  __ push(scratch);
  // isolate
  __ mov(scratch,
         Operand(ExternalReference::isolate_address(masm->isolate())));
  __ push(scratch);
  // holder
  __ push(holder);

  // Prepare arguments.
  __ mov(scratch, sp);

  // Allocate the FunctionCallbackInfo structure in the arguments' space,
  // since it's not controlled by GC.
  const int kApiStackSpace = 4;

  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
  // r0 = FunctionCallbackInfo&
  // The arguments are located just above the return address.
  __ add(r0, sp, Operand(1 * kPointerSize));
  // FunctionCallbackInfo::implicit_args_
  __ str(scratch, MemOperand(r0, 0 * kPointerSize));
  if (argc.is_immediate()) {
    // FunctionCallbackInfo::values_
    __ add(ip, scratch,
           Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
    __ str(ip, MemOperand(r0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    __ mov(ip, Operand(argc.immediate()));
    __ str(ip, MemOperand(r0, 2 * kPointerSize));
    // FunctionCallbackInfo::is_construct_call_ = 0
    __ mov(ip, Operand::Zero());
    __ str(ip, MemOperand(r0, 3 * kPointerSize));
  } else {
    // FunctionCallbackInfo::values_
    __ add(ip, scratch, Operand(argc.reg(), LSL, kPointerSizeLog2));
    __ add(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
    __ str(ip, MemOperand(r0, 1 * kPointerSize));
    // FunctionCallbackInfo::length_ = argc
    __ str(argc.reg(), MemOperand(r0, 2 * kPointerSize));
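    // In the register case, the is_construct_call_ field doubles as the
    // number of bytes to unwind on return: (argc + FCA::kArgsLength + 1)
    // slots, scaled to bytes below and read back via stack_space_operand.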
    // FunctionCallbackInfo::is_construct_call_
    __ add(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
    __ mov(ip, Operand(argc.reg(), LSL, kPointerSizeLog2));
    __ str(ip, MemOperand(r0, 3 * kPointerSize));
  }

  ExternalReference thunk_ref =
      ExternalReference::invoke_function_callback(masm->isolate());

  AllowExternalCallThatCantCauseGC scope(masm);
  MemOperand context_restore_operand(
      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
  // Stores return the first JS argument.
  int return_value_offset = 0;
  if (return_first_arg) {
    return_value_offset = 2 + FCA::kArgsLength;
  } else {
    return_value_offset = 2 + FCA::kReturnValueOffset;
  }
  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
  int stack_space = 0;
  MemOperand is_construct_call_operand = MemOperand(sp, 4 * kPointerSize);
  MemOperand* stack_space_operand = &is_construct_call_operand;
  if (argc.is_immediate()) {
    stack_space = argc.immediate() + FCA::kArgsLength + 1;
    stack_space_operand = NULL;
  }
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
                           stack_space_operand, return_value_operand,
                           &context_restore_operand);
}


void CallApiFunctionStub::Generate(MacroAssembler* masm) {
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(r3), false,
                            call_data_undefined);
}


void CallApiAccessorStub::Generate(MacroAssembler* masm) {
  bool is_store = this->is_store();
  int argc = this->argc();
  bool call_data_undefined = this->call_data_undefined();
  CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
                            call_data_undefined);
}


void CallApiGetterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- sp[0]                  : name
  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
  //  -- ...
  //  -- r2                     : api_function_address
  // -----------------------------------

  Register api_function_address = ApiGetterDescriptor::function_address();
  DCHECK(api_function_address.is(r2));

  __ mov(r0, sp);  // r0 = Handle<Name>
  __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = PCA

  const int kApiStackSpace = 1;
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(false, kApiStackSpace);

  // Create a PropertyAccessorInfo instance on the stack above the exit frame
  // with r1 (internal::Object** args_) as the data.
  __ str(r1, MemOperand(sp, 1 * kPointerSize));
  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&

  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;

  ExternalReference thunk_ref =
      ExternalReference::invoke_accessor_getter_callback(isolate());
  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
                           kStackUnwindSpace, NULL,
                           MemOperand(fp, 6 * kPointerSize), NULL);
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM