1// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_ARM64
6
7#include "src/code-stubs.h"
8#include "src/api-arguments.h"
9#include "src/bootstrapper.h"
10#include "src/codegen.h"
11#include "src/ic/handler-compiler.h"
12#include "src/ic/ic.h"
13#include "src/ic/stub-cache.h"
14#include "src/isolate.h"
15#include "src/regexp/jsregexp.h"
16#include "src/regexp/regexp-macro-assembler.h"
17#include "src/runtime/runtime.h"
18
19#include "src/arm64/code-stubs-arm64.h"
20#include "src/arm64/frames-arm64.h"
21
22namespace v8 {
23namespace internal {
24
25#define __ ACCESS_MASM(masm)
26
27void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
28  __ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
29  __ Str(x1, MemOperand(jssp, x5));
30  __ Push(x1);
31  __ Push(x2);
32  __ Add(x0, x0, Operand(3));
33  __ TailCallRuntime(Runtime::kNewArray);
34}
35
36void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
37                                               ExternalReference miss) {
38  // Update the static counter each time a new code stub is generated.
39  isolate()->counters()->code_stubs()->Increment();
40
41  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
42  int param_count = descriptor.GetRegisterParameterCount();
43  {
44    // Call the runtime system in a fresh internal frame.
45    FrameScope scope(masm, StackFrame::INTERNAL);
46    DCHECK((param_count == 0) ||
47           x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
48
49    // Push arguments
50    MacroAssembler::PushPopQueue queue(masm);
51    for (int i = 0; i < param_count; ++i) {
52      queue.Queue(descriptor.GetRegisterParameter(i));
53    }
54    queue.PushQueued();
55
56    __ CallExternalReference(miss, param_count);
57  }
58
59  __ Ret();
60}
61
62
63void DoubleToIStub::Generate(MacroAssembler* masm) {
64  Label done;
65  Register input = source();
66  Register result = destination();
67  DCHECK(is_truncating());
68
69  DCHECK(result.Is64Bits());
70  DCHECK(jssp.Is(masm->StackPointer()));
71
72  int double_offset = offset();
73
74  DoubleRegister double_scratch = d0;  // only used if !skip_fastpath()
75  Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
76  Register scratch2 =
77      GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
78
79  __ Push(scratch1, scratch2);
80  // Account for saved regs if input is jssp.
81  if (input.is(jssp)) double_offset += 2 * kPointerSize;
82
83  if (!skip_fastpath()) {
84    __ Push(double_scratch);
85    if (input.is(jssp)) double_offset += 1 * kDoubleSize;
86    __ Ldr(double_scratch, MemOperand(input, double_offset));
87    // Try to convert with a FPU convert instruction.  This handles all
88    // non-saturating cases.
89    __ TryConvertDoubleToInt64(result, double_scratch, &done);
90    __ Fmov(result, double_scratch);
91  } else {
92    __ Ldr(result, MemOperand(input, double_offset));
93  }
94
95  // If we reach here we need to manually convert the input to an int32.
96
97  // Extract the exponent.
98  Register exponent = scratch1;
99  __ Ubfx(exponent, result, HeapNumber::kMantissaBits,
100          HeapNumber::kExponentBits);
101
102  // It the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
103  // the mantissa gets shifted completely out of the int32_t result.
104  __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
105  __ CzeroX(result, ge);
106  __ B(ge, &done);
107
108  // The Fcvtzs sequence handles all cases except where the conversion causes
109  // signed overflow in the int64_t target. Since we've already handled
110  // exponents >= 84, we can guarantee that 63 <= exponent < 84.
111
112  if (masm->emit_debug_code()) {
113    __ Cmp(exponent, HeapNumber::kExponentBias + 63);
114    // Exponents less than this should have been handled by the Fcvt case.
115    __ Check(ge, kUnexpectedValue);
116  }
117
118  // Isolate the mantissa bits, and set the implicit '1'.
119  Register mantissa = scratch2;
120  __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
121  __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
122
123  // Negate the mantissa if necessary.
124  __ Tst(result, kXSignMask);
125  __ Cneg(mantissa, mantissa, ne);
126
127  // Shift the mantissa bits in the correct place. We know that we have to shift
128  // it left here, because exponent >= 63 >= kMantissaBits.
129  __ Sub(exponent, exponent,
130         HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
131  __ Lsl(result, mantissa, exponent);
132
133  __ Bind(&done);
134  if (!skip_fastpath()) {
135    __ Pop(double_scratch);
136  }
137  __ Pop(scratch2, scratch1);
138  __ Ret();
139}
140
141
142// See call site for description.
143static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
144                                          Register right, Register scratch,
145                                          FPRegister double_scratch,
146                                          Label* slow, Condition cond) {
147  DCHECK(!AreAliased(left, right, scratch));
148  Label not_identical, return_equal, heap_number;
149  Register result = x0;
150
151  __ Cmp(right, left);
152  __ B(ne, &not_identical);
153
154  // Test for NaN. Sadly, we can't just compare to factory::nan_value(),
155  // so we do the second best thing - test it ourselves.
156  // They are both equal and they are not both Smis so both of them are not
157  // Smis.  If it's not a heap number, then return equal.
158  Register right_type = scratch;
159  if ((cond == lt) || (cond == gt)) {
160    // Call runtime on identical JSObjects.  Otherwise return equal.
161    __ JumpIfObjectType(right, right_type, right_type, FIRST_JS_RECEIVER_TYPE,
162                        slow, ge);
163    // Call runtime on identical symbols since we need to throw a TypeError.
164    __ Cmp(right_type, SYMBOL_TYPE);
165    __ B(eq, slow);
166  } else if (cond == eq) {
167    __ JumpIfHeapNumber(right, &heap_number);
168  } else {
169    __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
170                        &heap_number);
171    // Comparing JS objects with <=, >= is complicated.
172    __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
173    __ B(ge, slow);
174    // Call runtime on identical symbols since we need to throw a TypeError.
175    __ Cmp(right_type, SYMBOL_TYPE);
176    __ B(eq, slow);
177    // Normally here we fall through to return_equal, but undefined is
178    // special: (undefined == undefined) == true, but
179    // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
180    if ((cond == le) || (cond == ge)) {
181      __ Cmp(right_type, ODDBALL_TYPE);
182      __ B(ne, &return_equal);
183      __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
184      if (cond == le) {
185        // undefined <= undefined should fail.
186        __ Mov(result, GREATER);
187      } else {
188        // undefined >= undefined should fail.
189        __ Mov(result, LESS);
190      }
191      __ Ret();
192    }
193  }
194
195  __ Bind(&return_equal);
196  if (cond == lt) {
197    __ Mov(result, GREATER);  // Things aren't less than themselves.
198  } else if (cond == gt) {
199    __ Mov(result, LESS);     // Things aren't greater than themselves.
200  } else {
201    __ Mov(result, EQUAL);    // Things are <=, >=, ==, === themselves.
202  }
203  __ Ret();
204
205  // Cases lt and gt have been handled earlier, and case ne is never seen, as
206  // it is handled in the parser (see Parser::ParseBinaryExpression). We are
207  // only concerned with cases ge, le and eq here.
208  if ((cond != lt) && (cond != gt)) {
209    DCHECK((cond == ge) || (cond == le) || (cond == eq));
210    __ Bind(&heap_number);
211    // Left and right are identical pointers to a heap number object. Return
212    // non-equal if the heap number is a NaN, and equal otherwise. Comparing
213    // the number to itself will set the overflow flag iff the number is NaN.
214    __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset));
215    __ Fcmp(double_scratch, double_scratch);
216    __ B(vc, &return_equal);  // Not NaN, so treat as normal heap number.
217
218    if (cond == le) {
219      __ Mov(result, GREATER);
220    } else {
221      __ Mov(result, LESS);
222    }
223    __ Ret();
224  }
225
226  // No fall through here.
227  if (FLAG_debug_code) {
228    __ Unreachable();
229  }
230
231  __ Bind(&not_identical);
232}
233
234
235// See call site for description.
236static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
237                                           Register left,
238                                           Register right,
239                                           Register left_type,
240                                           Register right_type,
241                                           Register scratch) {
242  DCHECK(!AreAliased(left, right, left_type, right_type, scratch));
243
244  if (masm->emit_debug_code()) {
245    // We assume that the arguments are not identical.
246    __ Cmp(left, right);
247    __ Assert(ne, kExpectedNonIdenticalObjects);
248  }
249
250  // If either operand is a JS object or an oddball value, then they are not
251  // equal since their pointers are different.
252  // There is no test for undetectability in strict equality.
253  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
254  Label right_non_object;
255
256  __ Cmp(right_type, FIRST_JS_RECEIVER_TYPE);
257  __ B(lt, &right_non_object);
258
259  // Return non-zero - x0 already contains a non-zero pointer.
260  DCHECK(left.is(x0) || right.is(x0));
261  Label return_not_equal;
262  __ Bind(&return_not_equal);
263  __ Ret();
264
265  __ Bind(&right_non_object);
266
267  // Check for oddballs: true, false, null, undefined.
268  __ Cmp(right_type, ODDBALL_TYPE);
269
270  // If right is not ODDBALL, test left. Otherwise, set eq condition.
271  __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne);
272
273  // If right or left is not ODDBALL, test left >= FIRST_JS_RECEIVER_TYPE.
274  // Otherwise, right or left is ODDBALL, so set a ge condition.
275  __ Ccmp(left_type, FIRST_JS_RECEIVER_TYPE, NVFlag, ne);
276
277  __ B(ge, &return_not_equal);
278
279  // Internalized strings are unique, so they can only be equal if they are the
280  // same object. We have already tested that case, so if left and right are
281  // both internalized strings, they cannot be equal.
282  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
283  __ Orr(scratch, left_type, right_type);
284  __ TestAndBranchIfAllClear(
285      scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal);
286}
287
288
289// See call site for description.
290static void EmitSmiNonsmiComparison(MacroAssembler* masm,
291                                    Register left,
292                                    Register right,
293                                    FPRegister left_d,
294                                    FPRegister right_d,
295                                    Label* slow,
296                                    bool strict) {
297  DCHECK(!AreAliased(left_d, right_d));
298  DCHECK((left.is(x0) && right.is(x1)) ||
299         (right.is(x0) && left.is(x1)));
300  Register result = x0;
301
302  Label right_is_smi, done;
303  __ JumpIfSmi(right, &right_is_smi);
304
305  // Left is the smi. Check whether right is a heap number.
306  if (strict) {
307    // If right is not a number and left is a smi, then strict equality cannot
308    // succeed. Return non-equal.
309    Label is_heap_number;
310    __ JumpIfHeapNumber(right, &is_heap_number);
311    // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
312    if (!right.is(result)) {
313      __ Mov(result, NOT_EQUAL);
314    }
315    __ Ret();
316    __ Bind(&is_heap_number);
317  } else {
318    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
319    // runtime.
320    __ JumpIfNotHeapNumber(right, slow);
321  }
322
323  // Left is the smi. Right is a heap number. Load right value into right_d, and
324  // convert left smi into double in left_d.
325  __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset));
326  __ SmiUntagToDouble(left_d, left);
327  __ B(&done);
328
329  __ Bind(&right_is_smi);
330  // Right is a smi. Check whether the non-smi left is a heap number.
331  if (strict) {
332    // If left is not a number and right is a smi then strict equality cannot
333    // succeed. Return non-equal.
334    Label is_heap_number;
335    __ JumpIfHeapNumber(left, &is_heap_number);
336    // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
337    if (!left.is(result)) {
338      __ Mov(result, NOT_EQUAL);
339    }
340    __ Ret();
341    __ Bind(&is_heap_number);
342  } else {
343    // Smi compared non-strictly with a non-smi, non-heap-number. Call the
344    // runtime.
345    __ JumpIfNotHeapNumber(left, slow);
346  }
347
348  // Right is the smi. Left is a heap number. Load left value into left_d, and
349  // convert right smi into double in right_d.
350  __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset));
351  __ SmiUntagToDouble(right_d, right);
352
353  // Fall through to both_loaded_as_doubles.
354  __ Bind(&done);
355}
356
357
358// Fast negative check for internalized-to-internalized equality or receiver
359// equality. Also handles the undetectable receiver to null/undefined
360// comparison.
361// See call site for description.
362static void EmitCheckForInternalizedStringsOrObjects(
363    MacroAssembler* masm, Register left, Register right, Register left_map,
364    Register right_map, Register left_type, Register right_type,
365    Label* possible_strings, Label* runtime_call) {
366  DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
367  Register result = x0;
368  DCHECK(left.is(x0) || right.is(x0));
369
370  Label object_test, return_equal, return_unequal, undetectable;
371  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
372  // TODO(all): reexamine this branch sequence for optimisation wrt branch
373  // prediction.
374  __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test);
375  __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
376  __ Tbnz(left_type, MaskToBit(kIsNotStringMask), runtime_call);
377  __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings);
378
379  // Both are internalized. We already checked they weren't the same pointer so
380  // they are not equal. Return non-equal by returning the non-zero object
381  // pointer in x0.
382  __ Ret();
383
384  __ Bind(&object_test);
385
386  Register left_bitfield = left_type;
387  Register right_bitfield = right_type;
388  __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset));
389  __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset));
390  __ Tbnz(right_bitfield, MaskToBit(1 << Map::kIsUndetectable), &undetectable);
391  __ Tbnz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
392
393  __ CompareInstanceType(right_map, right_type, FIRST_JS_RECEIVER_TYPE);
394  __ B(lt, runtime_call);
395  __ CompareInstanceType(left_map, left_type, FIRST_JS_RECEIVER_TYPE);
396  __ B(lt, runtime_call);
397
398  __ Bind(&return_unequal);
399  // Return non-equal by returning the non-zero object pointer in x0.
400  __ Ret();
401
402  __ Bind(&undetectable);
403  __ Tbz(left_bitfield, MaskToBit(1 << Map::kIsUndetectable), &return_unequal);
404
405  // If both sides are JSReceivers, then the result is false according to
406  // the HTML specification, which says that only comparisons with null or
407  // undefined are affected by special casing for document.all.
408  __ CompareInstanceType(right_map, right_type, ODDBALL_TYPE);
409  __ B(eq, &return_equal);
410  __ CompareInstanceType(left_map, left_type, ODDBALL_TYPE);
411  __ B(ne, &return_unequal);
412
413  __ Bind(&return_equal);
414  __ Mov(result, EQUAL);
415  __ Ret();
416}
417
418
419static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
420                                         CompareICState::State expected,
421                                         Label* fail) {
422  Label ok;
423  if (expected == CompareICState::SMI) {
424    __ JumpIfNotSmi(input, fail);
425  } else if (expected == CompareICState::NUMBER) {
426    __ JumpIfSmi(input, &ok);
427    __ JumpIfNotHeapNumber(input, fail);
428  }
429  // We could be strict about internalized/non-internalized here, but as long as
430  // hydrogen doesn't care, the stub doesn't have to care either.
431  __ Bind(&ok);
432}
433
434
435void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
436  Register lhs = x1;
437  Register rhs = x0;
438  Register result = x0;
439  Condition cond = GetCondition();
440
441  Label miss;
442  CompareICStub_CheckInputType(masm, lhs, left(), &miss);
443  CompareICStub_CheckInputType(masm, rhs, right(), &miss);
444
445  Label slow;  // Call builtin.
446  Label not_smis, both_loaded_as_doubles;
447  Label not_two_smis, smi_done;
448  __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
449  __ SmiUntag(lhs);
450  __ Sub(result, lhs, Operand::UntagSmi(rhs));
451  __ Ret();
452
453  __ Bind(&not_two_smis);
454
455  // NOTICE! This code is only reached after a smi-fast-case check, so it is
456  // certain that at least one operand isn't a smi.
457
458  // Handle the case where the objects are identical. Either returns the answer
459  // or goes to slow. Only falls through if the objects were not identical.
460  EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond);
461
462  // If either is a smi (we know that at least one is not a smi), then they can
463  // only be strictly equal if the other is a HeapNumber.
464  __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
465
466  // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that
467  // can:
468  //  1) Return the answer.
469  //  2) Branch to the slow case.
470  //  3) Fall through to both_loaded_as_doubles.
471  // In case 3, we have found out that we were dealing with a number-number
472  // comparison. The double values of the numbers have been loaded, right into
473  // rhs_d, left into lhs_d.
474  FPRegister rhs_d = d0;
475  FPRegister lhs_d = d1;
476  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
477
478  __ Bind(&both_loaded_as_doubles);
479  // The arguments have been converted to doubles and stored in rhs_d and
480  // lhs_d.
481  Label nan;
482  __ Fcmp(lhs_d, rhs_d);
483  __ B(vs, &nan);  // Overflow flag set if either is NaN.
484  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
485  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
486  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
487  __ Ret();
488
489  __ Bind(&nan);
490  // Left and/or right is a NaN. Load the result register with whatever makes
491  // the comparison fail, since comparisons with NaN always fail (except ne,
492  // which is filtered out at a higher level.)
493  DCHECK(cond != ne);
494  if ((cond == lt) || (cond == le)) {
495    __ Mov(result, GREATER);
496  } else {
497    __ Mov(result, LESS);
498  }
499  __ Ret();
500
501  __ Bind(&not_smis);
502  // At this point we know we are dealing with two different objects, and
503  // neither of them is a smi. The objects are in rhs_ and lhs_.
504
505  // Load the maps and types of the objects.
506  Register rhs_map = x10;
507  Register rhs_type = x11;
508  Register lhs_map = x12;
509  Register lhs_type = x13;
510  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
511  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
512  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
513  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
514
515  if (strict()) {
516    // This emits a non-equal return sequence for some object types, or falls
517    // through if it was not lucky.
518    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14);
519  }
520
521  Label check_for_internalized_strings;
522  Label flat_string_check;
523  // Check for heap number comparison. Branch to earlier double comparison code
524  // if they are heap numbers, otherwise, branch to internalized string check.
525  __ Cmp(rhs_type, HEAP_NUMBER_TYPE);
526  __ B(ne, &check_for_internalized_strings);
527  __ Cmp(lhs_map, rhs_map);
528
529  // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat
530  // string check.
531  __ B(ne, &flat_string_check);
532
533  // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double
534  // comparison code.
535  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
536  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
537  __ B(&both_loaded_as_doubles);
538
539  __ Bind(&check_for_internalized_strings);
540  // In the strict case, the EmitStrictTwoHeapObjectCompare already took care
541  // of internalized strings.
542  if ((cond == eq) && !strict()) {
543    // Returns an answer for two internalized strings or two detectable objects.
544    // Otherwise branches to the string case or not both strings case.
545    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map,
546                                             lhs_type, rhs_type,
547                                             &flat_string_check, &slow);
548  }
549
550  // Check for both being sequential one-byte strings,
551  // and inline if that is the case.
552  __ Bind(&flat_string_check);
553  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
554                                                    x15, &slow);
555
556  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
557                      x11);
558  if (cond == eq) {
559    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
560                                                  x12);
561  } else {
562    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
563                                                    x12, x13);
564  }
565
566  // Never fall through to here.
567  if (FLAG_debug_code) {
568    __ Unreachable();
569  }
570
571  __ Bind(&slow);
572
573  if (cond == eq) {
574    {
575      FrameScope scope(masm, StackFrame::INTERNAL);
576      __ Push(cp);
577      __ Call(strict() ? isolate()->builtins()->StrictEqual()
578                       : isolate()->builtins()->Equal(),
579              RelocInfo::CODE_TARGET);
580      __ Pop(cp);
581    }
582    // Turn true into 0 and false into some non-zero value.
583    STATIC_ASSERT(EQUAL == 0);
584    __ LoadRoot(x1, Heap::kTrueValueRootIndex);
585    __ Sub(x0, x0, x1);
586    __ Ret();
587  } else {
588    __ Push(lhs, rhs);
589    int ncr;  // NaN compare result
590    if ((cond == lt) || (cond == le)) {
591      ncr = GREATER;
592    } else {
593      DCHECK((cond == gt) || (cond == ge));  // remaining cases
594      ncr = LESS;
595    }
596    __ Mov(x10, Smi::FromInt(ncr));
597    __ Push(x10);
598
599    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
600    // tagged as a small integer.
601    __ TailCallRuntime(Runtime::kCompare);
602  }
603
604  __ Bind(&miss);
605  GenerateMiss(masm);
606}
607
608
609void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
610  CPURegList saved_regs = kCallerSaved;
611  CPURegList saved_fp_regs = kCallerSavedFP;
612
613  // We don't allow a GC during a store buffer overflow so there is no need to
614  // store the registers in any particular way, but we do have to store and
615  // restore them.
616
617  // We don't care if MacroAssembler scratch registers are corrupted.
618  saved_regs.Remove(*(masm->TmpList()));
619  saved_fp_regs.Remove(*(masm->FPTmpList()));
620
621  __ PushCPURegList(saved_regs);
622  if (save_doubles()) {
623    __ PushCPURegList(saved_fp_regs);
624  }
625
626  AllowExternalCallThatCantCauseGC scope(masm);
627  __ Mov(x0, ExternalReference::isolate_address(isolate()));
628  __ CallCFunction(
629      ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
630
631  if (save_doubles()) {
632    __ PopCPURegList(saved_fp_regs);
633  }
634  __ PopCPURegList(saved_regs);
635  __ Ret();
636}
637
638
639void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
640    Isolate* isolate) {
641  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
642  stub1.GetCode();
643  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
644  stub2.GetCode();
645}
646
647
648void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
649  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
650  UseScratchRegisterScope temps(masm);
651  Register saved_lr = temps.UnsafeAcquire(to_be_pushed_lr());
652  Register return_address = temps.AcquireX();
653  __ Mov(return_address, lr);
654  // Restore lr with the value it had before the call to this stub (the value
655  // which must be pushed).
656  __ Mov(lr, saved_lr);
657  __ PushSafepointRegisters();
658  __ Ret(return_address);
659}
660
661
662void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
663  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
664  UseScratchRegisterScope temps(masm);
665  Register return_address = temps.AcquireX();
666  // Preserve the return address (lr will be clobbered by the pop).
667  __ Mov(return_address, lr);
668  __ PopSafepointRegisters();
669  __ Ret(return_address);
670}
671
672void MathPowStub::Generate(MacroAssembler* masm) {
673  // Stack on entry:
674  // jssp[0]: Exponent (as a tagged value).
675  // jssp[1]: Base (as a tagged value).
676  //
677  // The (tagged) result will be returned in x0, as a heap number.
678
679  Register exponent_tagged = MathPowTaggedDescriptor::exponent();
680  DCHECK(exponent_tagged.is(x11));
681  Register exponent_integer = MathPowIntegerDescriptor::exponent();
682  DCHECK(exponent_integer.is(x12));
683  Register saved_lr = x19;
684  FPRegister result_double = d0;
685  FPRegister base_double = d0;
686  FPRegister exponent_double = d1;
687  FPRegister base_double_copy = d2;
688  FPRegister scratch1_double = d6;
689  FPRegister scratch0_double = d7;
690
691  // A fast-path for integer exponents.
692  Label exponent_is_smi, exponent_is_integer;
693  // Allocate a heap number for the result, and return it.
694  Label done;
695
696  // Unpack the inputs.
697  if (exponent_type() == TAGGED) {
698    __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
699    __ Ldr(exponent_double,
700           FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
701  }
702
703  // Handle double (heap number) exponents.
704  if (exponent_type() != INTEGER) {
705    // Detect integer exponents stored as doubles and handle those in the
706    // integer fast-path.
707    __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
708                                 scratch0_double, &exponent_is_integer);
709
710    {
711      AllowExternalCallThatCantCauseGC scope(masm);
712      __ Mov(saved_lr, lr);
713      __ CallCFunction(
714          ExternalReference::power_double_double_function(isolate()), 0, 2);
715      __ Mov(lr, saved_lr);
716      __ B(&done);
717    }
718
719    // Handle SMI exponents.
720    __ Bind(&exponent_is_smi);
721    //  x10   base_tagged       The tagged base (input).
722    //  x11   exponent_tagged   The tagged exponent (input).
723    //  d1    base_double       The base as a double.
724    __ SmiUntag(exponent_integer, exponent_tagged);
725  }
726
727  __ Bind(&exponent_is_integer);
728  //  x10   base_tagged       The tagged base (input).
729  //  x11   exponent_tagged   The tagged exponent (input).
730  //  x12   exponent_integer  The exponent as an integer.
731  //  d1    base_double       The base as a double.
732
733  // Find abs(exponent). For negative exponents, we can find the inverse later.
734  Register exponent_abs = x13;
735  __ Cmp(exponent_integer, 0);
736  __ Cneg(exponent_abs, exponent_integer, mi);
737  //  x13   exponent_abs      The value of abs(exponent_integer).
738
739  // Repeatedly multiply to calculate the power.
740  //  result = 1.0;
741  //  For each bit n (exponent_integer{n}) {
742  //    if (exponent_integer{n}) {
743  //      result *= base;
744  //    }
745  //    base *= base;
746  //    if (remaining bits in exponent_integer are all zero) {
747  //      break;
748  //    }
749  //  }
750  Label power_loop, power_loop_entry, power_loop_exit;
751  __ Fmov(scratch1_double, base_double);
752  __ Fmov(base_double_copy, base_double);
753  __ Fmov(result_double, 1.0);
754  __ B(&power_loop_entry);
755
756  __ Bind(&power_loop);
757  __ Fmul(scratch1_double, scratch1_double, scratch1_double);
758  __ Lsr(exponent_abs, exponent_abs, 1);
759  __ Cbz(exponent_abs, &power_loop_exit);
760
761  __ Bind(&power_loop_entry);
762  __ Tbz(exponent_abs, 0, &power_loop);
763  __ Fmul(result_double, result_double, scratch1_double);
764  __ B(&power_loop);
765
766  __ Bind(&power_loop_exit);
767
768  // If the exponent was positive, result_double holds the result.
769  __ Tbz(exponent_integer, kXSignBit, &done);
770
771  // The exponent was negative, so find the inverse.
772  __ Fmov(scratch0_double, 1.0);
773  __ Fdiv(result_double, scratch0_double, result_double);
774  // ECMA-262 only requires Math.pow to return an 'implementation-dependent
775  // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow
776  // to calculate the subnormal value 2^-1074. This method of calculating
777  // negative powers doesn't work because 2^1074 overflows to infinity. To
778  // catch this corner-case, we bail out if the result was 0. (This can only
779  // occur if the divisor is infinity or the base is zero.)
780  __ Fcmp(result_double, 0.0);
781  __ B(&done, ne);
782
783  AllowExternalCallThatCantCauseGC scope(masm);
784  __ Mov(saved_lr, lr);
785  __ Fmov(base_double, base_double_copy);
786  __ Scvtf(exponent_double, exponent_integer);
787  __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
788                   0, 2);
789  __ Mov(lr, saved_lr);
790  __ Bind(&done);
791  __ Ret();
792}
793
794void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
795  // It is important that the following stubs are generated in this order
796  // because pregenerated stubs can only call other pregenerated stubs.
797  // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
798  // CEntryStub.
799  CEntryStub::GenerateAheadOfTime(isolate);
800  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
801  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
802  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
803  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
804  CreateWeakCellStub::GenerateAheadOfTime(isolate);
805  BinaryOpICStub::GenerateAheadOfTime(isolate);
806  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
807  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
808  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
809  StoreFastElementStub::GenerateAheadOfTime(isolate);
810}
811
812
813void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
814  StoreRegistersStateStub stub(isolate);
815  stub.GetCode();
816}
817
818
819void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
820  RestoreRegistersStateStub stub(isolate);
821  stub.GetCode();
822}
823
824
825void CodeStub::GenerateFPStubs(Isolate* isolate) {
826  // Floating-point code doesn't get special handling in ARM64, so there's
827  // nothing to do here.
828  USE(isolate);
829}
830
831
832bool CEntryStub::NeedsImmovableCode() {
833  // CEntryStub stores the return address on the stack before calling into
834  // C++ code. In some cases, the VM accesses this address, but it is not used
835  // when the C++ code returns to the stub because LR holds the return address
836  // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up
837  // returning to dead code.
838  // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
839  // find any comment to confirm this, and I don't hit any crashes whatever
840  // this function returns. The anaylsis should be properly confirmed.
841  return true;
842}
843
844
845void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
846  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
847  stub.GetCode();
848  CEntryStub stub_fp(isolate, 1, kSaveFPRegs);
849  stub_fp.GetCode();
850}
851
852
853void CEntryStub::Generate(MacroAssembler* masm) {
854  // The Abort mechanism relies on CallRuntime, which in turn relies on
855  // CEntryStub, so until this stub has been generated, we have to use a
856  // fall-back Abort mechanism.
857  //
858  // Note that this stub must be generated before any use of Abort.
859  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
860
861  ASM_LOCATION("CEntryStub::Generate entry");
862  ProfileEntryHookStub::MaybeCallEntryHook(masm);
863
864  // Register parameters:
865  //    x0: argc (including receiver, untagged)
866  //    x1: target
867  // If argv_in_register():
868  //    x11: argv (pointer to first argument)
869  //
870  // The stack on entry holds the arguments and the receiver, with the receiver
871  // at the highest address:
872  //
873  //    jssp]argc-1]: receiver
874  //    jssp[argc-2]: arg[argc-2]
875  //    ...           ...
876  //    jssp[1]:      arg[1]
877  //    jssp[0]:      arg[0]
878  //
879  // The arguments are in reverse order, so that arg[argc-2] is actually the
880  // first argument to the target function and arg[0] is the last.
881  DCHECK(jssp.Is(__ StackPointer()));
882  const Register& argc_input = x0;
883  const Register& target_input = x1;
884
885  // Calculate argv, argc and the target address, and store them in
886  // callee-saved registers so we can retry the call without having to reload
887  // these arguments.
888  // TODO(jbramley): If the first call attempt succeeds in the common case (as
889  // it should), then we might be better off putting these parameters directly
890  // into their argument registers, rather than using callee-saved registers and
891  // preserving them on the stack.
892  const Register& argv = x21;
893  const Register& argc = x22;
894  const Register& target = x23;
895
896  // Derive argv from the stack pointer so that it points to the first argument
897  // (arg[argc-2]), or just below the receiver in case there are no arguments.
898  //  - Adjust for the arg[] array.
899  Register temp_argv = x11;
900  if (!argv_in_register()) {
901    __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
902    //  - Adjust for the receiver.
903    __ Sub(temp_argv, temp_argv, 1 * kPointerSize);
904  }
905
906  // Reserve three slots to preserve x21-x23 callee-saved registers. If the
907  // result size is too large to be returned in registers then also reserve
908  // space for the return value.
909  int extra_stack_space = 3 + (result_size() <= 2 ? 0 : result_size());
910  // Enter the exit frame.
911  FrameScope scope(masm, StackFrame::MANUAL);
912  __ EnterExitFrame(
913      save_doubles(), x10, extra_stack_space,
914      is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
915  DCHECK(csp.Is(__ StackPointer()));
916
917  // Poke callee-saved registers into reserved space.
918  __ Poke(argv, 1 * kPointerSize);
919  __ Poke(argc, 2 * kPointerSize);
920  __ Poke(target, 3 * kPointerSize);
921
922  if (result_size() > 2) {
923    // Save the location of the return value into x8 for call.
924    __ Add(x8, __ StackPointer(), Operand(4 * kPointerSize));
925  }
926
927  // We normally only keep tagged values in callee-saved registers, as they
928  // could be pushed onto the stack by called stubs and functions, and on the
929  // stack they can confuse the GC. However, we're only calling C functions
930  // which can push arbitrary data onto the stack anyway, and so the GC won't
931  // examine that part of the stack.
932  __ Mov(argc, argc_input);
933  __ Mov(target, target_input);
934  __ Mov(argv, temp_argv);
935
936  // x21 : argv
937  // x22 : argc
938  // x23 : call target
939  //
940  // The stack (on entry) holds the arguments and the receiver, with the
941  // receiver at the highest address:
942  //
943  //         argv[8]:     receiver
944  // argv -> argv[0]:     arg[argc-2]
945  //         ...          ...
946  //         argv[...]:   arg[1]
947  //         argv[...]:   arg[0]
948  //
949  // Immediately below (after) this is the exit frame, as constructed by
950  // EnterExitFrame:
951  //         fp[8]:    CallerPC (lr)
952  //   fp -> fp[0]:    CallerFP (old fp)
953  //         fp[-8]:   Space reserved for SPOffset.
954  //         fp[-16]:  CodeObject()
955  //         csp[...]: Saved doubles, if saved_doubles is true.
956  //         csp[32]:  Alignment padding, if necessary.
957  //         csp[24]:  Preserved x23 (used for target).
958  //         csp[16]:  Preserved x22 (used for argc).
959  //         csp[8]:   Preserved x21 (used for argv).
960  //  csp -> csp[0]:   Space reserved for the return address.
961  //
962  // After a successful call, the exit frame, preserved registers (x21-x23) and
963  // the arguments (including the receiver) are dropped or popped as
964  // appropriate. The stub then returns.
965  //
966  // After an unsuccessful call, the exit frame and suchlike are left
967  // untouched, and the stub either throws an exception by jumping to one of
968  // the exception_returned label.
969
970  DCHECK(csp.Is(__ StackPointer()));
971
972  // Prepare AAPCS64 arguments to pass to the builtin.
973  __ Mov(x0, argc);
974  __ Mov(x1, argv);
975  __ Mov(x2, ExternalReference::isolate_address(isolate()));
976
977  Label return_location;
978  __ Adr(x12, &return_location);
979  __ Poke(x12, 0);
980
981  if (__ emit_debug_code()) {
982    // Verify that the slot below fp[kSPOffset]-8 points to the return location
983    // (currently in x12).
984    UseScratchRegisterScope temps(masm);
985    Register temp = temps.AcquireX();
986    __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
987    __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
988    __ Cmp(temp, x12);
989    __ Check(eq, kReturnAddressNotFoundInFrame);
990  }
991
992  // Call the builtin.
993  __ Blr(target);
994  __ Bind(&return_location);
995
996  if (result_size() > 2) {
997    DCHECK_EQ(3, result_size());
998    // Read result values stored on stack.
999    __ Ldr(x0, MemOperand(__ StackPointer(), 4 * kPointerSize));
1000    __ Ldr(x1, MemOperand(__ StackPointer(), 5 * kPointerSize));
1001    __ Ldr(x2, MemOperand(__ StackPointer(), 6 * kPointerSize));
1002  }
1003  // Result returned in x0, x1:x0 or x2:x1:x0 - do not destroy these registers!
1004
1005  //  x0    result0      The return code from the call.
1006  //  x1    result1      For calls which return ObjectPair or ObjectTriple.
1007  //  x2    result2      For calls which return ObjectTriple.
1008  //  x21   argv
1009  //  x22   argc
1010  //  x23   target
1011  const Register& result = x0;
1012
1013  // Check result for exception sentinel.
1014  Label exception_returned;
1015  __ CompareRoot(result, Heap::kExceptionRootIndex);
1016  __ B(eq, &exception_returned);
1017
1018  // The call succeeded, so unwind the stack and return.
1019
1020  // Restore callee-saved registers x21-x23.
1021  __ Mov(x11, argc);
1022
1023  __ Peek(argv, 1 * kPointerSize);
1024  __ Peek(argc, 2 * kPointerSize);
1025  __ Peek(target, 3 * kPointerSize);
1026
1027  __ LeaveExitFrame(save_doubles(), x10, true);
1028  DCHECK(jssp.Is(__ StackPointer()));
1029  if (!argv_in_register()) {
1030    // Drop the remaining stack slots and return from the stub.
1031    __ Drop(x11);
1032  }
1033  __ AssertFPCRState();
1034  __ Ret();
1035
1036  // The stack pointer is still csp if we aren't returning, and the frame
1037  // hasn't changed (except for the return address).
1038  __ SetStackPointer(csp);
1039
1040  // Handling of exception.
1041  __ Bind(&exception_returned);
1042
1043  ExternalReference pending_handler_context_address(
1044      Isolate::kPendingHandlerContextAddress, isolate());
1045  ExternalReference pending_handler_code_address(
1046      Isolate::kPendingHandlerCodeAddress, isolate());
1047  ExternalReference pending_handler_offset_address(
1048      Isolate::kPendingHandlerOffsetAddress, isolate());
1049  ExternalReference pending_handler_fp_address(
1050      Isolate::kPendingHandlerFPAddress, isolate());
1051  ExternalReference pending_handler_sp_address(
1052      Isolate::kPendingHandlerSPAddress, isolate());
1053
1054  // Ask the runtime for help to determine the handler. This will set x0 to
1055  // contain the current pending exception, don't clobber it.
1056  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
1057                                 isolate());
1058  DCHECK(csp.Is(masm->StackPointer()));
1059  {
1060    FrameScope scope(masm, StackFrame::MANUAL);
1061    __ Mov(x0, 0);  // argc.
1062    __ Mov(x1, 0);  // argv.
1063    __ Mov(x2, ExternalReference::isolate_address(isolate()));
1064    __ CallCFunction(find_handler, 3);
1065  }
1066
1067  // We didn't execute a return case, so the stack frame hasn't been updated
1068  // (except for the return address slot). However, we don't need to initialize
1069  // jssp because the throw method will immediately overwrite it when it
1070  // unwinds the stack.
1071  __ SetStackPointer(jssp);
1072
1073  // Retrieve the handler context, SP and FP.
1074  __ Mov(cp, Operand(pending_handler_context_address));
1075  __ Ldr(cp, MemOperand(cp));
1076  __ Mov(jssp, Operand(pending_handler_sp_address));
1077  __ Ldr(jssp, MemOperand(jssp));
1078  __ Mov(csp, jssp);
1079  __ Mov(fp, Operand(pending_handler_fp_address));
1080  __ Ldr(fp, MemOperand(fp));
1081
1082  // If the handler is a JS frame, restore the context to the frame. Note that
1083  // the context will be set to (cp == 0) for non-JS frames.
1084  Label not_js_frame;
1085  __ Cbz(cp, &not_js_frame);
1086  __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1087  __ Bind(&not_js_frame);
1088
1089  // Compute the handler entry address and jump to it.
1090  __ Mov(x10, Operand(pending_handler_code_address));
1091  __ Ldr(x10, MemOperand(x10));
1092  __ Mov(x11, Operand(pending_handler_offset_address));
1093  __ Ldr(x11, MemOperand(x11));
1094  __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
1095  __ Add(x10, x10, x11);
1096  __ Br(x10);
1097}
1098
1099
1100// This is the entry point from C++. 5 arguments are provided in x0-x4.
1101// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
1102// Input:
1103//   x0: code entry.
1104//   x1: function.
1105//   x2: receiver.
1106//   x3: argc.
1107//   x4: argv.
1108// Output:
1109//   x0: result.
1110void JSEntryStub::Generate(MacroAssembler* masm) {
1111  DCHECK(jssp.Is(__ StackPointer()));
1112  Register code_entry = x0;
1113
1114  // Enable instruction instrumentation. This only works on the simulator, and
1115  // will have no effect on the model or real hardware.
1116  __ EnableInstrumentation();
1117
1118  Label invoke, handler_entry, exit;
1119
1120  // Push callee-saved registers and synchronize the system stack pointer (csp)
1121  // and the JavaScript stack pointer (jssp).
1122  //
1123  // We must not write to jssp until after the PushCalleeSavedRegisters()
1124  // call, since jssp is itself a callee-saved register.
1125  __ SetStackPointer(csp);
1126  __ PushCalleeSavedRegisters();
1127  __ Mov(jssp, csp);
1128  __ SetStackPointer(jssp);
1129
1130  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1131
1132  // Set up the reserved register for 0.0.
1133  __ Fmov(fp_zero, 0.0);
1134
1135  // Build an entry frame (see layout below).
1136  StackFrame::Type marker = type();
1137  int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
1138  __ Mov(x13, bad_frame_pointer);
1139  __ Mov(x12, StackFrame::TypeToMarker(marker));
1140  __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1141  __ Ldr(x10, MemOperand(x11));
1142
1143  __ Push(x13, x12, xzr, x10);
1144  // Set up fp.
1145  __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
1146
1147  // Push the JS entry frame marker. Also set js_entry_sp if this is the
1148  // outermost JS call.
1149  Label non_outermost_js, done;
1150  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1151  __ Mov(x10, ExternalReference(js_entry_sp));
1152  __ Ldr(x11, MemOperand(x10));
1153  __ Cbnz(x11, &non_outermost_js);
1154  __ Str(fp, MemOperand(x10));
1155  __ Mov(x12, StackFrame::OUTERMOST_JSENTRY_FRAME);
1156  __ Push(x12);
1157  __ B(&done);
1158  __ Bind(&non_outermost_js);
1159  // We spare one instruction by pushing xzr since the marker is 0.
1160  DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
1161  __ Push(xzr);
1162  __ Bind(&done);
1163
1164  // The frame set up looks like this:
1165  // jssp[0] : JS entry frame marker.
1166  // jssp[1] : C entry FP.
1167  // jssp[2] : stack frame marker.
1168  // jssp[3] : stack frmae marker.
1169  // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
1170
1171
1172  // Jump to a faked try block that does the invoke, with a faked catch
1173  // block that sets the pending exception.
1174  __ B(&invoke);
1175
1176  // Prevent the constant pool from being emitted between the record of the
1177  // handler_entry position and the first instruction of the sequence here.
1178  // There is no risk because Assembler::Emit() emits the instruction before
1179  // checking for constant pool emission, but we do not want to depend on
1180  // that.
1181  {
1182    Assembler::BlockPoolsScope block_pools(masm);
1183    __ bind(&handler_entry);
1184    handler_offset_ = handler_entry.pos();
1185    // Caught exception: Store result (exception) in the pending exception
1186    // field in the JSEnv and return a failure sentinel. Coming in here the
1187    // fp will be invalid because the PushTryHandler below sets it to 0 to
1188    // signal the existence of the JSEntry frame.
1189    __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1190                                          isolate())));
1191  }
1192  __ Str(code_entry, MemOperand(x10));
1193  __ LoadRoot(x0, Heap::kExceptionRootIndex);
1194  __ B(&exit);
1195
1196  // Invoke: Link this frame into the handler chain.
1197  __ Bind(&invoke);
1198  __ PushStackHandler();
1199  // If an exception not caught by another handler occurs, this handler
1200  // returns control to the code after the B(&invoke) above, which
1201  // restores all callee-saved registers (including cp and fp) to their
1202  // saved values before returning a failure to C.
1203
1204  // Invoke the function by calling through the JS entry trampoline builtin.
1205  // Notice that we cannot store a reference to the trampoline code directly in
1206  // this stub, because runtime stubs are not traversed when doing GC.
1207
1208  // Expected registers by Builtins::JSEntryTrampoline
1209  // x0: code entry.
1210  // x1: function.
1211  // x2: receiver.
1212  // x3: argc.
1213  // x4: argv.
1214  ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
1215                              ? Builtins::kJSConstructEntryTrampoline
1216                              : Builtins::kJSEntryTrampoline,
1217                          isolate());
1218  __ Mov(x10, entry);
1219
1220  // Call the JSEntryTrampoline.
1221  __ Ldr(x11, MemOperand(x10));  // Dereference the address.
1222  __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag);
1223  __ Blr(x12);
1224
1225  // Unlink this frame from the handler chain.
1226  __ PopStackHandler();
1227
1228
1229  __ Bind(&exit);
1230  // x0 holds the result.
1231  // The stack pointer points to the top of the entry frame pushed on entry from
1232  // C++ (at the beginning of this stub):
1233  // jssp[0] : JS entry frame marker.
1234  // jssp[1] : C entry FP.
1235  // jssp[2] : stack frame marker.
1236  // jssp[3] : stack frmae marker.
1237  // jssp[4] : bad frame pointer 0xfff...ff   <- fp points here.
1238
1239  // Check if the current stack frame is marked as the outermost JS frame.
1240  Label non_outermost_js_2;
1241  __ Pop(x10);
1242  __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
1243  __ B(ne, &non_outermost_js_2);
1244  __ Mov(x11, ExternalReference(js_entry_sp));
1245  __ Str(xzr, MemOperand(x11));
1246  __ Bind(&non_outermost_js_2);
1247
1248  // Restore the top frame descriptors from the stack.
1249  __ Pop(x10);
1250  __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
1251  __ Str(x10, MemOperand(x11));
1252
1253  // Reset the stack to the callee saved registers.
1254  __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
1255  // Restore the callee-saved registers and return.
1256  DCHECK(jssp.Is(__ StackPointer()));
1257  __ Mov(csp, jssp);
1258  __ SetStackPointer(csp);
1259  __ PopCalleeSavedRegisters();
1260  // After this point, we must not modify jssp because it is a callee-saved
1261  // register which we have just restored.
1262  __ Ret();
1263}
1264
1265void RegExpExecStub::Generate(MacroAssembler* masm) {
1266#ifdef V8_INTERPRETED_REGEXP
1267  __ TailCallRuntime(Runtime::kRegExpExec);
1268#else  // V8_INTERPRETED_REGEXP
1269
1270  // Stack frame on entry.
1271  //  jssp[0]: last_match_info (expected JSArray)
1272  //  jssp[8]: previous index
1273  //  jssp[16]: subject string
1274  //  jssp[24]: JSRegExp object
1275  Label runtime;
1276
1277  // Use of registers for this function.
1278
1279  // Variable registers:
1280  //   x10-x13                                  used as scratch registers
1281  //   w0       string_type                     type of subject string
1282  //   x2       jsstring_length                 subject string length
1283  //   x3       jsregexp_object                 JSRegExp object
1284  //   w4       string_encoding                 Latin1 or UC16
1285  //   w5       sliced_string_offset            if the string is a SlicedString
1286  //                                            offset to the underlying string
1287  //   w6       string_representation           groups attributes of the string:
1288  //                                              - is a string
1289  //                                              - type of the string
1290  //                                              - is a short external string
1291  Register string_type = w0;
1292  Register jsstring_length = x2;
1293  Register jsregexp_object = x3;
1294  Register string_encoding = w4;
1295  Register sliced_string_offset = w5;
1296  Register string_representation = w6;
1297
1298  // These are in callee save registers and will be preserved by the call
1299  // to the native RegExp code, as this code is called using the normal
1300  // C calling convention. When calling directly from generated code the
1301  // native RegExp code will not do a GC and therefore the content of
1302  // these registers are safe to use after the call.
1303
1304  //   x19       subject                        subject string
1305  //   x20       regexp_data                    RegExp data (FixedArray)
1306  //   x21       last_match_info_elements       info relative to the last match
1307  //                                            (FixedArray)
1308  //   x22       code_object                    generated regexp code
1309  Register subject = x19;
1310  Register regexp_data = x20;
1311  Register last_match_info_elements = x21;
1312  Register code_object = x22;
1313
1314  // Stack frame.
1315  //  jssp[00]: last_match_info (JSArray)
1316  //  jssp[08]: previous index
1317  //  jssp[16]: subject string
1318  //  jssp[24]: JSRegExp object
1319
1320  const int kLastMatchInfoOffset = 0 * kPointerSize;
1321  const int kPreviousIndexOffset = 1 * kPointerSize;
1322  const int kSubjectOffset = 2 * kPointerSize;
1323  const int kJSRegExpOffset = 3 * kPointerSize;
1324
1325  // Ensure that a RegExp stack is allocated.
1326  ExternalReference address_of_regexp_stack_memory_address =
1327      ExternalReference::address_of_regexp_stack_memory_address(isolate());
1328  ExternalReference address_of_regexp_stack_memory_size =
1329      ExternalReference::address_of_regexp_stack_memory_size(isolate());
1330  __ Mov(x10, address_of_regexp_stack_memory_size);
1331  __ Ldr(x10, MemOperand(x10));
1332  __ Cbz(x10, &runtime);
1333
1334  // Check that the first argument is a JSRegExp object.
1335  DCHECK(jssp.Is(__ StackPointer()));
1336  __ Peek(jsregexp_object, kJSRegExpOffset);
1337  __ JumpIfSmi(jsregexp_object, &runtime);
1338  __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
1339
1340  // Check that the RegExp has been compiled (data contains a fixed array).
1341  __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset));
1342  if (FLAG_debug_code) {
1343    STATIC_ASSERT(kSmiTag == 0);
1344    __ Tst(regexp_data, kSmiTagMask);
1345    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1346    __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE);
1347    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1348  }
1349
1350  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1351  __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1352  __ Cmp(x10, Smi::FromInt(JSRegExp::IRREGEXP));
1353  __ B(ne, &runtime);
1354
1355  // Check that the number of captures fit in the static offsets vector buffer.
1356  // We have always at least one capture for the whole match, plus additional
1357  // ones due to capturing parentheses. A capture takes 2 registers.
1358  // The number of capture registers then is (number_of_captures + 1) * 2.
1359  __ Ldrsw(x10,
1360           UntagSmiFieldMemOperand(regexp_data,
1361                                   JSRegExp::kIrregexpCaptureCountOffset));
1362  // Check (number_of_captures + 1) * 2 <= offsets vector size
1363  //             number_of_captures * 2 <= offsets vector size - 2
1364  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1365  __ Add(x10, x10, x10);
1366  __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
1367  __ B(hi, &runtime);
1368
1369  // Initialize offset for possibly sliced string.
1370  __ Mov(sliced_string_offset, 0);
1371
1372  DCHECK(jssp.Is(__ StackPointer()));
1373  __ Peek(subject, kSubjectOffset);
1374  __ JumpIfSmi(subject, &runtime);
1375
1376  __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset));
1377
1378  // Handle subject string according to its encoding and representation:
1379  // (1) Sequential string?  If yes, go to (4).
1380  // (2) Sequential or cons?  If not, go to (5).
1381  // (3) Cons string.  If the string is flat, replace subject with first string
1382  //     and go to (1). Otherwise bail out to runtime.
1383  // (4) Sequential string.  Load regexp code according to encoding.
1384  // (E) Carry on.
1385  /// [...]
1386
1387  // Deferred code at the end of the stub:
1388  // (5) Long external string?  If not, go to (7).
1389  // (6) External string.  Make it, offset-wise, look like a sequential string.
1390  //     Go to (4).
1391  // (7) Short external string or not a string?  If yes, bail out to runtime.
1392  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
1393
1394  Label check_underlying;   // (1)
1395  Label seq_string;         // (4)
1396  Label not_seq_nor_cons;   // (5)
1397  Label external_string;    // (6)
1398  Label not_long_external;  // (7)
1399
1400  __ Bind(&check_underlying);
1401  __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
1402  __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1403
1404  // (1) Sequential string?  If yes, go to (4).
1405  __ And(string_representation,
1406         string_type,
1407         kIsNotStringMask |
1408             kStringRepresentationMask |
1409             kShortExternalStringMask);
1410  // We depend on the fact that Strings of type
1411  // SeqString and not ShortExternalString are defined
1412  // by the following pattern:
1413  //   string_type: 0XX0 XX00
1414  //                ^  ^   ^^
1415  //                |  |   ||
1416  //                |  |   is a SeqString
1417  //                |  is not a short external String
1418  //                is a String
1419  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1420  STATIC_ASSERT(kShortExternalStringTag != 0);
1421  __ Cbz(string_representation, &seq_string);  // Go to (4).
1422
1423  // (2) Sequential or cons?  If not, go to (5).
1424  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1425  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1426  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
1427  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1428  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1429  __ Cmp(string_representation, kExternalStringTag);
1430  __ B(ge, &not_seq_nor_cons);  // Go to (5).
1431
1432  // (3) Cons string.  Check that it's flat.
1433  __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset));
1434  __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime);
1435  // Replace subject with first string.
1436  __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1437  __ B(&check_underlying);
1438
1439  // (4) Sequential string.  Load regexp code according to encoding.
1440  __ Bind(&seq_string);
1441
1442  // Check that the third argument is a positive smi less than the subject
1443  // string length. A negative value will be greater (unsigned comparison).
1444  DCHECK(jssp.Is(__ StackPointer()));
1445  __ Peek(x10, kPreviousIndexOffset);
1446  __ JumpIfNotSmi(x10, &runtime);
1447  __ Cmp(jsstring_length, x10);
1448  __ B(ls, &runtime);
1449
1450  // Argument 2 (x1): We need to load argument 2 (the previous index) into x1
1451  // before entering the exit frame.
1452  __ SmiUntag(x1, x10);
1453
1454  // The fourth bit determines the string encoding in string_type.
1455  STATIC_ASSERT(kOneByteStringTag == 0x08);
1456  STATIC_ASSERT(kTwoByteStringTag == 0x00);
1457  STATIC_ASSERT(kStringEncodingMask == 0x08);
1458
1459  // Find the code object based on the assumptions above.
1460  // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
1461  // of kPointerSize to reach the latter.
1462  STATIC_ASSERT(JSRegExp::kDataOneByteCodeOffset + kPointerSize ==
1463                JSRegExp::kDataUC16CodeOffset);
1464  __ Mov(x10, kPointerSize);
1465  // We will need the encoding later: Latin1 = 0x08
1466  //                                  UC16   = 0x00
1467  __ Ands(string_encoding, string_type, kStringEncodingMask);
1468  __ CzeroX(x10, ne);
1469  __ Add(x10, regexp_data, x10);
1470  __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
1471
1472  // (E) Carry on.  String handling is done.
1473
1474  // Check that the irregexp code has been generated for the actual string
1475  // encoding. If it has, the field contains a code object otherwise it contains
1476  // a smi (code flushing support).
1477  __ JumpIfSmi(code_object, &runtime);
1478
1479  // All checks done. Now push arguments for native regexp code.
1480  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1,
1481                      x10,
1482                      x11);
1483
1484  // Isolates: note we add an additional parameter here (isolate pointer).
1485  __ EnterExitFrame(false, x10, 1);
1486  DCHECK(csp.Is(__ StackPointer()));
1487
1488  // We have 9 arguments to pass to the regexp code, therefore we have to pass
1489  // one on the stack and the rest as registers.
1490
1491  // Note that the placement of the argument on the stack isn't standard
1492  // AAPCS64:
1493  // csp[0]: Space for the return address placed by DirectCEntryStub.
1494  // csp[8]: Argument 9, the current isolate address.
1495
1496  __ Mov(x10, ExternalReference::isolate_address(isolate()));
1497  __ Poke(x10, kPointerSize);
1498
1499  Register length = w11;
1500  Register previous_index_in_bytes = w12;
1501  Register start = x13;
1502
1503  // Load start of the subject string.
1504  __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag);
1505  // Load the length from the original subject string from the previous stack
1506  // frame. Therefore we have to use fp, which points exactly to two pointer
1507  // sizes below the previous sp. (Because creating a new stack frame pushes
1508  // the previous fp onto the stack and decrements sp by 2 * kPointerSize.)
1509  __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1510  __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
1511
1512  // Handle UC16 encoding, two bytes make one character.
1513  //   string_encoding: if Latin1: 0x08
1514  //                    if UC16:   0x00
1515  STATIC_ASSERT(kStringEncodingMask == 0x08);
1516  __ Ubfx(string_encoding, string_encoding, 3, 1);
1517  __ Eor(string_encoding, string_encoding, 1);
1518  //   string_encoding: if Latin1: 0
1519  //                    if UC16:   1
1520
1521  // Convert string positions from characters to bytes.
1522  // Previous index is in x1.
1523  __ Lsl(previous_index_in_bytes, w1, string_encoding);
1524  __ Lsl(length, length, string_encoding);
1525  __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding);
1526
1527  // Argument 1 (x0): Subject string.
1528  __ Mov(x0, subject);
1529
1530  // Argument 2 (x1): Previous index, already there.
1531
1532  // Argument 3 (x2): Get the start of input.
1533  // Start of input = start of string + previous index + substring offset
1534  //                                                     (0 if the string
1535  //                                                      is not sliced).
1536  __ Add(w10, previous_index_in_bytes, sliced_string_offset);
1537  __ Add(x2, start, Operand(w10, UXTW));
1538
1539  // Argument 4 (x3):
1540  // End of input = start of input + (length of input - previous index)
1541  __ Sub(w10, length, previous_index_in_bytes);
1542  __ Add(x3, x2, Operand(w10, UXTW));
1543
1544  // Argument 5 (x4): static offsets vector buffer.
1545  __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
1546
1547  // Argument 6 (x5): Set the number of capture registers to zero to force
1548  // global regexps to behave as non-global. This stub is not used for global
1549  // regexps.
1550  __ Mov(x5, 0);
1551
1552  // Argument 7 (x6): Start (high end) of backtracking stack memory area.
1553  __ Mov(x10, address_of_regexp_stack_memory_address);
1554  __ Ldr(x10, MemOperand(x10));
1555  __ Mov(x11, address_of_regexp_stack_memory_size);
1556  __ Ldr(x11, MemOperand(x11));
1557  __ Add(x6, x10, x11);
1558
1559  // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
1560  __ Mov(x7, 1);
1561
1562  // Locate the code entry and call it.
1563  __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
1564  DirectCEntryStub stub(isolate());
1565  stub.GenerateCall(masm, code_object);
1566
1567  __ LeaveExitFrame(false, x10, true);
1568
1569  // The generated regexp code returns an int32 in w0.
1570  Label failure, exception;
1571  __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure);
1572  __ CompareAndBranch(w0,
1573                      NativeRegExpMacroAssembler::EXCEPTION,
1574                      eq,
1575                      &exception);
1576  __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime);
1577
1578  // Success: process the result from the native regexp code.
1579  Register number_of_capture_registers = x12;
1580
1581  // Calculate number of capture registers (number_of_captures + 1) * 2
1582  // and store it in the last match info.
1583  __ Ldrsw(x10,
1584           UntagSmiFieldMemOperand(regexp_data,
1585                                   JSRegExp::kIrregexpCaptureCountOffset));
1586  __ Add(x10, x10, x10);
1587  __ Add(number_of_capture_registers, x10, 2);
1588
1589  // Check that the last match info is a FixedArray.
1590  DCHECK(jssp.Is(__ StackPointer()));
1591  __ Peek(last_match_info_elements, kLastMatchInfoOffset);
1592  __ JumpIfSmi(last_match_info_elements, &runtime);
1593
1594  // Check that the object has fast elements.
1595  __ Ldr(x10,
1596         FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1597  __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime);
1598
1599  // Check that the last match info has space for the capture registers and the
1600  // additional information (overhead).
1601  //     (number_of_captures + 1) * 2 + overhead <= last match info size
1602  //     (number_of_captures * 2) + 2 + overhead <= last match info size
1603  //      number_of_capture_registers + overhead <= last match info size
1604  __ Ldrsw(x10,
1605           UntagSmiFieldMemOperand(last_match_info_elements,
1606                                   FixedArray::kLengthOffset));
1607  __ Add(x11, number_of_capture_registers, RegExpMatchInfo::kLastMatchOverhead);
1608  __ Cmp(x11, x10);
1609  __ B(gt, &runtime);
1610
1611  // Store the capture count.
1612  __ SmiTag(x10, number_of_capture_registers);
1613  __ Str(x10, FieldMemOperand(last_match_info_elements,
1614                              RegExpMatchInfo::kNumberOfCapturesOffset));
1615  // Store last subject and last input.
1616  __ Str(subject, FieldMemOperand(last_match_info_elements,
1617                                  RegExpMatchInfo::kLastSubjectOffset));
1618  // Use x10 as the subject string in order to only need
1619  // one RecordWriteStub.
1620  __ Mov(x10, subject);
1621  __ RecordWriteField(last_match_info_elements,
1622                      RegExpMatchInfo::kLastSubjectOffset, x10, x11,
1623                      kLRHasNotBeenSaved, kDontSaveFPRegs);
1624  __ Str(subject, FieldMemOperand(last_match_info_elements,
1625                                  RegExpMatchInfo::kLastInputOffset));
1626  __ Mov(x10, subject);
1627  __ RecordWriteField(last_match_info_elements,
1628                      RegExpMatchInfo::kLastInputOffset, x10, x11,
1629                      kLRHasNotBeenSaved, kDontSaveFPRegs);
1630
1631  Register last_match_offsets = x13;
1632  Register offsets_vector_index = x14;
1633  Register current_offset = x15;
1634
1635  // Get the static offsets vector filled by the native regexp code
1636  // and fill the last match info.
1637  ExternalReference address_of_static_offsets_vector =
1638      ExternalReference::address_of_static_offsets_vector(isolate());
1639  __ Mov(offsets_vector_index, address_of_static_offsets_vector);
1640
1641  Label next_capture, done;
1642  // Capture register counter starts from number of capture registers and
1643  // iterates down to zero (inclusive).
1644  __ Add(last_match_offsets, last_match_info_elements,
1645         RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag);
1646  __ Bind(&next_capture);
1647  __ Subs(number_of_capture_registers, number_of_capture_registers, 2);
1648  __ B(mi, &done);
1649  // Read two 32 bit values from the static offsets vector buffer into
1650  // an X register
1651  __ Ldr(current_offset,
1652         MemOperand(offsets_vector_index, kWRegSize * 2, PostIndex));
1653  // Store the smi values in the last match info.
1654  __ SmiTag(x10, current_offset);
1655  // Clearing the 32 bottom bits gives us a Smi.
1656  STATIC_ASSERT(kSmiTag == 0);
1657  __ Bic(x11, current_offset, kSmiShiftMask);
1658  __ Stp(x10,
1659         x11,
1660         MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
1661  __ B(&next_capture);
1662  __ Bind(&done);
1663
1664  // Return last match info.
1665  __ Mov(x0, last_match_info_elements);
1666  // Drop the 4 arguments of the stub from the stack.
1667  __ Drop(4);
1668  __ Ret();
1669
1670  __ Bind(&exception);
1671  Register exception_value = x0;
1672  // A stack overflow (on the backtrack stack) may have occured
1673  // in the RegExp code but no exception has been created yet.
1674  // If there is no pending exception, handle that in the runtime system.
1675  __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
1676  __ Mov(x11,
1677         Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1678                                   isolate())));
1679  __ Ldr(exception_value, MemOperand(x11));
1680  __ Cmp(x10, exception_value);
1681  __ B(eq, &runtime);
1682
1683  // For exception, throw the exception again.
1684  __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1685
1686  __ Bind(&failure);
1687  __ Mov(x0, Operand(isolate()->factory()->null_value()));
1688  // Drop the 4 arguments of the stub from the stack.
1689  __ Drop(4);
1690  __ Ret();
1691
1692  __ Bind(&runtime);
1693  __ TailCallRuntime(Runtime::kRegExpExec);
1694
1695  // Deferred code for string handling.
1696  // (5) Long external string?  If not, go to (7).
1697  __ Bind(&not_seq_nor_cons);
1698  // Compare flags are still set.
1699  __ B(ne, &not_long_external);  // Go to (7).
1700
1701  // (6) External string. Make it, offset-wise, look like a sequential string.
1702  __ Bind(&external_string);
1703  if (masm->emit_debug_code()) {
1704    // Assert that we do not have a cons or slice (indirect strings) here.
1705    // Sequential strings have already been ruled out.
1706    __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset));
1707    __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
1708    __ Tst(x10, kIsIndirectStringMask);
1709    __ Check(eq, kExternalStringExpectedButNotFound);
1710    __ And(x10, x10, kStringRepresentationMask);
1711    __ Cmp(x10, 0);
1712    __ Check(ne, kExternalStringExpectedButNotFound);
1713  }
1714  __ Ldr(subject,
1715         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1716  // Move the pointer so that offset-wise, it looks like a sequential string.
1717  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1718  __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
1719  __ B(&seq_string);  // Go to (4).
1720
1721  // (7) If this is a short external string or not a string, bail out to
1722  // runtime.
1723  __ Bind(&not_long_external);
1724  STATIC_ASSERT(kShortExternalStringTag != 0);
1725  __ TestAndBranchIfAnySet(string_representation,
1726                           kShortExternalStringMask | kIsNotStringMask,
1727                           &runtime);
1728
1729  // (8) Sliced or thin string. Replace subject with parent.
1730  Label thin_string;
1731  __ Cmp(string_representation, kThinStringTag);
1732  __ B(eq, &thin_string);
1733  __ Ldr(sliced_string_offset,
1734         UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset));
1735  __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1736  __ B(&check_underlying);  // Go to (1).
1737
1738  __ bind(&thin_string);
1739  __ Ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
1740  __ B(&check_underlying);  // Go to (1).
1741#endif
1742}
1743
1744
1745static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
1746                                       Register argc, Register function,
1747                                       Register feedback_vector, Register index,
1748                                       Register new_target) {
1749  FrameScope scope(masm, StackFrame::INTERNAL);
1750
1751  // Number-of-arguments register must be smi-tagged to call out.
1752  __ SmiTag(argc);
1753  __ Push(argc, function, feedback_vector, index);
1754  __ Push(cp);
1755
1756  DCHECK(feedback_vector.Is(x2) && index.Is(x3));
1757  __ CallStub(stub);
1758
1759  __ Pop(cp);
1760  __ Pop(index, feedback_vector, function, argc);
1761  __ SmiUntag(argc);
1762}
1763
1764
1765static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
1766                                     Register function,
1767                                     Register feedback_vector, Register index,
1768                                     Register new_target, Register scratch1,
1769                                     Register scratch2, Register scratch3) {
1770  ASM_LOCATION("GenerateRecordCallTarget");
1771  DCHECK(!AreAliased(scratch1, scratch2, scratch3, argc, function,
1772                     feedback_vector, index, new_target));
1773  // Cache the called function in a feedback vector slot. Cache states are
1774  // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
1775  //  argc :            number of arguments to the construct function
1776  //  function :        the function to call
1777  //  feedback_vector : the feedback vector
1778  //  index :           slot in feedback vector (smi)
1779  Label initialize, done, miss, megamorphic, not_array_function;
1780
1781  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
1782            masm->isolate()->heap()->megamorphic_symbol());
1783  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
1784            masm->isolate()->heap()->uninitialized_symbol());
1785
1786  // Load the cache state.
1787  Register feedback = scratch1;
1788  Register feedback_map = scratch2;
1789  Register feedback_value = scratch3;
1790  __ Add(feedback, feedback_vector,
1791         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
1792  __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
1793
1794  // A monomorphic cache hit or an already megamorphic state: invoke the
1795  // function without changing the state.
1796  // We don't know if feedback value is a WeakCell or a Symbol, but it's
1797  // harmless to read at this position in a symbol (see static asserts in
1798  // feedback-vector.h).
1799  Label check_allocation_site;
1800  __ Ldr(feedback_value, FieldMemOperand(feedback, WeakCell::kValueOffset));
1801  __ Cmp(function, feedback_value);
1802  __ B(eq, &done);
1803  __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
1804  __ B(eq, &done);
1805  __ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
1806  __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1807  __ B(ne, &check_allocation_site);
1808
1809  // If the weak cell is cleared, we have a new chance to become monomorphic.
1810  __ JumpIfSmi(feedback_value, &initialize);
1811  __ B(&megamorphic);
1812
1813  __ bind(&check_allocation_site);
1814  // If we came here, we need to see if we are the array function.
1815  // If we didn't have a matching function, and we didn't find the megamorph
1816  // sentinel, then we have in the slot either some other function or an
1817  // AllocationSite.
1818  __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
1819
1820  // Make sure the function is the Array() function
1821  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
1822  __ Cmp(function, scratch1);
1823  __ B(ne, &megamorphic);
1824  __ B(&done);
1825
1826  __ Bind(&miss);
1827
1828  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
1829  // megamorphic.
1830  __ JumpIfRoot(scratch1, Heap::kuninitialized_symbolRootIndex, &initialize);
1831  // MegamorphicSentinel is an immortal immovable object (undefined) so no
1832  // write-barrier is needed.
1833  __ Bind(&megamorphic);
1834  __ Add(scratch1, feedback_vector,
1835         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
1836  __ LoadRoot(scratch2, Heap::kmegamorphic_symbolRootIndex);
1837  __ Str(scratch2, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1838  __ B(&done);
1839
1840  // An uninitialized cache is patched with the function or sentinel to
1841  // indicate the ElementsKind if function is the Array constructor.
1842  __ Bind(&initialize);
1843
1844  // Make sure the function is the Array() function
1845  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, scratch1);
1846  __ Cmp(function, scratch1);
1847  __ B(ne, &not_array_function);
1848
1849  // The target function is the Array constructor,
1850  // Create an AllocationSite if we don't already have it, store it in the
1851  // slot.
1852  CreateAllocationSiteStub create_stub(masm->isolate());
1853  CallStubInRecordCallTarget(masm, &create_stub, argc, function,
1854                             feedback_vector, index, new_target);
1855  __ B(&done);
1856
1857  __ Bind(&not_array_function);
1858  CreateWeakCellStub weak_cell_stub(masm->isolate());
1859  CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
1860                             feedback_vector, index, new_target);
1861
1862  __ Bind(&done);
1863
1864  // Increment the call count for all function calls.
1865  __ Add(scratch1, feedback_vector,
1866         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
1867  __ Add(scratch1, scratch1, Operand(FixedArray::kHeaderSize + kPointerSize));
1868  __ Ldr(scratch2, FieldMemOperand(scratch1, 0));
1869  __ Add(scratch2, scratch2, Operand(Smi::FromInt(1)));
1870  __ Str(scratch2, FieldMemOperand(scratch1, 0));
1871}
1872
1873
1874void CallConstructStub::Generate(MacroAssembler* masm) {
1875  ASM_LOCATION("CallConstructStub::Generate");
1876  // x0 : number of arguments
1877  // x1 : the function to call
1878  // x2 : feedback vector
1879  // x3 : slot in feedback vector (Smi, for RecordCallTarget)
1880  Register function = x1;
1881
1882  Label non_function;
1883  // Check that the function is not a smi.
1884  __ JumpIfSmi(function, &non_function);
1885  // Check that the function is a JSFunction.
1886  Register object_type = x10;
1887  __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
1888                         &non_function);
1889
1890  GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12);
1891
1892  __ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
1893  Label feedback_register_initialized;
1894  // Put the AllocationSite from the feedback vector into x2, or undefined.
1895  __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
1896  __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
1897  __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
1898                &feedback_register_initialized);
1899  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
1900  __ bind(&feedback_register_initialized);
1901
1902  __ AssertUndefinedOrAllocationSite(x2, x5);
1903
1904  __ Mov(x3, function);
1905
1906  // Tail call to the function-specific construct stub (still in the caller
1907  // context at this point).
1908  __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
1909  __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
1910  __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
1911  __ Br(x4);
1912
1913  __ Bind(&non_function);
1914  __ Mov(x3, function);
1915  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1916}
1917
1918void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
1919  // If the receiver is a smi trigger the non-string case.
1920  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
1921    __ JumpIfSmi(object_, receiver_not_string_);
1922
1923    // Fetch the instance type of the receiver into result register.
1924    __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1925    __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1926
1927    // If the receiver is not a string trigger the non-string case.
1928    __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_);
1929  }
1930
1931  // If the index is non-smi trigger the non-smi case.
1932  __ JumpIfNotSmi(index_, &index_not_smi_);
1933
1934  __ Bind(&got_smi_index_);
1935  // Check for index out of range.
1936  __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset));
1937  __ Cmp(result_, Operand::UntagSmi(index_));
1938  __ B(ls, index_out_of_range_);
1939
1940  __ SmiUntag(index_);
1941
1942  StringCharLoadGenerator::Generate(masm,
1943                                    object_,
1944                                    index_.W(),
1945                                    result_,
1946                                    &call_runtime_);
1947  __ SmiTag(result_);
1948  __ Bind(&exit_);
1949}
1950
1951
1952void StringCharCodeAtGenerator::GenerateSlow(
1953    MacroAssembler* masm, EmbedMode embed_mode,
1954    const RuntimeCallHelper& call_helper) {
1955  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
1956
1957  __ Bind(&index_not_smi_);
1958  // If index is a heap number, try converting it to an integer.
1959  __ JumpIfNotHeapNumber(index_, index_not_number_);
1960  call_helper.BeforeCall(masm);
1961  if (embed_mode == PART_OF_IC_HANDLER) {
1962    __ Push(LoadWithVectorDescriptor::VectorRegister(),
1963            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
1964  } else {
1965    // Save object_ on the stack and pass index_ as argument for runtime call.
1966    __ Push(object_, index_);
1967  }
1968  __ CallRuntime(Runtime::kNumberToSmi);
1969  // Save the conversion result before the pop instructions below
1970  // have a chance to overwrite it.
1971  __ Mov(index_, x0);
1972  if (embed_mode == PART_OF_IC_HANDLER) {
1973    __ Pop(object_, LoadWithVectorDescriptor::SlotRegister(),
1974           LoadWithVectorDescriptor::VectorRegister());
1975  } else {
1976    __ Pop(object_);
1977  }
1978  // Reload the instance type.
1979  __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1980  __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1981  call_helper.AfterCall(masm);
1982
1983  // If index is still not a smi, it must be out of range.
1984  __ JumpIfNotSmi(index_, index_out_of_range_);
1985  // Otherwise, return to the fast path.
1986  __ B(&got_smi_index_);
1987
1988  // Call runtime. We get here when the receiver is a string and the
1989  // index is a number, but the code of getting the actual character
1990  // is too complex (e.g., when the string needs to be flattened).
1991  __ Bind(&call_runtime_);
1992  call_helper.BeforeCall(masm);
1993  __ SmiTag(index_);
1994  __ Push(object_, index_);
1995  __ CallRuntime(Runtime::kStringCharCodeAtRT);
1996  __ Mov(result_, x0);
1997  call_helper.AfterCall(masm);
1998  __ B(&exit_);
1999
2000  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2001}
2002
2003void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2004  // Inputs are in x0 (lhs) and x1 (rhs).
2005  DCHECK_EQ(CompareICState::BOOLEAN, state());
2006  ASM_LOCATION("CompareICStub[Booleans]");
2007  Label miss;
2008
2009  __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2010  __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2011  if (!Token::IsEqualityOp(op())) {
2012    __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
2013    __ AssertSmi(x1);
2014    __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
2015    __ AssertSmi(x0);
2016  }
2017  __ Sub(x0, x1, x0);
2018  __ Ret();
2019
2020  __ Bind(&miss);
2021  GenerateMiss(masm);
2022}
2023
2024
2025void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2026  // Inputs are in x0 (lhs) and x1 (rhs).
2027  DCHECK(state() == CompareICState::SMI);
2028  ASM_LOCATION("CompareICStub[Smis]");
2029  Label miss;
2030  // Bail out (to 'miss') unless both x0 and x1 are smis.
2031  __ JumpIfEitherNotSmi(x0, x1, &miss);
2032
2033  if (GetCondition() == eq) {
2034    // For equality we do not care about the sign of the result.
2035    __ Sub(x0, x0, x1);
2036  } else {
2037    // Untag before subtracting to avoid handling overflow.
2038    __ SmiUntag(x1);
2039    __ Sub(x0, x1, Operand::UntagSmi(x0));
2040  }
2041  __ Ret();
2042
2043  __ Bind(&miss);
2044  GenerateMiss(masm);
2045}
2046
2047
2048void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2049  DCHECK(state() == CompareICState::NUMBER);
2050  ASM_LOCATION("CompareICStub[HeapNumbers]");
2051
2052  Label unordered, maybe_undefined1, maybe_undefined2;
2053  Label miss, handle_lhs, values_in_d_regs;
2054  Label untag_rhs, untag_lhs;
2055
2056  Register result = x0;
2057  Register rhs = x0;
2058  Register lhs = x1;
2059  FPRegister rhs_d = d0;
2060  FPRegister lhs_d = d1;
2061
2062  if (left() == CompareICState::SMI) {
2063    __ JumpIfNotSmi(lhs, &miss);
2064  }
2065  if (right() == CompareICState::SMI) {
2066    __ JumpIfNotSmi(rhs, &miss);
2067  }
2068
2069  __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag);
2070  __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag);
2071
2072  // Load rhs if it's a heap number.
2073  __ JumpIfSmi(rhs, &handle_lhs);
2074  __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
2075  __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
2076
2077  // Load lhs if it's a heap number.
2078  __ Bind(&handle_lhs);
2079  __ JumpIfSmi(lhs, &values_in_d_regs);
2080  __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
2081  __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
2082
2083  __ Bind(&values_in_d_regs);
2084  __ Fcmp(lhs_d, rhs_d);
2085  __ B(vs, &unordered);  // Overflow flag set if either is NaN.
2086  STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1));
2087  __ Cset(result, gt);  // gt => 1, otherwise (lt, eq) => 0 (EQUAL).
2088  __ Csinv(result, result, xzr, ge);  // lt => -1, gt => 1, eq => 0.
2089  __ Ret();
2090
2091  __ Bind(&unordered);
2092  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2093                     CompareICState::GENERIC, CompareICState::GENERIC);
2094  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2095
2096  __ Bind(&maybe_undefined1);
2097  if (Token::IsOrderedRelationalCompareOp(op())) {
2098    __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
2099    __ JumpIfSmi(lhs, &unordered);
2100    __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
2101    __ B(&unordered);
2102  }
2103
2104  __ Bind(&maybe_undefined2);
2105  if (Token::IsOrderedRelationalCompareOp(op())) {
2106    __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
2107  }
2108
2109  __ Bind(&miss);
2110  GenerateMiss(masm);
2111}
2112
2113
2114void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2115  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2116  ASM_LOCATION("CompareICStub[InternalizedStrings]");
2117  Label miss;
2118
2119  Register result = x0;
2120  Register rhs = x0;
2121  Register lhs = x1;
2122
2123  // Check that both operands are heap objects.
2124  __ JumpIfEitherSmi(lhs, rhs, &miss);
2125
2126  // Check that both operands are internalized strings.
2127  Register rhs_map = x10;
2128  Register lhs_map = x11;
2129  Register rhs_type = x10;
2130  Register lhs_type = x11;
2131  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2132  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2133  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
2134  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
2135
2136  STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2137  __ Orr(x12, lhs_type, rhs_type);
2138  __ TestAndBranchIfAnySet(
2139      x12, kIsNotStringMask | kIsNotInternalizedMask, &miss);
2140
2141  // Internalized strings are compared by identity.
2142  STATIC_ASSERT(EQUAL == 0);
2143  __ Cmp(lhs, rhs);
2144  __ Cset(result, ne);
2145  __ Ret();
2146
2147  __ Bind(&miss);
2148  GenerateMiss(masm);
2149}
2150
2151
2152void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2153  DCHECK(state() == CompareICState::UNIQUE_NAME);
2154  ASM_LOCATION("CompareICStub[UniqueNames]");
2155  DCHECK(GetCondition() == eq);
2156  Label miss;
2157
2158  Register result = x0;
2159  Register rhs = x0;
2160  Register lhs = x1;
2161
2162  Register lhs_instance_type = w2;
2163  Register rhs_instance_type = w3;
2164
2165  // Check that both operands are heap objects.
2166  __ JumpIfEitherSmi(lhs, rhs, &miss);
2167
2168  // Check that both operands are unique names. This leaves the instance
2169  // types loaded in tmp1 and tmp2.
2170  __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset));
2171  __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset));
2172  __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset));
2173  __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset));
2174
2175  // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
2176  // should have kInternalizedTag set.
2177  __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
2178  __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
2179
2180  // Unique names are compared by identity.
2181  STATIC_ASSERT(EQUAL == 0);
2182  __ Cmp(lhs, rhs);
2183  __ Cset(result, ne);
2184  __ Ret();
2185
2186  __ Bind(&miss);
2187  GenerateMiss(masm);
2188}
2189
2190
2191void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2192  DCHECK(state() == CompareICState::STRING);
2193  ASM_LOCATION("CompareICStub[Strings]");
2194
2195  Label miss;
2196
2197  bool equality = Token::IsEqualityOp(op());
2198
2199  Register result = x0;
2200  Register rhs = x0;
2201  Register lhs = x1;
2202
2203  // Check that both operands are heap objects.
2204  __ JumpIfEitherSmi(rhs, lhs, &miss);
2205
2206  // Check that both operands are strings.
2207  Register rhs_map = x10;
2208  Register lhs_map = x11;
2209  Register rhs_type = x10;
2210  Register lhs_type = x11;
2211  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2212  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2213  __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset));
2214  __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset));
2215  STATIC_ASSERT(kNotStringTag != 0);
2216  __ Orr(x12, lhs_type, rhs_type);
2217  __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss);
2218
2219  // Fast check for identical strings.
2220  Label not_equal;
2221  __ Cmp(lhs, rhs);
2222  __ B(ne, &not_equal);
2223  __ Mov(result, EQUAL);
2224  __ Ret();
2225
2226  __ Bind(&not_equal);
2227  // Handle not identical strings
2228
2229  // Check that both strings are internalized strings. If they are, we're done
2230  // because we already know they are not identical. We know they are both
2231  // strings.
2232  if (equality) {
2233    DCHECK(GetCondition() == eq);
2234    STATIC_ASSERT(kInternalizedTag == 0);
2235    Label not_internalized_strings;
2236    __ Orr(x12, lhs_type, rhs_type);
2237    __ TestAndBranchIfAnySet(
2238        x12, kIsNotInternalizedMask, &not_internalized_strings);
2239    // Result is in rhs (x0), and not EQUAL, as rhs is not a smi.
2240    __ Ret();
2241    __ Bind(&not_internalized_strings);
2242  }
2243
2244  // Check that both strings are sequential one-byte.
2245  Label runtime;
2246  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
2247                                                    x13, &runtime);
2248
2249  // Compare flat one-byte strings. Returns when done.
2250  if (equality) {
2251    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
2252                                                  x12);
2253  } else {
2254    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
2255                                                    x12, x13);
2256  }
2257
2258  // Handle more complex cases in runtime.
2259  __ Bind(&runtime);
2260  if (equality) {
2261    {
2262      FrameScope scope(masm, StackFrame::INTERNAL);
2263      __ Push(lhs, rhs);
2264      __ CallRuntime(Runtime::kStringEqual);
2265    }
2266    __ LoadRoot(x1, Heap::kTrueValueRootIndex);
2267    __ Sub(x0, x0, x1);
2268    __ Ret();
2269  } else {
2270    __ Push(lhs, rhs);
2271    __ TailCallRuntime(Runtime::kStringCompare);
2272  }
2273
2274  __ Bind(&miss);
2275  GenerateMiss(masm);
2276}
2277
2278
2279void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2280  DCHECK_EQ(CompareICState::RECEIVER, state());
2281  ASM_LOCATION("CompareICStub[Receivers]");
2282
2283  Label miss;
2284
2285  Register result = x0;
2286  Register rhs = x0;
2287  Register lhs = x1;
2288
2289  __ JumpIfEitherSmi(rhs, lhs, &miss);
2290
2291  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2292  __ JumpIfObjectType(rhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
2293  __ JumpIfObjectType(lhs, x10, x10, FIRST_JS_RECEIVER_TYPE, &miss, lt);
2294
2295  DCHECK_EQ(eq, GetCondition());
2296  __ Sub(result, rhs, lhs);
2297  __ Ret();
2298
2299  __ Bind(&miss);
2300  GenerateMiss(masm);
2301}
2302
2303
2304void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2305  ASM_LOCATION("CompareICStub[KnownReceivers]");
2306
2307  Label miss;
2308  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2309
2310  Register result = x0;
2311  Register rhs = x0;
2312  Register lhs = x1;
2313
2314  __ JumpIfEitherSmi(rhs, lhs, &miss);
2315
2316  Register rhs_map = x10;
2317  Register lhs_map = x11;
2318  Register map = x12;
2319  __ GetWeakValue(map, cell);
2320  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
2321  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
2322  __ Cmp(rhs_map, map);
2323  __ B(ne, &miss);
2324  __ Cmp(lhs_map, map);
2325  __ B(ne, &miss);
2326
2327  if (Token::IsEqualityOp(op())) {
2328  __ Sub(result, rhs, lhs);
2329  __ Ret();
2330  } else {
2331    Register ncr = x2;
2332    if (op() == Token::LT || op() == Token::LTE) {
2333      __ Mov(ncr, Smi::FromInt(GREATER));
2334    } else {
2335      __ Mov(ncr, Smi::FromInt(LESS));
2336    }
2337    __ Push(lhs, rhs, ncr);
2338    __ TailCallRuntime(Runtime::kCompare);
2339  }
2340
2341  __ Bind(&miss);
2342  GenerateMiss(masm);
2343}
2344
2345
2346// This method handles the case where a compare stub had the wrong
2347// implementation. It calls a miss handler, which re-writes the stub. All other
2348// CompareICStub::Generate* methods should fall back into this one if their
2349// operands were not the expected types.
2350void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2351  ASM_LOCATION("CompareICStub[Miss]");
2352
2353  Register stub_entry = x11;
2354  {
2355    FrameScope scope(masm, StackFrame::INTERNAL);
2356    Register op = x10;
2357    Register left = x1;
2358    Register right = x0;
2359    // Preserve some caller-saved registers.
2360    __ Push(x1, x0, lr);
2361    // Push the arguments.
2362    __ Mov(op, Smi::FromInt(this->op()));
2363    __ Push(left, right, op);
2364
2365    // Call the miss handler. This also pops the arguments.
2366    __ CallRuntime(Runtime::kCompareIC_Miss);
2367
2368    // Compute the entry point of the rewritten stub.
2369    __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
2370    // Restore caller-saved registers.
2371    __ Pop(lr, x0, x1);
2372  }
2373
2374  // Tail-call to the new stub.
2375  __ Jump(stub_entry);
2376}
2377
2378
2379void StringHelper::GenerateFlatOneByteStringEquals(
2380    MacroAssembler* masm, Register left, Register right, Register scratch1,
2381    Register scratch2, Register scratch3) {
2382  DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
2383  Register result = x0;
2384  Register left_length = scratch1;
2385  Register right_length = scratch2;
2386
2387  // Compare lengths. If lengths differ, strings can't be equal. Lengths are
2388  // smis, and don't need to be untagged.
2389  Label strings_not_equal, check_zero_length;
2390  __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
2391  __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
2392  __ Cmp(left_length, right_length);
2393  __ B(eq, &check_zero_length);
2394
2395  __ Bind(&strings_not_equal);
2396  __ Mov(result, Smi::FromInt(NOT_EQUAL));
2397  __ Ret();
2398
2399  // Check if the length is zero. If so, the strings must be equal (and empty.)
2400  Label compare_chars;
2401  __ Bind(&check_zero_length);
2402  STATIC_ASSERT(kSmiTag == 0);
2403  __ Cbnz(left_length, &compare_chars);
2404  __ Mov(result, Smi::FromInt(EQUAL));
2405  __ Ret();
2406
2407  // Compare characters. Falls through if all characters are equal.
2408  __ Bind(&compare_chars);
2409  GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
2410                                  scratch3, &strings_not_equal);
2411
2412  // Characters in strings are equal.
2413  __ Mov(result, Smi::FromInt(EQUAL));
2414  __ Ret();
2415}
2416
2417
2418void StringHelper::GenerateCompareFlatOneByteStrings(
2419    MacroAssembler* masm, Register left, Register right, Register scratch1,
2420    Register scratch2, Register scratch3, Register scratch4) {
2421  DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
2422  Label result_not_equal, compare_lengths;
2423
2424  // Find minimum length and length difference.
2425  Register length_delta = scratch3;
2426  __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
2427  __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
2428  __ Subs(length_delta, scratch1, scratch2);
2429
2430  Register min_length = scratch1;
2431  __ Csel(min_length, scratch2, scratch1, gt);
2432  __ Cbz(min_length, &compare_lengths);
2433
2434  // Compare loop.
2435  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
2436                                  scratch4, &result_not_equal);
2437
2438  // Compare lengths - strings up to min-length are equal.
2439  __ Bind(&compare_lengths);
2440
2441  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
2442
2443  // Use length_delta as result if it's zero.
2444  Register result = x0;
2445  __ Subs(result, length_delta, 0);
2446
2447  __ Bind(&result_not_equal);
2448  Register greater = x10;
2449  Register less = x11;
2450  __ Mov(greater, Smi::FromInt(GREATER));
2451  __ Mov(less, Smi::FromInt(LESS));
2452  __ CmovX(result, greater, gt);
2453  __ CmovX(result, less, lt);
2454  __ Ret();
2455}
2456
2457
2458void StringHelper::GenerateOneByteCharsCompareLoop(
2459    MacroAssembler* masm, Register left, Register right, Register length,
2460    Register scratch1, Register scratch2, Label* chars_not_equal) {
2461  DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
2462
2463  // Change index to run from -length to -1 by adding length to string
2464  // start. This means that loop ends when index reaches zero, which
2465  // doesn't need an additional compare.
2466  __ SmiUntag(length);
2467  __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
2468  __ Add(left, left, scratch1);
2469  __ Add(right, right, scratch1);
2470
2471  Register index = length;
2472  __ Neg(index, length);  // index = -length;
2473
2474  // Compare loop
2475  Label loop;
2476  __ Bind(&loop);
2477  __ Ldrb(scratch1, MemOperand(left, index));
2478  __ Ldrb(scratch2, MemOperand(right, index));
2479  __ Cmp(scratch1, scratch2);
2480  __ B(ne, chars_not_equal);
2481  __ Add(index, index, 1);
2482  __ Cbnz(index, &loop);
2483}
2484
2485
2486void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2487  // ----------- S t a t e -------------
2488  //  -- x1    : left
2489  //  -- x0    : right
2490  //  -- lr    : return address
2491  // -----------------------------------
2492
2493  // Load x2 with the allocation site.  We stick an undefined dummy value here
2494  // and replace it with the real allocation site later when we instantiate this
2495  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2496  __ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
2497
2498  // Make sure that we actually patched the allocation site.
2499  if (FLAG_debug_code) {
2500    __ AssertNotSmi(x2, kExpectedAllocationSite);
2501    __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
2502    __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
2503                            kExpectedAllocationSite);
2504  }
2505
2506  // Tail call into the stub that handles binary operations with allocation
2507  // sites.
2508  BinaryOpWithAllocationSiteStub stub(isolate(), state());
2509  __ TailCallStub(&stub);
2510}
2511
2512
2513void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
2514  // We need some extra registers for this stub, they have been allocated
2515  // but we need to save them before using them.
2516  regs_.Save(masm);
2517
2518  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2519    Label dont_need_remembered_set;
2520
2521    Register val = regs_.scratch0();
2522    __ Ldr(val, MemOperand(regs_.address()));
2523    __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
2524
2525    __ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
2526
2527    // First notify the incremental marker if necessary, then update the
2528    // remembered set.
2529    CheckNeedsToInformIncrementalMarker(
2530        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
2531    InformIncrementalMarker(masm);
2532    regs_.Restore(masm);  // Restore the extra scratch registers we used.
2533
2534    __ RememberedSetHelper(object(), address(),
2535                           value(),  // scratch1
2536                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2537
2538    __ Bind(&dont_need_remembered_set);
2539  }
2540
2541  CheckNeedsToInformIncrementalMarker(
2542      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
2543  InformIncrementalMarker(masm);
2544  regs_.Restore(masm);  // Restore the extra scratch registers we used.
2545  __ Ret();
2546}
2547
2548
2549void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
2550  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
2551  Register address =
2552    x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
2553  DCHECK(!address.Is(regs_.object()));
2554  DCHECK(!address.Is(x0));
2555  __ Mov(address, regs_.address());
2556  __ Mov(x0, regs_.object());
2557  __ Mov(x1, address);
2558  __ Mov(x2, ExternalReference::isolate_address(isolate()));
2559
2560  AllowExternalCallThatCantCauseGC scope(masm);
2561  ExternalReference function =
2562      ExternalReference::incremental_marking_record_write_function(
2563          isolate());
2564  __ CallCFunction(function, 3, 0);
2565
2566  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
2567}
2568
2569
2570void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
2571    MacroAssembler* masm,
2572    OnNoNeedToInformIncrementalMarker on_no_need,
2573    Mode mode) {
2574  Label on_black;
2575  Label need_incremental;
2576  Label need_incremental_pop_scratch;
2577
2578  // If the object is not black we don't have to inform the incremental marker.
2579  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
2580
2581  regs_.Restore(masm);  // Restore the extra scratch registers we used.
2582  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2583    __ RememberedSetHelper(object(), address(),
2584                           value(),  // scratch1
2585                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2586  } else {
2587    __ Ret();
2588  }
2589
2590  __ Bind(&on_black);
2591  // Get the value from the slot.
2592  Register val = regs_.scratch0();
2593  __ Ldr(val, MemOperand(regs_.address()));
2594
2595  if (mode == INCREMENTAL_COMPACTION) {
2596    Label ensure_not_white;
2597
2598    __ CheckPageFlagClear(val, regs_.scratch1(),
2599                          MemoryChunk::kEvacuationCandidateMask,
2600                          &ensure_not_white);
2601
2602    __ CheckPageFlagClear(regs_.object(),
2603                          regs_.scratch1(),
2604                          MemoryChunk::kSkipEvacuationSlotsRecordingMask,
2605                          &need_incremental);
2606
2607    __ Bind(&ensure_not_white);
2608  }
2609
2610  // We need extra registers for this, so we push the object and the address
2611  // register temporarily.
2612  __ Push(regs_.address(), regs_.object());
2613  __ JumpIfWhite(val,
2614                 regs_.scratch1(),  // Scratch.
2615                 regs_.object(),    // Scratch.
2616                 regs_.address(),   // Scratch.
2617                 regs_.scratch2(),  // Scratch.
2618                 &need_incremental_pop_scratch);
2619  __ Pop(regs_.object(), regs_.address());
2620
2621  regs_.Restore(masm);  // Restore the extra scratch registers we used.
2622  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2623    __ RememberedSetHelper(object(), address(),
2624                           value(),  // scratch1
2625                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2626  } else {
2627    __ Ret();
2628  }
2629
2630  __ Bind(&need_incremental_pop_scratch);
2631  __ Pop(regs_.object(), regs_.address());
2632
2633  __ Bind(&need_incremental);
2634  // Fall through when we need to inform the incremental marker.
2635}
2636
2637
2638void RecordWriteStub::Generate(MacroAssembler* masm) {
2639  Label skip_to_incremental_noncompacting;
2640  Label skip_to_incremental_compacting;
2641
2642  // We patch these two first instructions back and forth between a nop and
2643  // real branch when we start and stop incremental heap marking.
2644  // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
2645  // are generated.
2646  // See RecordWriteStub::Patch for details.
2647  {
2648    InstructionAccurateScope scope(masm, 2);
2649    __ adr(xzr, &skip_to_incremental_noncompacting);
2650    __ adr(xzr, &skip_to_incremental_compacting);
2651  }
2652
2653  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2654    __ RememberedSetHelper(object(), address(),
2655                           value(),  // scratch1
2656                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
2657  }
2658  __ Ret();
2659
2660  __ Bind(&skip_to_incremental_noncompacting);
2661  GenerateIncremental(masm, INCREMENTAL);
2662
2663  __ Bind(&skip_to_incremental_compacting);
2664  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
2665}
2666
2667
2668void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
2669  CEntryStub ces(isolate(), 1, kSaveFPRegs);
2670  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
2671  int parameter_count_offset =
2672      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
2673  __ Ldr(x1, MemOperand(fp, parameter_count_offset));
2674  if (function_mode() == JS_FUNCTION_STUB_MODE) {
2675    __ Add(x1, x1, 1);
2676  }
2677  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
2678  __ Drop(x1);
2679  // Return to IC Miss stub, continuation still on stack.
2680  __ Ret();
2681}
2682
2683// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
2684// a "Push lr" instruction, followed by a call.
2685static const unsigned int kProfileEntryHookCallSize =
2686    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
2687
2688
2689void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
2690  if (masm->isolate()->function_entry_hook() != NULL) {
2691    ProfileEntryHookStub stub(masm->isolate());
2692    Assembler::BlockConstPoolScope no_const_pools(masm);
2693    DontEmitDebugCodeScope no_debug_code(masm);
2694    Label entry_hook_call_start;
2695    __ Bind(&entry_hook_call_start);
2696    __ Push(lr);
2697    __ CallStub(&stub);
2698    DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
2699           kProfileEntryHookCallSize);
2700
2701    __ Pop(lr);
2702  }
2703}
2704
2705
2706void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
2707  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
2708
2709  // Save all kCallerSaved registers (including lr), since this can be called
2710  // from anywhere.
2711  // TODO(jbramley): What about FP registers?
2712  __ PushCPURegList(kCallerSaved);
2713  DCHECK(kCallerSaved.IncludesAliasOf(lr));
2714  const int kNumSavedRegs = kCallerSaved.Count();
2715
2716  // Compute the function's address as the first argument.
2717  __ Sub(x0, lr, kProfileEntryHookCallSize);
2718
2719#if V8_HOST_ARCH_ARM64
2720  uintptr_t entry_hook =
2721      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
2722  __ Mov(x10, entry_hook);
2723#else
2724  // Under the simulator we need to indirect the entry hook through a trampoline
2725  // function at a known address.
2726  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
2727  __ Mov(x10, Operand(ExternalReference(&dispatcher,
2728                                        ExternalReference::BUILTIN_CALL,
2729                                        isolate())));
2730  // It additionally takes an isolate as a third parameter
2731  __ Mov(x2, ExternalReference::isolate_address(isolate()));
2732#endif
2733
2734  // The caller's return address is above the saved temporaries.
2735  // Grab its location for the second argument to the hook.
2736  __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
2737
2738  {
2739    // Create a dummy frame, as CallCFunction requires this.
2740    FrameScope frame(masm, StackFrame::MANUAL);
2741    __ CallCFunction(x10, 2, 0);
2742  }
2743
2744  __ PopCPURegList(kCallerSaved);
2745  __ Ret();
2746}
2747
2748
2749void DirectCEntryStub::Generate(MacroAssembler* masm) {
2750  // When calling into C++ code the stack pointer must be csp.
2751  // Therefore this code must use csp for peek/poke operations when the
2752  // stub is generated. When the stub is called
2753  // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
2754  // and configure the stack pointer *before* doing the call.
2755  const Register old_stack_pointer = __ StackPointer();
2756  __ SetStackPointer(csp);
2757
2758  // Put return address on the stack (accessible to GC through exit frame pc).
2759  __ Poke(lr, 0);
2760  // Call the C++ function.
2761  __ Blr(x10);
2762  // Return to calling code.
2763  __ Peek(lr, 0);
2764  __ AssertFPCRState();
2765  __ Ret();
2766
2767  __ SetStackPointer(old_stack_pointer);
2768}
2769
2770void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
2771                                    Register target) {
2772  // Make sure the caller configured the stack pointer (see comment in
2773  // DirectCEntryStub::Generate).
2774  DCHECK(csp.Is(__ StackPointer()));
2775
2776  intptr_t code =
2777      reinterpret_cast<intptr_t>(GetCode().location());
2778  __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
2779  __ Mov(x10, target);
2780  // Branch to the stub.
2781  __ Blr(lr);
2782}
2783
2784void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
2785                                                      Label* miss,
2786                                                      Label* done,
2787                                                      Register receiver,
2788                                                      Register properties,
2789                                                      Handle<Name> name,
2790                                                      Register scratch0) {
2791  DCHECK(!AreAliased(receiver, properties, scratch0));
2792  DCHECK(name->IsUniqueName());
2793  // If names of slots in range from 1 to kProbes - 1 for the hash value are
2794  // not equal to the name and kProbes-th slot is not used (its name is the
2795  // undefined value), it guarantees the hash table doesn't contain the
2796  // property. It's true even if some slots represent deleted properties
2797  // (their names are the hole value).
2798  for (int i = 0; i < kInlinedProbes; i++) {
2799    // scratch0 points to properties hash.
2800    // Compute the masked index: (hash + i + i * i) & mask.
2801    Register index = scratch0;
2802    // Capacity is smi 2^n.
2803    __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
2804    __ Sub(index, index, 1);
2805    __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
2806
2807    // Scale the index by multiplying by the entry size.
2808    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2809    __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
2810
2811    Register entity_name = scratch0;
2812    // Having undefined at this place means the name is not contained.
2813    Register tmp = index;
2814    __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
2815    __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2816
2817    __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
2818
2819    // Stop if found the property.
2820    __ Cmp(entity_name, Operand(name));
2821    __ B(eq, miss);
2822
2823    Label good;
2824    __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
2825
2826    // Check if the entry name is not a unique name.
2827    __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2828    __ Ldrb(entity_name,
2829            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2830    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2831    __ Bind(&good);
2832  }
2833
2834  CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
2835  spill_list.Combine(lr);
2836  spill_list.Remove(scratch0);  // Scratch registers don't need to be preserved.
2837
2838  __ PushCPURegList(spill_list);
2839
2840  __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2841  __ Mov(x1, Operand(name));
2842  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2843  __ CallStub(&stub);
2844  // Move stub return value to scratch0. Note that scratch0 is not included in
2845  // spill_list and won't be clobbered by PopCPURegList.
2846  __ Mov(scratch0, x0);
2847  __ PopCPURegList(spill_list);
2848
2849  __ Cbz(scratch0, done);
2850  __ B(miss);
2851}
2852
2853
2854void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2855  // This stub overrides SometimesSetsUpAFrame() to return false. That means
2856  // we cannot call anything that could cause a GC from this stub.
2857  //
2858  // Arguments are in x0 and x1:
2859  //   x0: property dictionary.
2860  //   x1: the name of the property we are looking for.
2861  //
2862  // Return value is in x0 and is zero if lookup failed, non zero otherwise.
2863  // If the lookup is successful, x2 will contains the index of the entry.
2864
2865  Register result = x0;
2866  Register dictionary = x0;
2867  Register key = x1;
2868  Register index = x2;
2869  Register mask = x3;
2870  Register hash = x4;
2871  Register undefined = x5;
2872  Register entry_key = x6;
2873
2874  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2875
2876  __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
2877  __ Sub(mask, mask, 1);
2878
2879  __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
2880  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
2881
2882  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
2883    // Compute the masked index: (hash + i + i * i) & mask.
2884    // Capacity is smi 2^n.
2885    if (i > 0) {
2886      // Add the probe offset (i + i * i) left shifted to avoid right shifting
2887      // the hash in a separate instruction. The value hash + i + i * i is right
2888      // shifted in the following and instruction.
2889      DCHECK(NameDictionary::GetProbeOffset(i) <
2890             1 << (32 - Name::kHashFieldOffset));
2891      __ Add(index, hash,
2892             NameDictionary::GetProbeOffset(i) << Name::kHashShift);
2893    } else {
2894      __ Mov(index, hash);
2895    }
2896    __ And(index, mask, Operand(index, LSR, Name::kHashShift));
2897
2898    // Scale the index by multiplying by the entry size.
2899    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2900    __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
2901
2902    __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
2903    __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
2904
2905    // Having undefined at this place means the name is not contained.
2906    __ Cmp(entry_key, undefined);
2907    __ B(eq, &not_in_dictionary);
2908
2909    // Stop if found the property.
2910    __ Cmp(entry_key, key);
2911    __ B(eq, &in_dictionary);
2912
2913    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
2914      // Check if the entry name is not a unique name.
2915      __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
2916      __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
2917      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
2918    }
2919  }
2920
2921  __ Bind(&maybe_in_dictionary);
2922  // If we are doing negative lookup then probing failure should be
2923  // treated as a lookup success. For positive lookup, probing failure
2924  // should be treated as lookup failure.
2925  if (mode() == POSITIVE_LOOKUP) {
2926    __ Mov(result, 0);
2927    __ Ret();
2928  }
2929
2930  __ Bind(&in_dictionary);
2931  __ Mov(result, 1);
2932  __ Ret();
2933
2934  __ Bind(&not_in_dictionary);
2935  __ Mov(result, 0);
2936  __ Ret();
2937}
2938
2939
2940template<class T>
2941static void CreateArrayDispatch(MacroAssembler* masm,
2942                                AllocationSiteOverrideMode mode) {
2943  ASM_LOCATION("CreateArrayDispatch");
2944  if (mode == DISABLE_ALLOCATION_SITES) {
2945    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
2946     __ TailCallStub(&stub);
2947
2948  } else if (mode == DONT_OVERRIDE) {
2949    Register kind = x3;
2950    int last_index =
2951        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
2952    for (int i = 0; i <= last_index; ++i) {
2953      Label next;
2954      ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
2955      // TODO(jbramley): Is this the best way to handle this? Can we make the
2956      // tail calls conditional, rather than hopping over each one?
2957      __ CompareAndBranch(kind, candidate_kind, ne, &next);
2958      T stub(masm->isolate(), candidate_kind);
2959      __ TailCallStub(&stub);
2960      __ Bind(&next);
2961    }
2962
2963    // If we reached this point there is a problem.
2964    __ Abort(kUnexpectedElementsKindInArrayConstructor);
2965
2966  } else {
2967    UNREACHABLE();
2968  }
2969}
2970
2971
2972// TODO(jbramley): If this needs to be a special case, make it a proper template
2973// specialization, and not a separate function.
2974static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
2975                                           AllocationSiteOverrideMode mode) {
2976  ASM_LOCATION("CreateArrayDispatchOneArgument");
2977  // x0 - argc
2978  // x1 - constructor?
2979  // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
2980  // x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
2981  // sp[0] - last argument
2982
2983  Register allocation_site = x2;
2984  Register kind = x3;
2985
2986  Label normal_sequence;
2987  if (mode == DONT_OVERRIDE) {
2988    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2989    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2990    STATIC_ASSERT(FAST_ELEMENTS == 2);
2991    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2992    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
2993    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
2994
2995    // Is the low bit set? If so, the array is holey.
2996    __ Tbnz(kind, 0, &normal_sequence);
2997  }
2998
2999  // Look at the last argument.
3000  // TODO(jbramley): What does a 0 argument represent?
3001  __ Peek(x10, 0);
3002  __ Cbz(x10, &normal_sequence);
3003
3004  if (mode == DISABLE_ALLOCATION_SITES) {
3005    ElementsKind initial = GetInitialFastElementsKind();
3006    ElementsKind holey_initial = GetHoleyElementsKind(initial);
3007
3008    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
3009                                                  holey_initial,
3010                                                  DISABLE_ALLOCATION_SITES);
3011    __ TailCallStub(&stub_holey);
3012
3013    __ Bind(&normal_sequence);
3014    ArraySingleArgumentConstructorStub stub(masm->isolate(),
3015                                            initial,
3016                                            DISABLE_ALLOCATION_SITES);
3017    __ TailCallStub(&stub);
3018  } else if (mode == DONT_OVERRIDE) {
3019    // We are going to create a holey array, but our kind is non-holey.
3020    // Fix kind and retry (only if we have an allocation site in the slot).
3021    __ Orr(kind, kind, 1);
3022
3023    if (FLAG_debug_code) {
3024      __ Ldr(x10, FieldMemOperand(allocation_site, 0));
3025      __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
3026                       &normal_sequence);
3027      __ Assert(eq, kExpectedAllocationSite);
3028    }
3029
3030    // Save the resulting elements kind in type info. We can't just store 'kind'
3031    // in the AllocationSite::transition_info field because elements kind is
3032    // restricted to a portion of the field; upper bits need to be left alone.
3033    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
3034    __ Ldr(x11, FieldMemOperand(allocation_site,
3035                                AllocationSite::kTransitionInfoOffset));
3036    __ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
3037    __ Str(x11, FieldMemOperand(allocation_site,
3038                                AllocationSite::kTransitionInfoOffset));
3039
3040    __ Bind(&normal_sequence);
3041    int last_index =
3042        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
3043    for (int i = 0; i <= last_index; ++i) {
3044      Label next;
3045      ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
3046      __ CompareAndBranch(kind, candidate_kind, ne, &next);
3047      ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
3048      __ TailCallStub(&stub);
3049      __ Bind(&next);
3050    }
3051
3052    // If we reached this point there is a problem.
3053    __ Abort(kUnexpectedElementsKindInArrayConstructor);
3054  } else {
3055    UNREACHABLE();
3056  }
3057}
3058
3059
3060template<class T>
3061static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
3062  int to_index = GetSequenceIndexFromFastElementsKind(
3063      TERMINAL_FAST_ELEMENTS_KIND);
3064  for (int i = 0; i <= to_index; ++i) {
3065    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3066    T stub(isolate, kind);
3067    stub.GetCode();
3068    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
3069      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
3070      stub1.GetCode();
3071    }
3072  }
3073}
3074
3075void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
3076  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
3077      isolate);
3078  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
3079      isolate);
3080  ArrayNArgumentsConstructorStub stub(isolate);
3081  stub.GetCode();
3082  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
3083  for (int i = 0; i < 2; i++) {
3084    // For internal arrays we only need a few things
3085    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
3086    stubh1.GetCode();
3087    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
3088    stubh2.GetCode();
3089  }
3090}
3091
3092
3093void ArrayConstructorStub::GenerateDispatchToArrayStub(
3094    MacroAssembler* masm,
3095    AllocationSiteOverrideMode mode) {
3096  Register argc = x0;
3097  Label zero_case, n_case;
3098  __ Cbz(argc, &zero_case);
3099  __ Cmp(argc, 1);
3100  __ B(ne, &n_case);
3101
3102  // One argument.
3103  CreateArrayDispatchOneArgument(masm, mode);
3104
3105  __ Bind(&zero_case);
3106  // No arguments.
3107  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
3108
3109  __ Bind(&n_case);
3110  // N arguments.
3111  ArrayNArgumentsConstructorStub stub(masm->isolate());
3112  __ TailCallStub(&stub);
3113}
3114
3115
3116void ArrayConstructorStub::Generate(MacroAssembler* masm) {
3117  ASM_LOCATION("ArrayConstructorStub::Generate");
3118  // ----------- S t a t e -------------
3119  //  -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
3120  //  -- x1 : constructor
3121  //  -- x2 : AllocationSite or undefined
3122  //  -- x3 : new target
3123  //  -- sp[0] : last argument
3124  // -----------------------------------
3125  Register constructor = x1;
3126  Register allocation_site = x2;
3127  Register new_target = x3;
3128
3129  if (FLAG_debug_code) {
3130    // The array construct code is only set for the global and natives
3131    // builtin Array functions which always have maps.
3132
3133    Label unexpected_map, map_ok;
3134    // Initial map for the builtin Array function should be a map.
3135    __ Ldr(x10, FieldMemOperand(constructor,
3136                                JSFunction::kPrototypeOrInitialMapOffset));
3137    // Will both indicate a NULL and a Smi.
3138    __ JumpIfSmi(x10, &unexpected_map);
3139    __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
3140    __ Bind(&unexpected_map);
3141    __ Abort(kUnexpectedInitialMapForArrayFunction);
3142    __ Bind(&map_ok);
3143
3144    // We should either have undefined in the allocation_site register or a
3145    // valid AllocationSite.
3146    __ AssertUndefinedOrAllocationSite(allocation_site, x10);
3147  }
3148
3149  // Enter the context of the Array function.
3150  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
3151
3152  Label subclassing;
3153  __ Cmp(new_target, constructor);
3154  __ B(ne, &subclassing);
3155
3156  Register kind = x3;
3157  Label no_info;
3158  // Get the elements kind and case on that.
3159  __ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
3160
3161  __ Ldrsw(kind,
3162           UntagSmiFieldMemOperand(allocation_site,
3163                                   AllocationSite::kTransitionInfoOffset));
3164  __ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
3165  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
3166
3167  __ Bind(&no_info);
3168  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
3169
3170  // Subclassing support.
3171  __ Bind(&subclassing);
3172  __ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
3173  __ Add(x0, x0, Operand(3));
3174  __ Push(new_target, allocation_site);
3175  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
3176}
3177
3178
3179void InternalArrayConstructorStub::GenerateCase(
3180    MacroAssembler* masm, ElementsKind kind) {
3181  Label zero_case, n_case;
3182  Register argc = x0;
3183
3184  __ Cbz(argc, &zero_case);
3185  __ CompareAndBranch(argc, 1, ne, &n_case);
3186
3187  // One argument.
3188  if (IsFastPackedElementsKind(kind)) {
3189    Label packed_case;
3190
3191    // We might need to create a holey array; look at the first argument.
3192    __ Peek(x10, 0);
3193    __ Cbz(x10, &packed_case);
3194
3195    InternalArraySingleArgumentConstructorStub
3196        stub1_holey(isolate(), GetHoleyElementsKind(kind));
3197    __ TailCallStub(&stub1_holey);
3198
3199    __ Bind(&packed_case);
3200  }
3201  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
3202  __ TailCallStub(&stub1);
3203
3204  __ Bind(&zero_case);
3205  // No arguments.
3206  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
3207  __ TailCallStub(&stub0);
3208
3209  __ Bind(&n_case);
3210  // N arguments.
3211  ArrayNArgumentsConstructorStub stubN(isolate());
3212  __ TailCallStub(&stubN);
3213}
3214
3215
3216void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
3217  // ----------- S t a t e -------------
3218  //  -- x0 : argc
3219  //  -- x1 : constructor
3220  //  -- sp[0] : return address
3221  //  -- sp[4] : last argument
3222  // -----------------------------------
3223
3224  Register constructor = x1;
3225
3226  if (FLAG_debug_code) {
3227    // The array construct code is only set for the global and natives
3228    // builtin Array functions which always have maps.
3229
3230    Label unexpected_map, map_ok;
3231    // Initial map for the builtin Array function should be a map.
3232    __ Ldr(x10, FieldMemOperand(constructor,
3233                                JSFunction::kPrototypeOrInitialMapOffset));
3234    // Will both indicate a NULL and a Smi.
3235    __ JumpIfSmi(x10, &unexpected_map);
3236    __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
3237    __ Bind(&unexpected_map);
3238    __ Abort(kUnexpectedInitialMapForArrayFunction);
3239    __ Bind(&map_ok);
3240  }
3241
3242  Register kind = w3;
3243  // Figure out the right elements kind
3244  __ Ldr(x10, FieldMemOperand(constructor,
3245                              JSFunction::kPrototypeOrInitialMapOffset));
3246
3247  // Retrieve elements_kind from map.
3248  __ LoadElementsKindFromMap(kind, x10);
3249
3250  if (FLAG_debug_code) {
3251    Label done;
3252    __ Cmp(x3, FAST_ELEMENTS);
3253    __ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
3254    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
3255  }
3256
3257  Label fast_elements_case;
3258  __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
3259  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
3260
3261  __ Bind(&fast_elements_case);
3262  GenerateCase(masm, FAST_ELEMENTS);
3263}
3264
3265// The number of register that CallApiFunctionAndReturn will need to save on
3266// the stack. The space for these registers need to be allocated in the
3267// ExitFrame before calling CallApiFunctionAndReturn.
3268static const int kCallApiFunctionSpillSpace = 4;
3269
3270
3271static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3272  return static_cast<int>(ref0.address() - ref1.address());
3273}
3274
3275
3276// Calls an API function. Allocates HandleScope, extracts returned value
3277// from handle and propagates exceptions.
3278// 'stack_space' is the space to be unwound on exit (includes the call JS
3279// arguments space and the additional space allocated for the fast call).
3280// 'spill_offset' is the offset from the stack pointer where
3281// CallApiFunctionAndReturn can spill registers.
3282static void CallApiFunctionAndReturn(
3283    MacroAssembler* masm, Register function_address,
3284    ExternalReference thunk_ref, int stack_space,
3285    MemOperand* stack_space_operand, int spill_offset,
3286    MemOperand return_value_operand, MemOperand* context_restore_operand) {
3287  ASM_LOCATION("CallApiFunctionAndReturn");
3288  Isolate* isolate = masm->isolate();
3289  ExternalReference next_address =
3290      ExternalReference::handle_scope_next_address(isolate);
3291  const int kNextOffset = 0;
3292  const int kLimitOffset = AddressOffset(
3293      ExternalReference::handle_scope_limit_address(isolate), next_address);
3294  const int kLevelOffset = AddressOffset(
3295      ExternalReference::handle_scope_level_address(isolate), next_address);
3296
3297  DCHECK(function_address.is(x1) || function_address.is(x2));
3298
3299  Label profiler_disabled;
3300  Label end_profiler_check;
3301  __ Mov(x10, ExternalReference::is_profiling_address(isolate));
3302  __ Ldrb(w10, MemOperand(x10));
3303  __ Cbz(w10, &profiler_disabled);
3304  __ Mov(x3, thunk_ref);
3305  __ B(&end_profiler_check);
3306
3307  __ Bind(&profiler_disabled);
3308  __ Mov(x3, function_address);
3309  __ Bind(&end_profiler_check);
3310
3311  // Save the callee-save registers we are going to use.
3312  // TODO(all): Is this necessary? ARM doesn't do it.
3313  STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
3314  __ Poke(x19, (spill_offset + 0) * kXRegSize);
3315  __ Poke(x20, (spill_offset + 1) * kXRegSize);
3316  __ Poke(x21, (spill_offset + 2) * kXRegSize);
3317  __ Poke(x22, (spill_offset + 3) * kXRegSize);
3318
3319  // Allocate HandleScope in callee-save registers.
3320  // We will need to restore the HandleScope after the call to the API function,
3321  // by allocating it in callee-save registers they will be preserved by C code.
3322  Register handle_scope_base = x22;
3323  Register next_address_reg = x19;
3324  Register limit_reg = x20;
3325  Register level_reg = w21;
3326
3327  __ Mov(handle_scope_base, next_address);
3328  __ Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
3329  __ Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
3330  __ Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
3331  __ Add(level_reg, level_reg, 1);
3332  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
3333
3334  if (FLAG_log_timer_events) {
3335    FrameScope frame(masm, StackFrame::MANUAL);
3336    __ PushSafepointRegisters();
3337    __ Mov(x0, ExternalReference::isolate_address(isolate));
3338    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
3339                     1);
3340    __ PopSafepointRegisters();
3341  }
3342
3343  // Native call returns to the DirectCEntry stub which redirects to the
3344  // return address pushed on stack (could have moved after GC).
3345  // DirectCEntry stub itself is generated early and never moves.
3346  DirectCEntryStub stub(isolate);
3347  stub.GenerateCall(masm, x3);
3348
3349  if (FLAG_log_timer_events) {
3350    FrameScope frame(masm, StackFrame::MANUAL);
3351    __ PushSafepointRegisters();
3352    __ Mov(x0, ExternalReference::isolate_address(isolate));
3353    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
3354                     1);
3355    __ PopSafepointRegisters();
3356  }
3357
3358  Label promote_scheduled_exception;
3359  Label delete_allocated_handles;
3360  Label leave_exit_frame;
3361  Label return_value_loaded;
3362
3363  // Load value from ReturnValue.
3364  __ Ldr(x0, return_value_operand);
3365  __ Bind(&return_value_loaded);
3366  // No more valid handles (the result handle was the last one). Restore
3367  // previous handle scope.
3368  __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
3369  if (__ emit_debug_code()) {
3370    __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
3371    __ Cmp(w1, level_reg);
3372    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
3373  }
3374  __ Sub(level_reg, level_reg, 1);
3375  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
3376  __ Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
3377  __ Cmp(limit_reg, x1);
3378  __ B(ne, &delete_allocated_handles);
3379
3380  // Leave the API exit frame.
3381  __ Bind(&leave_exit_frame);
3382  // Restore callee-saved registers.
3383  __ Peek(x19, (spill_offset + 0) * kXRegSize);
3384  __ Peek(x20, (spill_offset + 1) * kXRegSize);
3385  __ Peek(x21, (spill_offset + 2) * kXRegSize);
3386  __ Peek(x22, (spill_offset + 3) * kXRegSize);
3387
3388  bool restore_context = context_restore_operand != NULL;
3389  if (restore_context) {
3390    __ Ldr(cp, *context_restore_operand);
3391  }
3392
3393  if (stack_space_operand != NULL) {
3394    __ Ldr(w2, *stack_space_operand);
3395  }
3396
3397  __ LeaveExitFrame(false, x1, !restore_context);
3398
3399  // Check if the function scheduled an exception.
3400  __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
3401  __ Ldr(x5, MemOperand(x5));
3402  __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
3403                   &promote_scheduled_exception);
3404
3405  if (stack_space_operand != NULL) {
3406    __ Drop(x2, 1);
3407  } else {
3408    __ Drop(stack_space);
3409  }
3410  __ Ret();
3411
3412  // Re-throw by promoting a scheduled exception.
3413  __ Bind(&promote_scheduled_exception);
3414  __ TailCallRuntime(Runtime::kPromoteScheduledException);
3415
3416  // HandleScope limit has changed. Delete allocated extensions.
3417  __ Bind(&delete_allocated_handles);
3418  __ Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
3419  // Save the return value in a callee-save register.
3420  Register saved_result = x19;
3421  __ Mov(saved_result, x0);
3422  __ Mov(x0, ExternalReference::isolate_address(isolate));
3423  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
3424                   1);
3425  __ Mov(x0, saved_result);
3426  __ B(&leave_exit_frame);
3427}
3428
3429void CallApiCallbackStub::Generate(MacroAssembler* masm) {
3430  // ----------- S t a t e -------------
3431  //  -- x0                  : callee
3432  //  -- x4                  : call_data
3433  //  -- x2                  : holder
3434  //  -- x1                  : api_function_address
3435  //  -- cp                  : context
3436  //  --
3437  //  -- sp[0]               : last argument
3438  //  -- ...
3439  //  -- sp[(argc - 1) * 8]  : first argument
3440  //  -- sp[argc * 8]        : receiver
3441  // -----------------------------------
3442
3443  Register callee = x0;
3444  Register call_data = x4;
3445  Register holder = x2;
3446  Register api_function_address = x1;
3447  Register context = cp;
3448
3449  typedef FunctionCallbackArguments FCA;
3450
3451  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
3452  STATIC_ASSERT(FCA::kCalleeIndex == 5);
3453  STATIC_ASSERT(FCA::kDataIndex == 4);
3454  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3455  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3456  STATIC_ASSERT(FCA::kIsolateIndex == 1);
3457  STATIC_ASSERT(FCA::kHolderIndex == 0);
3458  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
3459  STATIC_ASSERT(FCA::kArgsLength == 8);
3460
3461  // FunctionCallbackArguments
3462
3463  // new target
3464  __ PushRoot(Heap::kUndefinedValueRootIndex);
3465
3466  // context, callee and call data.
3467  __ Push(context, callee, call_data);
3468
3469  if (!is_lazy()) {
3470    // Load context from callee
3471    __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
3472  }
3473
3474  if (!call_data_undefined()) {
3475    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
3476  }
3477  Register isolate_reg = x5;
3478  __ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
3479
3480  // FunctionCallbackArguments:
3481  //    return value, return value default, isolate, holder.
3482  __ Push(call_data, call_data, isolate_reg, holder);
3483
3484  // Prepare arguments.
3485  Register args = x6;
3486  __ Mov(args, masm->StackPointer());
3487
3488  // Allocate the v8::Arguments structure in the arguments' space, since it's
3489  // not controlled by GC.
3490  const int kApiStackSpace = 3;
3491
3492  // Allocate space for CallApiFunctionAndReturn can store some scratch
3493  // registeres on the stack.
3494  const int kCallApiFunctionSpillSpace = 4;
3495
3496  FrameScope frame_scope(masm, StackFrame::MANUAL);
3497  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
3498
3499  DCHECK(!AreAliased(x0, api_function_address));
3500  // x0 = FunctionCallbackInfo&
3501  // Arguments is after the return address.
3502  __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
3503  // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
3504  __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
3505  __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
3506  // FunctionCallbackInfo::length_ = argc
3507  __ Mov(x10, argc());
3508  __ Str(x10, MemOperand(x0, 2 * kPointerSize));
3509
3510  ExternalReference thunk_ref =
3511      ExternalReference::invoke_function_callback(masm->isolate());
3512
3513  AllowExternalCallThatCantCauseGC scope(masm);
3514  MemOperand context_restore_operand(
3515      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
3516  // Stores return the first js argument
3517  int return_value_offset = 0;
3518  if (is_store()) {
3519    return_value_offset = 2 + FCA::kArgsLength;
3520  } else {
3521    return_value_offset = 2 + FCA::kReturnValueOffset;
3522  }
3523  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
3524  int stack_space = 0;
3525  MemOperand length_operand =
3526      MemOperand(masm->StackPointer(), 3 * kPointerSize);
3527  MemOperand* stack_space_operand = &length_operand;
3528  stack_space = argc() + FCA::kArgsLength + 1;
3529  stack_space_operand = NULL;
3530
3531  const int spill_offset = 1 + kApiStackSpace;
3532  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
3533                           stack_space_operand, spill_offset,
3534                           return_value_operand, &context_restore_operand);
3535}
3536
3537
3538void CallApiGetterStub::Generate(MacroAssembler* masm) {
3539  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3540  // name below the exit frame to make GC aware of them.
3541  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3542  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3543  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3544  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3545  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3546  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3547  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3548  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3549
3550  Register receiver = ApiGetterDescriptor::ReceiverRegister();
3551  Register holder = ApiGetterDescriptor::HolderRegister();
3552  Register callback = ApiGetterDescriptor::CallbackRegister();
3553  Register scratch = x4;
3554  Register scratch2 = x5;
3555  Register scratch3 = x6;
3556  DCHECK(!AreAliased(receiver, holder, callback, scratch));
3557
3558  __ Push(receiver);
3559
3560  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3561  __ Mov(scratch2, Operand(ExternalReference::isolate_address(isolate())));
3562  __ Ldr(scratch3, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3563  __ Push(scratch3, scratch, scratch, scratch2, holder);
3564  __ Push(Smi::kZero);  // should_throw_on_error -> false
3565  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3566  __ Push(scratch);
3567
3568  // v8::PropertyCallbackInfo::args_ array and name handle.
3569  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3570
3571  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3572  __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
3573  __ Add(x1, x0, 1 * kPointerSize);  // x1 = v8::PCI::args_
3574
3575  const int kApiStackSpace = 1;
3576
3577  // Allocate space for CallApiFunctionAndReturn can store some scratch
3578  // registeres on the stack.
3579  const int kCallApiFunctionSpillSpace = 4;
3580
3581  FrameScope frame_scope(masm, StackFrame::MANUAL);
3582  __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
3583
3584  // Create v8::PropertyCallbackInfo object on the stack and initialize
3585  // it's args_ field.
3586  __ Poke(x1, 1 * kPointerSize);
3587  __ Add(x1, masm->StackPointer(), 1 * kPointerSize);
3588  // x1 = v8::PropertyCallbackInfo&
3589
3590  ExternalReference thunk_ref =
3591      ExternalReference::invoke_accessor_getter_callback(isolate());
3592
3593  Register api_function_address = x2;
3594  __ Ldr(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3595  __ Ldr(api_function_address,
3596         FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3597
3598  const int spill_offset = 1 + kApiStackSpace;
3599  // +3 is to skip prolog, return address and name handle.
3600  MemOperand return_value_operand(
3601      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3602  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3603                           kStackUnwindSpace, NULL, spill_offset,
3604                           return_value_operand, NULL);
3605}
3606
3607#undef __
3608
3609}  // namespace internal
3610}  // namespace v8
3611
3612#endif  // V8_TARGET_ARCH_ARM64
3613