1// Copyright 2014 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#if V8_TARGET_ARCH_PPC
6
7#include "src/code-stubs.h"
8#include "src/api-arguments.h"
9#include "src/base/bits.h"
10#include "src/bootstrapper.h"
11#include "src/codegen.h"
12#include "src/ic/handler-compiler.h"
13#include "src/ic/ic.h"
14#include "src/ic/stub-cache.h"
15#include "src/isolate.h"
16#include "src/ppc/code-stubs-ppc.h"
17#include "src/regexp/jsregexp.h"
18#include "src/regexp/regexp-macro-assembler.h"
19#include "src/runtime/runtime.h"
20
21namespace v8 {
22namespace internal {
23
24#define __ ACCESS_MASM(masm)
25
26void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
27  __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
28  __ StorePX(r4, MemOperand(sp, r0));
29  __ push(r4);
30  __ push(r5);
31  __ addi(r3, r3, Operand(3));
32  __ TailCallRuntime(Runtime::kNewArray);
33}
34
35static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
36                                          Condition cond);
37static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
38                                    Register rhs, Label* lhs_not_nan,
39                                    Label* slow, bool strict);
40static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
41                                           Register rhs);
42
43
44void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
45                                               ExternalReference miss) {
46  // Update the static counter each time a new code stub is generated.
47  isolate()->counters()->code_stubs()->Increment();
48
49  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
50  int param_count = descriptor.GetRegisterParameterCount();
51  {
52    // Call the runtime system in a fresh internal frame.
53    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
54    DCHECK(param_count == 0 ||
55           r3.is(descriptor.GetRegisterParameter(param_count - 1)));
56    // Push arguments
57    for (int i = 0; i < param_count; ++i) {
58      __ push(descriptor.GetRegisterParameter(i));
59    }
60    __ CallExternalReference(miss, param_count);
61  }
62
63  __ Ret();
64}
65
66
67void DoubleToIStub::Generate(MacroAssembler* masm) {
68  Label out_of_range, only_low, negate, done, fastpath_done;
69  Register input_reg = source();
70  Register result_reg = destination();
71  DCHECK(is_truncating());
72
73  int double_offset = offset();
74
75  // Immediate values for this stub fit in instructions, so it's safe to use ip.
76  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
77  Register scratch_low =
78      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
79  Register scratch_high =
80      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
81  DoubleRegister double_scratch = kScratchDoubleReg;
82
83  __ push(scratch);
84  // Account for saved regs if input is sp.
85  if (input_reg.is(sp)) double_offset += kPointerSize;
86
87  if (!skip_fastpath()) {
88    // Load double input.
89    __ lfd(double_scratch, MemOperand(input_reg, double_offset));
90
91    // Do fast-path convert from double to int.
92    __ ConvertDoubleToInt64(double_scratch,
93#if !V8_TARGET_ARCH_PPC64
94                            scratch,
95#endif
96                            result_reg, d0);
97
98// Test for overflow
99#if V8_TARGET_ARCH_PPC64
100    __ TestIfInt32(result_reg, r0);
101#else
102    __ TestIfInt32(scratch, result_reg, r0);
103#endif
104    __ beq(&fastpath_done);
105  }
106
107  __ Push(scratch_high, scratch_low);
108  // Account for saved regs if input is sp.
109  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
110
111  __ lwz(scratch_high,
112         MemOperand(input_reg, double_offset + Register::kExponentOffset));
113  __ lwz(scratch_low,
114         MemOperand(input_reg, double_offset + Register::kMantissaOffset));
115
116  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
117  // Load scratch with exponent - 1. This is faster than loading
118  // with exponent because Bias + 1 = 1024 which is a *PPC* immediate value.
119  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
120  __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
121  // If exponent is greater than or equal to 84, the 32 less significant
122  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
123  // the result is 0.
124  // Compare exponent with 84 (compare exponent - 1 with 83).
125  __ cmpi(scratch, Operand(83));
126  __ bge(&out_of_range);
127
128  // If we reach this code, 31 <= exponent <= 83.
129  // So, we don't have to handle cases where 0 <= exponent <= 20 for
130  // which we would need to shift right the high part of the mantissa.
131  // Scratch contains exponent - 1.
132  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
133  __ subfic(scratch, scratch, Operand(51));
134  __ cmpi(scratch, Operand::Zero());
135  __ ble(&only_low);
136  // 21 <= exponent <= 51, shift scratch_low and scratch_high
137  // to generate the result.
138  __ srw(scratch_low, scratch_low, scratch);
139  // Scratch contains: 52 - exponent.
140  // We needs: exponent - 20.
141  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
142  __ subfic(scratch, scratch, Operand(32));
143  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
144  // Set the implicit 1 before the mantissa part in scratch_high.
145  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
146  __ oris(result_reg, result_reg,
147          Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16)));
148  __ slw(r0, result_reg, scratch);
149  __ orx(result_reg, scratch_low, r0);
150  __ b(&negate);
151
152  __ bind(&out_of_range);
153  __ mov(result_reg, Operand::Zero());
154  __ b(&done);
155
156  __ bind(&only_low);
157  // 52 <= exponent <= 83, shift only scratch_low.
158  // On entry, scratch contains: 52 - exponent.
159  __ neg(scratch, scratch);
160  __ slw(result_reg, scratch_low, scratch);
161
162  __ bind(&negate);
163  // If input was positive, scratch_high ASR 31 equals 0 and
164  // scratch_high LSR 31 equals zero.
165  // New result = (result eor 0) + 0 = result.
166  // If the input was negative, we have to negate the result.
167  // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
168  // New result = (result eor 0xffffffff) + 1 = 0 - result.
169  __ srawi(r0, scratch_high, 31);
170#if V8_TARGET_ARCH_PPC64
171  __ srdi(r0, r0, Operand(32));
172#endif
173  __ xor_(result_reg, result_reg, r0);
174  __ srwi(r0, scratch_high, Operand(31));
175  __ add(result_reg, result_reg, r0);
176
177  __ bind(&done);
178  __ Pop(scratch_high, scratch_low);
179
180  __ bind(&fastpath_done);
181  __ pop(scratch);
182
183  __ Ret();
184}
185
186
187// Handle the case where the lhs and rhs are the same object.
188// Equality is almost reflexive (everything but NaN), so this is a test
189// for "identity and not NaN".
190static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
191                                          Condition cond) {
192  Label not_identical;
193  Label heap_number, return_equal;
194  __ cmp(r3, r4);
195  __ bne(&not_identical);
196
197  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
198  // so we do the second best thing - test it ourselves.
199  // They are both equal and they are not both Smis so both of them are not
200  // Smis.  If it's not a heap number, then return equal.
201  if (cond == lt || cond == gt) {
202    // Call runtime on identical JSObjects.
203    __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
204    __ bge(slow);
205    // Call runtime on identical symbols since we need to throw a TypeError.
206    __ cmpi(r7, Operand(SYMBOL_TYPE));
207    __ beq(slow);
208  } else {
209    __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
210    __ beq(&heap_number);
211    // Comparing JS objects with <=, >= is complicated.
212    if (cond != eq) {
213      __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE));
214      __ bge(slow);
215      // Call runtime on identical symbols since we need to throw a TypeError.
216      __ cmpi(r7, Operand(SYMBOL_TYPE));
217      __ beq(slow);
218      // Normally here we fall through to return_equal, but undefined is
219      // special: (undefined == undefined) == true, but
220      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
221      if (cond == le || cond == ge) {
222        __ cmpi(r7, Operand(ODDBALL_TYPE));
223        __ bne(&return_equal);
224        __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
225        __ cmp(r3, r5);
226        __ bne(&return_equal);
227        if (cond == le) {
228          // undefined <= undefined should fail.
229          __ li(r3, Operand(GREATER));
230        } else {
231          // undefined >= undefined should fail.
232          __ li(r3, Operand(LESS));
233        }
234        __ Ret();
235      }
236    }
237  }
238
239  __ bind(&return_equal);
240  if (cond == lt) {
241    __ li(r3, Operand(GREATER));  // Things aren't less than themselves.
242  } else if (cond == gt) {
243    __ li(r3, Operand(LESS));  // Things aren't greater than themselves.
244  } else {
245    __ li(r3, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
246  }
247  __ Ret();
248
249  // For less and greater we don't have to check for NaN since the result of
250  // x < x is false regardless.  For the others here is some code to check
251  // for NaN.
252  if (cond != lt && cond != gt) {
253    __ bind(&heap_number);
254    // It is a heap number, so return non-equal if it's NaN and equal if it's
255    // not NaN.
256
257    // The representation of NaN values has all exponent bits (52..62) set,
258    // and not all mantissa bits (0..51) clear.
259    // Read top bits of double representation (second word of value).
260    __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
261    // Test that exponent bits are all set.
262    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
263    __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
264    __ cmpli(r6, Operand(0x7ff));
265    __ bne(&return_equal);
266
267    // Shift out flag and all exponent bits, retaining only mantissa.
268    __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
269    // Or with all low-bits of mantissa.
270    __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
271    __ orx(r3, r6, r5);
272    __ cmpi(r3, Operand::Zero());
273    // For equal we already have the right value in r3:  Return zero (equal)
274    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
275    // not (it's a NaN).  For <= and >= we need to load r0 with the failing
276    // value if it's a NaN.
277    if (cond != eq) {
278      if (CpuFeatures::IsSupported(ISELECT)) {
279        __ li(r4, Operand((cond == le) ? GREATER : LESS));
280        __ isel(eq, r3, r3, r4);
281      } else {
282        // All-zero means Infinity means equal.
283        __ Ret(eq);
284        if (cond == le) {
285          __ li(r3, Operand(GREATER));  // NaN <= NaN should fail.
286        } else {
287          __ li(r3, Operand(LESS));  // NaN >= NaN should fail.
288        }
289      }
290    }
291    __ Ret();
292  }
293  // No fall through here.
294
295  __ bind(&not_identical);
296}
297
298
299// See comment at call site.
300static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
301                                    Register rhs, Label* lhs_not_nan,
302                                    Label* slow, bool strict) {
303  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
304
305  Label rhs_is_smi;
306  __ JumpIfSmi(rhs, &rhs_is_smi);
307
308  // Lhs is a Smi.  Check whether the rhs is a heap number.
309  __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
310  if (strict) {
311    // If rhs is not a number and lhs is a Smi then strict equality cannot
312    // succeed.  Return non-equal
313    // If rhs is r3 then there is already a non zero value in it.
314    if (!rhs.is(r3)) {
315      Label skip;
316      __ beq(&skip);
317      __ mov(r3, Operand(NOT_EQUAL));
318      __ Ret();
319      __ bind(&skip);
320    } else {
321      __ Ret(ne);
322    }
323  } else {
324    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
325    // the runtime.
326    __ bne(slow);
327  }
328
329  // Lhs is a smi, rhs is a number.
330  // Convert lhs to a double in d7.
331  __ SmiToDouble(d7, lhs);
332  // Load the double from rhs, tagged HeapNumber r3, to d6.
333  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
334
335  // We now have both loaded as doubles but we can skip the lhs nan check
336  // since it's a smi.
337  __ b(lhs_not_nan);
338
339  __ bind(&rhs_is_smi);
340  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
341  __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
342  if (strict) {
343    // If lhs is not a number and rhs is a smi then strict equality cannot
344    // succeed.  Return non-equal.
345    // If lhs is r3 then there is already a non zero value in it.
346    if (!lhs.is(r3)) {
347      Label skip;
348      __ beq(&skip);
349      __ mov(r3, Operand(NOT_EQUAL));
350      __ Ret();
351      __ bind(&skip);
352    } else {
353      __ Ret(ne);
354    }
355  } else {
356    // Smi compared non-strictly with a non-smi non-heap-number.  Call
357    // the runtime.
358    __ bne(slow);
359  }
360
361  // Rhs is a smi, lhs is a heap number.
362  // Load the double from lhs, tagged HeapNumber r4, to d7.
363  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
364  // Convert rhs to a double in d6.
365  __ SmiToDouble(d6, rhs);
366  // Fall through to both_loaded_as_doubles.
367}
368
369
370// See comment at call site.
371static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
372                                           Register rhs) {
373  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
374
375  // If either operand is a JS object or an oddball value, then they are
376  // not equal since their pointers are different.
377  // There is no test for undetectability in strict equality.
378  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
379  Label first_non_object;
380  // Get the type of the first operand into r5 and compare it with
381  // FIRST_JS_RECEIVER_TYPE.
382  __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE);
383  __ blt(&first_non_object);
384
385  // Return non-zero (r3 is not zero)
386  Label return_not_equal;
387  __ bind(&return_not_equal);
388  __ Ret();
389
390  __ bind(&first_non_object);
391  // Check for oddballs: true, false, null, undefined.
392  __ cmpi(r5, Operand(ODDBALL_TYPE));
393  __ beq(&return_not_equal);
394
395  __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE);
396  __ bge(&return_not_equal);
397
398  // Check for oddballs: true, false, null, undefined.
399  __ cmpi(r6, Operand(ODDBALL_TYPE));
400  __ beq(&return_not_equal);
401
402  // Now that we have the types we might as well check for
403  // internalized-internalized.
404  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
405  __ orx(r5, r5, r6);
406  __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
407  __ beq(&return_not_equal, cr0);
408}
409
410
411// See comment at call site.
412static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
413                                       Register rhs,
414                                       Label* both_loaded_as_doubles,
415                                       Label* not_heap_numbers, Label* slow) {
416  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
417
418  __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
419  __ bne(not_heap_numbers);
420  __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
421  __ cmp(r5, r6);
422  __ bne(slow);  // First was a heap number, second wasn't.  Go slow case.
423
424  // Both are heap numbers.  Load them up then jump to the code we have
425  // for that.
426  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
427  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
428
429  __ b(both_loaded_as_doubles);
430}
431
432// Fast negative check for internalized-to-internalized equality or receiver
433// equality. Also handles the undetectable receiver to null/undefined
434// comparison.
435static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
436                                                     Register lhs, Register rhs,
437                                                     Label* possible_strings,
438                                                     Label* runtime_call) {
439  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
440
441  // r5 is object type of rhs.
442  Label object_test, return_equal, return_unequal, undetectable;
443  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
444  __ andi(r0, r5, Operand(kIsNotStringMask));
445  __ bne(&object_test, cr0);
446  __ andi(r0, r5, Operand(kIsNotInternalizedMask));
447  __ bne(possible_strings, cr0);
448  __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
449  __ bge(runtime_call);
450  __ andi(r0, r6, Operand(kIsNotInternalizedMask));
451  __ bne(possible_strings, cr0);
452
453  // Both are internalized. We already checked they weren't the same pointer so
454  // they are not equal. Return non-equal by returning the non-zero object
455  // pointer in r3.
456  __ Ret();
457
458  __ bind(&object_test);
459  __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
460  __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
461  __ lbz(r7, FieldMemOperand(r5, Map::kBitFieldOffset));
462  __ lbz(r8, FieldMemOperand(r6, Map::kBitFieldOffset));
463  __ andi(r0, r7, Operand(1 << Map::kIsUndetectable));
464  __ bne(&undetectable, cr0);
465  __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
466  __ bne(&return_unequal, cr0);
467
468  __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE);
469  __ blt(runtime_call);
470  __ CompareInstanceType(r6, r6, FIRST_JS_RECEIVER_TYPE);
471  __ blt(runtime_call);
472
473  __ bind(&return_unequal);
474  // Return non-equal by returning the non-zero object pointer in r3.
475  __ Ret();
476
477  __ bind(&undetectable);
478  __ andi(r0, r8, Operand(1 << Map::kIsUndetectable));
479  __ beq(&return_unequal, cr0);
480
481  // If both sides are JSReceivers, then the result is false according to
482  // the HTML specification, which says that only comparisons with null or
483  // undefined are affected by special casing for document.all.
484  __ CompareInstanceType(r5, r5, ODDBALL_TYPE);
485  __ beq(&return_equal);
486  __ CompareInstanceType(r6, r6, ODDBALL_TYPE);
487  __ bne(&return_unequal);
488
489  __ bind(&return_equal);
490  __ li(r3, Operand(EQUAL));
491  __ Ret();
492}
493
494
495static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
496                                         Register scratch,
497                                         CompareICState::State expected,
498                                         Label* fail) {
499  Label ok;
500  if (expected == CompareICState::SMI) {
501    __ JumpIfNotSmi(input, fail);
502  } else if (expected == CompareICState::NUMBER) {
503    __ JumpIfSmi(input, &ok);
504    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
505                DONT_DO_SMI_CHECK);
506  }
507  // We could be strict about internalized/non-internalized here, but as long as
508  // hydrogen doesn't care, the stub doesn't have to care either.
509  __ bind(&ok);
510}
511
512
513// On entry r4 and r5 are the values to be compared.
514// On exit r3 is 0, positive or negative to indicate the result of
515// the comparison.
516void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
517  Register lhs = r4;
518  Register rhs = r3;
519  Condition cc = GetCondition();
520
521  Label miss;
522  CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss);
523  CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss);
524
525  Label slow;  // Call builtin.
526  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
527
528  Label not_two_smis, smi_done;
529  __ orx(r5, r4, r3);
530  __ JumpIfNotSmi(r5, &not_two_smis);
531  __ SmiUntag(r4);
532  __ SmiUntag(r3);
533  __ sub(r3, r4, r3);
534  __ Ret();
535  __ bind(&not_two_smis);
536
537  // NOTICE! This code is only reached after a smi-fast-case check, so
538  // it is certain that at least one operand isn't a smi.
539
540  // Handle the case where the objects are identical.  Either returns the answer
541  // or goes to slow.  Only falls through if the objects were not identical.
542  EmitIdenticalObjectComparison(masm, &slow, cc);
543
544  // If either is a Smi (we know that not both are), then they can only
545  // be strictly equal if the other is a HeapNumber.
546  STATIC_ASSERT(kSmiTag == 0);
547  DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
548  __ and_(r5, lhs, rhs);
549  __ JumpIfNotSmi(r5, &not_smis);
550  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
551  // 1) Return the answer.
552  // 2) Go to slow.
553  // 3) Fall through to both_loaded_as_doubles.
554  // 4) Jump to lhs_not_nan.
555  // In cases 3 and 4 we have found out we were dealing with a number-number
556  // comparison.  The double values of the numbers have been loaded
557  // into d7 and d6.
558  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
559
560  __ bind(&both_loaded_as_doubles);
561  // The arguments have been converted to doubles and stored in d6 and d7
562  __ bind(&lhs_not_nan);
563  Label no_nan;
564  __ fcmpu(d7, d6);
565
566  Label nan, equal, less_than;
567  __ bunordered(&nan);
568  if (CpuFeatures::IsSupported(ISELECT)) {
569    DCHECK(EQUAL == 0);
570    __ li(r4, Operand(GREATER));
571    __ li(r5, Operand(LESS));
572    __ isel(eq, r3, r0, r4);
573    __ isel(lt, r3, r5, r3);
574    __ Ret();
575  } else {
576    __ beq(&equal);
577    __ blt(&less_than);
578    __ li(r3, Operand(GREATER));
579    __ Ret();
580    __ bind(&equal);
581    __ li(r3, Operand(EQUAL));
582    __ Ret();
583    __ bind(&less_than);
584    __ li(r3, Operand(LESS));
585    __ Ret();
586  }
587
588  __ bind(&nan);
589  // If one of the sides was a NaN then the v flag is set.  Load r3 with
590  // whatever it takes to make the comparison fail, since comparisons with NaN
591  // always fail.
592  if (cc == lt || cc == le) {
593    __ li(r3, Operand(GREATER));
594  } else {
595    __ li(r3, Operand(LESS));
596  }
597  __ Ret();
598
599  __ bind(&not_smis);
600  // At this point we know we are dealing with two different objects,
601  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
602  if (strict()) {
603    // This returns non-equal for some object types, or falls through if it
604    // was not lucky.
605    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
606  }
607
608  Label check_for_internalized_strings;
609  Label flat_string_check;
610  // Check for heap-number-heap-number comparison.  Can jump to slow case,
611  // or load both doubles into r3, r4, r5, r6 and jump to the code that handles
612  // that case.  If the inputs are not doubles then jumps to
613  // check_for_internalized_strings.
614  // In this case r5 will contain the type of rhs_.  Never falls through.
615  EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
616                             &check_for_internalized_strings,
617                             &flat_string_check);
618
619  __ bind(&check_for_internalized_strings);
620  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
621  // internalized strings.
622  if (cc == eq && !strict()) {
623    // Returns an answer for two internalized strings or two detectable objects.
624    // Otherwise jumps to string case or not both strings case.
625    // Assumes that r5 is the type of rhs_ on entry.
626    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
627                                             &slow);
628  }
629
630  // Check for both being sequential one-byte strings,
631  // and inline if that is the case.
632  __ bind(&flat_string_check);
633
634  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow);
635
636  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
637                      r6);
638  if (cc == eq) {
639    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6);
640  } else {
641    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7);
642  }
643  // Never falls through to here.
644
645  __ bind(&slow);
646
647  if (cc == eq) {
648    {
649      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
650      __ Push(cp);
651      __ Call(strict() ? isolate()->builtins()->StrictEqual()
652                       : isolate()->builtins()->Equal(),
653              RelocInfo::CODE_TARGET);
654      __ Pop(cp);
655    }
656    // Turn true into 0 and false into some non-zero value.
657    STATIC_ASSERT(EQUAL == 0);
658    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
659    __ sub(r3, r3, r4);
660    __ Ret();
661  } else {
662    __ Push(lhs, rhs);
663    int ncr;  // NaN compare result
664    if (cc == lt || cc == le) {
665      ncr = GREATER;
666    } else {
667      DCHECK(cc == gt || cc == ge);  // remaining cases
668      ncr = LESS;
669    }
670    __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
671    __ push(r3);
672
673    // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
674    // tagged as a small integer.
675    __ TailCallRuntime(Runtime::kCompare);
676  }
677
678  __ bind(&miss);
679  GenerateMiss(masm);
680}
681
682
683void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
684  // We don't allow a GC during a store buffer overflow so there is no need to
685  // store the registers in any particular way, but we do have to store and
686  // restore them.
687  __ mflr(r0);
688  __ MultiPush(kJSCallerSaved | r0.bit());
689  if (save_doubles()) {
690    __ MultiPushDoubles(kCallerSavedDoubles);
691  }
692  const int argument_count = 1;
693  const int fp_argument_count = 0;
694  const Register scratch = r4;
695
696  AllowExternalCallThatCantCauseGC scope(masm);
697  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
698  __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
699  __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
700                   argument_count);
701  if (save_doubles()) {
702    __ MultiPopDoubles(kCallerSavedDoubles);
703  }
704  __ MultiPop(kJSCallerSaved | r0.bit());
705  __ mtlr(r0);
706  __ Ret();
707}
708
709
710void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
711  __ PushSafepointRegisters();
712  __ blr();
713}
714
715
716void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
717  __ PopSafepointRegisters();
718  __ blr();
719}
720
721
722void MathPowStub::Generate(MacroAssembler* masm) {
723  const Register exponent = MathPowTaggedDescriptor::exponent();
724  DCHECK(exponent.is(r5));
725  const DoubleRegister double_base = d1;
726  const DoubleRegister double_exponent = d2;
727  const DoubleRegister double_result = d3;
728  const DoubleRegister double_scratch = d0;
729  const Register scratch = r11;
730  const Register scratch2 = r10;
731
732  Label call_runtime, done, int_exponent;
733  if (exponent_type() == TAGGED) {
734    // Base is already in double_base.
735    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
736
737    __ lfd(double_exponent,
738           FieldMemOperand(exponent, HeapNumber::kValueOffset));
739  }
740
741  if (exponent_type() != INTEGER) {
742    // Detect integer exponents stored as double.
743    __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
744                             double_scratch);
745    __ beq(&int_exponent);
746
747    __ mflr(r0);
748    __ push(r0);
749    {
750      AllowExternalCallThatCantCauseGC scope(masm);
751      __ PrepareCallCFunction(0, 2, scratch);
752      __ MovToFloatParameters(double_base, double_exponent);
753      __ CallCFunction(
754          ExternalReference::power_double_double_function(isolate()), 0, 2);
755    }
756    __ pop(r0);
757    __ mtlr(r0);
758    __ MovFromFloatResult(double_result);
759    __ b(&done);
760  }
761
762  // Calculate power with integer exponent.
763  __ bind(&int_exponent);
764
765  // Get two copies of exponent in the registers scratch and exponent.
766  if (exponent_type() == INTEGER) {
767    __ mr(scratch, exponent);
768  } else {
769    // Exponent has previously been stored into scratch as untagged integer.
770    __ mr(exponent, scratch);
771  }
772  __ fmr(double_scratch, double_base);  // Back up base.
773  __ li(scratch2, Operand(1));
774  __ ConvertIntToDouble(scratch2, double_result);
775
776  // Get absolute value of exponent.
777  __ cmpi(scratch, Operand::Zero());
778  if (CpuFeatures::IsSupported(ISELECT)) {
779    __ neg(scratch2, scratch);
780    __ isel(lt, scratch, scratch2, scratch);
781  } else {
782    Label positive_exponent;
783    __ bge(&positive_exponent);
784    __ neg(scratch, scratch);
785    __ bind(&positive_exponent);
786  }
787
788  Label while_true, no_carry, loop_end;
789  __ bind(&while_true);
790  __ andi(scratch2, scratch, Operand(1));
791  __ beq(&no_carry, cr0);
792  __ fmul(double_result, double_result, double_scratch);
793  __ bind(&no_carry);
794  __ ShiftRightImm(scratch, scratch, Operand(1), SetRC);
795  __ beq(&loop_end, cr0);
796  __ fmul(double_scratch, double_scratch, double_scratch);
797  __ b(&while_true);
798  __ bind(&loop_end);
799
800  __ cmpi(exponent, Operand::Zero());
801  __ bge(&done);
802
803  __ li(scratch2, Operand(1));
804  __ ConvertIntToDouble(scratch2, double_scratch);
805  __ fdiv(double_result, double_scratch, double_result);
806  // Test whether result is zero.  Bail out to check for subnormal result.
807  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
808  __ fcmpu(double_result, kDoubleRegZero);
809  __ bne(&done);
810  // double_exponent may not containe the exponent value if the input was a
811  // smi.  We set it with exponent value before bailing out.
812  __ ConvertIntToDouble(exponent, double_exponent);
813
814  // Returning or bailing out.
815  __ mflr(r0);
816  __ push(r0);
817  {
818    AllowExternalCallThatCantCauseGC scope(masm);
819    __ PrepareCallCFunction(0, 2, scratch);
820    __ MovToFloatParameters(double_base, double_exponent);
821    __ CallCFunction(
822        ExternalReference::power_double_double_function(isolate()), 0, 2);
823  }
824  __ pop(r0);
825  __ mtlr(r0);
826  __ MovFromFloatResult(double_result);
827
828  __ bind(&done);
829  __ Ret();
830}
831
832
833bool CEntryStub::NeedsImmovableCode() { return true; }
834
835
836void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
837  CEntryStub::GenerateAheadOfTime(isolate);
838  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
839  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
840  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
841  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
842  CreateWeakCellStub::GenerateAheadOfTime(isolate);
843  BinaryOpICStub::GenerateAheadOfTime(isolate);
844  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
845  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
846  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
847  StoreFastElementStub::GenerateAheadOfTime(isolate);
848}
849
850
851void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
852  StoreRegistersStateStub stub(isolate);
853  stub.GetCode();
854}
855
856
857void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
858  RestoreRegistersStateStub stub(isolate);
859  stub.GetCode();
860}
861
862
863void CodeStub::GenerateFPStubs(Isolate* isolate) {
864  // Generate if not already in cache.
865  SaveFPRegsMode mode = kSaveFPRegs;
866  CEntryStub(isolate, 1, mode).GetCode();
867  StoreBufferOverflowStub(isolate, mode).GetCode();
868}
869
870
871void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
872  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
873  stub.GetCode();
874}
875
876
877void CEntryStub::Generate(MacroAssembler* masm) {
878  // Called from JavaScript; parameters are on stack as if calling JS function.
879  // r3: number of arguments including receiver
880  // r4: pointer to builtin function
881  // fp: frame pointer  (restored after C call)
882  // sp: stack pointer  (restored as callee's sp after C call)
883  // cp: current context  (C callee-saved)
884  //
885  // If argv_in_register():
886  // r5: pointer to the first argument
887  ProfileEntryHookStub::MaybeCallEntryHook(masm);
888
889  __ mr(r15, r4);
890
891  if (argv_in_register()) {
892    // Move argv into the correct register.
893    __ mr(r4, r5);
894  } else {
895    // Compute the argv pointer.
896    __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
897    __ add(r4, r4, sp);
898    __ subi(r4, r4, Operand(kPointerSize));
899  }
900
901  // Enter the exit frame that transitions from JavaScript to C++.
902  FrameScope scope(masm, StackFrame::MANUAL);
903
904  // Need at least one extra slot for return address location.
905  int arg_stack_space = 1;
906
907  // Pass buffer for return value on stack if necessary
908  bool needs_return_buffer =
909      result_size() > 2 ||
910      (result_size() == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS);
911  if (needs_return_buffer) {
912    arg_stack_space += result_size();
913  }
914
915  __ EnterExitFrame(save_doubles(), arg_stack_space, is_builtin_exit()
916                                           ? StackFrame::BUILTIN_EXIT
917                                           : StackFrame::EXIT);
918
919  // Store a copy of argc in callee-saved registers for later.
920  __ mr(r14, r3);
921
922  // r3, r14: number of arguments including receiver  (C callee-saved)
923  // r4: pointer to the first argument
924  // r15: pointer to builtin function  (C callee-saved)
925
926  // Result returned in registers or stack, depending on result size and ABI.
927
928  Register isolate_reg = r5;
929  if (needs_return_buffer) {
930    // The return value is a non-scalar value.
931    // Use frame storage reserved by calling function to pass return
932    // buffer as implicit first argument.
933    __ mr(r5, r4);
934    __ mr(r4, r3);
935    __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
936    isolate_reg = r6;
937  }
938
939  // Call C built-in.
940  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
941
942  Register target = r15;
943  if (ABI_USES_FUNCTION_DESCRIPTORS) {
944    // AIX/PPC64BE Linux use a function descriptor.
945    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
946    __ LoadP(ip, MemOperand(r15, 0));  // Instruction address
947    target = ip;
948  } else if (ABI_CALL_VIA_IP) {
949    __ Move(ip, r15);
950    target = ip;
951  }
952
953  // To let the GC traverse the return address of the exit frames, we need to
954  // know where the return address is. The CEntryStub is unmovable, so
955  // we can store the address on the stack to be able to find it again and
956  // we never have to restore it, because it will not change.
957  Label after_call;
958  __ mov_label_addr(r0, &after_call);
959  __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
960  __ Call(target);
961  __ bind(&after_call);
962
963  // If return value is on the stack, pop it to registers.
964  if (needs_return_buffer) {
965    if (result_size() > 2) __ LoadP(r5, MemOperand(r3, 2 * kPointerSize));
966    __ LoadP(r4, MemOperand(r3, kPointerSize));
967    __ LoadP(r3, MemOperand(r3));
968  }
969
970  // Check result for exception sentinel.
971  Label exception_returned;
972  __ CompareRoot(r3, Heap::kExceptionRootIndex);
973  __ beq(&exception_returned);
974
975  // Check that there is no pending exception, otherwise we
976  // should have returned the exception sentinel.
977  if (FLAG_debug_code) {
978    Label okay;
979    ExternalReference pending_exception_address(
980        Isolate::kPendingExceptionAddress, isolate());
981
982    __ mov(r6, Operand(pending_exception_address));
983    __ LoadP(r6, MemOperand(r6));
984    __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
985    // Cannot use check here as it attempts to generate call into runtime.
986    __ beq(&okay);
987    __ stop("Unexpected pending exception");
988    __ bind(&okay);
989  }
990
991  // Exit C frame and return.
992  // r3:r4: result
993  // sp: stack pointer
994  // fp: frame pointer
995  Register argc;
996  if (argv_in_register()) {
997    // We don't want to pop arguments so set argc to no_reg.
998    argc = no_reg;
999  } else {
1000    // r14: still holds argc (callee-saved).
1001    argc = r14;
1002  }
1003  __ LeaveExitFrame(save_doubles(), argc, true);
1004  __ blr();
1005
1006  // Handling of exception.
1007  __ bind(&exception_returned);
1008
1009  ExternalReference pending_handler_context_address(
1010      Isolate::kPendingHandlerContextAddress, isolate());
1011  ExternalReference pending_handler_code_address(
1012      Isolate::kPendingHandlerCodeAddress, isolate());
1013  ExternalReference pending_handler_offset_address(
1014      Isolate::kPendingHandlerOffsetAddress, isolate());
1015  ExternalReference pending_handler_fp_address(
1016      Isolate::kPendingHandlerFPAddress, isolate());
1017  ExternalReference pending_handler_sp_address(
1018      Isolate::kPendingHandlerSPAddress, isolate());
1019
1020  // Ask the runtime for help to determine the handler. This will set r3 to
1021  // contain the current pending exception, don't clobber it.
1022  ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
1023                                 isolate());
1024  {
1025    FrameScope scope(masm, StackFrame::MANUAL);
1026    __ PrepareCallCFunction(3, 0, r3);
1027    __ li(r3, Operand::Zero());
1028    __ li(r4, Operand::Zero());
1029    __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
1030    __ CallCFunction(find_handler, 3);
1031  }
1032
1033  // Retrieve the handler context, SP and FP.
1034  __ mov(cp, Operand(pending_handler_context_address));
1035  __ LoadP(cp, MemOperand(cp));
1036  __ mov(sp, Operand(pending_handler_sp_address));
1037  __ LoadP(sp, MemOperand(sp));
1038  __ mov(fp, Operand(pending_handler_fp_address));
1039  __ LoadP(fp, MemOperand(fp));
1040
1041  // If the handler is a JS frame, restore the context to the frame. Note that
1042  // the context will be set to (cp == 0) for non-JS frames.
1043  Label skip;
1044  __ cmpi(cp, Operand::Zero());
1045  __ beq(&skip);
1046  __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1047  __ bind(&skip);
1048
1049  // Compute the handler entry address and jump to it.
1050  ConstantPoolUnavailableScope constant_pool_unavailable(masm);
1051  __ mov(r4, Operand(pending_handler_code_address));
1052  __ LoadP(r4, MemOperand(r4));
1053  __ mov(r5, Operand(pending_handler_offset_address));
1054  __ LoadP(r5, MemOperand(r5));
1055  __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start
1056  if (FLAG_enable_embedded_constant_pool) {
1057    __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r4);
1058  }
1059  __ add(ip, r4, r5);
1060  __ Jump(ip);
1061}
1062
1063
1064void JSEntryStub::Generate(MacroAssembler* masm) {
1065  // r3: code entry
1066  // r4: function
1067  // r5: receiver
1068  // r6: argc
1069  // [sp+0]: argv
1070
1071  Label invoke, handler_entry, exit;
1072
1073// Called from C
1074  __ function_descriptor();
1075
1076  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1077
1078  // PPC LINUX ABI:
1079  // preserve LR in pre-reserved slot in caller's frame
1080  __ mflr(r0);
1081  __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
1082
1083  // Save callee saved registers on the stack.
1084  __ MultiPush(kCalleeSaved);
1085
1086  // Save callee-saved double registers.
1087  __ MultiPushDoubles(kCalleeSavedDoubles);
1088  // Set up the reserved register for 0.0.
1089  __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0);
1090
1091  // Push a frame with special values setup to mark it as an entry frame.
1092  // r3: code entry
1093  // r4: function
1094  // r5: receiver
1095  // r6: argc
1096  // r7: argv
1097  __ li(r0, Operand(-1));  // Push a bad frame pointer to fail if it is used.
1098  __ push(r0);
1099  if (FLAG_enable_embedded_constant_pool) {
1100    __ li(kConstantPoolRegister, Operand::Zero());
1101    __ push(kConstantPoolRegister);
1102  }
1103  StackFrame::Type marker = type();
1104  __ mov(r0, Operand(StackFrame::TypeToMarker(marker)));
1105  __ push(r0);
1106  __ push(r0);
1107  // Save copies of the top frame descriptor on the stack.
1108  __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1109  __ LoadP(r0, MemOperand(r8));
1110  __ push(r0);
1111
1112  // Set up frame pointer for the frame to be pushed.
1113  __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1114
1115  // If this is the outermost JS call, set js_entry_sp value.
1116  Label non_outermost_js;
1117  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1118  __ mov(r8, Operand(ExternalReference(js_entry_sp)));
1119  __ LoadP(r9, MemOperand(r8));
1120  __ cmpi(r9, Operand::Zero());
1121  __ bne(&non_outermost_js);
1122  __ StoreP(fp, MemOperand(r8));
1123  __ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
1124  Label cont;
1125  __ b(&cont);
1126  __ bind(&non_outermost_js);
1127  __ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
1128  __ bind(&cont);
1129  __ push(ip);  // frame-type
1130
1131  // Jump to a faked try block that does the invoke, with a faked catch
1132  // block that sets the pending exception.
1133  __ b(&invoke);
1134
1135  __ bind(&handler_entry);
1136  handler_offset_ = handler_entry.pos();
1137  // Caught exception: Store result (exception) in the pending exception
1138  // field in the JSEnv and return a failure sentinel.  Coming in here the
1139  // fp will be invalid because the PushStackHandler below sets it to 0 to
1140  // signal the existence of the JSEntry frame.
1141  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1142                                       isolate())));
1143
1144  __ StoreP(r3, MemOperand(ip));
1145  __ LoadRoot(r3, Heap::kExceptionRootIndex);
1146  __ b(&exit);
1147
1148  // Invoke: Link this frame into the handler chain.
1149  __ bind(&invoke);
1150  // Must preserve r3-r7.
1151  __ PushStackHandler();
1152  // If an exception not caught by another handler occurs, this handler
1153  // returns control to the code after the b(&invoke) above, which
1154  // restores all kCalleeSaved registers (including cp and fp) to their
1155  // saved values before returning a failure to C.
1156
1157  // Invoke the function by calling through JS entry trampoline builtin.
1158  // Notice that we cannot store a reference to the trampoline code directly in
1159  // this stub, because runtime stubs are not traversed when doing GC.
1160
1161  // Expected registers by Builtins::JSEntryTrampoline
1162  // r3: code entry
1163  // r4: function
1164  // r5: receiver
1165  // r6: argc
1166  // r7: argv
1167  if (type() == StackFrame::ENTRY_CONSTRUCT) {
1168    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1169                                      isolate());
1170    __ mov(ip, Operand(construct_entry));
1171  } else {
1172    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
1173    __ mov(ip, Operand(entry));
1174  }
1175  __ LoadP(ip, MemOperand(ip));  // deref address
1176
1177  // Branch and link to JSEntryTrampoline.
1178  // the address points to the start of the code object, skip the header
1179  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1180  __ mtctr(ip);
1181  __ bctrl();  // make the call
1182
1183  // Unlink this frame from the handler chain.
1184  __ PopStackHandler();
1185
1186  __ bind(&exit);  // r3 holds result
1187  // Check if the current stack frame is marked as the outermost JS frame.
1188  Label non_outermost_js_2;
1189  __ pop(r8);
1190  __ cmpi(r8, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
1191  __ bne(&non_outermost_js_2);
1192  __ mov(r9, Operand::Zero());
1193  __ mov(r8, Operand(ExternalReference(js_entry_sp)));
1194  __ StoreP(r9, MemOperand(r8));
1195  __ bind(&non_outermost_js_2);
1196
1197  // Restore the top frame descriptors from the stack.
1198  __ pop(r6);
1199  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1200  __ StoreP(r6, MemOperand(ip));
1201
1202  // Reset the stack to the callee saved registers.
1203  __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1204
1205  // Restore callee-saved double registers.
1206  __ MultiPopDoubles(kCalleeSavedDoubles);
1207
1208  // Restore callee-saved registers.
1209  __ MultiPop(kCalleeSaved);
1210
1211  // Return
1212  __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
1213  __ mtlr(r0);
1214  __ blr();
1215}
1216
1217void RegExpExecStub::Generate(MacroAssembler* masm) {
1218// Just jump directly to runtime if native RegExp is not selected at compile
1219// time or if regexp entry in generated code is turned off runtime switch or
1220// at compilation.
1221#ifdef V8_INTERPRETED_REGEXP
1222  __ TailCallRuntime(Runtime::kRegExpExec);
1223#else  // V8_INTERPRETED_REGEXP
1224
1225  // Stack frame on entry.
1226  //  sp[0]: last_match_info (expected JSArray)
1227  //  sp[4]: previous index
1228  //  sp[8]: subject string
1229  //  sp[12]: JSRegExp object
1230
1231  const int kLastMatchInfoOffset = 0 * kPointerSize;
1232  const int kPreviousIndexOffset = 1 * kPointerSize;
1233  const int kSubjectOffset = 2 * kPointerSize;
1234  const int kJSRegExpOffset = 3 * kPointerSize;
1235
1236  Label runtime, br_over, encoding_type_UC16;
1237
1238  // Allocation of registers for this function. These are in callee save
1239  // registers and will be preserved by the call to the native RegExp code, as
1240  // this code is called using the normal C calling convention. When calling
1241  // directly from generated code the native RegExp code will not do a GC and
1242  // therefore the content of these registers are safe to use after the call.
1243  Register subject = r14;
1244  Register regexp_data = r15;
1245  Register last_match_info_elements = r16;
1246  Register code = r17;
1247
1248  // Ensure register assigments are consistent with callee save masks
1249  DCHECK(subject.bit() & kCalleeSaved);
1250  DCHECK(regexp_data.bit() & kCalleeSaved);
1251  DCHECK(last_match_info_elements.bit() & kCalleeSaved);
1252  DCHECK(code.bit() & kCalleeSaved);
1253
1254  // Ensure that a RegExp stack is allocated.
1255  ExternalReference address_of_regexp_stack_memory_address =
1256      ExternalReference::address_of_regexp_stack_memory_address(isolate());
1257  ExternalReference address_of_regexp_stack_memory_size =
1258      ExternalReference::address_of_regexp_stack_memory_size(isolate());
1259  __ mov(r3, Operand(address_of_regexp_stack_memory_size));
1260  __ LoadP(r3, MemOperand(r3, 0));
1261  __ cmpi(r3, Operand::Zero());
1262  __ beq(&runtime);
1263
1264  // Check that the first argument is a JSRegExp object.
1265  __ LoadP(r3, MemOperand(sp, kJSRegExpOffset));
1266  __ JumpIfSmi(r3, &runtime);
1267  __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
1268  __ bne(&runtime);
1269
1270  // Check that the RegExp has been compiled (data contains a fixed array).
1271  __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset));
1272  if (FLAG_debug_code) {
1273    __ TestIfSmi(regexp_data, r0);
1274    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
1275    __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE);
1276    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
1277  }
1278
1279  // regexp_data: RegExp data (FixedArray)
1280  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1281  __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
1282  // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
1283  __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0);
1284  __ bne(&runtime);
1285
1286  // regexp_data: RegExp data (FixedArray)
1287  // Check that the number of captures fit in the static offsets vector buffer.
1288  __ LoadP(r5,
1289           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1290  // Check (number_of_captures + 1) * 2 <= offsets vector size
1291  // Or          number_of_captures * 2 <= offsets vector size - 2
1292  // SmiToShortArrayOffset accomplishes the multiplication by 2 and
1293  // SmiUntag (which is a nop for 32-bit).
1294  __ SmiToShortArrayOffset(r5, r5);
1295  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
1296  __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
1297  __ bgt(&runtime);
1298
1299  // Reset offset for possibly sliced string.
1300  __ li(r11, Operand::Zero());
1301  __ LoadP(subject, MemOperand(sp, kSubjectOffset));
1302  __ JumpIfSmi(subject, &runtime);
1303  __ mr(r6, subject);  // Make a copy of the original subject string.
1304  // subject: subject string
1305  // r6: subject string
1306  // regexp_data: RegExp data (FixedArray)
1307  // Handle subject string according to its encoding and representation:
1308  // (1) Sequential string?  If yes, go to (4).
1309  // (2) Sequential or cons?  If not, go to (5).
1310  // (3) Cons string.  If the string is flat, replace subject with first string
1311  //     and go to (1). Otherwise bail out to runtime.
1312  // (4) Sequential string.  Load regexp code according to encoding.
1313  // (E) Carry on.
1314  /// [...]
1315
1316  // Deferred code at the end of the stub:
1317  // (5) Long external string?  If not, go to (7).
1318  // (6) External string.  Make it, offset-wise, look like a sequential string.
1319  //     Go to (4).
1320  // (7) Short external string or not a string?  If yes, bail out to runtime.
1321  // (8) Sliced or thin string.  Replace subject with parent.  Go to (1).
1322
1323  Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
1324      not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
1325
1326  __ bind(&check_underlying);
1327  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
1328  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
1329
1330  // (1) Sequential string?  If yes, go to (4).
1331
1332  STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
1333                 kShortExternalStringMask) == 0xa7);
1334  __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
1335                          kShortExternalStringMask));
1336  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
1337  __ beq(&seq_string, cr0);  // Go to (4).
1338
1339  // (2) Sequential or cons? If not, go to (5).
1340  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
1341  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
1342  STATIC_ASSERT(kThinStringTag > kExternalStringTag);
1343  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
1344  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
1345  STATIC_ASSERT(kExternalStringTag < 0xffffu);
1346  __ cmpi(r4, Operand(kExternalStringTag));
1347  __ bge(&not_seq_nor_cons);  // Go to (5).
1348
1349  // (3) Cons string.  Check that it's flat.
1350  // Replace subject with first string and reload instance type.
1351  __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset));
1352  __ CompareRoot(r3, Heap::kempty_stringRootIndex);
1353  __ bne(&runtime);
1354  __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
1355  __ b(&check_underlying);
1356
1357  // (4) Sequential string.  Load regexp code according to encoding.
1358  __ bind(&seq_string);
1359  // subject: sequential subject string (or look-alike, external string)
1360  // r6: original subject string
1361  // Load previous index and check range before r6 is overwritten.  We have to
1362  // use r6 instead of subject here because subject might have been only made
1363  // to look like a sequential string when it actually is an external string.
1364  __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
1365  __ JumpIfNotSmi(r4, &runtime);
1366  __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset));
1367  __ cmpl(r6, r4);
1368  __ ble(&runtime);
1369  __ SmiUntag(r4);
1370
1371  STATIC_ASSERT(8 == kOneByteStringTag);
1372  STATIC_ASSERT(kTwoByteStringTag == 0);
1373  STATIC_ASSERT(kStringEncodingMask == 8);
1374  __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
1375  __ beq(&encoding_type_UC16, cr0);
1376  __ LoadP(code,
1377           FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
1378  __ b(&br_over);
1379  __ bind(&encoding_type_UC16);
1380  __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
1381  __ bind(&br_over);
1382
1383  // (E) Carry on.  String handling is done.
1384  // code: irregexp code
1385  // Check that the irregexp code has been generated for the actual string
1386  // encoding. If it has, the field contains a code object otherwise it contains
1387  // a smi (code flushing support).
1388  __ JumpIfSmi(code, &runtime);
1389
1390  // r4: previous index
1391  // r6: encoding of subject string (1 if one_byte, 0 if two_byte);
1392  // code: Address of generated regexp code
1393  // subject: Subject string
1394  // regexp_data: RegExp data (FixedArray)
1395  // All checks done. Now push arguments for native regexp code.
1396  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5);
1397
1398  // Isolates: note we add an additional parameter here (isolate pointer).
1399  const int kRegExpExecuteArguments = 10;
1400  const int kParameterRegisters = 8;
1401  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
1402
1403  // Stack pointer now points to cell where return address is to be written.
1404  // Arguments are before that on the stack or in registers.
1405
1406  // Argument 10 (in stack parameter area): Pass current isolate address.
1407  __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
1408  __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
1409
1410  // Argument 9 is a dummy that reserves the space used for
1411  // the return address added by the ExitFrame in native calls.
1412
1413  // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
1414  __ li(r10, Operand(1));
1415
1416  // Argument 7 (r9): Start (high end) of backtracking stack memory area.
1417  __ mov(r3, Operand(address_of_regexp_stack_memory_address));
1418  __ LoadP(r3, MemOperand(r3, 0));
1419  __ mov(r5, Operand(address_of_regexp_stack_memory_size));
1420  __ LoadP(r5, MemOperand(r5, 0));
1421  __ add(r9, r3, r5);
1422
1423  // Argument 6 (r8): Set the number of capture registers to zero to force
1424  // global egexps to behave as non-global.  This does not affect non-global
1425  // regexps.
1426  __ li(r8, Operand::Zero());
1427
1428  // Argument 5 (r7): static offsets vector buffer.
1429  __ mov(
1430      r7,
1431      Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
1432
1433  // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data
1434  // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
1435  __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
1436  __ xori(r6, r6, Operand(1));
1437  // Load the length from the original subject string from the previous stack
1438  // frame. Therefore we have to use fp, which points exactly to two pointer
1439  // sizes below the previous sp. (Because creating a new stack frame pushes
1440  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
1441  __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
1442  // If slice offset is not 0, load the length from the original sliced string.
1443  // Argument 4, r6: End of string data
1444  // Argument 3, r5: Start of string data
1445  // Prepare start and end index of the input.
1446  __ ShiftLeft_(r11, r11, r6);
1447  __ add(r11, r18, r11);
1448  __ ShiftLeft_(r5, r4, r6);
1449  __ add(r5, r11, r5);
1450
1451  __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset));
1452  __ SmiUntag(r18);
1453  __ ShiftLeft_(r6, r18, r6);
1454  __ add(r6, r11, r6);
1455
1456  // Argument 2 (r4): Previous index.
1457  // Already there
1458
1459  // Argument 1 (r3): Subject string.
1460  __ mr(r3, subject);
1461
1462  // Locate the code entry and call it.
1463  __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
1464
1465  DirectCEntryStub stub(isolate());
1466  stub.GenerateCall(masm, code);
1467
1468  __ LeaveExitFrame(false, no_reg, true);
1469
1470  // r3: result (int32)
1471  // subject: subject string (callee saved)
1472  // regexp_data: RegExp data (callee saved)
1473  // last_match_info_elements: Last match info elements (callee saved)
1474  // Check the result.
1475  Label success;
1476  __ cmpwi(r3, Operand(1));
1477  // We expect exactly one result since we force the called regexp to behave
1478  // as non-global.
1479  __ beq(&success);
1480  Label failure;
1481  __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
1482  __ beq(&failure);
1483  __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
1484  // If not exception it can only be retry. Handle that in the runtime system.
1485  __ bne(&runtime);
1486  // Result must now be exception. If there is no pending exception already a
1487  // stack overflow (on the backtrack stack) was detected in RegExp code but
1488  // haven't created the exception yet. Handle that in the runtime system.
1489  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1490  __ mov(r4, Operand(isolate()->factory()->the_hole_value()));
1491  __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1492                                       isolate())));
1493  __ LoadP(r3, MemOperand(r5, 0));
1494  __ cmp(r3, r4);
1495  __ beq(&runtime);
1496
1497  // For exception, throw the exception again.
1498  __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1499
1500  __ bind(&failure);
1501  // For failure and exception return null.
1502  __ mov(r3, Operand(isolate()->factory()->null_value()));
1503  __ addi(sp, sp, Operand(4 * kPointerSize));
1504  __ Ret();
1505
1506  // Process the result from the native regexp code.
1507  __ bind(&success);
1508  __ LoadP(r4,
1509           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
1510  // Calculate number of capture registers (number_of_captures + 1) * 2.
1511  // SmiToShortArrayOffset accomplishes the multiplication by 2 and
1512  // SmiUntag (which is a nop for 32-bit).
1513  __ SmiToShortArrayOffset(r4, r4);
1514  __ addi(r4, r4, Operand(2));
1515
1516  // Check that the last match info is a FixedArray.
1517  __ LoadP(last_match_info_elements, MemOperand(sp, kLastMatchInfoOffset));
1518  __ JumpIfSmi(last_match_info_elements, &runtime);
1519  // Check that the object has fast elements.
1520  __ LoadP(r3,
1521           FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
1522  __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
1523  __ bne(&runtime);
1524  // Check that the last match info has space for the capture registers and the
1525  // additional information.
1526  __ LoadP(
1527      r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
1528  __ addi(r5, r4, Operand(RegExpMatchInfo::kLastMatchOverhead));
1529  __ SmiUntag(r0, r3);
1530  __ cmp(r5, r0);
1531  __ bgt(&runtime);
1532
1533  // r4: number of capture registers
1534  // subject: subject string
1535  // Store the capture count.
1536  __ SmiTag(r5, r4);
1537  __ StoreP(r5, FieldMemOperand(last_match_info_elements,
1538                                RegExpMatchInfo::kNumberOfCapturesOffset),
1539            r0);
1540  // Store last subject and last input.
1541  __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1542                                     RegExpMatchInfo::kLastSubjectOffset),
1543            r0);
1544  __ mr(r5, subject);
1545  __ RecordWriteField(last_match_info_elements,
1546                      RegExpMatchInfo::kLastSubjectOffset, subject, r10,
1547                      kLRHasNotBeenSaved, kDontSaveFPRegs);
1548  __ mr(subject, r5);
1549  __ StoreP(subject, FieldMemOperand(last_match_info_elements,
1550                                     RegExpMatchInfo::kLastInputOffset),
1551            r0);
1552  __ RecordWriteField(last_match_info_elements,
1553                      RegExpMatchInfo::kLastInputOffset, subject, r10,
1554                      kLRHasNotBeenSaved, kDontSaveFPRegs);
1555
1556  // Get the static offsets vector filled by the native regexp code.
1557  ExternalReference address_of_static_offsets_vector =
1558      ExternalReference::address_of_static_offsets_vector(isolate());
1559  __ mov(r5, Operand(address_of_static_offsets_vector));
1560
1561  // r4: number of capture registers
1562  // r5: offsets vector
1563  Label next_capture;
1564  // Capture register counter starts from number of capture registers and
1565  // counts down until wrapping after zero.
1566  __ addi(r3, last_match_info_elements,
1567          Operand(RegExpMatchInfo::kFirstCaptureOffset - kHeapObjectTag -
1568                  kPointerSize));
1569  __ addi(r5, r5, Operand(-kIntSize));  // bias down for lwzu
1570  __ mtctr(r4);
1571  __ bind(&next_capture);
1572  // Read the value from the static offsets vector buffer.
1573  __ lwzu(r6, MemOperand(r5, kIntSize));
1574  // Store the smi value in the last match info.
1575  __ SmiTag(r6);
1576  __ StorePU(r6, MemOperand(r3, kPointerSize));
1577  __ bdnz(&next_capture);
1578
1579  // Return last match info.
1580  __ mr(r3, last_match_info_elements);
1581  __ addi(sp, sp, Operand(4 * kPointerSize));
1582  __ Ret();
1583
1584  // Do the runtime call to execute the regexp.
1585  __ bind(&runtime);
1586  __ TailCallRuntime(Runtime::kRegExpExec);
1587
1588  // Deferred code for string handling.
1589  // (5) Long external string? If not, go to (7).
1590  __ bind(&not_seq_nor_cons);
1591  // Compare flags are still set.
1592  __ bgt(&not_long_external);  // Go to (7).
1593
1594  // (6) External string.  Make it, offset-wise, look like a sequential string.
1595  __ bind(&external_string);
1596  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
1597  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
1598  if (FLAG_debug_code) {
1599    // Assert that we do not have a cons or slice (indirect strings) here.
1600    // Sequential strings have already been ruled out.
1601    STATIC_ASSERT(kIsIndirectStringMask == 1);
1602    __ andi(r0, r3, Operand(kIsIndirectStringMask));
1603    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
1604  }
1605  __ LoadP(subject,
1606           FieldMemOperand(subject, ExternalString::kResourceDataOffset));
1607  // Move the pointer so that offset-wise, it looks like a sequential string.
1608  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1609  __ subi(subject, subject,
1610          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1611  __ b(&seq_string);  // Go to (4).
1612
1613  // (7) Short external string or not a string?  If yes, bail out to runtime.
1614  __ bind(&not_long_external);
1615  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1616  __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
1617  __ bne(&runtime, cr0);
1618
1619  // (8) Sliced or thin string.  Replace subject with parent.  Go to (4).
1620  Label thin_string;
1621  __ cmpi(r4, Operand(kThinStringTag));
1622  __ beq(&thin_string);
1623  // Load offset into r11 and replace subject string with parent.
1624  __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
1625  __ SmiUntag(r11);
1626  __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
1627  __ b(&check_underlying);  // Go to (4).
1628
1629  __ bind(&thin_string);
1630  __ LoadP(subject, FieldMemOperand(subject, ThinString::kActualOffset));
1631  __ b(&check_underlying);  // Go to (4).
1632#endif  // V8_INTERPRETED_REGEXP
1633}
1634
1635
1636static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1637  // r3 : number of arguments to the construct function
1638  // r4 : the function to call
1639  // r5 : feedback vector
1640  // r6 : slot in feedback vector (Smi)
1641  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1642
1643  // Number-of-arguments register must be smi-tagged to call out.
1644  __ SmiTag(r3);
1645  __ Push(r6, r5, r4, r3);
1646  __ Push(cp);
1647
1648  __ CallStub(stub);
1649
1650  __ Pop(cp);
1651  __ Pop(r6, r5, r4, r3);
1652  __ SmiUntag(r3);
1653}
1654
1655
1656static void GenerateRecordCallTarget(MacroAssembler* masm) {
1657  // Cache the called function in a feedback vector slot.  Cache states
1658  // are uninitialized, monomorphic (indicated by a JSFunction), and
1659  // megamorphic.
1660  // r3 : number of arguments to the construct function
1661  // r4 : the function to call
1662  // r5 : feedback vector
1663  // r6 : slot in feedback vector (Smi)
1664  Label initialize, done, miss, megamorphic, not_array_function;
1665
1666  DCHECK_EQ(*FeedbackVector::MegamorphicSentinel(masm->isolate()),
1667            masm->isolate()->heap()->megamorphic_symbol());
1668  DCHECK_EQ(*FeedbackVector::UninitializedSentinel(masm->isolate()),
1669            masm->isolate()->heap()->uninitialized_symbol());
1670
1671  const int count_offset = FixedArray::kHeaderSize + kPointerSize;
1672
1673  // Load the cache state into r8.
1674  __ SmiToPtrArrayOffset(r8, r6);
1675  __ add(r8, r5, r8);
1676  __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize));
1677
1678  // A monomorphic cache hit or an already megamorphic state: invoke the
1679  // function without changing the state.
1680  // We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at
1681  // this position in a symbol (see static asserts in feedback-vector.h).
1682  Label check_allocation_site;
1683  Register feedback_map = r9;
1684  Register weak_value = r10;
1685  __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset));
1686  __ cmp(r4, weak_value);
1687  __ beq(&done);
1688  __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex);
1689  __ beq(&done);
1690  __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
1691  __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
1692  __ bne(&check_allocation_site);
1693
1694  // If the weak cell is cleared, we have a new chance to become monomorphic.
1695  __ JumpIfSmi(weak_value, &initialize);
1696  __ b(&megamorphic);
1697
1698  __ bind(&check_allocation_site);
1699  // If we came here, we need to see if we are the array function.
1700  // If we didn't have a matching function, and we didn't find the megamorph
1701  // sentinel, then we have in the slot either some other function or an
1702  // AllocationSite.
1703  __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
1704  __ bne(&miss);
1705
1706  // Make sure the function is the Array() function
1707  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
1708  __ cmp(r4, r8);
1709  __ bne(&megamorphic);
1710  __ b(&done);
1711
1712  __ bind(&miss);
1713
1714  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
1715  // megamorphic.
1716  __ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex);
1717  __ beq(&initialize);
1718  // MegamorphicSentinel is an immortal immovable object (undefined) so no
1719  // write-barrier is needed.
1720  __ bind(&megamorphic);
1721  __ SmiToPtrArrayOffset(r8, r6);
1722  __ add(r8, r5, r8);
1723  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
1724  __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
1725  __ jmp(&done);
1726
1727  // An uninitialized cache is patched with the function
1728  __ bind(&initialize);
1729
1730  // Make sure the function is the Array() function.
1731  __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
1732  __ cmp(r4, r8);
1733  __ bne(&not_array_function);
1734
1735  // The target function is the Array constructor,
1736  // Create an AllocationSite if we don't already have it, store it in the
1737  // slot.
1738  CreateAllocationSiteStub create_stub(masm->isolate());
1739  CallStubInRecordCallTarget(masm, &create_stub);
1740  __ b(&done);
1741
1742  __ bind(&not_array_function);
1743
1744  CreateWeakCellStub weak_cell_stub(masm->isolate());
1745  CallStubInRecordCallTarget(masm, &weak_cell_stub);
1746
1747  __ bind(&done);
1748
1749  // Increment the call count for all function calls.
1750  __ SmiToPtrArrayOffset(r8, r6);
1751  __ add(r8, r5, r8);
1752
1753  __ LoadP(r7, FieldMemOperand(r8, count_offset));
1754  __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
1755  __ StoreP(r7, FieldMemOperand(r8, count_offset), r0);
1756}
1757
1758
1759void CallConstructStub::Generate(MacroAssembler* masm) {
1760  // r3 : number of arguments
1761  // r4 : the function to call
1762  // r5 : feedback vector
1763  // r6 : slot in feedback vector (Smi, for RecordCallTarget)
1764
1765  Label non_function;
1766  // Check that the function is not a smi.
1767  __ JumpIfSmi(r4, &non_function);
1768  // Check that the function is a JSFunction.
1769  __ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
1770  __ bne(&non_function);
1771
1772  GenerateRecordCallTarget(masm);
1773
1774  __ SmiToPtrArrayOffset(r8, r6);
1775  __ add(r8, r5, r8);
1776  // Put the AllocationSite from the feedback vector into r5, or undefined.
1777  __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
1778  __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
1779  __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
1780  if (CpuFeatures::IsSupported(ISELECT)) {
1781    __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
1782    __ isel(eq, r5, r5, r8);
1783  } else {
1784    Label feedback_register_initialized;
1785    __ beq(&feedback_register_initialized);
1786    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
1787    __ bind(&feedback_register_initialized);
1788  }
1789
1790  __ AssertUndefinedOrAllocationSite(r5, r8);
1791
1792  // Pass function as new target.
1793  __ mr(r6, r4);
1794
1795  // Tail call to the function-specific construct stub (still in the caller
1796  // context at this point).
1797  __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1798  __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
1799  __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
1800  __ JumpToJSEntry(ip);
1801
1802  __ bind(&non_function);
1803  __ mr(r6, r4);
1804  __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1805}
1806
1807
1808// StringCharCodeAtGenerator
1809void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
1810  // If the receiver is a smi trigger the non-string case.
1811  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
1812    __ JumpIfSmi(object_, receiver_not_string_);
1813
1814    // Fetch the instance type of the receiver into result register.
1815    __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1816    __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1817    // If the receiver is not a string trigger the non-string case.
1818    __ andi(r0, result_, Operand(kIsNotStringMask));
1819    __ bne(receiver_not_string_, cr0);
1820  }
1821
1822  // If the index is non-smi trigger the non-smi case.
1823  __ JumpIfNotSmi(index_, &index_not_smi_);
1824  __ bind(&got_smi_index_);
1825
1826  // Check for index out of range.
1827  __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
1828  __ cmpl(ip, index_);
1829  __ ble(index_out_of_range_);
1830
1831  __ SmiUntag(index_);
1832
1833  StringCharLoadGenerator::Generate(masm, object_, index_, result_,
1834                                    &call_runtime_);
1835
1836  __ SmiTag(result_);
1837  __ bind(&exit_);
1838}
1839
1840
1841void StringCharCodeAtGenerator::GenerateSlow(
1842    MacroAssembler* masm, EmbedMode embed_mode,
1843    const RuntimeCallHelper& call_helper) {
1844  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
1845
1846  // Index is not a smi.
1847  __ bind(&index_not_smi_);
1848  // If index is a heap number, try converting it to an integer.
1849  __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
1850              DONT_DO_SMI_CHECK);
1851  call_helper.BeforeCall(masm);
1852  if (embed_mode == PART_OF_IC_HANDLER) {
1853    __ Push(LoadWithVectorDescriptor::VectorRegister(),
1854            LoadWithVectorDescriptor::SlotRegister(), object_, index_);
1855  } else {
1856    // index_ is consumed by runtime conversion function.
1857    __ Push(object_, index_);
1858  }
1859  __ CallRuntime(Runtime::kNumberToSmi);
1860  // Save the conversion result before the pop instructions below
1861  // have a chance to overwrite it.
1862  __ Move(index_, r3);
1863  if (embed_mode == PART_OF_IC_HANDLER) {
1864    __ Pop(LoadWithVectorDescriptor::VectorRegister(),
1865           LoadWithVectorDescriptor::SlotRegister(), object_);
1866  } else {
1867    __ pop(object_);
1868  }
1869  // Reload the instance type.
1870  __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
1871  __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
1872  call_helper.AfterCall(masm);
1873  // If index is still not a smi, it must be out of range.
1874  __ JumpIfNotSmi(index_, index_out_of_range_);
1875  // Otherwise, return to the fast path.
1876  __ b(&got_smi_index_);
1877
1878  // Call runtime. We get here when the receiver is a string and the
1879  // index is a number, but the code of getting the actual character
1880  // is too complex (e.g., when the string needs to be flattened).
1881  __ bind(&call_runtime_);
1882  call_helper.BeforeCall(masm);
1883  __ SmiTag(index_);
1884  __ Push(object_, index_);
1885  __ CallRuntime(Runtime::kStringCharCodeAtRT);
1886  __ Move(result_, r3);
1887  call_helper.AfterCall(masm);
1888  __ b(&exit_);
1889
1890  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
1891}
1892
1893void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
1894                                                   Register left,
1895                                                   Register right,
1896                                                   Register scratch1,
1897                                                   Register scratch2) {
1898  Register length = scratch1;
1899
1900  // Compare lengths.
1901  Label strings_not_equal, check_zero_length;
1902  __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
1903  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
1904  __ cmp(length, scratch2);
1905  __ beq(&check_zero_length);
1906  __ bind(&strings_not_equal);
1907  __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
1908  __ Ret();
1909
1910  // Check if the length is zero.
1911  Label compare_chars;
1912  __ bind(&check_zero_length);
1913  STATIC_ASSERT(kSmiTag == 0);
1914  __ cmpi(length, Operand::Zero());
1915  __ bne(&compare_chars);
1916  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
1917  __ Ret();
1918
1919  // Compare characters.
1920  __ bind(&compare_chars);
1921  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
1922                                  &strings_not_equal);
1923
1924  // Characters are equal.
1925  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
1926  __ Ret();
1927}
1928
1929
1930void StringHelper::GenerateCompareFlatOneByteStrings(
1931    MacroAssembler* masm, Register left, Register right, Register scratch1,
1932    Register scratch2, Register scratch3) {
1933  Label result_not_equal, compare_lengths;
1934  // Find minimum length and length difference.
1935  __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
1936  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
1937  __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
1938  Register length_delta = scratch3;
1939  if (CpuFeatures::IsSupported(ISELECT)) {
1940    __ isel(gt, scratch1, scratch2, scratch1, cr0);
1941  } else {
1942    Label skip;
1943    __ ble(&skip, cr0);
1944    __ mr(scratch1, scratch2);
1945    __ bind(&skip);
1946  }
1947  Register min_length = scratch1;
1948  STATIC_ASSERT(kSmiTag == 0);
1949  __ cmpi(min_length, Operand::Zero());
1950  __ beq(&compare_lengths);
1951
1952  // Compare loop.
1953  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
1954                                  &result_not_equal);
1955
1956  // Compare lengths - strings up to min-length are equal.
1957  __ bind(&compare_lengths);
1958  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
1959  // Use length_delta as result if it's zero.
1960  __ mr(r3, length_delta);
1961  __ cmpi(r3, Operand::Zero());
1962  __ bind(&result_not_equal);
1963  // Conditionally update the result based either on length_delta or
1964  // the last comparion performed in the loop above.
1965  if (CpuFeatures::IsSupported(ISELECT)) {
1966    __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
1967    __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
1968    __ isel(eq, r3, r0, r4);
1969    __ isel(lt, r3, r5, r3);
1970    __ Ret();
1971  } else {
1972    Label less_equal, equal;
1973    __ ble(&less_equal);
1974    __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
1975    __ Ret();
1976    __ bind(&less_equal);
1977    __ beq(&equal);
1978    __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
1979    __ bind(&equal);
1980    __ Ret();
1981  }
1982}
1983
1984
1985void StringHelper::GenerateOneByteCharsCompareLoop(
1986    MacroAssembler* masm, Register left, Register right, Register length,
1987    Register scratch1, Label* chars_not_equal) {
1988  // Change index to run from -length to -1 by adding length to string
1989  // start. This means that loop ends when index reaches zero, which
1990  // doesn't need an additional compare.
1991  __ SmiUntag(length);
1992  __ addi(scratch1, length,
1993          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
1994  __ add(left, left, scratch1);
1995  __ add(right, right, scratch1);
1996  __ subfic(length, length, Operand::Zero());
1997  Register index = length;  // index = -length;
1998
1999  // Compare loop.
2000  Label loop;
2001  __ bind(&loop);
2002  __ lbzx(scratch1, MemOperand(left, index));
2003  __ lbzx(r0, MemOperand(right, index));
2004  __ cmp(scratch1, r0);
2005  __ bne(chars_not_equal);
2006  __ addi(index, index, Operand(1));
2007  __ cmpi(index, Operand::Zero());
2008  __ bne(&loop);
2009}
2010
2011
2012void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
2013  // ----------- S t a t e -------------
2014  //  -- r4    : left
2015  //  -- r3    : right
2016  //  -- lr    : return address
2017  // -----------------------------------
2018
2019  // Load r5 with the allocation site.  We stick an undefined dummy value here
2020  // and replace it with the real allocation site later when we instantiate this
2021  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
2022  __ Move(r5, isolate()->factory()->undefined_value());
2023
2024  // Make sure that we actually patched the allocation site.
2025  if (FLAG_debug_code) {
2026    __ TestIfSmi(r5, r0);
2027    __ Assert(ne, kExpectedAllocationSite, cr0);
2028    __ push(r5);
2029    __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
2030    __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
2031    __ cmp(r5, ip);
2032    __ pop(r5);
2033    __ Assert(eq, kExpectedAllocationSite);
2034  }
2035
2036  // Tail call into the stub that handles binary operations with allocation
2037  // sites.
2038  BinaryOpWithAllocationSiteStub stub(isolate(), state());
2039  __ TailCallStub(&stub);
2040}
2041
2042
2043void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
2044  DCHECK_EQ(CompareICState::BOOLEAN, state());
2045  Label miss;
2046
2047  __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2048  __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
2049  if (!Token::IsEqualityOp(op())) {
2050    __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
2051    __ AssertSmi(r4);
2052    __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
2053    __ AssertSmi(r3);
2054  }
2055  __ sub(r3, r4, r3);
2056  __ Ret();
2057
2058  __ bind(&miss);
2059  GenerateMiss(masm);
2060}
2061
2062
2063void CompareICStub::GenerateSmis(MacroAssembler* masm) {
2064  DCHECK(state() == CompareICState::SMI);
2065  Label miss;
2066  __ orx(r5, r4, r3);
2067  __ JumpIfNotSmi(r5, &miss);
2068
2069  if (GetCondition() == eq) {
2070    // For equality we do not care about the sign of the result.
2071    // __ sub(r3, r3, r4, SetCC);
2072    __ sub(r3, r3, r4);
2073  } else {
2074    // Untag before subtracting to avoid handling overflow.
2075    __ SmiUntag(r4);
2076    __ SmiUntag(r3);
2077    __ sub(r3, r4, r3);
2078  }
2079  __ Ret();
2080
2081  __ bind(&miss);
2082  GenerateMiss(masm);
2083}
2084
2085
2086void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
2087  DCHECK(state() == CompareICState::NUMBER);
2088
2089  Label generic_stub;
2090  Label unordered, maybe_undefined1, maybe_undefined2;
2091  Label miss;
2092  Label equal, less_than;
2093
2094  if (left() == CompareICState::SMI) {
2095    __ JumpIfNotSmi(r4, &miss);
2096  }
2097  if (right() == CompareICState::SMI) {
2098    __ JumpIfNotSmi(r3, &miss);
2099  }
2100
2101  // Inlining the double comparison and falling back to the general compare
2102  // stub if NaN is involved.
2103  // Load left and right operand.
2104  Label done, left, left_smi, right_smi;
2105  __ JumpIfSmi(r3, &right_smi);
2106  __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
2107              DONT_DO_SMI_CHECK);
2108  __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
2109  __ b(&left);
2110  __ bind(&right_smi);
2111  __ SmiToDouble(d1, r3);
2112
2113  __ bind(&left);
2114  __ JumpIfSmi(r4, &left_smi);
2115  __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
2116              DONT_DO_SMI_CHECK);
2117  __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset));
2118  __ b(&done);
2119  __ bind(&left_smi);
2120  __ SmiToDouble(d0, r4);
2121
2122  __ bind(&done);
2123
2124  // Compare operands
2125  __ fcmpu(d0, d1);
2126
2127  // Don't base result on status bits when a NaN is involved.
2128  __ bunordered(&unordered);
2129
2130  // Return a result of -1, 0, or 1, based on status bits.
2131  if (CpuFeatures::IsSupported(ISELECT)) {
2132    DCHECK(EQUAL == 0);
2133    __ li(r4, Operand(GREATER));
2134    __ li(r5, Operand(LESS));
2135    __ isel(eq, r3, r0, r4);
2136    __ isel(lt, r3, r5, r3);
2137    __ Ret();
2138  } else {
2139    __ beq(&equal);
2140    __ blt(&less_than);
2141    //  assume greater than
2142    __ li(r3, Operand(GREATER));
2143    __ Ret();
2144    __ bind(&equal);
2145    __ li(r3, Operand(EQUAL));
2146    __ Ret();
2147    __ bind(&less_than);
2148    __ li(r3, Operand(LESS));
2149    __ Ret();
2150  }
2151
2152  __ bind(&unordered);
2153  __ bind(&generic_stub);
2154  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
2155                     CompareICState::GENERIC, CompareICState::GENERIC);
2156  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2157
2158  __ bind(&maybe_undefined1);
2159  if (Token::IsOrderedRelationalCompareOp(op())) {
2160    __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
2161    __ bne(&miss);
2162    __ JumpIfSmi(r4, &unordered);
2163    __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE);
2164    __ bne(&maybe_undefined2);
2165    __ b(&unordered);
2166  }
2167
2168  __ bind(&maybe_undefined2);
2169  if (Token::IsOrderedRelationalCompareOp(op())) {
2170    __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
2171    __ beq(&unordered);
2172  }
2173
2174  __ bind(&miss);
2175  GenerateMiss(masm);
2176}
2177
2178
2179void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
2180  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
2181  Label miss, not_equal;
2182
2183  // Registers containing left and right operands respectively.
2184  Register left = r4;
2185  Register right = r3;
2186  Register tmp1 = r5;
2187  Register tmp2 = r6;
2188
2189  // Check that both operands are heap objects.
2190  __ JumpIfEitherSmi(left, right, &miss);
2191
2192  // Check that both operands are symbols.
2193  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2194  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2195  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2196  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2197  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2198  __ orx(tmp1, tmp1, tmp2);
2199  __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2200  __ bne(&miss, cr0);
2201
2202  // Internalized strings are compared by identity.
2203  __ cmp(left, right);
2204  __ bne(&not_equal);
2205  // Make sure r3 is non-zero. At this point input operands are
2206  // guaranteed to be non-zero.
2207  DCHECK(right.is(r3));
2208  STATIC_ASSERT(EQUAL == 0);
2209  STATIC_ASSERT(kSmiTag == 0);
2210  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2211  __ bind(&not_equal);
2212  __ Ret();
2213
2214  __ bind(&miss);
2215  GenerateMiss(masm);
2216}
2217
2218
2219void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
2220  DCHECK(state() == CompareICState::UNIQUE_NAME);
2221  DCHECK(GetCondition() == eq);
2222  Label miss;
2223
2224  // Registers containing left and right operands respectively.
2225  Register left = r4;
2226  Register right = r3;
2227  Register tmp1 = r5;
2228  Register tmp2 = r6;
2229
2230  // Check that both operands are heap objects.
2231  __ JumpIfEitherSmi(left, right, &miss);
2232
2233  // Check that both operands are unique names. This leaves the instance
2234  // types loaded in tmp1 and tmp2.
2235  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2236  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2237  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2238  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2239
2240  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
2241  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
2242
2243  // Unique names are compared by identity.
2244  __ cmp(left, right);
2245  __ bne(&miss);
2246  // Make sure r3 is non-zero. At this point input operands are
2247  // guaranteed to be non-zero.
2248  DCHECK(right.is(r3));
2249  STATIC_ASSERT(EQUAL == 0);
2250  STATIC_ASSERT(kSmiTag == 0);
2251  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2252  __ Ret();
2253
2254  __ bind(&miss);
2255  GenerateMiss(masm);
2256}
2257
2258
2259void CompareICStub::GenerateStrings(MacroAssembler* masm) {
2260  DCHECK(state() == CompareICState::STRING);
2261  Label miss, not_identical, is_symbol;
2262
2263  bool equality = Token::IsEqualityOp(op());
2264
2265  // Registers containing left and right operands respectively.
2266  Register left = r4;
2267  Register right = r3;
2268  Register tmp1 = r5;
2269  Register tmp2 = r6;
2270  Register tmp3 = r7;
2271  Register tmp4 = r8;
2272
2273  // Check that both operands are heap objects.
2274  __ JumpIfEitherSmi(left, right, &miss);
2275
2276  // Check that both operands are strings. This leaves the instance
2277  // types loaded in tmp1 and tmp2.
2278  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
2279  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
2280  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
2281  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
2282  STATIC_ASSERT(kNotStringTag != 0);
2283  __ orx(tmp3, tmp1, tmp2);
2284  __ andi(r0, tmp3, Operand(kIsNotStringMask));
2285  __ bne(&miss, cr0);
2286
2287  // Fast check for identical strings.
2288  __ cmp(left, right);
2289  STATIC_ASSERT(EQUAL == 0);
2290  STATIC_ASSERT(kSmiTag == 0);
2291  __ bne(&not_identical);
2292  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
2293  __ Ret();
2294  __ bind(&not_identical);
2295
2296  // Handle not identical strings.
2297
2298  // Check that both strings are internalized strings. If they are, we're done
2299  // because we already know they are not identical. We know they are both
2300  // strings.
2301  if (equality) {
2302    DCHECK(GetCondition() == eq);
2303    STATIC_ASSERT(kInternalizedTag == 0);
2304    __ orx(tmp3, tmp1, tmp2);
2305    __ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
2306    // Make sure r3 is non-zero. At this point input operands are
2307    // guaranteed to be non-zero.
2308    DCHECK(right.is(r3));
2309    __ Ret(eq, cr0);
2310  }
2311
2312  // Check that both strings are sequential one-byte.
2313  Label runtime;
2314  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
2315                                                    &runtime);
2316
2317  // Compare flat one-byte strings. Returns when done.
2318  if (equality) {
2319    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
2320                                                  tmp2);
2321  } else {
2322    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
2323                                                    tmp2, tmp3);
2324  }
2325
2326  // Handle more complex cases in runtime.
2327  __ bind(&runtime);
2328  if (equality) {
2329    {
2330      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2331      __ Push(left, right);
2332      __ CallRuntime(Runtime::kStringEqual);
2333    }
2334    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2335    __ sub(r3, r3, r4);
2336    __ Ret();
2337  } else {
2338    __ Push(left, right);
2339    __ TailCallRuntime(Runtime::kStringCompare);
2340  }
2341
2342  __ bind(&miss);
2343  GenerateMiss(masm);
2344}
2345
2346
2347void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
2348  DCHECK_EQ(CompareICState::RECEIVER, state());
2349  Label miss;
2350  __ and_(r5, r4, r3);
2351  __ JumpIfSmi(r5, &miss);
2352
2353  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2354  __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE);
2355  __ blt(&miss);
2356  __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE);
2357  __ blt(&miss);
2358
2359  DCHECK(GetCondition() == eq);
2360  __ sub(r3, r3, r4);
2361  __ Ret();
2362
2363  __ bind(&miss);
2364  GenerateMiss(masm);
2365}
2366
2367
2368void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
2369  Label miss;
2370  Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
2371  __ and_(r5, r4, r3);
2372  __ JumpIfSmi(r5, &miss);
2373  __ GetWeakValue(r7, cell);
2374  __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
2375  __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
2376  __ cmp(r5, r7);
2377  __ bne(&miss);
2378  __ cmp(r6, r7);
2379  __ bne(&miss);
2380
2381  if (Token::IsEqualityOp(op())) {
2382    __ sub(r3, r3, r4);
2383    __ Ret();
2384  } else {
2385    if (op() == Token::LT || op() == Token::LTE) {
2386      __ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
2387    } else {
2388      __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
2389    }
2390    __ Push(r4, r3, r5);
2391    __ TailCallRuntime(Runtime::kCompare);
2392  }
2393
2394  __ bind(&miss);
2395  GenerateMiss(masm);
2396}
2397
2398
2399void CompareICStub::GenerateMiss(MacroAssembler* masm) {
2400  {
2401    // Call the runtime system in a fresh internal frame.
2402    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2403    __ Push(r4, r3);
2404    __ Push(r4, r3);
2405    __ LoadSmiLiteral(r0, Smi::FromInt(op()));
2406    __ push(r0);
2407    __ CallRuntime(Runtime::kCompareIC_Miss);
2408    // Compute the entry point of the rewritten stub.
2409    __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
2410    // Restore registers.
2411    __ Pop(r4, r3);
2412  }
2413
2414  __ JumpToJSEntry(r5);
2415}
2416
2417
2418// This stub is paired with DirectCEntryStub::GenerateCall
2419void DirectCEntryStub::Generate(MacroAssembler* masm) {
2420  // Place the return address on the stack, making the call
2421  // GC safe. The RegExp backend also relies on this.
2422  __ mflr(r0);
2423  __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
2424  __ Call(ip);  // Call the C++ function.
2425  __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
2426  __ mtlr(r0);
2427  __ blr();
2428}
2429
2430
2431void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
2432  if (ABI_USES_FUNCTION_DESCRIPTORS) {
2433    // AIX/PPC64BE Linux use a function descriptor.
2434    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
2435    __ LoadP(ip, MemOperand(target, 0));  // Instruction address
2436  } else {
2437    // ip needs to be set for DirectCEentryStub::Generate, and also
2438    // for ABI_CALL_VIA_IP.
2439    __ Move(ip, target);
2440  }
2441
2442  intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
2443  __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
2444  __ Call(r0);  // Call the stub.
2445}
2446
2447
2448void NameDictionaryLookupStub::GenerateNegativeLookup(
2449    MacroAssembler* masm, Label* miss, Label* done, Register receiver,
2450    Register properties, Handle<Name> name, Register scratch0) {
2451  DCHECK(name->IsUniqueName());
2452  // If names of slots in range from 1 to kProbes - 1 for the hash value are
2453  // not equal to the name and kProbes-th slot is not used (its name is the
2454  // undefined value), it guarantees the hash table doesn't contain the
2455  // property. It's true even if some slots represent deleted properties
2456  // (their names are the hole value).
2457  for (int i = 0; i < kInlinedProbes; i++) {
2458    // scratch0 points to properties hash.
2459    // Compute the masked index: (hash + i + i * i) & mask.
2460    Register index = scratch0;
2461    // Capacity is smi 2^n.
2462    __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
2463    __ subi(index, index, Operand(1));
2464    __ LoadSmiLiteral(
2465        ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
2466    __ and_(index, index, ip);
2467
2468    // Scale the index by multiplying by the entry size.
2469    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2470    __ ShiftLeftImm(ip, index, Operand(1));
2471    __ add(index, index, ip);  // index *= 3.
2472
2473    Register entity_name = scratch0;
2474    // Having undefined at this place means the name is not contained.
2475    Register tmp = properties;
2476    __ SmiToPtrArrayOffset(ip, index);
2477    __ add(tmp, properties, ip);
2478    __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
2479
2480    DCHECK(!tmp.is(entity_name));
2481    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
2482    __ cmp(entity_name, tmp);
2483    __ beq(done);
2484
2485    // Load the hole ready for use below:
2486    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
2487
2488    // Stop if found the property.
2489    __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
2490    __ beq(miss);
2491
2492    Label good;
2493    __ cmp(entity_name, tmp);
2494    __ beq(&good);
2495
2496    // Check if the entry name is not a unique name.
2497    __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
2498    __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
2499    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
2500    __ bind(&good);
2501
2502    // Restore the properties.
2503    __ LoadP(properties,
2504             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2505  }
2506
2507  const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
2508                          r5.bit() | r4.bit() | r3.bit());
2509
2510  __ mflr(r0);
2511  __ MultiPush(spill_mask);
2512
2513  __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
2514  __ mov(r4, Operand(Handle<Name>(name)));
2515  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
2516  __ CallStub(&stub);
2517  __ cmpi(r3, Operand::Zero());
2518
2519  __ MultiPop(spill_mask);  // MultiPop does not touch condition flags
2520  __ mtlr(r0);
2521
2522  __ beq(done);
2523  __ bne(miss);
2524}
2525
2526void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
2527  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
2528  // we cannot call anything that could cause a GC from this stub.
2529  // Registers:
2530  //  result: NameDictionary to probe
2531  //  r4: key
2532  //  dictionary: NameDictionary to probe.
2533  //  index: will hold an index of entry if lookup is successful.
2534  //         might alias with result_.
2535  // Returns:
2536  //  result_ is zero if lookup failed, non zero otherwise.
2537
2538  Register result = r3;
2539  Register dictionary = r3;
2540  Register key = r4;
2541  Register index = r5;
2542  Register mask = r6;
2543  Register hash = r7;
2544  Register undefined = r8;
2545  Register entry_key = r9;
2546  Register scratch = r9;
2547
2548  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
2549
2550  __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
2551  __ SmiUntag(mask);
2552  __ subi(mask, mask, Operand(1));
2553
2554  __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
2555
2556  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
2557
2558  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
2559    // Compute the masked index: (hash + i + i * i) & mask.
2560    // Capacity is smi 2^n.
2561    if (i > 0) {
2562      // Add the probe offset (i + i * i) left shifted to avoid right shifting
2563      // the hash in a separate instruction. The value hash + i + i * i is right
2564      // shifted in the following and instruction.
2565      DCHECK(NameDictionary::GetProbeOffset(i) <
2566             1 << (32 - Name::kHashFieldOffset));
2567      __ addi(index, hash,
2568              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
2569    } else {
2570      __ mr(index, hash);
2571    }
2572    __ srwi(r0, index, Operand(Name::kHashShift));
2573    __ and_(index, mask, r0);
2574
2575    // Scale the index by multiplying by the entry size.
2576    STATIC_ASSERT(NameDictionary::kEntrySize == 3);
2577    __ ShiftLeftImm(scratch, index, Operand(1));
2578    __ add(index, index, scratch);  // index *= 3.
2579
2580    __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
2581    __ add(index, dictionary, scratch);
2582    __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
2583
2584    // Having undefined at this place means the name is not contained.
2585    __ cmp(entry_key, undefined);
2586    __ beq(&not_in_dictionary);
2587
2588    // Stop if found the property.
2589    __ cmp(entry_key, key);
2590    __ beq(&in_dictionary);
2591
2592    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
2593      // Check if the entry name is not a unique name.
2594      __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
2595      __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
2596      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
2597    }
2598  }
2599
2600  __ bind(&maybe_in_dictionary);
2601  // If we are doing negative lookup then probing failure should be
2602  // treated as a lookup success. For positive lookup probing failure
2603  // should be treated as lookup failure.
2604  if (mode() == POSITIVE_LOOKUP) {
2605    __ li(result, Operand::Zero());
2606    __ Ret();
2607  }
2608
2609  __ bind(&in_dictionary);
2610  __ li(result, Operand(1));
2611  __ Ret();
2612
2613  __ bind(&not_in_dictionary);
2614  __ li(result, Operand::Zero());
2615  __ Ret();
2616}
2617
2618
2619void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
2620    Isolate* isolate) {
2621  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
2622  stub1.GetCode();
2623  // Hydrogen code stubs need stub2 at snapshot time.
2624  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
2625  stub2.GetCode();
2626}
2627
2628
2629// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
2630// the value has just been written into the object, now this stub makes sure
2631// we keep the GC informed.  The word in the object where the value has been
2632// written is in the address register.
2633void RecordWriteStub::Generate(MacroAssembler* masm) {
2634  Label skip_to_incremental_noncompacting;
2635  Label skip_to_incremental_compacting;
2636
2637  // The first two branch instructions are generated with labels so as to
2638  // get the offset fixed up correctly by the bind(Label*) call.  We patch
2639  // it back and forth between branch condition True and False
2640  // when we start and stop incremental heap marking.
2641  // See RecordWriteStub::Patch for details.
2642
2643  // Clear the bit, branch on True for NOP action initially
2644  __ crclr(Assembler::encode_crbit(cr2, CR_LT));
2645  __ blt(&skip_to_incremental_noncompacting, cr2);
2646  __ blt(&skip_to_incremental_compacting, cr2);
2647
2648  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2649    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2650                           MacroAssembler::kReturnAtEnd);
2651  }
2652  __ Ret();
2653
2654  __ bind(&skip_to_incremental_noncompacting);
2655  GenerateIncremental(masm, INCREMENTAL);
2656
2657  __ bind(&skip_to_incremental_compacting);
2658  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
2659
2660  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
2661  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
2662  // patching not required on PPC as the initial path is effectively NOP
2663}
2664
2665
2666void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
2667  regs_.Save(masm);
2668
2669  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
2670    Label dont_need_remembered_set;
2671
2672    __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
2673    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
2674                           regs_.scratch0(), &dont_need_remembered_set);
2675
2676    __ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
2677                        &dont_need_remembered_set);
2678
2679    // First notify the incremental marker if necessary, then update the
2680    // remembered set.
2681    CheckNeedsToInformIncrementalMarker(
2682        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
2683    InformIncrementalMarker(masm);
2684    regs_.Restore(masm);
2685    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2686                           MacroAssembler::kReturnAtEnd);
2687
2688    __ bind(&dont_need_remembered_set);
2689  }
2690
2691  CheckNeedsToInformIncrementalMarker(
2692      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
2693  InformIncrementalMarker(masm);
2694  regs_.Restore(masm);
2695  __ Ret();
2696}
2697
2698
2699void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
2700  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
2701  int argument_count = 3;
2702  __ PrepareCallCFunction(argument_count, regs_.scratch0());
2703  Register address =
2704      r3.is(regs_.address()) ? regs_.scratch0() : regs_.address();
2705  DCHECK(!address.is(regs_.object()));
2706  DCHECK(!address.is(r3));
2707  __ mr(address, regs_.address());
2708  __ mr(r3, regs_.object());
2709  __ mr(r4, address);
2710  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
2711
2712  AllowExternalCallThatCantCauseGC scope(masm);
2713  __ CallCFunction(
2714      ExternalReference::incremental_marking_record_write_function(isolate()),
2715      argument_count);
2716  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
2717}
2718
2719
2720void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
2721    MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
2722    Mode mode) {
2723  Label on_black;
2724  Label need_incremental;
2725  Label need_incremental_pop_scratch;
2726
2727  // Let's look at the color of the object:  If it is not black we don't have
2728  // to inform the incremental marker.
2729  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
2730
2731  regs_.Restore(masm);
2732  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2733    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2734                           MacroAssembler::kReturnAtEnd);
2735  } else {
2736    __ Ret();
2737  }
2738
2739  __ bind(&on_black);
2740
2741  // Get the value from the slot.
2742  __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
2743
2744  if (mode == INCREMENTAL_COMPACTION) {
2745    Label ensure_not_white;
2746
2747    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
2748                     regs_.scratch1(),  // Scratch.
2749                     MemoryChunk::kEvacuationCandidateMask, eq,
2750                     &ensure_not_white);
2751
2752    __ CheckPageFlag(regs_.object(),
2753                     regs_.scratch1(),  // Scratch.
2754                     MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
2755                     &need_incremental);
2756
2757    __ bind(&ensure_not_white);
2758  }
2759
2760  // We need extra registers for this, so we push the object and the address
2761  // register temporarily.
2762  __ Push(regs_.object(), regs_.address());
2763  __ JumpIfWhite(regs_.scratch0(),  // The value.
2764                 regs_.scratch1(),  // Scratch.
2765                 regs_.object(),    // Scratch.
2766                 regs_.address(),   // Scratch.
2767                 &need_incremental_pop_scratch);
2768  __ Pop(regs_.object(), regs_.address());
2769
2770  regs_.Restore(masm);
2771  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
2772    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
2773                           MacroAssembler::kReturnAtEnd);
2774  } else {
2775    __ Ret();
2776  }
2777
2778  __ bind(&need_incremental_pop_scratch);
2779  __ Pop(regs_.object(), regs_.address());
2780
2781  __ bind(&need_incremental);
2782
2783  // Fall through when we need to inform the incremental marker.
2784}
2785
2786
2787void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
2788  CEntryStub ces(isolate(), 1, kSaveFPRegs);
2789  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
2790  int parameter_count_offset =
2791      StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
2792  __ LoadP(r4, MemOperand(fp, parameter_count_offset));
2793  if (function_mode() == JS_FUNCTION_STUB_MODE) {
2794    __ addi(r4, r4, Operand(1));
2795  }
2796  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
2797  __ slwi(r4, r4, Operand(kPointerSizeLog2));
2798  __ add(sp, sp, r4);
2799  __ Ret();
2800}
2801
2802void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
2803  if (masm->isolate()->function_entry_hook() != NULL) {
2804    PredictableCodeSizeScope predictable(masm,
2805#if V8_TARGET_ARCH_PPC64
2806                                         14 * Assembler::kInstrSize);
2807#else
2808                                         11 * Assembler::kInstrSize);
2809#endif
2810    ProfileEntryHookStub stub(masm->isolate());
2811    __ mflr(r0);
2812    __ Push(r0, ip);
2813    __ CallStub(&stub);
2814    __ Pop(r0, ip);
2815    __ mtlr(r0);
2816  }
2817}
2818
2819
2820void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
2821  // The entry hook is a "push lr, ip" instruction, followed by a call.
2822  const int32_t kReturnAddressDistanceFromFunctionStart =
2823      Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;
2824
2825  // This should contain all kJSCallerSaved registers.
2826  const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers.
2827                             r15.bit();        // Saved stack pointer.
2828
2829  // We also save lr, so the count here is one higher than the mask indicates.
2830  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
2831
2832  // Save all caller-save registers as this may be called from anywhere.
2833  __ mflr(ip);
2834  __ MultiPush(kSavedRegs | ip.bit());
2835
2836  // Compute the function's address for the first argument.
2837  __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));
2838
2839  // The caller's return address is two slots above the saved temporaries.
2840  // Grab that for the second argument to the hook.
2841  __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));
2842
2843  // Align the stack if necessary.
2844  int frame_alignment = masm->ActivationFrameAlignment();
2845  if (frame_alignment > kPointerSize) {
2846    __ mr(r15, sp);
2847    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
2848    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
2849  }
2850
2851#if !defined(USE_SIMULATOR)
2852  uintptr_t entry_hook =
2853      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
2854#else
2855  // Under the simulator we need to indirect the entry hook through a
2856  // trampoline function at a known address.
2857  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
2858  ExternalReference entry_hook = ExternalReference(
2859      &dispatcher, ExternalReference::BUILTIN_CALL, isolate());
2860
2861  // It additionally takes an isolate as a third parameter
2862  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
2863#endif
2864
2865  __ mov(ip, Operand(entry_hook));
2866
2867  if (ABI_USES_FUNCTION_DESCRIPTORS) {
2868    __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
2869    __ LoadP(ip, MemOperand(ip, 0));
2870  }
2871  // ip set above, so nothing more to do for ABI_CALL_VIA_IP.
2872
2873  // PPC LINUX ABI:
2874  __ li(r0, Operand::Zero());
2875  __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
2876
2877  __ Call(ip);
2878
2879  __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
2880
2881  // Restore the stack pointer if needed.
2882  if (frame_alignment > kPointerSize) {
2883    __ mr(sp, r15);
2884  }
2885
2886  // Also pop lr to get Ret(0).
2887  __ MultiPop(kSavedRegs | ip.bit());
2888  __ mtlr(ip);
2889  __ Ret();
2890}
2891
2892
2893template <class T>
2894static void CreateArrayDispatch(MacroAssembler* masm,
2895                                AllocationSiteOverrideMode mode) {
2896  if (mode == DISABLE_ALLOCATION_SITES) {
2897    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
2898    __ TailCallStub(&stub);
2899  } else if (mode == DONT_OVERRIDE) {
2900    int last_index =
2901        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
2902    for (int i = 0; i <= last_index; ++i) {
2903      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
2904      __ Cmpi(r6, Operand(kind), r0);
2905      T stub(masm->isolate(), kind);
2906      __ TailCallStub(&stub, eq);
2907    }
2908
2909    // If we reached this point there is a problem.
2910    __ Abort(kUnexpectedElementsKindInArrayConstructor);
2911  } else {
2912    UNREACHABLE();
2913  }
2914}
2915
2916
2917static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
2918                                           AllocationSiteOverrideMode mode) {
2919  // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
2920  // r6 - kind (if mode != DISABLE_ALLOCATION_SITES)
2921  // r3 - number of arguments
2922  // r4 - constructor?
2923  // sp[0] - last argument
2924  Label normal_sequence;
2925  if (mode == DONT_OVERRIDE) {
2926    STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2927    STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2928    STATIC_ASSERT(FAST_ELEMENTS == 2);
2929    STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2930    STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
2931    STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
2932
2933    // is the low bit set? If so, we are holey and that is good.
2934    __ andi(r0, r6, Operand(1));
2935    __ bne(&normal_sequence, cr0);
2936  }
2937
2938  // look at the first argument
2939  __ LoadP(r8, MemOperand(sp, 0));
2940  __ cmpi(r8, Operand::Zero());
2941  __ beq(&normal_sequence);
2942
2943  if (mode == DISABLE_ALLOCATION_SITES) {
2944    ElementsKind initial = GetInitialFastElementsKind();
2945    ElementsKind holey_initial = GetHoleyElementsKind(initial);
2946
2947    ArraySingleArgumentConstructorStub stub_holey(
2948        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
2949    __ TailCallStub(&stub_holey);
2950
2951    __ bind(&normal_sequence);
2952    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
2953                                            DISABLE_ALLOCATION_SITES);
2954    __ TailCallStub(&stub);
2955  } else if (mode == DONT_OVERRIDE) {
2956    // We are going to create a holey array, but our kind is non-holey.
2957    // Fix kind and retry (only if we have an allocation site in the slot).
2958    __ addi(r6, r6, Operand(1));
2959
2960    if (FLAG_debug_code) {
2961      __ LoadP(r8, FieldMemOperand(r5, 0));
2962      __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
2963      __ Assert(eq, kExpectedAllocationSite);
2964    }
2965
2966    // Save the resulting elements kind in type info. We can't just store r6
2967    // in the AllocationSite::transition_info field because elements kind is
2968    // restricted to a portion of the field...upper bits need to be left alone.
2969    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
2970    __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
2971    __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
2972    __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset),
2973              r0);
2974
2975    __ bind(&normal_sequence);
2976    int last_index =
2977        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
2978    for (int i = 0; i <= last_index; ++i) {
2979      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
2980      __ mov(r0, Operand(kind));
2981      __ cmp(r6, r0);
2982      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
2983      __ TailCallStub(&stub, eq);
2984    }
2985
2986    // If we reached this point there is a problem.
2987    __ Abort(kUnexpectedElementsKindInArrayConstructor);
2988  } else {
2989    UNREACHABLE();
2990  }
2991}
2992
2993
2994template <class T>
2995static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
2996  int to_index =
2997      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
2998  for (int i = 0; i <= to_index; ++i) {
2999    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
3000    T stub(isolate, kind);
3001    stub.GetCode();
3002    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
3003      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
3004      stub1.GetCode();
3005    }
3006  }
3007}
3008
3009
3010void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
3011  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
3012      isolate);
3013  ArrayNArgumentsConstructorStub stub(isolate);
3014  stub.GetCode();
3015  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
3016  for (int i = 0; i < 2; i++) {
3017    // For internal arrays we only need a few things
3018    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
3019    stubh1.GetCode();
3020    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
3021    stubh2.GetCode();
3022  }
3023}
3024
3025
3026void ArrayConstructorStub::GenerateDispatchToArrayStub(
3027    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
3028  Label not_zero_case, not_one_case;
3029  __ cmpi(r3, Operand::Zero());
3030  __ bne(&not_zero_case);
3031  CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
3032
3033  __ bind(&not_zero_case);
3034  __ cmpi(r3, Operand(1));
3035  __ bgt(&not_one_case);
3036  CreateArrayDispatchOneArgument(masm, mode);
3037
3038  __ bind(&not_one_case);
3039  ArrayNArgumentsConstructorStub stub(masm->isolate());
3040  __ TailCallStub(&stub);
3041}
3042
3043
3044void ArrayConstructorStub::Generate(MacroAssembler* masm) {
3045  // ----------- S t a t e -------------
3046  //  -- r3 : argc (only if argument_count() == ANY)
3047  //  -- r4 : constructor
3048  //  -- r5 : AllocationSite or undefined
3049  //  -- r6 : new target
3050  //  -- sp[0] : return address
3051  //  -- sp[4] : last argument
3052  // -----------------------------------
3053
3054  if (FLAG_debug_code) {
3055    // The array construct code is only set for the global and natives
3056    // builtin Array functions which always have maps.
3057
3058    // Initial map for the builtin Array function should be a map.
3059    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
3060    // Will both indicate a NULL and a Smi.
3061    __ TestIfSmi(r7, r0);
3062    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
3063    __ CompareObjectType(r7, r7, r8, MAP_TYPE);
3064    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
3065
3066    // We should either have undefined in r5 or a valid AllocationSite
3067    __ AssertUndefinedOrAllocationSite(r5, r7);
3068  }
3069
3070  // Enter the context of the Array function.
3071  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
3072
3073  Label subclassing;
3074  __ cmp(r6, r4);
3075  __ bne(&subclassing);
3076
3077  Label no_info;
3078  // Get the elements kind and case on that.
3079  __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
3080  __ beq(&no_info);
3081
3082  __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
3083  __ SmiUntag(r6);
3084  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
3085  __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
3086  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
3087
3088  __ bind(&no_info);
3089  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
3090
3091  __ bind(&subclassing);
3092  __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
3093  __ StorePX(r4, MemOperand(sp, r0));
3094  __ addi(r3, r3, Operand(3));
3095  __ Push(r6, r5);
3096  __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
3097}
3098
3099
3100void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
3101                                                ElementsKind kind) {
3102  __ cmpli(r3, Operand(1));
3103
3104  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
3105  __ TailCallStub(&stub0, lt);
3106
3107  ArrayNArgumentsConstructorStub stubN(isolate());
3108  __ TailCallStub(&stubN, gt);
3109
3110  if (IsFastPackedElementsKind(kind)) {
3111    // We might need to create a holey array
3112    // look at the first argument
3113    __ LoadP(r6, MemOperand(sp, 0));
3114    __ cmpi(r6, Operand::Zero());
3115
3116    InternalArraySingleArgumentConstructorStub stub1_holey(
3117        isolate(), GetHoleyElementsKind(kind));
3118    __ TailCallStub(&stub1_holey, ne);
3119  }
3120
3121  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
3122  __ TailCallStub(&stub1);
3123}
3124
3125
3126void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
3127  // ----------- S t a t e -------------
3128  //  -- r3 : argc
3129  //  -- r4 : constructor
3130  //  -- sp[0] : return address
3131  //  -- sp[4] : last argument
3132  // -----------------------------------
3133
3134  if (FLAG_debug_code) {
3135    // The array construct code is only set for the global and natives
3136    // builtin Array functions which always have maps.
3137
3138    // Initial map for the builtin Array function should be a map.
3139    __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
3140    // Will both indicate a NULL and a Smi.
3141    __ TestIfSmi(r6, r0);
3142    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
3143    __ CompareObjectType(r6, r6, r7, MAP_TYPE);
3144    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
3145  }
3146
3147  // Figure out the right elements kind
3148  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
3149  // Load the map's "bit field 2" into |result|.
3150  __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
3151  // Retrieve elements_kind from bit field 2.
3152  __ DecodeField<Map::ElementsKindBits>(r6);
3153
3154  if (FLAG_debug_code) {
3155    Label done;
3156    __ cmpi(r6, Operand(FAST_ELEMENTS));
3157    __ beq(&done);
3158    __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS));
3159    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
3160    __ bind(&done);
3161  }
3162
3163  Label fast_elements_case;
3164  __ cmpi(r6, Operand(FAST_ELEMENTS));
3165  __ beq(&fast_elements_case);
3166  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
3167
3168  __ bind(&fast_elements_case);
3169  GenerateCase(masm, FAST_ELEMENTS);
3170}
3171
3172static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3173  return ref0.address() - ref1.address();
3174}
3175
3176
3177// Calls an API function.  Allocates HandleScope, extracts returned value
3178// from handle and propagates exceptions.  Restores context.  stack_space
3179// - space to be unwound on exit (includes the call JS arguments space and
3180// the additional space allocated for the fast call).
3181static void CallApiFunctionAndReturn(MacroAssembler* masm,
3182                                     Register function_address,
3183                                     ExternalReference thunk_ref,
3184                                     int stack_space,
3185                                     MemOperand* stack_space_operand,
3186                                     MemOperand return_value_operand,
3187                                     MemOperand* context_restore_operand) {
3188  Isolate* isolate = masm->isolate();
3189  ExternalReference next_address =
3190      ExternalReference::handle_scope_next_address(isolate);
3191  const int kNextOffset = 0;
3192  const int kLimitOffset = AddressOffset(
3193      ExternalReference::handle_scope_limit_address(isolate), next_address);
3194  const int kLevelOffset = AddressOffset(
3195      ExternalReference::handle_scope_level_address(isolate), next_address);
3196
3197  // Additional parameter is the address of the actual callback.
3198  DCHECK(function_address.is(r4) || function_address.is(r5));
3199  Register scratch = r6;
3200
3201  __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
3202  __ lbz(scratch, MemOperand(scratch, 0));
3203  __ cmpi(scratch, Operand::Zero());
3204
3205  if (CpuFeatures::IsSupported(ISELECT)) {
3206    __ mov(scratch, Operand(thunk_ref));
3207    __ isel(eq, scratch, function_address, scratch);
3208  } else {
3209    Label profiler_disabled;
3210    Label end_profiler_check;
3211    __ beq(&profiler_disabled);
3212    __ mov(scratch, Operand(thunk_ref));
3213    __ b(&end_profiler_check);
3214    __ bind(&profiler_disabled);
3215    __ mr(scratch, function_address);
3216    __ bind(&end_profiler_check);
3217  }
3218
3219  // Allocate HandleScope in callee-save registers.
3220  // r17 - next_address
3221  // r14 - next_address->kNextOffset
3222  // r15 - next_address->kLimitOffset
3223  // r16 - next_address->kLevelOffset
3224  __ mov(r17, Operand(next_address));
3225  __ LoadP(r14, MemOperand(r17, kNextOffset));
3226  __ LoadP(r15, MemOperand(r17, kLimitOffset));
3227  __ lwz(r16, MemOperand(r17, kLevelOffset));
3228  __ addi(r16, r16, Operand(1));
3229  __ stw(r16, MemOperand(r17, kLevelOffset));
3230
3231  if (FLAG_log_timer_events) {
3232    FrameScope frame(masm, StackFrame::MANUAL);
3233    __ PushSafepointRegisters();
3234    __ PrepareCallCFunction(1, r3);
3235    __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
3236    __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
3237                     1);
3238    __ PopSafepointRegisters();
3239  }
3240
3241  // Native call returns to the DirectCEntry stub which redirects to the
3242  // return address pushed on stack (could have moved after GC).
3243  // DirectCEntry stub itself is generated early and never moves.
3244  DirectCEntryStub stub(isolate);
3245  stub.GenerateCall(masm, scratch);
3246
3247  if (FLAG_log_timer_events) {
3248    FrameScope frame(masm, StackFrame::MANUAL);
3249    __ PushSafepointRegisters();
3250    __ PrepareCallCFunction(1, r3);
3251    __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
3252    __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
3253                     1);
3254    __ PopSafepointRegisters();
3255  }
3256
3257  Label promote_scheduled_exception;
3258  Label delete_allocated_handles;
3259  Label leave_exit_frame;
3260  Label return_value_loaded;
3261
3262  // load value from ReturnValue
3263  __ LoadP(r3, return_value_operand);
3264  __ bind(&return_value_loaded);
3265  // No more valid handles (the result handle was the last one). Restore
3266  // previous handle scope.
3267  __ StoreP(r14, MemOperand(r17, kNextOffset));
3268  if (__ emit_debug_code()) {
3269    __ lwz(r4, MemOperand(r17, kLevelOffset));
3270    __ cmp(r4, r16);
3271    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
3272  }
3273  __ subi(r16, r16, Operand(1));
3274  __ stw(r16, MemOperand(r17, kLevelOffset));
3275  __ LoadP(r0, MemOperand(r17, kLimitOffset));
3276  __ cmp(r15, r0);
3277  __ bne(&delete_allocated_handles);
3278
3279  // Leave the API exit frame.
3280  __ bind(&leave_exit_frame);
3281  bool restore_context = context_restore_operand != NULL;
3282  if (restore_context) {
3283    __ LoadP(cp, *context_restore_operand);
3284  }
3285  // LeaveExitFrame expects unwind space to be in a register.
3286  if (stack_space_operand != NULL) {
3287    __ lwz(r14, *stack_space_operand);
3288  } else {
3289    __ mov(r14, Operand(stack_space));
3290  }
3291  __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
3292
3293  // Check if the function scheduled an exception.
3294  __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
3295  __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
3296  __ LoadP(r15, MemOperand(r15));
3297  __ cmp(r14, r15);
3298  __ bne(&promote_scheduled_exception);
3299
3300  __ blr();
3301
3302  // Re-throw by promoting a scheduled exception.
3303  __ bind(&promote_scheduled_exception);
3304  __ TailCallRuntime(Runtime::kPromoteScheduledException);
3305
3306  // HandleScope limit has changed. Delete allocated extensions.
3307  __ bind(&delete_allocated_handles);
3308  __ StoreP(r15, MemOperand(r17, kLimitOffset));
3309  __ mr(r14, r3);
3310  __ PrepareCallCFunction(1, r15);
3311  __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
3312  __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
3313                   1);
3314  __ mr(r3, r14);
3315  __ b(&leave_exit_frame);
3316}
3317
3318void CallApiCallbackStub::Generate(MacroAssembler* masm) {
3319  // ----------- S t a t e -------------
3320  //  -- r3                  : callee
3321  //  -- r7                  : call_data
3322  //  -- r5                  : holder
3323  //  -- r4                  : api_function_address
3324  //  -- cp                  : context
3325  //  --
3326  //  -- sp[0]               : last argument
3327  //  -- ...
3328  //  -- sp[(argc - 1)* 4]   : first argument
3329  //  -- sp[argc * 4]        : receiver
3330  // -----------------------------------
3331
3332  Register callee = r3;
3333  Register call_data = r7;
3334  Register holder = r5;
3335  Register api_function_address = r4;
3336  Register context = cp;
3337
3338  typedef FunctionCallbackArguments FCA;
3339
3340  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
3341  STATIC_ASSERT(FCA::kCalleeIndex == 5);
3342  STATIC_ASSERT(FCA::kDataIndex == 4);
3343  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
3344  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
3345  STATIC_ASSERT(FCA::kIsolateIndex == 1);
3346  STATIC_ASSERT(FCA::kHolderIndex == 0);
3347  STATIC_ASSERT(FCA::kNewTargetIndex == 7);
3348  STATIC_ASSERT(FCA::kArgsLength == 8);
3349
3350  // new target
3351  __ PushRoot(Heap::kUndefinedValueRootIndex);
3352
3353  // context save
3354  __ push(context);
3355  if (!is_lazy()) {
3356    // load context from callee
3357    __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
3358  }
3359
3360  // callee
3361  __ push(callee);
3362
3363  // call data
3364  __ push(call_data);
3365
3366  Register scratch = call_data;
3367  if (!call_data_undefined()) {
3368    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3369  }
3370  // return value
3371  __ push(scratch);
3372  // return value default
3373  __ push(scratch);
3374  // isolate
3375  __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
3376  __ push(scratch);
3377  // holder
3378  __ push(holder);
3379
3380  // Prepare arguments.
3381  __ mr(scratch, sp);
3382
3383  // Allocate the v8::Arguments structure in the arguments' space since
3384  // it's not controlled by GC.
3385  // PPC LINUX ABI:
3386  //
3387  // Create 4 extra slots on stack:
3388  //    [0] space for DirectCEntryStub's LR save
3389  //    [1-3] FunctionCallbackInfo
3390  const int kApiStackSpace = 4;
3391  const int kFunctionCallbackInfoOffset =
3392      (kStackFrameExtraParamSlot + 1) * kPointerSize;
3393
3394  FrameScope frame_scope(masm, StackFrame::MANUAL);
3395  __ EnterExitFrame(false, kApiStackSpace);
3396
3397  DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
3398  // r3 = FunctionCallbackInfo&
3399  // Arguments is after the return address.
3400  __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
3401  // FunctionCallbackInfo::implicit_args_
3402  __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
3403  // FunctionCallbackInfo::values_
3404  __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
3405  __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
3406  // FunctionCallbackInfo::length_ = argc
3407  __ li(ip, Operand(argc()));
3408  __ stw(ip, MemOperand(r3, 2 * kPointerSize));
3409
3410  ExternalReference thunk_ref =
3411      ExternalReference::invoke_function_callback(masm->isolate());
3412
3413  AllowExternalCallThatCantCauseGC scope(masm);
3414  MemOperand context_restore_operand(
3415      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
3416  // Stores return the first js argument
3417  int return_value_offset = 0;
3418  if (is_store()) {
3419    return_value_offset = 2 + FCA::kArgsLength;
3420  } else {
3421    return_value_offset = 2 + FCA::kReturnValueOffset;
3422  }
3423  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
3424  int stack_space = 0;
3425  MemOperand length_operand =
3426      MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize);
3427  MemOperand* stack_space_operand = &length_operand;
3428  stack_space = argc() + FCA::kArgsLength + 1;
3429  stack_space_operand = NULL;
3430  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
3431                           stack_space_operand, return_value_operand,
3432                           &context_restore_operand);
3433}
3434
3435
3436void CallApiGetterStub::Generate(MacroAssembler* masm) {
3437  int arg0Slot = 0;
3438  int accessorInfoSlot = 0;
3439  int apiStackSpace = 0;
3440  // Build v8::PropertyCallbackInfo::args_ array on the stack and push property
3441  // name below the exit frame to make GC aware of them.
3442  STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
3443  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
3444  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
3445  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
3446  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
3447  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
3448  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
3449  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
3450
3451  Register receiver = ApiGetterDescriptor::ReceiverRegister();
3452  Register holder = ApiGetterDescriptor::HolderRegister();
3453  Register callback = ApiGetterDescriptor::CallbackRegister();
3454  Register scratch = r7;
3455  DCHECK(!AreAliased(receiver, holder, callback, scratch));
3456
3457  Register api_function_address = r5;
3458
3459  __ push(receiver);
3460  // Push data from AccessorInfo.
3461  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
3462  __ push(scratch);
3463  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3464  __ Push(scratch, scratch);
3465  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
3466  __ Push(scratch, holder);
3467  __ Push(Smi::kZero);  // should_throw_on_error -> false
3468  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
3469  __ push(scratch);
3470
3471  // v8::PropertyCallbackInfo::args_ array and name handle.
3472  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
3473
3474  // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
3475  __ mr(r3, sp);                               // r3 = Handle<Name>
3476  __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = v8::PCI::args_
3477
3478// If ABI passes Handles (pointer-sized struct) in a register:
3479//
3480// Create 2 extra slots on stack:
3481//    [0] space for DirectCEntryStub's LR save
3482//    [1] AccessorInfo&
3483//
3484// Otherwise:
3485//
3486// Create 3 extra slots on stack:
3487//    [0] space for DirectCEntryStub's LR save
3488//    [1] copy of Handle (first arg)
3489//    [2] AccessorInfo&
3490  if (ABI_PASSES_HANDLES_IN_REGS) {
3491    accessorInfoSlot = kStackFrameExtraParamSlot + 1;
3492    apiStackSpace = 2;
3493  } else {
3494    arg0Slot = kStackFrameExtraParamSlot + 1;
3495    accessorInfoSlot = arg0Slot + 1;
3496    apiStackSpace = 3;
3497  }
3498
3499  FrameScope frame_scope(masm, StackFrame::MANUAL);
3500  __ EnterExitFrame(false, apiStackSpace);
3501
3502  if (!ABI_PASSES_HANDLES_IN_REGS) {
3503    // pass 1st arg by reference
3504    __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize));
3505    __ addi(r3, sp, Operand(arg0Slot * kPointerSize));
3506  }
3507
3508  // Create v8::PropertyCallbackInfo object on the stack and initialize
3509  // it's args_ field.
3510  __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize));
3511  __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize));
3512  // r4 = v8::PropertyCallbackInfo&
3513
3514  ExternalReference thunk_ref =
3515      ExternalReference::invoke_accessor_getter_callback(isolate());
3516
3517  __ LoadP(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
3518  __ LoadP(api_function_address,
3519        FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
3520
3521  // +3 is to skip prolog, return address and name handle.
3522  MemOperand return_value_operand(
3523      fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
3524  CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
3525                           kStackUnwindSpace, NULL, return_value_operand, NULL);
3526}
3527
3528#undef __
3529}  // namespace internal
3530}  // namespace v8
3531
3532#endif  // V8_TARGET_ARCH_PPC
3533