// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, scratch2);
  __ b(ne, not_a_heap_number);
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(r0, &check_heap_number);
  __ Ret();

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin);
  __ Ret();

  __ bind(&call_builtin);
  __ push(r0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(r3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
  __ ldr(r2, MemOperand(r2, Context::SlotOffset(map_index)));
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
  __ Push(cp, r3, r4);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Set up the fixed slots, copy the global object from the previous context.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        r0, r1, r2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(r3, &after_sentinel);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ cmp(r3, Operand::Zero());
    __ Assert(eq, message);
  }
  __ ldr(r3, GlobalObjectOperand());
  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots, copy the global object from the previous context.
  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
  __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  //
  // r3: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,
                        r0,
                        r1,
                        r2,
                        fail,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ ldr(r1, FieldMemOperand(r3, i));
      __ str(r1, FieldMemOperand(r0, i));
    }
  }

  if (length > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ add(r2, r0, Operand(JSArray::kSize));
    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));

    // Copy the elements array.
    ASSERT((elements_size % kPointerSize) == 0);
    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
  }
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // Load boilerplate object into r3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
  __ b(eq, &slow_case);

  FastCloneShallowArrayStub::Mode mode = mode_;
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
    __ b(ne, &check_fast_elements);
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ add(sp, sp, Operand(3 * kPointerSize));
    __ Ret();

    __ bind(&check_fast_elements);
    __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
    __ b(ne, &double_elements);
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ add(sp, sp, Operand(3 * kPointerSize));
    __ Ret();

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(r3);
    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
    __ CompareRoot(r3, expected_map_index);
    __ Assert(eq, message);
    __ pop(r3);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: object literal flags.
  // [sp + kPointerSize]: constant properties.
  // [sp + (2 * kPointerSize)]: literal index.
  // [sp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into r3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
  __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
  __ b(eq, &slow_case);

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
  __ cmp(r0, Operand(size >> kPointerSizeLog2));
  __ b(ne, &slow_case);

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ ldr(r1, FieldMemOperand(r3, i));
    __ str(r1, FieldMemOperand(r0, i));
  }

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
// scratch register.  Destroys the source register.  No GC occurs during this
// stub so you don't have to set up the frame.
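//
// For example, the Smi value 3 is 1.1b * 2^1: sign bit 0, biased exponent
// 1023 + 1 = 0x400 and top-of-mantissa bits 0x80000, giving the word pair
// 0x40080000 (exponent word) / 0x00000000 (mantissa word).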
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return  result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  Register exponent = result1_;
  Register mantissa = result2_;

  Label not_special;
  // Convert from Smi to integer.
  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
  // Move sign bit from source to destination.  This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand(0, RelocInfo::NONE));
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ CountLeadingZeros(zeros_, source_, mantissa);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.  Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
  // that fit in the ARM's constant field.
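  // (An ARM data-processing immediate is an 8-bit value rotated right by an
  // even amount, so 0x41e cannot be encoded directly, but 0x1e and 0x400 can.)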
  int fudge = 0x400;
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
  __ add(mantissa, mantissa, Operand(fudge));
  __ orr(exponent,
         exponent,
         Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
    __ vmov(d7.high(), scratch1);
    __ vcvt_f64_s32(d7, d7.high());
    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
    __ vmov(d6.high(), scratch1);
    __ vcvt_f64_s32(d6, d6.high());
    if (destination == kCoreRegisters) {
      __ vmov(r2, r3, d7);
      __ vmov(r0, r1, d6);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from r0 to r3 and r2 in double format.
    __ mov(scratch1, Operand(r0));
    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
    __ push(lr);
    __ Call(stub1.GetCode());
    // Write Smi from r1 to r1 and r0 in double format.
    __ mov(scratch1, Operand(r1));
    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
    __ Call(stub2.GetCode());
    __ pop(lr);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {
  // Load right operand (r0) to d7 or r2/r3.
  LoadNumber(masm, destination,
             r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (r1) to d6 or r0/r1.
  LoadNumber(masm, destination,
             r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     DwVfpRegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  // Smi check.
  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
  // Heap number check.
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(VFP3) &&
      destination == kVFPRegisters) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from tagged HeapNumber to double register.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(dst, scratch1, HeapNumber::kValueOffset);
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
  }
  __ jmp(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Convert smi to double using VFP instructions.
    __ vmov(dst.high(), scratch1);
    __ vcvt_f64_s32(dst, dst.high());
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ vmov(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 in double format.
    __ mov(scratch1, Operand(object));
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(lr);
    __ Call(stub.GetCode());
    __ pop(lr);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               DwVfpRegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label done;
  Label not_in_int32_range;

  __ UntagAndJumpIfSmi(dst, object, &done);
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ cmp(scratch1, heap_number_map);
  __ b(ne, not_number);
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);
  __ bind(&done);
}


void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             DwVfpRegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             SwVfpRegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(single_scratch, int_scratch);
    __ vcvt_f64_s32(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |         dst2            |         dst1            |
    // | s |   exp   |              mantissa               |
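    //
    // Worked example: for int_scratch = 5 = 101b the leading set bit is at
    // position 2, so the biased exponent is 2 + 1023 = 0x401 and the result
    // is dst2 = 0x40140000, dst1 = 0x00000000 (the double 5.0).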

    // Check for zero.
    __ cmp(int_scratch, Operand::Zero());
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ b(eq, &done);

    // Preload the sign of the value.
    __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
    // Get the absolute value (as an unsigned integer).
    __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ CountLeadingZeros(dst1, int_scratch, scratch2);
    __ rsb(dst1, dst1, Operand(31));

    // Set the exponent.
    __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Bfi(dst2, scratch2, scratch2,
        HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the leading set bit (the implicit mantissa bit).
    __ mov(scratch2, Operand(1));
    __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));

    __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    // Get the number of bits to set in the lower part of the mantissa.
    __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
    __ b(mi, &fewer_than_20_useful_bits);
    // Set the higher 20 bits of the mantissa.
    __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
    __ rsb(scratch2, scratch2, Operand(32));
    __ mov(dst1, Operand(int_scratch, LSL, scratch2));
    __ b(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
    __ orr(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, Operand::Zero());
  }
  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DwVfpRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  SwVfpRegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ b(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       scratch2,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ b(ne, not_int32);

    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));

    // Check for 0 and -0.
    __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
    __ orr(scratch1, scratch1, Operand(dst2));
    __ cmp(scratch1, Operand::Zero());
    __ b(eq, &done);

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DwVfpRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  __ UntagAndJumpIfSmi(dst, object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    SwVfpRegister single_scratch = double_scratch.low();
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       scratch2,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ b(ne, not_int32);
    // Get the result in the destination register.
    __ vmov(dst, single_scratch);

  } else {
    // Load the double value in the destination registers.
    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
    __ orr(dst, scratch2, Operand(dst));
    __ cmp(dst, Operand::Zero());
    __ b(eq, &done);

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Register state after DoubleIs32BitInteger:
    // dst: mantissa[51:20].
    // scratch2: 1.

    // Shift back the higher bits of the mantissa.
    __ mov(dst, Operand(dst, LSR, scratch3));
    // Set the implicit first bit.
    __ rsb(scratch3, scratch3, Operand(32));
    __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
    // Set the sign.
    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ tst(scratch1, Operand(HeapNumber::kSignMask));
    __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ubfx(scratch,
          src1,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ b(mi, not_int32);
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
  Register tmp = dst;
  __ sub(tmp, scratch, Operand(src1, LSR, 31));
  __ cmp(tmp, Operand(30));
  __ b(gt, not_int32);
  // If any of the mantissa bits [21:0] is set, the value has a fractional
  // part (the exponent is at most 30) and cannot be an int32.
  __ tst(src2, Operand(0x3fffff));
  __ b(ne, not_int32);

  // Otherwise the exponent needs to be big enough to shift left all the
  // non-zero bits. So we need the (30 - exponent) last bits of the 31
  // higher bits of the mantissa to be zero. Because bits [21:0] are zero,
  // we can check instead that the (32 - exponent) last bits of the 32
  // higher bits of the mantissa are zero.
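  // Worked example: 2.5 = 1.01b * 2^1 has upper mantissa bits 0x40000000;
  // the mask below for the low (32 - 1) bits is 0x7fffffff, the test is
  // non-zero and 2.5 is correctly rejected.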

  // Get the 32 higher bits of the mantissa in dst.
  __ Ubfx(dst,
          src2,
          HeapNumber::kMantissaBitsInTopWord,
          32 - HeapNumber::kMantissaBitsInTopWord);
  __ orr(dst,
         dst,
         Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));

  // Create the mask and test the lower bits (of the higher bits).
  __ rsb(scratch, scratch, Operand(32));
  __ mov(src2, Operand(1));
  __ mov(src1, Operand(src2, LSL, scratch));
  __ sub(src1, src1, Operand(1));
  __ tst(dst, src1);
  __ b(ne, not_int32);
}


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is callee-saved.
  // We currently always use r5 to pass it.
  ASSERT(heap_number_result.is(r5));

  // Push the current return address before the C call. Return will be
  // through pop(pc) below.
  __ push(lr);
  __ PrepareCallCFunction(0, 2, scratch);
  if (masm->use_eabi_hardfloat()) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(d0, r0, r1);
    __ vmov(d1, r2, r3);
  }
  {
    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
  }
  // Store answer in the overwritable heap number. Double returned in
  // registers r0 and r1 or in d0.
  if (masm->use_eabi_hardfloat()) {
    CpuFeatures::Scope scope(VFP3);
    __ vstr(d0,
            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    __ Strd(r0, r1, FieldMemOperand(heap_number_result,
                                    HeapNumber::kValueOffset));
  }
  // Place heap_number_result in r0 and return to the pushed return address.
  __ mov(r0, Operand(heap_number_result));
  __ pop(pc);
}


bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time.  See next method.
  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
    return true;
  }
  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
  stub1.GetCode()->set_is_pregenerated(true);
  stub2.GetCode()->set_is_pregenerated(true);
}


// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.  This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30, so the exponent is 30 (before biasing).
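  // (For example, 2^30 itself is stored with exponent word 0x41d00000 and
  // mantissa word 0x00000000.)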
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12-bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
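  // (The stored value is -2^31 = 0xc1e00000 / 0x00000000: sign bit set,
  // biased exponent 31 + 1023 = 0x41e, all mantissa bits zero.)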
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand(0, RelocInfo::NONE));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

  // The two objects are identical.  If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cond != eq || !never_nan_nan) {
    // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are identical and not both Smis, so neither of them is a Smi.
    // If it's not a heap number, then return equal.
    if (cond == lt || cond == gt) {
      __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
      __ b(ge, slow);
    } else {
      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
      __ b(eq, &heap_number);
      // Comparing JS objects with <=, >= is complicated.
      if (cond != eq) {
        __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
        __ b(ge, slow);
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
        if (cond == le || cond == ge) {
          __ cmp(r4, Operand(ODDBALL_TYPE));
          __ b(ne, &return_equal);
          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
          __ cmp(r0, r2);
          __ b(ne, &return_equal);
          if (cond == le) {
            // undefined <= undefined should fail.
            __ mov(r0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ mov(r0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cond != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless.  For the others here is some code to check
    // for NaN.
    if (cond != lt && cond != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
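      // (For example, 0x7ff0000000000000 is +Infinity, while
      // 0x7ff8000000000000 is the canonical quiet NaN.)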
      // Read top bits of double representation (second word of value).
      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
      // NaNs have all-one exponents so they sign extend to -1.
      __ cmp(r3, Operand(-1));
      __ b(ne, &return_equal);

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
      // Or with all low-bits of mantissa.
      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
      __ orr(r0, r3, Operand(r2), SetCC);
      // For equal we already have the right value in r0:  Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN).  For <= and >= we need to load r0 with the failing
      // value if it's a NaN.
      if (cond != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq);
        if (cond == le) {
          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi.  Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal.
    // If rhs is r0 then there is already a non-zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert lhs to a double in d7.
    CpuFeatures::Scope scope(VFP3);
    __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
    // Load the double from rhs, tagged HeapNumber r0, to d6.
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
  } else {
    __ push(lr);
    // Convert lhs to a double in r2, r3.
    __ mov(r7, Operand(lhs));
    ConvertToDoubleStub stub1(r3, r2, r7, r6);
    __ Call(stub1.GetCode());
    // Load rhs to a double in r0, r1.
    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    __ pop(lr);
  }

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed.  Return non-equal.
    // If lhs is r0 then there is already a non-zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from lhs, tagged HeapNumber r1, to d7.
    __ sub(r7, lhs, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
    // Convert rhs to a double in d6.
    __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
  } else {
    __ push(lr);
    // Load lhs to a double in r2, r3.
    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    // Convert rhs to a double in r0, r1.
    __ mov(r7, Operand(rhs));
    ConvertToDoubleStub stub2(r1, r0, r7, r6);
    __ Call(stub2.GetCode());
    __ pop(lr);
  }
  // Fall through to both_loaded_as_doubles.
}


void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;
  Label one_is_nan, neither_is_nan;

  __ Sbfx(r4,
          lhs_exponent,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // NaNs have all-one exponents so they sign extend to -1.
  __ cmp(r4, Operand(-1));
  __ b(ne, lhs_not_nan);
  __ mov(r4,
         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
         SetCC);
  __ b(ne, &one_is_nan);
  __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
  __ b(ne, &one_is_nan);

  __ bind(lhs_not_nan);
  __ Sbfx(r4,
          rhs_exponent,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // NaNs have all-one exponents so they sign extend to -1.
  __ cmp(r4, Operand(-1));
  __ b(ne, &neither_is_nan);
  __ mov(r4,
         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
         SetCC);
  __ b(ne, &one_is_nan);
  __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
  __ b(eq, &neither_is_nan);

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in r0 to make the comparison fail.
  if (cond == lt || cond == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ Ret();

  __ bind(&neither_is_nan);
}


// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
                                          Condition cond) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;

  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
  if (cond == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
    // Return non-zero if the numbers are unequal.
    __ Ret(ne);

    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
    // If exponents are equal then return 0.
    __ Ret(eq);

    // Exponents are unequal.  The only way we can return that the numbers
    // are equal is if one is -0 and the other is 0.  We already dealt
    // with the case where both are -0 or both are 0.
    // We start by seeing if the mantissas (that are equal) or the bottom
    // 31 bits of the lhs exponent are non-zero.  If so we return not
    // equal.
    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
    __ mov(r0, Operand(r4), LeaveCC, ne);
    __ Ret(ne);
    // Now they are equal if and only if the rhs exponent is zero in its
    // low 31 bits.
    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
    __ Ret();
  } else {
    // Call a native function to do a comparison between two non-NaNs.
    // Call C routine that may not cause GC or other trouble.
    __ push(lr);
    __ PrepareCallCFunction(0, 2, r5);
    if (masm->use_eabi_hardfloat()) {
      CpuFeatures::Scope scope(VFP3);
      __ vmov(d0, r0, r1);
      __ vmov(d1, r2, r3);
    }

    AllowExternalCallThatCantCauseGC scope(masm);
    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
                     0, 2);
    __ pop(pc);  // Return.
  }
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
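  // ANDing the two type bytes leaves kIsSymbolMask set only when both
  // operands are symbols.  Two distinct symbols are never equal, so in that
  // case we can return not-equal without comparing characters.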
  __ and_(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(ne, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.

  // Both are heap numbers.  Load them up then jump to the code we have
  // for that.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
    __ sub(r7, lhs, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
  } else {
    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  }
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
                                         Register lhs,
                                         Register rhs,
                                         Label* possible_strings,
                                         Label* not_both_strings) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  // Ensure that no non-strings have the symbol bit set.
  Label object_test;
  STATIC_ASSERT(kSymbolTag != 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(eq, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsSymbolMask));
  __ b(eq, possible_strings);

  // Both are symbols.  We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(NOT_EQUAL));
  __ Ret();

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ Ret();
}


void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
  __ sub(mask, mask, Operand(1));  // Make mask.
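  // For example, a cache with 64 entries is a FixedArray of length 128, so
  // the mask becomes 128 / 2 - 1 = 63 and hashes are reduced modulo 64.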

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
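  // For example, the double 1.0 has the bit pattern 0x3ff0000000000000, so
  // its hash is 0x3ff00000 ^ 0x00000000 = 0x3ff00000 before masking.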
  Isolate* isolate = masm->isolate();
  Label is_smi;
  Label load_result_from_cache;
  if (!object_is_smi) {
    __ JumpIfSmi(object, &is_smi);
    if (CpuFeatures::IsSupported(VFP3)) {
      CpuFeatures::Scope scope(VFP3);
      __ CheckMap(object,
                  scratch1,
                  Heap::kHeapNumberMapRootIndex,
                  not_found,
                  DONT_DO_SMI_CHECK);

      STATIC_ASSERT(8 == kDoubleSize);
      __ add(scratch1,
             object,
             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
      __ eor(scratch1, scratch1, Operand(scratch2));
      __ and_(scratch1, scratch1, Operand(mask));

      // Calculate address of entry in string cache: each entry consists
      // of two pointer sized fields.
      __ add(scratch1,
             number_string_cache,
             Operand(scratch1, LSL, kPointerSizeLog2 + 1));

      Register probe = mask;
      __ ldr(probe,
             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
      __ JumpIfSmi(probe, not_found);
      __ sub(scratch2, object, Operand(kHeapObjectTag));
      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
      __ sub(probe, probe, Operand(kHeapObjectTag));
      __ vldr(d1, probe, HeapNumber::kValueOffset);
      __ VFPCompareAndSetFlags(d0, d1);
      __ b(ne, not_found);  // The cache did not contain this value.
      __ b(&load_result_from_cache);
    } else {
      __ b(not_found);
    }
  }

  __ bind(&is_smi);
  Register scratch = scratch1;
  __ and_(scratch, mask, Operand(object, ASR, 1));
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  __ add(scratch,
         number_string_cache,
         Operand(scratch, LSL, kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  Register probe = mask;
  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  __ cmp(object, probe);
  __ b(ne, not_found);

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ ldr(result,
         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  __ IncrementCounter(isolate->counters()->number_to_string_native(),
                      1,
                      scratch1,
                      scratch2);
}


void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ ldr(r1, MemOperand(sp, 0));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
  __ add(sp, sp, Operand(1 * kPointerSize));
  __ Ret();

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}


// On entry lhs_ and rhs_ are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void CompareStub::Generate(MacroAssembler* masm) {
  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
         (lhs_.is(r1) && rhs_.is(r0)));

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  if (include_smi_compare_) {
    Label not_two_smis;
    __ orr(r2, r1, r0);
    __ JumpIfNotSmi(r2, &not_two_smis);
    __ mov(r1, Operand(r1, ASR, 1));
    __ sub(r0, r1, Operand(r0, ASR, 1));
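    // The subtraction above cannot overflow because the untagged values fit
    // in 31 bits; e.g. comparing 3 and 7 computes 3 - 7 = -4, a negative
    // result signalling LESS.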
    __ Ret();
    __ bind(&not_two_smis);
  } else if (FLAG_debug_code) {
    __ orr(r2, r1, r0);
    __ tst(r2, Operand(kSmiTagMask));
    __ Assert(ne, "CompareStub: unexpected smi operands.");
  }

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical.  Either returns the answer
  // or goes to slow.  Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ and_(r2, lhs_, Operand(rhs_));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison.  If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
  EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  Isolate* isolate = masm->isolate();
  if (CpuFeatures::IsSupported(VFP3)) {
    __ bind(&lhs_not_nan);
    CpuFeatures::Scope scope(VFP3);
    // ARMv7 VFP3 instructions to implement double precision comparison.
    __ VFPCompareAndSetFlags(d7, d6);
    Label nan;
    __ b(vs, &nan);
    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
    __ mov(r0, Operand(LESS), LeaveCC, lt);
    __ mov(r0, Operand(GREATER), LeaveCC, gt);
    __ Ret();

    __ bind(&nan);
    // If one of the sides was a NaN then the v flag is set.  Load r0 with
    // whatever it takes to make the comparison fail, since comparisons with NaN
    // always fail.
    if (cc_ == lt || cc_ == le) {
      __ mov(r0, Operand(GREATER));
    } else {
      __ mov(r0, Operand(LESS));
    }
    __ Ret();
  } else {
    // Checks for NaN in the doubles we have loaded.  Can return the answer or
    // fall through if neither is a NaN.  Also binds lhs_not_nan.
    EmitNanCheck(masm, &lhs_not_nan, cc_);
    // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
    // answer.  Never falls through.
    EmitTwoNonNanDoubleComparison(masm, cc_);
  }

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
  if (strict_) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
  }

  Label check_for_symbols;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison.  Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case.  If the inputs are not doubles then jumps to check_for_symbols.
  // In this case r2 will contain the type of rhs_.  Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs_,
                             rhs_,
                             &both_loaded_as_doubles,
                             &check_for_symbols,
                             &flat_string_check);

  __ bind(&check_for_symbols);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // symbols.
  if (cc_ == eq && !strict_) {
    // Returns an answer for two symbols or two detectable objects.
    // Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of rhs_ on entry.
    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
  if (cc_ == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     lhs_,
                                                     rhs_,
                                                     r2,
                                                     r3,
                                                     r4);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       lhs_,
                                                       rhs_,
                                                       r2,
                                                       r3,
                                                       r4,
                                                       r5);
  }
  // Never falls through to here.

  __ bind(&slow);

  __ Push(lhs_, rhs_);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
  if (cc_ == eq) {
    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result
    if (cc_ == lt || cc_ == le) {
      ncr = GREATER;
    } else {
      ASSERT(cc_ == gt || cc_ == ge);  // remaining cases
      ncr = LESS;
    }
    __ mov(r0, Operand(Smi::FromInt(ncr)));
    __ push(r0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);
}


// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // This stub uses VFP3 instructions.
  CpuFeatures::Scope scope(VFP3);

  Label patch;
  const Register map = r9.is(tos_) ? r7 : r9;

  // undefined -> false.
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);

  // Boolean -> its value.
  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);

  // 'null' -> false.
  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);

  if (types_.Contains(SMI)) {
    // Smis: 0 -> false, all other -> true
    __ tst(tos_, Operand(kSmiTagMask));
    // tos_ contains the correct return value already
    __ Ret(eq);
  } else if (types_.NeedsMap()) {
    // If we need a map later and have a Smi -> patch.
    __ JumpIfSmi(tos_, &patch);
  }

  if (types_.NeedsMap()) {
    __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));

    if (types_.CanBeUndetectable()) {
      __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
      __ tst(ip, Operand(1 << Map::kIsUndetectable));
      // Undetectable -> false.
      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
      __ Ret(ne);
    }
  }

  if (types_.Contains(SPEC_OBJECT)) {
    // Spec object -> true.
    __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
    // tos_ contains the correct non-zero return value already.
    __ Ret(ge);
  }

  if (types_.Contains(STRING)) {
    // String value -> false iff empty.
    __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
    __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
    __ Ret(lt);  // the string length is OK as the return value
  }

  if (types_.Contains(HEAP_NUMBER)) {
    // Heap number -> false iff +0, -0, or NaN.
    Label not_heap_number;
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    __ b(ne, &not_heap_number);
    __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
    __ VFPCompareAndSetFlags(d1, 0.0);
    // "tos_" is a register, and contains a non-zero value by default.
    // Hence we only need to overwrite "tos_" with zero to return false for
    // FP_ZERO or FP_NAN cases.  Otherwise, by default, it returns true.
    __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
    __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
    __ Ret();
    __ bind(&not_heap_number);
  }

  __ bind(&patch);
  GenerateTypeTransition(masm);
}


void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                 Type type,
                                 Heap::RootListIndex value,
                                 bool result) {
  if (types_.Contains(type)) {
    // If we see an expected oddball, return its ToBoolean value tos_.
    __ LoadRoot(ip, value);
    __ cmp(tos_, ip);
    // The value of a root is never NULL, so we can avoid loading a non-null
    // value into tos_ when we want to return 'true'.
    if (!result) {
      __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
    }
    __ Ret(eq);
  }
}


void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
  if (!tos_.is(r3)) {
    __ mov(r3, Operand(tos_));
  }
  __ mov(r2, Operand(Smi::FromInt(tos_.code())));
  __ mov(r1, Operand(Smi::FromInt(types_.ToByte())));
  __ Push(r3, r2, r1);
  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
      3,
      1);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ stm(db_w, sp, kCallerSaved | lr.bit());
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(VFP3);
    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
      DwVfpRegister reg = DwVfpRegister::from_code(i);
      __ vstr(reg, MemOperand(sp, i * kDoubleSize));
    }
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;
  const Register scratch = r1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r0, Operand(ExternalReference::isolate_address()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(VFP3);
    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
      DwVfpRegister reg = DwVfpRegister::from_code(i);
      __ vldr(reg, MemOperand(sp, i * kDoubleSize));
    }
    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
  }
  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
}


void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }
  stream->Add("UnaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              UnaryOpIC::GetName(operand_type_));
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case UnaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
      break;
  }
}


void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ mov(r3, Operand(r0));  // the operand
  __ mov(r2, Operand(Smi::FromInt(op_)));
  __ mov(r1, Operand(Smi::FromInt(mode_)));
  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
  __ Push(r3, r2, r1, r0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateSmiStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateSmiStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* slow) {
  __ JumpIfNotSmi(r0, non_smi);

  // The result of negating zero or the smallest negative smi is not a smi.
  __ bic(ip, r0, Operand(0x80000000), SetCC);
  __ b(eq, slow);
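  // The bic above clears the sign bit, so eq holds exactly for r0 == 0
  // (whose negation, -0, is not a smi) and for r0 == 0x80000000, the tagged
  // form of -2^30, whose negation 2^30 is one past the largest smi.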

  // Return '0 - value'.
  __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
  __ Ret();
}


void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
                                        Label* non_smi) {
  __ JumpIfNotSmi(r0, non_smi);

  // Flip bits and revert inverted smi-tag.
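  // For example, smi 5 is encoded as 0b1010; mvn yields 0xfffffff5, and
  // clearing the (now inverted) tag bit gives 0xfffffff4, the smi encoding
  // of ~5 == -6.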
  __ mvn(r0, Operand(r0));
  __ bic(r0, r0, Operand(kSmiTagMask));
  __ Ret();
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateHeapNumberStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateHeapNumberStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
  // r0 is a heap number.  Get a new heap number in r1.
  if (mode_ == UNARY_OVERWRITE) {
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
  } else {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(r0);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ mov(r1, Operand(r0));
      __ pop(r0);
    }

    __ bind(&heapnumber_allocated);
    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
    __ mov(r0, Operand(r1));
  }
  __ Ret();
}


void UnaryOpStub::GenerateHeapNumberCodeBitNot(
    MacroAssembler* masm, Label* slow) {
  Label impossible;

  EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
  // Convert the heap number in r0 to an untagged integer in r1.
  __ ConvertToInt32(r0, r1, r2, r3, d0, slow);

  // Do the bitwise operation and check if the result fits in a smi.
  Label try_float;
  __ mvn(r1, Operand(r1));
  __ add(r2, r1, Operand(0x40000000), SetCC);
  __ b(mi, &try_float);
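  // The add above sets the sign flag exactly when r1 lies outside the smi
  // range [-2^30, 2^30 - 1]: in-range values map to [0, 0x7fffffff], while
  // everything else wraps into the negative half.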

  // Tag the result as a smi and we're done.
  __ mov(r0, Operand(r1, LSL, kSmiTagSize));
  __ Ret();

  // Try to store the result in a heap number.
  __ bind(&try_float);
  if (mode_ == UNARY_NO_OVERWRITE) {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    // Allocate a new heap number without zapping r0, which we need if it fails.
    __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(r0);  // Push the heap number, not the untagged int32.
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ mov(r2, r0);  // Move the new heap number into r2.
      // Get the heap number into r0, now that the new heap number is in r2.
      __ pop(r0);
    }

    // Convert the heap number in r0 to an untagged integer in r1.
    // This can't go slow-case because it's the same number we already
    // converted once before.
    __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
    __ mvn(r1, Operand(r1));

    __ bind(&heapnumber_allocated);
    __ mov(r0, r2);  // Move newly allocated heap number to r0.
  }

  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, r1);
    __ vcvt_f64_s32(d0, s0);
    __ sub(r2, r0, Operand(kHeapObjectTag));
    __ vstr(d0, r2, HeapNumber::kValueOffset);
    __ Ret();
  } else {
    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
    // have to set up a frame.
    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
  }

  __ bind(&impossible);
  if (FLAG_debug_code) {
    __ stop("Incorrect assumption in bit-not stub");
  }
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateGenericStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateGenericStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
  // Handle the slow case by jumping to the JavaScript builtin.
  __ push(r0);
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ Push(r1, r0);

  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
  __ mov(r1, Operand(Smi::FromInt(op_)));
  __ mov(r0, Operand(Smi::FromInt(operands_type_)));
  __ Push(r2, r1, r0);

  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
    MacroAssembler* masm) {
  UNIMPLEMENTED();
}


void BinaryOpStub::Generate(MacroAssembler* masm) {
  // Explicitly allow generation of nested stubs. It is safe here because
  // generation code does not use any raw pointers.
  AllowStubCallsScope allow_stub_calls(masm, true);

  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case BinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case BinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case BinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case BinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case BinaryOpIC::BOTH_STRING:
      GenerateBothStringStub(masm);
      break;
    case BinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case BinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }
  stream->Add("BinaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              BinaryOpIC::GetName(operands_type_));
}


void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;
  Register scratch2 = r9;

  ASSERT(right.is(r0));
  STATIC_ASSERT(kSmiTag == 0);

  Label not_smi_result;
  switch (op_) {
    case Token::ADD:
      __ add(right, left, Operand(right), SetCC);  // Add optimistically.
      __ Ret(vc);
      __ sub(right, right, Operand(left));  // Revert optimistic add.
      break;
    case Token::SUB:
      __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically.
      __ Ret(vc);
      __ sub(right, left, Operand(right));  // Revert optimistic subtract.
      break;
    case Token::MUL:
      // Remove tag from one of the operands. This way the multiplication result
      // will be a smi if it fits the smi range.
      __ SmiUntag(ip, right);
      // Do multiplication
      // scratch1 = lower 32 bits of ip * left.
      // scratch2 = higher 32 bits of ip * left.
      __ smull(scratch1, scratch2, left, ip);
      // Check for overflowing the smi range - no overflow if higher 33 bits of
      // the result are identical.
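      // That is, the product fits in a smi iff scratch2 equals the sign
      // extension of scratch1; e.g. 3 * 4 gives scratch1 = 24 (the smi
      // encoding of 12), scratch2 = 0, and 24 >> 31 == 0, so no overflow.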
      __ mov(ip, Operand(scratch1, ASR, 31));
      __ cmp(ip, Operand(scratch2));
      __ b(ne, &not_smi_result);
      // Go slow on zero result to handle -0.
      __ cmp(scratch1, Operand(0));
      __ mov(right, Operand(scratch1), LeaveCC, ne);
      __ Ret(ne);
      // We need -0 if we were multiplying a negative number by 0 to get 0.
      // We know one of them was zero.
      __ add(scratch2, right, Operand(left), SetCC);
      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
      __ Ret(pl);  // Return smi 0 if the non-zero one was positive.
      // We fall through here if we multiplied a negative number by 0, because
      // that would mean we should produce -0.
      break;
    case Token::DIV:
      // Check for power of two on the right hand side.
      __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
      // Check for positive and no remainder (scratch1 contains right - 1).
      __ orr(scratch2, scratch1, Operand(0x80000000u));
      __ tst(left, scratch2);
      __ b(ne, &not_smi_result);

      // Perform division by shifting.
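      // For example, smi 12 / smi 4: scratch1 holds raw(right) - 1 = 7,
      // whose leading-zero count is 29, so the tagged left (24) is shifted
      // right by 31 - 29 = 2, giving 6, the smi encoding of 3.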
      __ CountLeadingZeros(scratch1, scratch1, scratch2);
      __ rsb(scratch1, scratch1, Operand(31));
      __ mov(right, Operand(left, LSR, scratch1));
      __ Ret();
      break;
    case Token::MOD:
      // Check for two positive smis.
      __ orr(scratch1, left, Operand(right));
      __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
      __ b(ne, &not_smi_result);

      // Check for power of two on the right hand side.
      __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);

      // Perform modulus by masking.
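      // For example, smi 13 % smi 4: scratch1 holds raw(right) - 1 = 7, and
      // the tagged left 26 & 7 = 2, the smi encoding of 1.  The result stays
      // a valid smi because the tag bit of the tagged left is already zero.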
      __ and_(right, left, Operand(scratch1));
      __ Ret();
      break;
    case Token::BIT_OR:
      __ orr(right, left, Operand(right));
      __ Ret();
      break;
    case Token::BIT_AND:
      __ and_(right, left, Operand(right));
      __ Ret();
      break;
    case Token::BIT_XOR:
      __ eor(right, left, Operand(right));
      __ Ret();
      break;
    case Token::SAR:
      // Remove tags from right operand.
      __ GetLeastBitsFromSmi(scratch1, right, 5);
      __ mov(right, Operand(left, ASR, scratch1));
      // Smi tag result.
      __ bic(right, right, Operand(kSmiTagMask));
      __ Ret();
      break;
    case Token::SHR:
      // Remove tags from operands. We can't do this on a 31 bit number
      // because then the 0s get shifted into bit 30 instead of bit 31.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ mov(scratch1, Operand(scratch1, LSR, scratch2));
      // Unsigned shift is not allowed to produce a negative number, so
      // check the sign bit and the sign bit after Smi tagging.
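      // Testing 0xc0000000 covers both: bit 31 set means the unsigned result
      // is already "negative", and bit 30 set would become the sign bit once
      // SmiTag shifts the value left by one.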
      __ tst(scratch1, Operand(0xc0000000));
      __ b(ne, &not_smi_result);
      // Smi tag result.
      __ SmiTag(right, scratch1);
      __ Ret();
      break;
    case Token::SHL:
      // Remove tags from operands.
      __ SmiUntag(scratch1, left);
      __ GetLeastBitsFromSmi(scratch2, right, 5);
      __ mov(scratch1, Operand(scratch1, LSL, scratch2));
      // Check that the signed result fits in a Smi.
      __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
      __ b(mi, &not_smi_result);
      __ SmiTag(right, scratch1);
      __ Ret();
      break;
    default:
      UNREACHABLE();
  }
  __ bind(&not_smi_result);
}


void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
                                       bool smi_operands,
                                       Label* not_numbers,
                                       Label* gc_required) {
  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;
  Register scratch2 = r9;
  Register scratch3 = r4;

  ASSERT(smi_operands || (not_numbers != NULL));
  if (smi_operands && FLAG_debug_code) {
    __ AbortIfNotSmi(left);
    __ AbortIfNotSmi(right);
  }

  Register heap_number_map = r6;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
      // depending on whether VFP3 is available or not.
      FloatingPointHelper::Destination destination =
          CpuFeatures::IsSupported(VFP3) &&
          op_ != Token::MOD ?
          FloatingPointHelper::kVFPRegisters :
          FloatingPointHelper::kCoreRegisters;

      // Allocate new heap number for result.
      Register result = r5;
      GenerateHeapResultAllocation(
          masm, result, heap_number_map, scratch1, scratch2, gc_required);

      // Load the operands.
      if (smi_operands) {
        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
      } else {
        FloatingPointHelper::LoadOperands(masm,
                                          destination,
                                          heap_number_map,
                                          scratch1,
                                          scratch2,
                                          not_numbers);
      }

      // Calculate the result.
      if (destination == FloatingPointHelper::kVFPRegisters) {
        // Using VFP registers:
        // d6: Left value
        // d7: Right value
        CpuFeatures::Scope scope(VFP3);
        switch (op_) {
          case Token::ADD:
            __ vadd(d5, d6, d7);
            break;
          case Token::SUB:
            __ vsub(d5, d6, d7);
            break;
          case Token::MUL:
            __ vmul(d5, d6, d7);
            break;
          case Token::DIV:
            __ vdiv(d5, d6, d7);
            break;
          default:
            UNREACHABLE();
        }

        __ sub(r0, result, Operand(kHeapObjectTag));
        __ vstr(d5, r0, HeapNumber::kValueOffset);
        __ add(r0, r0, Operand(kHeapObjectTag));
        __ Ret();
      } else {
        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
                                                         op_,
                                                         result,
                                                         scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }
      }
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      if (smi_operands) {
        __ SmiUntag(r3, left);
        __ SmiUntag(r2, right);
      } else {
        // Convert operands to 32-bit integers. Right in r2 and left in r3.
        FloatingPointHelper::ConvertNumberToInt32(masm,
                                                  left,
                                                  r3,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  scratch3,
                                                  d0,
                                                  not_numbers);
        FloatingPointHelper::ConvertNumberToInt32(masm,
                                                  right,
                                                  r2,
                                                  heap_number_map,
                                                  scratch1,
                                                  scratch2,
                                                  scratch3,
                                                  d0,
                                                  not_numbers);
      }

      Label result_not_a_smi;
      switch (op_) {
        case Token::BIT_OR:
          __ orr(r2, r3, Operand(r2));
          break;
        case Token::BIT_XOR:
          __ eor(r2, r3, Operand(r2));
          break;
        case Token::BIT_AND:
          __ and_(r2, r3, Operand(r2));
          break;
        case Token::SAR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(r2, r2, 5);
          __ mov(r2, Operand(r3, ASR, r2));
          break;
        case Token::SHR:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(r2, r2, 5);
          __ mov(r2, Operand(r3, LSR, r2), SetCC);
          // SHR is special because it is required to produce a positive answer.
          // The code below for writing into heap numbers isn't capable of
          // writing the register as an unsigned int so we go to slow case if we
          // hit this case.
          if (CpuFeatures::IsSupported(VFP3)) {
            __ b(mi, &result_not_a_smi);
          } else {
            __ b(mi, not_numbers);
          }
          break;
        case Token::SHL:
          // Use only the 5 least significant bits of the shift count.
          __ GetLeastBitsFromInt32(r2, r2, 5);
          __ mov(r2, Operand(r3, LSL, r2));
          break;
        default:
          UNREACHABLE();
      }

      // Check that the *signed* result fits in a smi.
      __ add(r3, r2, Operand(0x40000000), SetCC);
      __ b(mi, &result_not_a_smi);
      __ SmiTag(r0, r2);
      __ Ret();

      // Allocate new heap number for result.
      __ bind(&result_not_a_smi);
      Register result = r5;
      if (smi_operands) {
        __ AllocateHeapNumber(
            result, scratch1, scratch2, heap_number_map, gc_required);
      } else {
        GenerateHeapResultAllocation(
            masm, result, heap_number_map, scratch1, scratch2, gc_required);
      }

      // r2: Answer as signed int32.
      // r5: Heap number to write answer into.

      // Nothing can go wrong now, so move the heap number to r0, which is the
      // result.
      __ mov(r0, Operand(r5));

      if (CpuFeatures::IsSupported(VFP3)) {
        // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
        // mentioned above SHR needs to always produce a positive result.
        CpuFeatures::Scope scope(VFP3);
        __ vmov(s0, r2);
        if (op_ == Token::SHR) {
          __ vcvt_f64_u32(d0, s0);
        } else {
          __ vcvt_f64_s32(d0, s0);
        }
        __ sub(r3, r0, Operand(kHeapObjectTag));
        __ vstr(d0, r3, HeapNumber::kValueOffset);
        __ Ret();
      } else {
        // Tail call that writes the int32 in r2 to the heap number in r0, using
        // r3 as scratch. r0 is preserved and returned.
        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
        __ TailCallStub(&stub);
      }
      break;
    }
    default:
      UNREACHABLE();
  }
}


// Generate the smi code. If the operation on smis is successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
// heap number cannot be allocated the code jumps to the label gc_required.
void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* use_runtime,
    Label* gc_required,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  Label not_smis;

  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;

  // Perform combined smi check on both operands.
  __ orr(scratch1, left, Operand(right));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(scratch1, &not_smis);

  // If the smi-smi operation results in a smi, a return is generated.
  GenerateSmiSmiOperation(masm);

  // If heap number results are possible generate the result in an allocated
  // heap number.
  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
    GenerateFPOperation(masm, true, use_runtime, gc_required);
  }
  __ bind(&not_smis);
}


void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label call_runtime;

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only allow smi results.
    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    GenerateSmiCode(masm,
                    &call_runtime,
                    &call_runtime,
                    ALLOW_HEAPNUMBER_RESULTS);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  GenerateTypeTransition(masm);

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // Try to add arguments as strings, otherwise, transition to the generic
  // BinaryOpIC type.
  GenerateAddStrings(masm);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime);
  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &call_runtime);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime);
  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &call_runtime);

  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::INT32);

  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;
  Register scratch2 = r9;
  DwVfpRegister double_scratch = d0;
  SwVfpRegister single_scratch = s3;

  Register heap_number_result = no_reg;
  Register heap_number_map = r6;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  Label call_runtime;
  // Labels for type transition, used for wrong input or output types.
  // Both labels are currently bound to the same position. We use two
  // different labels to differentiate the cause leading to a type transition.
  Label transition;

  // Smi-smi fast case.
  Label skip;
  __ orr(scratch1, left, right);
  __ JumpIfNotSmi(scratch1, &skip);
  GenerateSmiSmiOperation(masm);
  // Fall through if the result is not a smi.
  __ bind(&skip);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Load both operands and check that they are 32-bit integers.
      // Jump to type transition if they are not. The registers r0 and r1
      // (right and left) are preserved for the runtime call.
2761      FloatingPointHelper::Destination destination =
2762          (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
2763              ? FloatingPointHelper::kVFPRegisters
2764              : FloatingPointHelper::kCoreRegisters;
2765
2766      FloatingPointHelper::LoadNumberAsInt32Double(masm,
2767                                                   right,
2768                                                   destination,
2769                                                   d7,
2770                                                   r2,
2771                                                   r3,
2772                                                   heap_number_map,
2773                                                   scratch1,
2774                                                   scratch2,
2775                                                   s0,
2776                                                   &transition);
2777      FloatingPointHelper::LoadNumberAsInt32Double(masm,
2778                                                   left,
2779                                                   destination,
2780                                                   d6,
2781                                                   r4,
2782                                                   r5,
2783                                                   heap_number_map,
2784                                                   scratch1,
2785                                                   scratch2,
2786                                                   s0,
2787                                                   &transition);
2788
2789      if (destination == FloatingPointHelper::kVFPRegisters) {
2790        CpuFeatures::Scope scope(VFP3);
2791        Label return_heap_number;
2792        switch (op_) {
2793          case Token::ADD:
2794            __ vadd(d5, d6, d7);
2795            break;
2796          case Token::SUB:
2797            __ vsub(d5, d6, d7);
2798            break;
2799          case Token::MUL:
2800            __ vmul(d5, d6, d7);
2801            break;
2802          case Token::DIV:
2803            __ vdiv(d5, d6, d7);
2804            break;
2805          default:
2806            UNREACHABLE();
2807        }
2808
2809        if (op_ != Token::DIV) {
2810          // These operations produce an integer result.
2811          // Try to return a smi if we can.
2812          // Otherwise return a heap number if allowed, or jump to type
2813          // transition.
2814
2815          __ EmitVFPTruncate(kRoundToZero,
2816                             single_scratch,
2817                             d5,
2818                             scratch1,
2819                             scratch2);
2820
2821          if (result_type_ <= BinaryOpIC::INT32) {
2822            // If the ne condition is set, result does
2823            // not fit in a 32-bit integer.
2824            __ b(ne, &transition);
2825          }
2826
2827          // Check if the result fits in a smi.
2828          __ vmov(scratch1, single_scratch);
2829          __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
2830          // If not try to return a heap number.
2831          __ b(mi, &return_heap_number);
2832          // Check for minus zero. Return heap number for minus zero.
2833          Label not_zero;
2834          __ cmp(scratch1, Operand::Zero());
2835          __ b(ne, &not_zero);
2836          __ vmov(scratch2, d5.high());
2837          __ tst(scratch2, Operand(HeapNumber::kSignMask));
2838          __ b(ne, &return_heap_number);
2839          __ bind(&not_zero);
2840
2841          // Tag the result and return.
2842          __ SmiTag(r0, scratch1);
2843          __ Ret();
2844        } else {
2845          // DIV just falls through to allocating a heap number.
2846        }
2847
2848        __ bind(&return_heap_number);
2849        // Return a heap number, or fall through to type transition or runtime
2850        // call if we can't.
2851        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2852                                                 : BinaryOpIC::INT32)) {
2853          // We are using vfp registers so r5 is available.
          heap_number_result = r5;
          GenerateHeapResultAllocation(masm,
                                       heap_number_result,
                                       heap_number_map,
                                       scratch1,
                                       scratch2,
                                       &call_runtime);
          __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
          __ vstr(d5, r0, HeapNumber::kValueOffset);
          __ mov(r0, heap_number_result);
          __ Ret();
        }

        // A DIV operation expecting an integer result falls through
        // to type transition.

      } else {
        // We preserved r0 and r1 to be able to call runtime.
        // Save the left value on the stack.
        __ Push(r5, r4);

        Label pop_and_call_runtime;

        // Allocate a heap number to store the result.
        heap_number_result = r5;
        GenerateHeapResultAllocation(masm,
                                     heap_number_result,
                                     heap_number_map,
                                     scratch1,
                                     scratch2,
                                     &pop_and_call_runtime);

        // Load the left value from the value saved on the stack.
        __ Pop(r1, r0);

        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(
            masm, op_, heap_number_result, scratch1);
        if (FLAG_debug_code) {
          __ stop("Unreachable code.");
        }

        __ bind(&pop_and_call_runtime);
        __ Drop(2);
        __ b(&call_runtime);
      }

      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      Label return_heap_number;
      Register scratch3 = r5;
      // Convert operands to 32-bit integers. Right in r2 and left in r3. The
      // registers r0 and r1 (right and left) are preserved for the runtime
      // call.
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             left,
                                             r3,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             d0,
                                             &transition);
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             right,
                                             r2,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             d0,
                                             &transition);

      // The ECMA-262 standard specifies that, for shift operations, only the
      // 5 least significant bits of the shift value should be used.
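      // For example, in JS (1 << 33) === (1 << 1) === 2: only the low five
      // bits of the shift count (33 & 0x1f == 1) are used, which is what the
      // and_ with 0x1f below implements.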
      switch (op_) {
        case Token::BIT_OR:
          __ orr(r2, r3, Operand(r2));
          break;
        case Token::BIT_XOR:
          __ eor(r2, r3, Operand(r2));
          break;
        case Token::BIT_AND:
          __ and_(r2, r3, Operand(r2));
          break;
        case Token::SAR:
          __ and_(r2, r2, Operand(0x1f));
          __ mov(r2, Operand(r3, ASR, r2));
          break;
        case Token::SHR:
          __ and_(r2, r2, Operand(0x1f));
          __ mov(r2, Operand(r3, LSR, r2), SetCC);
          // SHR is special because it is required to produce a positive
          // answer. We only get a negative result if the shift value (r2) is
          // 0. Such a result cannot be represented as a signed 32-bit
          // integer, so try to return a heap number if we can.
          // The non-VFP3 code does not support this special case, so jump to
          // runtime if we don't support it.
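          // Example: 0x80000000 >>> 0 evaluates to 2147483648, which exceeds
          // kMaxInt and therefore needs a heap number result.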
          if (CpuFeatures::IsSupported(VFP3)) {
            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
                      ? &transition
                      : &return_heap_number);
          } else {
            __ b(mi, (result_type_ <= BinaryOpIC::INT32)
                      ? &transition
                      : &call_runtime);
          }
          break;
        case Token::SHL:
          __ and_(r2, r2, Operand(0x1f));
          __ mov(r2, Operand(r3, LSL, r2));
          break;
        default:
          UNREACHABLE();
      }

      // Check if the result fits in a smi.
      __ add(scratch1, r2, Operand(0x40000000), SetCC);
      // If not, try to return a heap number. (We know the result is an int32.)
      __ b(mi, &return_heap_number);
      // Tag the result and return.
      __ SmiTag(r0, r2);
      __ Ret();

      __ bind(&return_heap_number);
      heap_number_result = r5;
      GenerateHeapResultAllocation(masm,
                                   heap_number_result,
                                   heap_number_map,
                                   scratch1,
                                   scratch2,
                                   &call_runtime);

      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        if (op_ != Token::SHR) {
          // Convert the result to a floating point value.
          __ vmov(double_scratch.low(), r2);
          __ vcvt_f64_s32(double_scratch, double_scratch.low());
        } else {
          // The result must be interpreted as an unsigned 32-bit integer.
          __ vmov(double_scratch.low(), r2);
          __ vcvt_f64_u32(double_scratch, double_scratch.low());
        }

        // Store the result.
        __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
        __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
        __ mov(r0, heap_number_result);
        __ Ret();
      } else {
        // Tail call that writes the int32 in r2 to the heap number in r0,
        // using r3 as scratch. r0 is preserved and returned.
        __ mov(r0, r5);
        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
        __ TailCallStub(&stub);
      }

      break;
    }

    default:
      UNREACHABLE();
  }

  // We never expect DIV to yield an integer result, so we always generate
  // type transition code for DIV operations expecting an integer result: the
  // code will fall through to this type transition.
  if (transition.is_linked() ||
      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
    __ bind(&transition);
    GenerateTypeTransition(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  Label call_runtime;

  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  // Convert oddball arguments to numbers.
  Label check, done;
  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
  __ b(ne, &check);
  if (Token::IsBitOp(op_)) {
    __ mov(r1, Operand(Smi::FromInt(0)));
  } else {
    __ LoadRoot(r1, Heap::kNanValueRootIndex);
  }
  __ jmp(&done);
  __ bind(&check);
  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
  __ b(ne, &done);
  if (Token::IsBitOp(op_)) {
    __ mov(r0, Operand(Smi::FromInt(0)));
  } else {
    __ LoadRoot(r0, Heap::kNanValueRootIndex);
  }
  __ bind(&done);
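  // Rationale: ToNumber(undefined) is NaN, but the bitwise operators apply
  // ToInt32, and ToInt32(NaN) is 0 -- e.g. (undefined | 0) === 0 while
  // undefined + 1 is NaN.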

  GenerateHeapNumberStub(masm);
}


void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label call_runtime;
  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime;

  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    GenerateAddStrings(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);
  Label left_not_string, call_runtime;

  Register left = r1;
  Register right = r0;

  // Check if left argument is a string.
  __ JumpIfSmi(left, &left_not_string);
  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &left_not_string);

  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_left_stub);

  // Left operand is not a string, test right.
  __ bind(&left_not_string);
  __ JumpIfSmi(right, &call_runtime);
  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
  __ b(ge, &call_runtime);

  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_right_stub);

  // At least one argument is not a string.
  __ bind(&call_runtime);
}


void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
  GenerateRegisterArgsPush(masm);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
                                                Register result,
                                                Register heap_number_map,
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
  // The code below clobbers result if allocation fails. To keep both
  // arguments intact for the runtime call, result cannot be r0 or r1.
  ASSERT(!result.is(r0) && !result.is(r1));

  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
    Label skip_allocation, allocated;
    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
    // If the overwritable operand is already an object, we skip the
    // allocation of a heap number.
    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
    // Allocate a heap number for the result.
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
    __ b(&allocated);
    __ bind(&skip_allocation);
    // Use object holding the overwritable operand for result.
    __ mov(result, Operand(overwritable_operand));
    __ bind(&allocated);
  } else {
    ASSERT(mode_ == NO_OVERWRITE);
    __ AllocateHeapNumber(
        result, scratch1, scratch2, heap_number_map, gc_required);
  }
}


void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ Push(r1, r0);
}


void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // Untagged case: double input in d2, double result goes
  //   into d2.
  // Tagged case: tagged input on top of stack and in r0,
  //   tagged result (heap number) goes into r0.

  Label input_not_smi;
  Label loaded;
  Label calculate;
  Label invalid_cache;
  const Register scratch0 = r9;
  const Register scratch1 = r7;
  const Register cache_entry = r0;
  const bool tagged = (argument_type_ == TAGGED);

  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    if (tagged) {
      // Argument is a number and is on the stack and in r0.
      // Load argument and check if it is a smi.
      __ JumpIfNotSmi(r0, &input_not_smi);

      // Input is a smi. Convert to double and load the low and high words
      // of the double into r2, r3.
      __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
      __ b(&loaded);

      __ bind(&input_not_smi);
      // Check if input is a HeapNumber.
      __ CheckMap(r0,
                  r1,
                  Heap::kHeapNumberMapRootIndex,
                  &calculate,
                  DONT_DO_SMI_CHECK);
      // Input is a HeapNumber. Load it to a double register and store the
      // low and high words into r2, r3.
      __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
      __ vmov(r2, r3, d0);
    } else {
      // Input is untagged double in d2. Output goes to d2.
      __ vmov(r2, r3, d2);
    }
    __ bind(&loaded);
    // r2 = low 32 bits of double value
    // r3 = high 32 bits of double value
    // Compute hash (the shifts are arithmetic):
    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
    __ eor(r1, r2, Operand(r3));
    __ eor(r1, r1, Operand(r1, ASR, 16));
    __ eor(r1, r1, Operand(r1, ASR, 8));
    ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
    __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
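    // A hedged C++ sketch of the same hash, assuming lo/hi are the two
    // 32-bit halves of the double:
    //   int32_t h = lo ^ hi;
    //   h ^= h >> 16;
    //   h ^= h >> 8;
    //   h &= TranscendentalCache::SubCache::kCacheSize - 1;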

    // r2 = low 32 bits of double value.
    // r3 = high 32 bits of double value.
    // r1 = TranscendentalCache::hash(double value).
    Isolate* isolate = masm->isolate();
    ExternalReference cache_array =
        ExternalReference::transcendental_cache_array_address(isolate);
    __ mov(cache_entry, Operand(cache_array));
    // cache_entry points to cache array.
    int cache_array_index
        = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
    __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
    // r0 points to the cache for the type type_.
    // If NULL, the cache hasn't been initialized yet, so go through runtime.
    __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
    __ b(eq, &invalid_cache);

#ifdef DEBUG
    // Check that the layout of cache elements matches expectations.
    { TranscendentalCache::SubCache::Element test_elem[2];
      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
      CHECK_EQ(0, elem_in0 - elem_start);
      CHECK_EQ(kIntSize, elem_in1 - elem_start);
      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
    }
#endif

    // Find the address of the r1'th entry in the cache, i.e., &r0[r1*12].
    __ add(r1, r1, Operand(r1, LSL, 1));
    __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
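    // r1*12 is computed as (r1 + 2*r1) * 4: the first add yields 3*r1 and
    // the LSL #2 above scales that by 4.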
    // Check if cache matches: Double value is stored in uint32_t[2] array.
    __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
    __ cmp(r2, r4);
    __ cmp(r3, r5, eq);
    __ b(ne, &calculate);
    // Cache hit. Load result, cleanup and return.
    Counters* counters = masm->isolate()->counters();
    __ IncrementCounter(
        counters->transcendental_cache_hit(), 1, scratch0, scratch1);
    if (tagged) {
      // Pop input value from stack and load result into r0.
      __ pop();
      __ mov(r0, Operand(r6));
    } else {
      // Load result into d2.
      __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
    }
    __ Ret();
  }  // if (CpuFeatures::IsSupported(VFP3))

  __ bind(&calculate);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(
      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
  if (tagged) {
    __ bind(&invalid_cache);
    ExternalReference runtime_function =
        ExternalReference(RuntimeFunction(), masm->isolate());
    __ TailCallExternalReference(runtime_function, 1, 1);
  } else {
    if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
    CpuFeatures::Scope scope(VFP3);

    Label no_update;
    Label skip_cache;

    // Call C function to calculate the result and update the cache.
    // Register r0 holds precalculated cache entry address; preserve
    // it on the stack and pop it into register cache_entry after the
    // call.
    __ push(cache_entry);
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(d2);

    // Try to update the cache. If we cannot allocate a
    // heap number, we return the result without updating.
    __ pop(cache_entry);
    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
    __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
    __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
    __ Ret();

    __ bind(&invalid_cache);
    // The cache is invalid. Call runtime which will recreate the
    // cache.
    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
    __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(r0);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
    __ Ret();

    __ bind(&skip_cache);
    // Call C function to calculate the result and return the answer
    // directly, without updating the cache.
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(d2);
    __ bind(&no_update);

    // We return the value in d2 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);

      // Allocate an aligned object larger than a HeapNumber.
      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
      __ mov(scratch0, Operand(4 * kPointerSize));
      __ push(scratch0);
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }
}


void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
                                                    Register scratch) {
  Isolate* isolate = masm->isolate();

  __ push(lr);
  __ PrepareCallCFunction(0, 1, scratch);
  if (masm->use_eabi_hardfloat()) {
    __ vmov(d0, d2);
  } else {
    __ vmov(r0, r1, d2);
  }
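  // With the hard-float variant of the EABI the double argument is passed
  // in d0; with the soft-float calling convention its two 32-bit halves go
  // in r0 and r1 instead.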
  AllowExternalCallThatCantCauseGC scope(masm);
  switch (type_) {
    case TranscendentalCache::SIN:
      __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
          0, 1);
      break;
    case TranscendentalCache::COS:
      __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
          0, 1);
      break;
    case TranscendentalCache::TAN:
      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
          0, 1);
      break;
    case TranscendentalCache::LOG:
      __ CallCFunction(ExternalReference::math_log_double_function(isolate),
          0, 1);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  __ pop(lr);
}


Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
  switch (type_) {
    // Add more cases when necessary.
    case TranscendentalCache::SIN: return Runtime::kMath_sin;
    case TranscendentalCache::COS: return Runtime::kMath_cos;
    case TranscendentalCache::TAN: return Runtime::kMath_tan;
    case TranscendentalCache::LOG: return Runtime::kMath_log;
    default:
      UNIMPLEMENTED();
      return Runtime::kAbort;
  }
}


void StackCheckStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}


void InterruptStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
}


void MathPowStub::Generate(MacroAssembler* masm) {
  CpuFeatures::Scope vfp3_scope(VFP3);
  const Register base = r1;
  const Register exponent = r2;
  const Register heapnumbermap = r5;
  const Register heapnumber = r0;
  const DoubleRegister double_base = d1;
  const DoubleRegister double_exponent = d2;
  const DoubleRegister double_result = d3;
  const DoubleRegister double_scratch = d0;
  const SwVfpRegister single_scratch = s0;
  const Register scratch = r9;
  const Register scratch2 = r7;

  Label call_runtime, done, int_exponent;
  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ ldr(base, MemOperand(sp, 1 * kPointerSize));
    __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);

    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ vmov(single_scratch, scratch);
    __ vcvt_f64_s32(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);
    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ vcvt_u32_f64(single_scratch, double_exponent);
    // We do not check for NaN or Infinity here because comparing numbers on
    // ARM correctly distinguishes NaNs.  We end up calling the built-in.
    __ vcvt_f64_u32(double_scratch, single_scratch);
    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
    __ b(eq, &int_exponent_convert);

    if (exponent_type_ == ON_STACK) {
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead.  We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ vmov(double_scratch, 0.5);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &not_plus_half);

      // Calculate the square root of base.  Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vneg(double_result, double_scratch, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vsqrt(double_result, double_scratch);
      __ jmp(&done);
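      // Note: IEEE-754 sqrt(-0) is -0, but Math.pow(-0, 0.5) must be +0,
      // hence the vadd with +0 above to normalize the sign first.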

      __ bind(&not_plus_half);
      __ vmov(double_scratch, -0.5);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &call_runtime);

      // Calculate 1/sqrt(base).  Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vmov(double_result, kDoubleRegZero, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vmov(double_result, 1);
      __ vsqrt(double_scratch, double_scratch);
      __ vdiv(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ SetCallCDoubleArguments(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
    }
    __ pop(lr);
    __ GetCFunctionDoubleResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
    __ vcvt_u32_f64(single_scratch, double_exponent);
    __ vmov(scratch, single_scratch);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type_ == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }
  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, 1.0);

  // Get absolute value of exponent.
  __ cmp(scratch, Operand(0));
  __ mov(scratch2, Operand(0), LeaveCC, mi);
  __ sub(scratch, scratch2, scratch, LeaveCC, mi);

  Label while_true;
  __ bind(&while_true);
  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);
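  // The loop above is binary exponentiation (square-and-multiply); a hedged
  // C++ sketch of the same computation:
  //   double result = 1.0, b = base;
  //   for (uint32_t e = abs_exponent; e != 0; e >>= 1) {
  //     if (e & 1) result *= b;
  //     b *= b;
  //   }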

  __ cmp(exponent, Operand(0));
  __ b(ge, &done);
  __ vmov(double_scratch, 1.0);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi.  We set it with the exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as a heap number in r0.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ vstr(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    ASSERT(heapnumber.is(r0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret(2);
  } else {
    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ SetCallCDoubleArguments(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
    }
    __ pop(lr);
    __ GetCFunctionDoubleResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


bool CEntryStub::IsPregenerated() {
  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
          result_size_ == 1;
}


void CodeStub::GenerateStubsAheadOfTime() {
  CEntryStub::GenerateAheadOfTime();
  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
}


void CodeStub::GenerateFPStubs() {
  CEntryStub save_doubles(1, kSaveFPRegs);
  Handle<Code> code = save_doubles.GetCode();
  code->set_is_pregenerated(true);
  StoreBufferOverflowStub stub(kSaveFPRegs);
  stub.GetCode()->set_is_pregenerated(true);
  code->GetIsolate()->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime() {
  CEntryStub stub(1, kDontSaveFPRegs);
  Handle<Code> code = stub.GetCode();
  code->set_is_pregenerated(true);
}


void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_termination_exception,
                              Label* throw_out_of_memory_exception,
                              bool do_gc,
                              bool always_allocate) {
  // r0: result parameter for PerformGC, if any
  // r4: number of arguments including receiver  (C callee-saved)
  // r5: pointer to builtin function  (C callee-saved)
  // r6: pointer to the first argument (C callee-saved)
  Isolate* isolate = masm->isolate();

  if (do_gc) {
    // Passing r0.
    __ PrepareCallCFunction(1, 0, r1);
    __ CallCFunction(ExternalReference::perform_gc_function(isolate),
        1, 0);
  }

  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth(isolate);
  if (always_allocate) {
    __ mov(r0, Operand(scope_depth));
    __ ldr(r1, MemOperand(r0));
    __ add(r1, r1, Operand(1));
    __ str(r1, MemOperand(r0));
  }

  // Call C built-in.
  // r0 = argc, r1 = argv
  __ mov(r0, Operand(r4));
  __ mov(r1, Operand(r6));

#if defined(V8_HOST_ARCH_ARM)
  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (FLAG_debug_code) {
    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      ASSERT(IsPowerOf2(frame_alignment));
      __ tst(sp, Operand(frame_alignment_mask));
      __ b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      __ stop("Unexpected alignment");
      __ bind(&alignment_as_expected);
    }
  }
#endif
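  // On ARM EABI the activation frame alignment is typically 8 bytes, so the
  // debug check above asserts (sp & 7) == 0 before calling out to C.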

  __ mov(r2, Operand(ExternalReference::isolate_address()));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  // Compute the return address in lr to return to after the jump below. Pc is
  // already at '+ 8' from the current instruction but return is after three
  // instructions so add another 4 to pc to get the return address.
  masm->add(lr, pc, Operand(4));
  __ str(lr, MemOperand(sp, 0));
  masm->Jump(r5);
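  // Reading pc yields the address of the add itself plus 8, so lr ends up
  // pointing at the instruction after the Jump, which is the GC-visible
  // return address stored on the stack above.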

  if (always_allocate) {
    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
    // though (they contain the result).
    __ mov(r2, Operand(scope_depth));
    __ ldr(r3, MemOperand(r2));
    __ sub(r3, r3, Operand(1));
    __ str(r3, MemOperand(r2));
  }

  // Check for a failure result.
  Label failure_returned;
  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
  __ add(r2, r0, Operand(1));
  __ tst(r2, Operand(kFailureTagMask));
  __ b(eq, &failure_returned);
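  // Sketch of the tag test: failure objects carry kFailureTag in their low
  // kFailureTagSize bits, and the STATIC_ASSERT above guarantees that adding
  // 1 clears exactly those bits, so (r0 + 1) & kFailureTagMask == 0 holds
  // precisely for failures.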

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  //  Callee-saved register r4 still holds argc.
  __ LeaveExitFrame(save_doubles_, r4);
  __ mov(pc, lr);

  // Check if we should retry or throw an exception.
  Label retry;
  __ bind(&failure_returned);
  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ b(eq, &retry);

  // Special handling of out of memory exceptions.
  Failure* out_of_memory = Failure::OutOfMemoryException();
  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
  __ b(eq, throw_out_of_memory_exception);

  // Retrieve the pending exception and clear the variable.
  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ ldr(r0, MemOperand(ip));
  __ str(r3, MemOperand(ip));

  // Special handling of termination exceptions which are uncatchable
  // by JavaScript code.
  __ cmp(r0, Operand(isolate->factory()->termination_exception()));
  __ b(eq, throw_termination_exception);

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // r0: number of arguments including receiver
  // r1: pointer to builtin function
  // fp: frame pointer  (restored after C call)
  // sp: stack pointer  (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)

  // Result returned in r0 or r0+r1 by default.

  // NOTE: Invocations of builtins may return failure objects
  // instead of a proper result. The builtin entry handles
  // this by performing a garbage collection and retrying the
  // builtin once.

  // Compute the argv pointer in a callee-saved register.
  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
  __ sub(r6, r6, Operand(kPointerSize));

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles_);

  // Set up argc and the builtin function in callee-saved registers.
  __ mov(r4, Operand(r0));
  __ mov(r5, Operand(r1));

  // r4: number of arguments (C callee-saved)
  // r5: pointer to builtin function (C callee-saved)
  // r6: pointer to first argument (C callee-saved)

  Label throw_normal_exception;
  Label throw_termination_exception;
  Label throw_out_of_memory_exception;

  // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               false,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               true);

  __ bind(&throw_out_of_memory_exception);
  // Set external caught exception to false.
  Isolate* isolate = masm->isolate();
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
                                    isolate);
  __ mov(r0, Operand(false, RelocInfo::NONE));
  __ mov(r2, Operand(external_caught));
  __ str(r0, MemOperand(r2));

  // Set pending exception and r0 to out of memory exception.
  Failure* out_of_memory = Failure::OutOfMemoryException();
  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ str(r0, MemOperand(r2));
  // Fall through to the next label.

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(r0);

  __ bind(&throw_normal_exception);
  __ Throw(r0);
}


void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // [sp+0]: argv

  Label invoke, handler_entry, exit;

  // Called from C, so do not pop argc and args on exit (preserve sp)
  // No need to save register-passed args
  // Save callee-saved registers (incl. cp and fp), sp, and lr
  __ stm(db_w, sp, kCalleeSaved | lr.bit());

  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Save callee-saved vfp registers.
    __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
    // Set up the reserved register for 0.0.
    __ vmov(kDoubleRegZero, 0.0);
  }

  // Get address of argv, see stm above.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc

  // Set up argv in r4.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  if (CpuFeatures::IsSupported(VFP3)) {
    offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
  }
  __ ldr(r4, MemOperand(sp, offset_to_argv));

  // Push a frame with special values setup to mark it as an entry frame.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  Isolate* isolate = masm->isolate();
  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  __ mov(r7, Operand(Smi::FromInt(marker)));
  __ mov(r6, Operand(Smi::FromInt(marker)));
  __ mov(r5,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
  __ ldr(r5, MemOperand(r5));
  __ Push(r8, r7, r6, r5);

  // Set up frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ ldr(r6, MemOperand(r5));
  __ cmp(r6, Operand::Zero());
  __ b(ne, &non_outermost_js);
  __ str(fp, MemOperand(r5));
  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(ip);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.  Coming in here the
  // fp will be invalid because the PushTryHandler below sets it to 0 to
  // signal the existence of the JSEntry frame.
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ str(r0, MemOperand(ip));
  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
  __ b(&exit);

  // Invoke: Link this frame into the handler chain.  There's only one
  // handler block in this code object, so its index is 0.
  __ bind(&invoke);
  // Must preserve r0-r4, r5-r7 are available.
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the jmp(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ str(r5, MemOperand(ip));

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  if (is_construct) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
    __ mov(ip, Operand(entry));
  }
  __ ldr(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
  // macro for the add instruction because we don't want the coverage tool
  // inserting instructions here after we read the pc.
  __ mov(lr, Operand(pc));
  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);  // r0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r5);
  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ b(ne, &non_outermost_js_2);
  __ mov(r6, Operand::Zero());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ str(r6, MemOperand(r5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ mov(ip,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
  __ str(r3, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved registers and return.
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif

  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Restore callee-saved vfp registers.
    __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
  }

  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}


// Uses registers r0 to r4.
// Expected input (depending on whether args are in registers or on the stack):
// * object: r0 or at sp + 1 * kPointerSize.
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
// In this case the offset to the inline site to patch is passed on the stack,
// in the safepoint slot for register r4.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Call site inlining and patching implies arguments in registers.
  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
  // ReturnTrueFalse is only implemented for inlined call sites.
  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());

  // Fixed register usage throughout the stub:
  const Register object = r0;  // Object (lhs).
  Register map = r3;  // Map of the object.
  const Register function = r1;  // Function (rhs).
  const Register prototype = r4;  // Prototype of the function.
  const Register inline_site = r9;
  const Register scratch = r2;

  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;

  Label slow, loop, is_instance, is_not_instance, not_js_object;

  if (!HasArgsInRegisters()) {
    __ ldr(object, MemOperand(sp, 1 * kPointerSize));
    __ ldr(function, MemOperand(sp, 0));
  }

  // Check that the left hand is a JS object and load map.
  __ JumpIfSmi(object, &not_js_object);
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

  // If there is a call site cache, don't look in the global cache, but do
  // the real lookup and update the call site cache.
  if (!HasCallSiteInlineCheck()) {
    Label miss;
    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
    __ b(ne, &miss);
    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
    __ b(ne, &miss);
    __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
    __ Ret(HasArgsInRegisters() ? 0 : 2);

    __ bind(&miss);
  }

  // Get the prototype of the function.
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);

  // Check that the function prototype is a JS object.
  __ JumpIfSmi(prototype, &slow);
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
  if (!HasCallSiteInlineCheck()) {
    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
  } else {
    ASSERT(HasArgsInRegisters());
    // Patch the (relocated) inlined map check.

    // The offset was stored in r4 safepoint slot.
    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
    __ LoadFromSafepointRegisterSlot(scratch, r4);
    __ sub(inline_site, lr, scratch);
    // Get the map location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ ldr(scratch, MemOperand(scratch));
    __ str(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
  }

  // Register mapping: r3 is object map and r4 is function prototype.
  // Get prototype of object into r2.
  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));

  // We don't need map any more. Use it as a scratch register.
  Register scratch2 = map;
  map = no_reg;

  // Loop through the prototype chain looking for the function prototype.
  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
  __ bind(&loop);
  __ cmp(scratch, Operand(prototype));
  __ b(eq, &is_instance);
  __ cmp(scratch, scratch2);
  __ b(eq, &is_not_instance);
  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
  __ jmp(&loop);
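  // The loop above is, in effect, the spec's prototype-chain walk:
  //   for (p = object.[[Prototype]]; p !== null; p = p.[[Prototype]]) {
  //     if (p === function.prototype) return true;
  //   }
  //   return false;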

  __ bind(&is_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(r0, Operand(Smi::FromInt(0)));
    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Patch the call site to return true.
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ str(r0, MemOperand(scratch));

    if (!ReturnTrueFalseObject()) {
      __ mov(r0, Operand(Smi::FromInt(0)));
    }
  }
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&is_not_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(r0, Operand(Smi::FromInt(1)));
    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Patch the call site to return false.
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ str(r0, MemOperand(scratch));

    if (!ReturnTrueFalseObject()) {
      __ mov(r0, Operand(Smi::FromInt(1)));
    }
  }
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  Label object_not_null, object_not_null_or_smi;
  __ bind(&not_js_object);
  // Before null, smi and string value checks, check that the rhs is a function
  // as for a non-function rhs an exception needs to be thrown.
  __ JumpIfSmi(function, &slow);
  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  // Null is not an instance of anything.
  __ cmp(object, Operand(masm->isolate()->factory()->null_value()));
  __ b(ne, &object_not_null);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null);
  // Smi values are not instances of anything.
  __ JumpIfNotSmi(object, &object_not_null_or_smi);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null_or_smi);
  // String values are not instances of anything.
  __ IsObjectJSStringType(object, scratch, &slow);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  // Slow-case.  Tail call builtin.
  __ bind(&slow);
  if (!ReturnTrueFalseObject()) {
    if (HasArgsInRegisters()) {
      __ Push(r0, r1);
    }
    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
  } else {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(r0, r1);
      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
    }
    __ cmp(r0, Operand::Zero());
    __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
    __ Ret(HasArgsInRegisters() ? 0 : 2);
  }
}


Register InstanceofStub::left() { return r0; }


Register InstanceofStub::right() { return r1; }


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The displacement is the offset of the last parameter (if any)
  // relative to the frame pointer.
  const int kDisplacement =
      StandardFrameConstants::kCallerSPOffset - kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(r1, &slow);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register r0. Use unsigned comparison to get negative
  // check for free.
  __ cmp(r1, r0);
  __ b(hs, &slow);
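  // The unsigned hs comparison folds the negative-index check into the
  // bounds check: a negative smi key compares as a very large unsigned
  // value and so also takes the slow path.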

  // Read the argument from the stack and return it.
  __ sub(r3, r0, r1);
  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);
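  // r0 and r1 are smis (value << kSmiTagSize), so the shift by
  // kPointerSizeLog2 - kSmiTagSize above scales (argc - index) by
  // kPointerSize without untagging first.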

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(r1, r0);
  __ b(cs, &slow);

  // Read the argument from the adaptor frame and return it.
  __ sub(r3, r0, r1);
  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ push(r1);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}


void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
  // sp[0] : number of parameters
  // sp[4] : receiver displacement
  // sp[8] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &runtime);

  // Patch the arguments.length and the parameters pointer in the current
  // frame.
  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ str(r2, MemOperand(sp, 0 * kPointerSize));
  __ add(r3, r3, Operand(r2, LSL, 1));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}


void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  // Stack layout:
  //  sp[0] : number of parameters (tagged)
  //  sp[4] : address of receiver argument
  //  sp[8] : function
  // Registers used over whole function:
  //  r6 : allocated object (tagged)
  //  r9 : mapped parameter count (tagged)

  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
  // r1 = parameter count (tagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  Label adaptor_frame, try_allocate;
  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ mov(r2, r1);
  __ b(&try_allocate);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ add(r3, r3, Operand(r2, LSL, 1));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  // r1 = parameter count (tagged)
  // r2 = argument count (tagged)
  // Compute the mapped parameter count = min(r1, r2) in r1.
  __ cmp(r1, Operand(r2));
  __ mov(r1, Operand(r2), LeaveCC, gt);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
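  // A parameter map is a FixedArray whose first two element words hold the
  // context and the backing store; the remaining words hold, per mapped
  // parameter, either a context slot index (as a smi) or the hole.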
  // If there are no mapped parameters, we do not need the parameter_map.
  __ cmp(r1, Operand(Smi::FromInt(0)));
  __ mov(r9, Operand::Zero(), LeaveCC, eq);
  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
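  // The three predicated instructions above avoid a branch: r9 becomes zero
  // when there are no mapped parameters, and otherwise the map size in bytes.
  // Since r1 is a smi (the count shifted left by one), "r1, LSL, 1" yields
  // count * kPointerSize directly.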

  // 2. Backing store.
  __ add(r9, r9, Operand(r2, LSL, 1));
  __ add(r9, r9, Operand(FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));

  // Do the allocation of all three objects in one go.
  __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);

  // r0 = address of new object(s) (tagged)
  // r2 = argument count (tagged)
  // Get the arguments boilerplate from the current (global) context into r4.
  const int kNormalOffset =
      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
  const int kAliasedOffset =
      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);

  __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
  __ cmp(r1, Operand::Zero());
  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);

  // r0 = address of new object (tagged)
  // r1 = mapped parameter count (tagged)
  // r2 = argument count (tagged)
  // r4 = address of boilerplate object (tagged)
  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ ldr(r3, FieldMemOperand(r4, i));
    __ str(r3, FieldMemOperand(r0, i));
  }
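  // The boilerplate is a pre-built arguments object, so the word-by-word copy
  // above initializes the map, properties and elements fields; the elements
  // pointer and the in-object properties are fixed up below.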

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
  const int kCalleeOffset = JSObject::kHeaderSize +
      Heap::kArgumentsCalleeIndex * kPointerSize;
  __ str(r3, FieldMemOperand(r0, kCalleeOffset));

  // Use the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  const int kLengthOffset = JSObject::kHeaderSize +
      Heap::kArgumentsLengthIndex * kPointerSize;
  __ str(r2, FieldMemOperand(r0, kLengthOffset));

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, r4 will point there, otherwise
  // it will point to the backing store.
  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));

  // r0 = address of new object (tagged)
  // r1 = mapped parameter count (tagged)
  // r2 = argument count (tagged)
  // r4 = address of parameter map or backing store (tagged)
  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ cmp(r1, Operand(Smi::FromInt(0)));
  // Move backing store address to r3, because it is
  // expected there when filling in the unmapped arguments.
  __ mov(r3, r4, LeaveCC, eq);
  __ b(eq, &skip_parameter_map);

  __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
  __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
  __ add(r6, r1, Operand(Smi::FromInt(2)));
  __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
  __ add(r6, r4, Operand(r1, LSL, 1));
  __ add(r6, r6, Operand(kParameterMapHeaderSize));
  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;
  __ mov(r6, r1);
  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ sub(r9, r9, Operand(r1));
  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
  __ add(r3, r4, Operand(r6, LSL, 1));
  __ add(r3, r3, Operand(kParameterMapHeaderSize));

  // r6 = loop variable (tagged)
  // r9 = mapping index (tagged)
  // r3 = address of backing store (tagged)
  // r4 = address of parameter map (tagged)
  // r5 = temporary scratch (among others, for address calculation)
  // r7 = the hole value
  __ jmp(&parameters_test);

  __ bind(&parameters_loop);
  __ sub(r6, r6, Operand(Smi::FromInt(1)));
  __ mov(r5, Operand(r6, LSL, 1));
  __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
  __ str(r9, MemOperand(r4, r5));
  __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
  __ str(r7, MemOperand(r3, r5));
  __ add(r9, r9, Operand(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ cmp(r6, Operand(Smi::FromInt(0)));
  __ b(ne, &parameters_loop);
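  // Each iteration above stores the current context slot index (a smi) into
  // the parameter map and the hole into the matching backing store slot; a
  // hole in the backing store later means "look the value up via the map".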

  __ bind(&skip_parameter_map);
  // r2 = argument count (tagged)
  // r3 = address of backing store (tagged)
  // r5 = scratch
  // Copy arguments header and remaining slots (if there are any).
  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
  __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
  __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));

  Label arguments_loop, arguments_test;
  __ mov(r9, r1);
  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
  __ sub(r4, r4, Operand(r9, LSL, 1));
  __ jmp(&arguments_test);

  __ bind(&arguments_loop);
  __ sub(r4, r4, Operand(kPointerSize));
  __ ldr(r6, MemOperand(r4, 0));
  __ add(r5, r3, Operand(r9, LSL, 1));
  __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
  __ add(r9, r9, Operand(Smi::FromInt(1)));

  __ bind(&arguments_test);
  __ cmp(r9, Operand(r2));
  __ b(lt, &arguments_loop);

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  // r2 = argument count (tagged)
  __ bind(&runtime);
  __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}


void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // sp[0] : number of parameters
  // sp[4] : receiver displacement
  // sp[8] : function
  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor_frame);

  // Get the length from the frame.
  __ ldr(r1, MemOperand(sp, 0));
  __ b(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ str(r1, MemOperand(sp, 0));
  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  // Try the new space allocation. Start out with computing the size
  // of the arguments object and the elements array in words.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ cmp(r1, Operand(0, RelocInfo::NONE));
  __ b(eq, &add_arguments_object);
  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
  __ bind(&add_arguments_object);
  __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
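  // r1 now holds the total size in words: an optional FixedArray (header plus
  // one word per argument, skipped entirely when there are no arguments)
  // followed by the strict-mode arguments object itself.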

  // Do the allocation of both objects in one go.
  __ AllocateInNewSpace(r1,
                        r0,
                        r2,
                        r3,
                        &runtime,
                        static_cast<AllocationFlags>(TAG_OBJECT |
                                                     SIZE_IN_WORDS));

  // Get the arguments boilerplate from the current (global) context.
  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
  __ ldr(r4, MemOperand(r4, Context::SlotOffset(
      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));

  // Copy the JS object part.
  __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
      Heap::kArgumentsLengthIndex * kPointerSize));

  // If there are no actual arguments, we're done.
  Label done;
  __ cmp(r1, Operand(0, RelocInfo::NONE));
  __ b(eq, &done);

  // Get the parameters pointer from the stack.
  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // Untag the length for the loop.
  __ mov(r1, Operand(r1, LSR, kSmiTagSize));

  // Copy the fixed array slots.
  Label loop;
  // Set up r4 to point to the first array slot.
  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ bind(&loop);
  // Pre-decrement r2 with kPointerSize on each iteration.
  // Pre-decrement in order to skip receiver.
  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
  // Post-increment r4 with kPointerSize on each iteration.
  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
  __ sub(r1, r1, Operand(1));
  __ cmp(r1, Operand(0, RelocInfo::NONE));
  __ b(ne, &loop);
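  // r2 starts at the receiver's stack slot (the receiver sits above the
  // arguments), so the pre-decrement makes the first load fetch the first
  // argument; r2 then walks down the stack while r4 walks up the FixedArray.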

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to runtime if native RegExp is not selected at compile
  // time or if the regexp entry in generated code has been turned off by a
  // run-time switch.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]: last_match_info (expected JSArray)
  //  sp[4]: previous index
  //  sp[8]: subject string
  //  sp[12]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Label runtime, invoke_regexp;

  // Allocation of registers for this function. These are in callee save
  // registers and will be preserved by the call to the native RegExp code, as
  // this code is called using the normal C calling convention. When calling
  // directly from generated code the native RegExp code will not do a GC and
  // therefore the content of these registers are safe to use after the call.
  Register subject = r4;
  Register regexp_data = r5;
  Register last_match_info_elements = r6;

  // Ensure that a RegExp stack is allocated.
  Isolate* isolate = masm->isolate();
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate);
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate);
  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r0, MemOperand(r0, 0));
  __ cmp(r0, Operand(0));
  __ b(eq, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(r0, &runtime);
  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
  __ b(ne, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ tst(regexp_data, Operand(kSmiTagMask));
    __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
    __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
  __ b(ne, &runtime);

  // regexp_data: RegExp data (FixedArray)
  // Check that the number of captures fits in the static offsets vector
  // buffer.
  __ ldr(r2,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2. This
  // uses the assumption that smis are 2 * their untagged value.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(r2, r2, Operand(2));  // r2 was a smi.
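  // Example: a regexp with three capture groups stores 3 as the smi 6 in r2;
  // adding 2 yields 8 = (3 + 1) * 2 registers, a start and an end offset for
  // the whole match plus each group.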
  // Check that the static offsets vector buffer is large enough.
  __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
  __ b(hi, &runtime);

  // r2: Number of capture registers
  // regexp_data: RegExp data (FixedArray)
  // Check that the second argument is a string.
  __ ldr(subject, MemOperand(sp, kSubjectOffset));
  __ JumpIfSmi(subject, &runtime);
  Condition is_string = masm->IsObjectStringType(subject, r0);
  __ b(NegateCondition(is_string), &runtime);
  // Get the length of the string to r3.
  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));

  // r2: Number of capture registers
  // r3: Length of subject string as a smi
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // Check that the third argument is a positive smi less than the subject
  // string length. A negative value will be greater (unsigned comparison).
  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
  __ JumpIfNotSmi(r0, &runtime);
  __ cmp(r3, Operand(r0));
  __ b(ls, &runtime);

  // r2: Number of capture registers
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // Check that the fourth object is a JSArray object.
  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
  __ JumpIfSmi(r0, &runtime);
  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
  __ b(ne, &runtime);
  // Check that the JSArray is in fast case.
  __ ldr(last_match_info_elements,
         FieldMemOperand(r0, JSArray::kElementsOffset));
  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information.
  __ ldr(r0,
         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
  __ b(gt, &runtime);

  // Reset offset for possibly sliced string.
  __ mov(r9, Operand(0));
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // Check the representation and encoding of the subject string.
  Label seq_string;
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  // First check for flat string.  None of the following string type tests will
  // succeed if subject is not a string or a short external string.
  __ and_(r1,
          r0,
          Operand(kIsNotStringMask |
                  kStringRepresentationMask |
                  kShortExternalStringMask),
          SetCC);
  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
  __ b(eq, &seq_string);

  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // r1: whether subject is a string and if yes, its string representation
  // Check for flat cons string or sliced string.
  // A flat cons string is a cons string where the second part is the empty
  // string. In that case the subject string is just the first part of the cons
  // string. Also in this case the first part of the cons string is known to be
  // a sequential string or an external string.
  // In the case of a sliced string its offset has to be taken into account.
  Label cons_string, external_string, check_encoding;
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmp(r1, Operand(kExternalStringTag));
  __ b(lt, &cons_string);
  __ b(eq, &external_string);
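  // The two branches above exploit the tag ordering asserted before them:
  // below kExternalStringTag means a cons string, equal means external, and
  // greater means a sliced string, a short external string, or not a string
  // at all; the tst below sends the latter two cases to the runtime.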

  // Catch non-string subject or short external string.
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
  __ b(ne, &runtime);

  // String is sliced.
  __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
  __ mov(r9, Operand(r9, ASR, kSmiTagSize));
  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
  // r9: offset of sliced string, smi-tagged.
  __ jmp(&check_encoding);
  // String is a cons string, check whether it is flat.
  __ bind(&cons_string);
  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
  __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
  __ b(ne, &runtime);
  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
  // Is first part of cons or parent of slice a flat string?
  __ bind(&check_encoding);
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r0, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  __ bind(&seq_string);
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // r0: Instance type of subject string
  STATIC_ASSERT(4 == kAsciiStringTag);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  // Find the code object based on the assumptions above.
  __ and_(r0, r0, Operand(kStringEncodingMask));
  __ mov(r3, Operand(r0, ASR, 2), SetCC);
  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);

  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r7, &runtime);

  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
  // r7: code
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // Load used arguments before starting to push arguments for call to native
  // RegExp code to avoid handling changing stack height.
  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
  __ mov(r1, Operand(r1, ASR, kSmiTagSize));

  // r1: previous index
  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
  // r7: code
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // All checks done. Now push arguments for native regexp code.
  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);

  // Isolates: note we add an additional parameter here (isolate pointer).
  const int kRegExpExecuteArguments = 8;
  const int kParameterRegisters = 4;
  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);

  // Stack pointer now points to cell where return address is to be written.
  // Arguments are before that on the stack or in registers.
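  // The first four C arguments travel in r0-r3, so the exit frame reserves
  // 8 - 4 = 4 stack slots; they are filled below at sp[4] through sp[16].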

  // Argument 8 (sp[16]): Pass current isolate address.
  __ mov(r0, Operand(ExternalReference::isolate_address()));
  __ str(r0, MemOperand(sp, 4 * kPointerSize));

  // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
  __ mov(r0, Operand(1));
  __ str(r0, MemOperand(sp, 3 * kPointerSize));

  // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
  __ ldr(r0, MemOperand(r0, 0));
  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r2, MemOperand(r2, 0));
  __ add(r0, r0, Operand(r2));
  __ str(r0, MemOperand(sp, 2 * kPointerSize));

  // Argument 5 (sp[4]): static offsets vector buffer.
  __ mov(r0,
         Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
  __ str(r0, MemOperand(sp, 1 * kPointerSize));

  // For arguments 4 and 3 get string length, calculate start of string data
  // and calculate the shift of the index (0 for ASCII and 1 for two-byte).
  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
  __ eor(r3, r3, Operand(1));
  // Load the length from the original subject string from the previous stack
  // frame. Therefore we have to use fp, which points exactly to two pointer
  // sizes below the previous sp. (Because creating a new stack frame pushes
  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
  // If slice offset is not 0, load the length from the original sliced string.
  // Argument 4, r3: End of string data
  // Argument 3, r2: Start of string data
  // Prepare start and end index of the input.
  __ add(r9, r8, Operand(r9, LSL, r3));
  __ add(r2, r9, Operand(r1, LSL, r3));

  __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
  __ mov(r8, Operand(r8, ASR, kSmiTagSize));
  __ add(r3, r9, Operand(r8, LSL, r3));
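  // After the eor above, r3 holds the character-size shift (0 for ASCII, 1
  // for two-byte), so "LSL, r3" scales character counts to byte offsets: r9
  // becomes the data start adjusted for a possible slice offset, r2 the byte
  // address of the previous index, and r3 the byte address past the end.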

  // Argument 2 (r1): Previous index.
  // Already there

  // Argument 1 (r0): Subject string.
  __ mov(r0, subject);

  // Locate the code entry and call it.
  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
  DirectCEntryStub stub;
  stub.GenerateCall(masm, r7);

  __ LeaveExitFrame(false, no_reg);

  // r0: result
  // subject: subject string (callee saved)
  // regexp_data: RegExp data (callee saved)
  // last_match_info_elements: Last match info elements (callee saved)

  // Check the result.
  Label success;

  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
  __ b(eq, &success);
  Label failure;
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
  __ b(eq, &failure);
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
  // If not exception it can only be retry. Handle that in the runtime system.
  __ b(ne, &runtime);
  // Result must now be exception. If there is no pending exception, a stack
  // overflow (on the backtrack stack) was detected in RegExp code, but the
  // exception has not been created yet. Handle that in the runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ ldr(r0, MemOperand(r2, 0));
  __ cmp(r0, r1);
  __ b(eq, &runtime);

  __ str(r1, MemOperand(r2, 0));  // Clear pending exception.

  // Check if the exception is a termination. If so, throw as uncatchable.
  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);

  Label termination_exception;
  __ b(eq, &termination_exception);

  __ Throw(r0);

  __ bind(&termination_exception);
  __ ThrowUncatchable(r0);

  __ bind(&failure);
  // For failure and exception return null.
  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
  __ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();

  // Process the result from the native regexp code.
  __ bind(&success);
  __ ldr(r1,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(r1, r1, Operand(2));  // r1 was a smi.

  // r1: number of capture registers
  // r4: subject string
  // Store the capture count.
  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
  __ str(r2, FieldMemOperand(last_match_info_elements,
                             RegExpImpl::kLastCaptureCountOffset));
  // Store last subject and last input.
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastSubjectOffset));
  __ mov(r2, subject);
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastSubjectOffset,
                      r2,
                      r7,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastInputOffset));
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastInputOffset,
                      subject,
                      r7,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(isolate);
  __ mov(r2, Operand(address_of_static_offsets_vector));

  // r1: number of capture registers
  // r2: offsets vector
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ add(r0,
         last_match_info_elements,
         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
  __ bind(&next_capture);
  __ sub(r1, r1, Operand(1), SetCC);
  __ b(mi, &done);
  // Read the value from the static offsets vector buffer.
  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
  // Store the smi value in the last match info.
  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
  __ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();

  // External string.  Short external strings have already been ruled out.
  // r0: scratch
  __ bind(&external_string);
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(r0, Operand(kIsIndirectStringMask));
    __ Assert(eq, "external string expected, but not found");
  }
  __ ldr(subject,
         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ sub(subject,
         subject,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&seq_string);

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif  // V8_INTERPRETED_REGEXP
}


void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  Factory* factory = masm->isolate()->factory();

  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  __ JumpIfNotSmi(r1, &slowcase);
  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
  __ b(hi, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2.
  // Allocate RegExpResult followed by FixedArray with size in r2.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  // Size of JSArray with two in-object properties and the header of a
  // FixedArray.
  int objects_size =
      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
  __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
  __ add(r2, r5, Operand(objects_size));
  __ AllocateInNewSpace(
      r2,  // In: Size, in words.
      r0,  // Out: Start of allocation (tagged).
      r3,  // Scratch register.
      r4,  // Scratch register.
      &slowcase,
      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
  // r0: Start of allocated area, object-tagged.
  // r1: Number of elements in array, as smi.
  // r5: Number of elements, untagged.

  // Set JSArray map to global.regexp_result_map().
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ add(r3, r0, Operand(JSRegExpResult::kSize));
  __ mov(r4, Operand(factory->empty_fixed_array()));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
  __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Set input, index and length fields from arguments.
  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
  __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
  __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));

  // Fill out the elements FixedArray.
  // r0: JSArray, tagged.
  // r3: FixedArray, tagged.
  // r5: Number of elements in array, untagged.

  // Set map.
  __ mov(r2, Operand(factory->fixed_array_map()));
  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
  // Set FixedArray length.
  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
  // Fill contents of fixed-array with the-hole.
  __ mov(r2, Operand(factory->the_hole_value()));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // Fill fixed array elements with hole.
  // r0: JSArray, tagged.
  // r2: the hole.
  // r3: Start of elements in FixedArray.
  // r5: Number of elements to fill.
  Label loop;
  __ cmp(r5, Operand(0));
  __ bind(&loop);
  __ b(le, &done);  // Jump if r5 is negative or zero.
  __ sub(r5, r5, Operand(1), SetCC);
  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
  __ jmp(&loop);
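  // Flag dance in the loop above: the first le test uses the cmp placed
  // before the loop, while later iterations reuse the flags from the sub, so
  // the store never executes with a negative index.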

  __ bind(&done);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a global property cell.  Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // r1 : the function to call
  // r2 : cache cell for call target
  Label done;

  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
            masm->isolate()->heap()->undefined_value());
  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
            masm->isolate()->heap()->the_hole_value());

  // Load the cache state into r3.
  __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  __ cmp(r3, r1);
  __ b(eq, &done);
  __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
  __ b(eq, &done);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
  __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
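  // The two predicated stores are mutually exclusive: the ne store above
  // flips a monomorphic miss to the megamorphic sentinel, while the eq store
  // below installs the function in a previously uninitialized (hole) cell.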

  // An uninitialized cache is patched with the function.
  __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
  // No need for a write barrier here - cells are rescanned.

  __ bind(&done);
}


void CallFunctionStub::Generate(MacroAssembler* masm) {
  // r1 : the function to call
  // r2 : cache cell for call target
  Label slow, non_function;

  // The receiver might implicitly be the global object. This is
  // indicated by passing the hole as the receiver to the call
  // function stub.
  if (ReceiverMightBeImplicit()) {
    Label call;
    // Get the receiver from the stack.
    // function, receiver [, arguments]
    __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
    // Call as function is indicated with the hole.
    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
    __ b(ne, &call);
    // Patch the receiver on the stack with the global receiver object.
    __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
    __ str(r2, MemOperand(sp, argc_ * kPointerSize));
    __ bind(&call);
  }

  // Check that the function is really a JavaScript function.
  // r1: pushed function (to be verified)
  __ JumpIfSmi(r1, &non_function);
  // Get the map of the function object.
  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  // Fast-case: Invoke the function now.
  // r1: pushed function
  ParameterCount actual(argc_);

  if (ReceiverMightBeImplicit()) {
    Label call_as_function;
    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
    __ b(eq, &call_as_function);
    __ InvokeFunction(r1,
                      actual,
                      JUMP_FUNCTION,
                      NullCallWrapper(),
                      CALL_AS_METHOD);
    __ bind(&call_as_function);
  }
  __ InvokeFunction(r1,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    CALL_AS_FUNCTION);

  // Slow-case: Non-function called.
  __ bind(&slow);
  // Check for function proxy.
  __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
  __ b(ne, &non_function);
  __ push(r1);  // Put proxy as additional argument.
  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
  __ mov(r2, Operand(0, RelocInfo::NONE));
  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
  __ SetCallKind(r5, CALL_AS_METHOD);
  {
    Handle<Code> adaptor =
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
    __ Jump(adaptor, RelocInfo::CODE_TARGET);
  }

  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
  // of the original receiver from the call site).
  __ bind(&non_function);
  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
  __ mov(r2, Operand(0, RelocInfo::NONE));
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
  __ SetCallKind(r5, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


void CallConstructStub::Generate(MacroAssembler* masm) {
  // r0 : number of arguments
  // r1 : the function to call
  // r2 : cache cell for call target
  Label slow, non_function_call;

  // Check that the function is not a smi.
  __ JumpIfSmi(r1, &non_function_call);
  // Check that the function is a JSFunction.
  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Jump to the function-specific construct stub.
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
  __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));

  // r0: number of arguments
  // r1: called object
  // r3: object type
  Label do_call;
  __ bind(&slow);
  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
  __ b(ne, &non_function_call);
  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);

  __ bind(&non_function_call);
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing r0).
  __ mov(r2, Operand(0, RelocInfo::NONE));
  __ SetCallKind(r5, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
void CompareStub::PrintName(StringStream* stream) {
  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
         (lhs_.is(r1) && rhs_.is(r0)));
  const char* cc_name;
  switch (cc_) {
    case lt: cc_name = "LT"; break;
    case gt: cc_name = "GT"; break;
    case le: cc_name = "LE"; break;
    case ge: cc_name = "GE"; break;
    case eq: cc_name = "EQ"; break;
    case ne: cc_name = "NE"; break;
    default: cc_name = "UnknownCondition"; break;
  }
  bool is_equality = cc_ == eq || cc_ == ne;
  stream->Add("CompareStub_%s", cc_name);
  stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
  stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
  if (strict_ && is_equality) stream->Add("_STRICT");
  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
  if (!include_number_compare_) stream->Add("_NO_NUMBER");
  if (!include_smi_compare_) stream->Add("_NO_SMI");
}


int CompareStub::MinorKey() {
  // Encode the parameters in a unique 16 bit value. To avoid duplicate stubs
  // the never NaN NaN condition is only taken into account if the condition
  // is equals.
  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
         (lhs_.is(r1) && rhs_.is(r0)));
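  // cc_ is an ARM condition, encoded in the top four bits of an instruction
  // word, so shifting right by 28 extracts a small integer for the bit field.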
  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
         | RegisterField::encode(lhs_.is(r0))
         | StrictField::encode(strict_)
         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
         | IncludeNumberCompareField::encode(include_number_compare_)
         | IncludeSmiCompareField::encode(include_smi_compare_);
}


// -------------------------------------------------------------------------
// StringCharCodeAtGenerator

void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  Label flat_string;
  Label ascii_string;
  Label got_char_code;
  Label sliced_string;

  // If the receiver is a smi trigger the non-string case.
  __ JumpIfSmi(object_, receiver_not_string_);

  // Fetch the instance type of the receiver into result register.
  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  // If the receiver is not a string trigger the non-string case.
  __ tst(result_, Operand(kIsNotStringMask));
  __ b(ne, receiver_not_string_);

  // If the index is non-smi trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);
  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
  __ cmp(ip, Operand(index_));
  __ b(ls, index_out_of_range_);

  __ mov(index_, Operand(index_, ASR, kSmiTagSize));

  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_,
                                    result_,
                                    &call_runtime_);

  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
  __ bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort("Unexpected fallthrough to CharCodeAt slow case");

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              result_,
              Heap::kHeapNumberMapRootIndex,
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ push(index_);  // Consumed by runtime conversion function.
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }
  // Save the conversion result before the pop instructions below
  // have a chance to overwrite it.
  __ Move(index_, r0);
  __ pop(object_);
  // Reload the instance type.
  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ jmp(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code of getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ mov(index_, Operand(index_, LSL, kSmiTagSize));
  __ Push(object_, index_);
  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
  __ Move(result_, r0);
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
}


// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
  __ tst(code_,
         Operand(kSmiTagMask |
                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
  __ b(ne, &slow_case_);
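  // The single tst above rejects both non-smis and out-of-range codes: a
  // valid ASCII char code smi only has bits in the seven positions above the
  // (zero) tag bit, so any bit matching the mask forces the slow case.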

  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
  // At this point code register contains smi tagged ASCII char code.
  STATIC_ASSERT(kSmiTag == 0);
  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
  __ b(eq, &slow_case_);
  __ bind(&exit_);
}


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort("Unexpected fallthrough to CharFromCode slow case");

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  __ Move(result_, r0);
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort("Unexpected fallthrough from CharFromCode slow case");
}


// -------------------------------------------------------------------------
// StringCharAtGenerator

void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
  char_code_at_generator_.GenerateFast(masm);
  char_from_code_generator_.GenerateFast(masm);
}


void StringCharAtGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  char_code_at_generator_.GenerateSlow(masm, call_helper);
  char_from_code_generator_.GenerateSlow(masm, call_helper);
}


void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          bool ascii) {
  Label loop;
  Label done;
  // This loop just copies one character at a time, as it is only used for very
  // short strings.
  if (!ascii) {
    __ add(count, count, Operand(count), SetCC);
  } else {
    __ cmp(count, Operand(0, RelocInfo::NONE));
  }
  __ b(eq, &done);
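  // For two-byte strings the character count was doubled above to a byte
  // count (count += count); for ASCII only the flags matter, so a compare
  // against zero suffices. Either way, eq here means nothing to copy.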

  __ bind(&loop);
  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
  // Perform sub between load and dependent store to get the load time to
  // complete.
  __ sub(count, count, Operand(1), SetCC);
  __ strb(scratch, MemOperand(dest, 1, PostIndex));
  // Loop while count > 0; the flags are those of the sub above, so the last
  // iteration falls through.
  __ b(gt, &loop);

  __ bind(&done);
}


enum CopyCharactersFlags {
  COPY_ASCII = 1,
  DEST_ALWAYS_ALIGNED = 2
};


void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
                                              Register dest,
                                              Register src,
                                              Register count,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4,
                                              Register scratch5,
                                              int flags) {
  bool ascii = (flags & COPY_ASCII) != 0;
  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;

  if (dest_always_aligned && FLAG_debug_code) {
    // Check that destination is actually word aligned if the flag says
    // that it is.
    __ tst(dest, Operand(kPointerAlignmentMask));
    __ Check(eq, "Destination of copy not aligned.");
  }

  const int kReadAlignment = 4;
  const int kReadAlignmentMask = kReadAlignment - 1;
  // Ensure that reading an entire aligned word containing the last character
  // of a string will not read outside the allocated area (because we pad up
  // to kObjectAlignment).
  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
  // Assumes word reads and writes are little endian.
  // Nothing to do for zero characters.
  Label done;
  if (!ascii) {
    __ add(count, count, Operand(count), SetCC);
  } else {
    __ cmp(count, Operand(0, RelocInfo::NONE));
  }
  __ b(eq, &done);

  // Assume that you cannot read (or write) unaligned.
  Label byte_loop;
  // Must copy at least eight bytes, otherwise just do it one byte at a time.
  __ cmp(count, Operand(8));
  __ add(count, dest, Operand(count));
  Register limit = count;  // Read until src equals this.
  __ b(lt, &byte_loop);

  if (!dest_always_aligned) {
    // Align dest by byte copying. Copies between zero and three bytes.
    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
    Label dest_aligned;
    __ b(eq, &dest_aligned);
    __ cmp(scratch4, Operand(2));
    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
    __ bind(&dest_aligned);
  }

  Label simple_loop;

  __ sub(scratch4, dest, Operand(src));
  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
  __ b(eq, &simple_loop);
  // Shift register is number of bits in a source word that
  // must be combined with bits in the next source word in order
  // to create a destination word.

  // Complex loop for src/dst that are not aligned the same way.
  {
    Label loop;
    __ mov(scratch4, Operand(scratch4, LSL, 3));
    Register left_shift = scratch4;
    __ and_(src, src, Operand(~3));  // Round down to load previous word.
    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
    // Store the "shift" most significant bits of scratch in the least
    // significant bits (i.e., shift down by (32-shift)).
    __ rsb(scratch2, left_shift, Operand(32));
    Register right_shift = scratch2;
    __ mov(scratch1, Operand(scratch1, LSR, right_shift));

    __ bind(&loop);
    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
    __ sub(scratch5, limit, Operand(dest));
    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
    __ str(scratch1, MemOperand(dest, 4, PostIndex));
    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
    // Loop if four or more bytes left to copy.
    // Compare to eight, because we did the subtract before increasing dst.
    __ sub(scratch5, scratch5, Operand(8), SetCC);
    __ b(ge, &loop);
  }
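  // Invariant of the loop above: scratch1 always holds the already-read low
  // bytes of the next destination word; each iteration completes that word
  // with bits from the freshly loaded source word, stores it, and re-seeds
  // scratch1 from the leftover high bits.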
  // There is now between zero and three bytes left to copy (the negative of
  // that number is in scratch5), and between one and three bytes already read
  // into scratch1 (eight times that number in scratch4). We may have read
  // past the end of the string, but because objects are aligned, we have not
  // read past the end of the object.
  // Find the minimum of remaining characters to move and preloaded characters
  // and write those as bytes.
  __ add(scratch5, scratch5, Operand(4), SetCC);
  __ b(eq, &done);
  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
  // Move minimum of bytes read and bytes left to copy to scratch4.
  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
  // Between one and three (value in scratch5) characters already read into
  // scratch ready to write.
  __ cmp(scratch5, Operand(2));
  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
  // Copy any remaining bytes.
  __ b(&byte_loop);

  // Simple loop.
  // Copy words from src to dst, until less than four bytes left.
  // Both src and dest are word aligned.
  __ bind(&simple_loop);
  {
    Label loop;
    __ bind(&loop);
    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
    __ sub(scratch3, limit, Operand(dest));
    __ str(scratch1, MemOperand(dest, 4, PostIndex));
    // Compare to 8, not 4, because we do the subtraction before increasing
    // dest.
    __ cmp(scratch3, Operand(8));
    __ b(ge, &loop);
  }

  // Copy bytes from src to dst until dst hits limit.
  __ bind(&byte_loop);
  __ cmp(dest, Operand(limit));
  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
  __ b(ge, &done);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
  __ b(&byte_loop);

  __ bind(&done);
}


void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
                                                        Register c1,
                                                        Register c2,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4,
                                                        Register scratch5,
                                                        Label* not_found) {
  // Register scratch3 is the general scratch register in this function.
  Register scratch = scratch3;

  // Make sure that both characters are not digits, as such strings have a
  // different hash algorithm. Don't try to look for these in the symbol table.
  Label not_array_index;
  __ sub(scratch, c1, Operand(static_cast<int>('0')));
  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
  __ b(hi, &not_array_index);
  __ sub(scratch, c2, Operand(static_cast<int>('0')));
  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));

  // If the check failed, combine both characters into a single halfword.
  // This is required by the contract of the method: code at the
  // not_found branch expects this combination in the c1 register.
  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
  __ b(ls, not_found);

  __ bind(&not_array_index);
  // Calculate the two character string hash.
  Register hash = scratch1;
  StringHelper::GenerateHashInit(masm, hash, c1);
  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
  StringHelper::GenerateHashGetHash(masm, hash);

  // Collect the two characters in a register.
  Register chars = c1;
  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));

  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:  hash of two character string.

  // Load the symbol table.
  // Load address of first element of the symbol table.
  Register symbol_table = c2;
  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);

  Register undefined = scratch4;
  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  // Calculate capacity mask from the symbol table capacity.
  Register mask = scratch2;
  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
  __ mov(mask, Operand(mask, ASR, 1));
  __ sub(mask, mask, Operand(1));
5701
5702  // Calculate untagged address of the first element of the symbol table.
5703  Register first_symbol_table_element = symbol_table;
5704  __ add(first_symbol_table_element, symbol_table,
5705         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5706
5707  // Registers
5708  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5709  // hash:  hash of two character string
5710  // mask:  capacity mask
5711  // first_symbol_table_element: address of the first element of
5712  //                             the symbol table
5713  // undefined: the undefined object
5714  // scratch: -
5715
5716  // Perform a number of probes in the symbol table.
5717  const int kProbes = 4;
5718  Label found_in_symbol_table;
5719  Label next_probe[kProbes];
5720  Register candidate = scratch5;  // Scratch register contains candidate.
5721  for (int i = 0; i < kProbes; i++) {
5722    // Calculate entry in symbol table.
5723    if (i > 0) {
5724      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5725    } else {
5726      __ mov(candidate, hash);
5727    }
5728
5729    __ and_(candidate, candidate, Operand(mask));
5730
5731    // Load the entry from the symble table.
5732    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5733    __ ldr(candidate,
5734           MemOperand(first_symbol_table_element,
5735                      candidate,
5736                      LSL,
5737                      kPointerSizeLog2));
5738
5739    // If entry is undefined no string with this hash can be found.
5740    Label is_string;
5741    __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
5742    __ b(ne, &is_string);
5743
5744    __ cmp(undefined, candidate);
5745    __ b(eq, not_found);
5746    // Must be the hole (deleted entry).
5747    if (FLAG_debug_code) {
5748      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
5749      __ cmp(ip, candidate);
5750      __ Assert(eq, "oddball in symbol table is not undefined or the hole");
5751    }
5752    __ jmp(&next_probe[i]);
5753
5754    __ bind(&is_string);
5755
5756    // Check that the candidate is a non-external ASCII string.  The instance
5757    // type is still in the scratch register from the CompareObjectType
5758    // operation.
5759    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5760
5761    // If length is not 2 the string is not a candidate.
5762    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5763    __ cmp(scratch, Operand(Smi::FromInt(2)));
5764    __ b(ne, &next_probe[i]);
5765
5766    // Check if the two characters match.
5767    // Assumes that word load is little endian.
5768    __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5769    __ cmp(chars, scratch);
5770    __ b(eq, &found_in_symbol_table);
5771    __ bind(&next_probe[i]);
5772  }
5773
5774  // No matching 2 character string found by probing.
5775  __ jmp(not_found);
5776
5777  // Scratch register contains result when we fall through to here.
5778  Register result = candidate;
5779  __ bind(&found_in_symbol_table);
5780  __ Move(r0, result);
5781}
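
// For reference, a sketch of the probe sequence above (illustration only;
// the exact offsets come from SymbolTable::GetProbeOffset):
//
//   for (int i = 0; i < kProbes; i++) {
//     entry = (hash + GetProbeOffset(i)) & mask;
//     candidate = symbol_table[elements_start + entry];
//     if (candidate == undefined) goto not_found;   // hash not present
//     if (candidate is a sequential ASCII string of length 2 &&
//         its first two bytes == chars) goto found;
//   }
//   goto not_found;  // give up after kProbes unsuccessful probes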


void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character) {
  // Load the string hash seed (a smi).
  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
  // hash = seed + character: untag the smi seed and add the character.
  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
  // hash += hash << 10;
  __ add(hash, hash, Operand(hash, LSL, 10));
  // hash ^= hash >> 6;
  __ eor(hash, hash, Operand(hash, LSR, 6));
}


void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character) {
  // hash += character;
  __ add(hash, hash, Operand(character));
  // hash += hash << 10;
  __ add(hash, hash, Operand(hash, LSL, 10));
  // hash ^= hash >> 6;
  __ eor(hash, hash, Operand(hash, LSR, 6));
}


void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash) {
  // hash += hash << 3;
  __ add(hash, hash, Operand(hash, LSL, 3));
  // hash ^= hash >> 11;
  __ eor(hash, hash, Operand(hash, LSR, 11));
  // hash += hash << 15;
  __ add(hash, hash, Operand(hash, LSL, 15));

  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);

  // if (hash == 0) hash = 27;
  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
}
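
// Taken together, the three helpers above compute a seeded Jenkins-style
// one-at-a-time hash (shown here for reference only):
//
//   uint32_t hash = seed;
//   for (each character c) {
//     hash += c;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;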


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  lr: return address
  //  sp[0]: to
  //  sp[4]: from
  //  sp[8]: string

  // This stub is called from the native-call %_SubString(...), so
  // nothing can be assumed about the arguments. It is tested that:
  //  "string" is a sequential string,
  //  both "from" and "to" are smis, and
  //  0 <= from <= to <= string.length.
  // If any of these assumptions fail, we call the runtime system.

  const int kToOffset = 0 * kPointerSize;
  const int kFromOffset = 1 * kPointerSize;
  const int kStringOffset = 2 * kPointerSize;

  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
  STATIC_ASSERT(kFromOffset == kToOffset + 4);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);

  // I.e., arithmetic shift right by one un-smi-tags.
  __ mov(r2, Operand(r2, ASR, 1), SetCC);
  __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
  // If either to or from had the smi tag bit set, then carry is set now.
  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
  // We want to bail out to the runtime here if "from" is negative.  In that
  // case, the next instruction is not executed and we fall through to bailing
  // out to the runtime.  pl is the opposite of mi.
  // Both r2 and r3 are untagged integers.
  __ sub(r2, r2, Operand(r3), SetCC, pl);
  __ b(mi, &runtime);  // Fail if from > to.

  // Make sure first argument is a string.
  __ ldr(r0, MemOperand(sp, kStringOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(r0, &runtime);
  Condition is_string = masm->IsObjectStringType(r0, r1);
  __ b(NegateCondition(is_string), &runtime);

  // Short-cut for the case of trivial substring.
  Label return_r0;
  // r0: original string
  // r2: result string length
  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
  __ cmp(r2, Operand(r4, ASR, 1));
  __ b(eq, &return_r0);

  Label result_longer_than_two;
  // Check for special case of two character ASCII string, in which case
  // we do a lookup in the symbol table first.
  __ cmp(r2, Operand(2));
  __ b(gt, &result_longer_than_two);
  __ b(lt, &runtime);

  __ JumpIfInstanceTypeIsNotSequentialAscii(r1, r1, &runtime);

  // Get the two characters forming the substring.
  __ add(r0, r0, Operand(r3));
  __ ldrb(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ ldrb(r4, FieldMemOperand(r0, SeqAsciiString::kHeaderSize + 1));

  // Try to look up the two character string in the symbol table.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
  __ jmp(&return_r0);

  // r2: result string length.
  // r3: two characters combined into halfword in little endian byte order.
  __ bind(&make_two_character_string);
  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ jmp(&return_r0);

  __ bind(&result_longer_than_two);
  // Deal with different string types: update the index if necessary
  // and put the underlying string into r5.
  // r0: original string
  // r1: instance type
  // r2: length
  // r3: from index (untagged)
  Label underlying_unpacked, sliced_string, seq_or_external_string;
  // If the string is not indirect, it can only be sequential or external.
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);
  __ tst(r1, Operand(kIsIndirectStringMask));
  __ b(eq, &seq_or_external_string);

  __ tst(r1, Operand(kSlicedNotConsMask));
  __ b(ne, &sliced_string);
  // Cons string.  Check whether it is flat, then fetch first part.
  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
  __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
  __ b(ne, &runtime);
  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
  // Update instance type.
  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&sliced_string);
  // Sliced string.  Fetch parent and correct start index by offset.
  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
  // Update instance type.
  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
  __ jmp(&underlying_unpacked);

  __ bind(&seq_or_external_string);
  // Sequential or external string.  Just move string to the expected register.
  __ mov(r5, r0);

  __ bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    // r5: underlying subject string
    // r1: instance type of underlying subject string
    // r2: length
    // r3: adjusted start index (untagged)
    __ cmp(r2, Operand(SlicedString::kMinLength));
    // Short slice.  Copy instead of slicing.
    __ b(lt, &copy_routine);
    // Allocate new sliced string.  At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string.  It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyway due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ tst(r1, Operand(kStringEncodingMask));
    __ b(eq, &two_byte_slice);
    __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime);
    __ jmp(&set_slice_header);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime);
    __ bind(&set_slice_header);
    __ mov(r3, Operand(r3, LSL, 1));
    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
    __ jmp(&return_r0);

    __ bind(&copy_routine);
  }

  // r5: underlying subject string
  // r1: instance type of underlying subject string
  // r2: length
  // r3: adjusted start index (untagged)
  Label two_byte_sequential, sequential_string, allocate_result;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r1, Operand(kExternalStringTag));
  __ b(eq, &sequential_string);

  // Handle external string.
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ tst(r1, Operand(kShortExternalStringTag));
  __ b(ne, &runtime);
  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
  // r5 already points to the first character of the underlying string.
  __ jmp(&allocate_result);

  __ bind(&sequential_string);
  // Locate first character of underlying subject string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));

  __ bind(&allocate_result);
  // Sequential ASCII string.  Allocate the result.
  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
  __ tst(r1, Operand(kStringEncodingMask));
  __ b(eq, &two_byte_sequential);

  // Allocate and copy the resulting ASCII string.
  __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);

  // Locate first character of substring to copy.
  __ add(r5, r5, r3);
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));

  // r0: result string
  // r1: first character of result string
  // r2: result string length
  // r5: first character of substring to copy
  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
  __ jmp(&return_r0);

  // Allocate and copy the resulting two-byte string.
  __ bind(&two_byte_sequential);
  __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime);

  // Locate first character of substring to copy.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ add(r5, r5, Operand(r3, LSL, 1));
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r0: result string.
  // r1: first character of result.
  // r2: result length.
  // r5: first character of substring to copy.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(
      masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);

  __ bind(&return_r0);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // Just jump to runtime to create the substring.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
}
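
// For reference, the overall strategy of SubStringStub above (sketch only):
//
//   if (from == 0 && to == string.length) return string;        // trivial
//   if (to - from == 2 && seq. ASCII) probe the symbol table;
//   unpack cons and sliced strings to the underlying string;
//   if (FLAG_string_slices && to - from >= SlicedString::kMinLength)
//     return new SlicedString(underlying, from);                // O(1)
//   allocate a sequential string and copy to - from characters. // O(n)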


void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2,
                                                      Register scratch3) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ cmp(length, scratch2);
  __ b(eq, &check_zero_length);
  __ bind(&strings_not_equal);
  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
  __ Ret();

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(length, Operand(0));
  __ b(ne, &compare_chars);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();

  // Compare characters.
  __ bind(&compare_chars);
  GenerateAsciiCharsCompareLoop(masm,
                                left, right, length, scratch2, scratch3,
                                &strings_not_equal);

  // Characters are equal.
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  Label result_not_equal, compare_lengths;
  // Find minimum length and length difference.
  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
  Register length_delta = scratch3;
  __ mov(scratch1, scratch2, LeaveCC, gt);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(min_length, Operand(0));
  __ b(eq, &compare_lengths);

  // Compare loop.
  GenerateAsciiCharsCompareLoop(masm,
                                left, right, min_length, scratch2, scratch4,
                                &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use length_delta as result if it's zero.
  __ mov(r0, Operand(length_delta), SetCC);
  __ bind(&result_not_equal);
  // Conditionally update the result based either on length_delta or
  // the last comparison performed in the loop above.
  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
  __ Ret();
}
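
// For reference, the result convention implemented above (sketch only); note
// that Smi::FromInt(EQUAL) == 0, so length_delta can be returned directly
// when the common prefixes match:
//
//   if (left[i] != right[i] for some i < min_length)
//     r0 = left[i] < right[i] ? Smi(LESS) : Smi(GREATER);
//   else
//     r0 = Smi(sign(left.length - right.length));  // LESS/EQUAL/GREATER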


void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch1,
    Register scratch2,
    Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ add(scratch1, length,
         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ add(left, left, Operand(scratch1));
  __ add(right, right, Operand(scratch1));
  __ rsb(length, length, Operand::Zero());
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ ldrb(scratch1, MemOperand(left, index));
  __ ldrb(scratch2, MemOperand(right, index));
  __ cmp(scratch1, scratch2);
  __ b(ne, chars_not_equal);
  __ add(index, index, Operand(1), SetCC);
  __ b(ne, &loop);
}
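
// For reference, a rough C sketch of the loop above (illustration only):
// biasing both string pointers by length lets a single negative index be
// incremented towards zero, so the loop needs no separate bounds compare:
//
//   left += length;  right += length;
//   for (int index = -length; index != 0; index++) {
//     if (left[index] != right[index]) goto chars_not_equal;
//   }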


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  Counters* counters = masm->isolate()->counters();

  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[4]: left string
  __ Ldrd(r0, r1, MemOperand(sp));  // Load right in r0, left in r1.

  Label not_same;
  __ cmp(r0, r1);
  __ b(ne, &not_same);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&not_same);

  // Check that both objects are sequential ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);

  // Compare flat ASCII strings natively. Remove arguments from stack first.
  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label call_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

  Counters* counters = masm->isolate()->counters();

  // Stack on entry:
  // sp[0]: second argument (right).
  // sp[4]: first argument (left).

  // Load the two arguments.
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  if (flags_ == NO_STRING_ADD_FLAGS) {
    __ JumpIfEitherSmi(r0, r1, &call_runtime);
    // Load instance types.
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
    STATIC_ASSERT(kStringTag == 0);
    // If either is not a string, go to runtime.
    __ tst(r4, Operand(kIsNotStringMask));
    __ tst(r5, Operand(kIsNotStringMask), eq);
    __ b(ne, &call_runtime);
  } else {
    // Here at least one of the arguments is definitely a string.
    // We convert the one that is not known to be a string.
    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
      GenerateConvertArgument(
          masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
      builtin_id = Builtins::STRING_ADD_RIGHT;
    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
      GenerateConvertArgument(
          masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
      builtin_id = Builtins::STRING_ADD_LEFT;
    }
  }

  // Both arguments are strings.
  // r0: first string
  // r1: second string
  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  {
    Label strings_not_empty;
    // Check if either of the strings are empty. In that case return the other.
    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
    STATIC_ASSERT(kSmiTag == 0);
    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
    STATIC_ASSERT(kSmiTag == 0);
    // Else test if second string is empty.
    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.

    __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
    __ add(sp, sp, Operand(2 * kPointerSize));
    __ Ret();

    __ bind(&strings_not_empty);
  }

  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
  // Both strings are non-empty.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result, longer_than_two;
  // Adding two lengths can't overflow.
  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
  __ add(r6, r2, Operand(r3));
  // Use the symbol table when adding two one-character strings, as it
  // helps later optimizations to return a symbol here.
  __ cmp(r6, Operand(2));
  __ b(ne, &longer_than_two);

  // Check that both strings are non-external ASCII strings.
  if (flags_ != NO_STRING_ADD_FLAGS) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
                                                  &call_runtime);

  // Get the two characters forming the new string.
  __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));

  // Try to look up the two character string in the symbol table. If it is
  // not found just allocate a new one.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&make_two_character_string);
  // The resulting string has length 2, and the first characters of the two
  // strings are combined into a single halfword in register r2.  So we can
  // fill the resulting string with a single halfword store instruction
  // instead of two loops (this assumes that the processor is in little
  // endian mode).
  __ mov(r6, Operand(2));
  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
  __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&longer_than_two);
  // Check if resulting string will be flat.
  __ cmp(r6, Operand(ConsString::kMinLength));
  __ b(lt, &string_add_flat_result);
  // Handle exceptionally long strings in the runtime system.
  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
  ASSERT(IsPowerOf2(String::kMaxLength + 1));
  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
  __ cmp(r6, Operand(String::kMaxLength + 1));
  __ b(hs, &call_runtime);

  // If result is not supposed to be flat, allocate a cons string object.
  // If both strings are ASCII the result is an ASCII cons string.
  if (flags_ != NO_STRING_ADD_FLAGS) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  Label non_ascii, allocated, ascii_data;
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(r4, Operand(kStringEncodingMask));
  __ tst(r5, Operand(kStringEncodingMask), ne);
  __ b(eq, &non_ascii);

  // Allocate an ASCII cons string.
  __ bind(&ascii_data);
  __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime);
  __ bind(&allocated);
  // Fill the fields of the cons string.
  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
  __ mov(r0, Operand(r7));
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
  // to contain only ASCII characters.
  // r4: first instance type.
  // r5: second instance type.
  __ tst(r4, Operand(kAsciiDataHintMask));
  __ tst(r5, Operand(kAsciiDataHintMask), ne);
  __ b(ne, &ascii_data);
  __ eor(r4, r4, Operand(r5));
  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
  __ b(eq, &ascii_data);

  // Allocate a two byte cons string.
  __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
  __ jmp(&allocated);

  // We cannot encounter sliced strings or cons strings here since:
  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
  // Handle creating a flat result from either external or sequential strings.
  // Locate the first characters' locations.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r6: sum of lengths.
  Label first_prepared, second_prepared;
  __ bind(&string_add_flat_result);
  if (flags_ != NO_STRING_ADD_FLAGS) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }

  // Check whether both strings have the same encoding.
  __ eor(r7, r4, Operand(r5));
  __ tst(r7, Operand(kStringEncodingMask));
  __ b(ne, &call_runtime);

  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r4, Operand(kStringRepresentationMask));
  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ add(r7,
         r0,
         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
         LeaveCC,
         eq);
  __ b(eq, &first_prepared);
  // External string: rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(r4, Operand(kShortExternalStringMask));
  __ b(ne, &call_runtime);
  __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
  __ bind(&first_prepared);

  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r5, Operand(kStringRepresentationMask));
  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ add(r1,
         r1,
         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
         LeaveCC,
         eq);
  __ b(eq, &second_prepared);
  // External string: rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(r5, Operand(kShortExternalStringMask));
  __ b(ne, &call_runtime);
  __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
  __ bind(&second_prepared);

  Label non_ascii_string_add_flat_result;
  // r7: first character of first string
  // r1: first character of second string
  // r2: length of first string.
  // r3: length of second string.
  // r6: sum of lengths.
  // Both strings have the same encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(r5, Operand(kStringEncodingMask));
  __ b(eq, &non_ascii_string_add_flat_result);

  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
  __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // r0: result string.
  // r7: first character of first string.
  // r1: first character of second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: first character of result.
  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true);
  // r6: next character of result.
  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii_string_add_flat_result);
  __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime);
  __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // r0: result string.
  // r7: first character of first string.
  // r1: first character of second string.
  // r2: length of first string.
  // r3: length of second string.
  // r6: first character of result.
  StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false);
  // r6: next character of result.
  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Just jump to runtime to add the two strings.
  __ bind(&call_runtime);
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
  }
}
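
// For reference, the overall strategy of StringAddStub above (sketch only):
//
//   if (left is empty) return right;   if (right is empty) return left;
//   if (left.length + right.length == 2) probe the symbol table;
//   if (left.length + right.length < ConsString::kMinLength)
//     allocate a flat string and copy both parts;       // O(n)
//   else if (left.length + right.length <= String::kMaxLength)
//     return new ConsString(left, right);               // O(1)
//   else
//     call the runtime.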


void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Register scratch4,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
  __ b(lt, &done);

  // Check the number to string cache.
  Label not_cached;
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
                                                      arg,
                                                      scratch1,
                                                      scratch2,
                                                      scratch3,
                                                      scratch4,
                                                      false,
                                                      &not_cached);
  __ mov(arg, scratch1);
  __ str(arg, MemOperand(sp, stack_offset));
  __ jmp(&done);

  // Check if the argument is a safe string wrapper.
  __ bind(&not_cached);
  __ JumpIfSmi(arg, slow);
  __ CompareObjectType(
      arg, scratch1, scratch2, JS_VALUE_TYPE);  // map -> scratch1.
  __ b(ne, slow);
  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
  __ and_(scratch2,
          scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
  __ cmp(scratch2,
         Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
  __ b(ne, slow);
  __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
  __ str(arg, MemOperand(sp, stack_offset));

  __ bind(&done);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  Label miss;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ sub(r0, r0, r1, SetCC);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(r1);
    __ sub(r0, r1, SmiUntagOperand(r0));
  }
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}
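
// For reference, the smi comparison above reduces to (sketch only):
//
//   if (cond == eq)  r0 = r0 - r1;                 // zero iff equal
//   else             r0 = untag(r1) - untag(r0);   // sign gives the order
//
// Untagging first keeps the subtraction from overflowing for ordered
// comparisons; for equality the sign of the result does not matter.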


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &generic_stub);

  __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
  __ b(ne, &maybe_undefined1);
  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
  __ b(ne, &maybe_undefined2);

  // Inline the double comparison, falling back to the general compare
  // stub if NaN is involved or VFP3 is unsupported.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);

    // Load the left and right operands.
    __ sub(r2, r1, Operand(kHeapObjectTag));
    __ vldr(d0, r2, HeapNumber::kValueOffset);
    __ sub(r2, r0, Operand(kHeapObjectTag));
    __ vldr(d1, r2, HeapNumber::kValueOffset);

    // Compare the operands.
    __ VFPCompareAndSetFlags(d0, d1);

    // Don't base result on status bits when a NaN is involved.
    __ b(vs, &unordered);

    // Return a result of -1, 0, or 1, based on status bits.
    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
    __ mov(r0, Operand(LESS), LeaveCC, lt);
    __ mov(r0, Operand(GREATER), LeaveCC, gt);
    __ Ret();
  }

  __ bind(&unordered);
  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
  __ bind(&generic_stub);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
    __ b(ne, &miss);
    __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
    __ b(ne, &maybe_undefined2);
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
    __ b(eq, &unordered);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SYMBOLS);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are symbols.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(tmp1, tmp1, Operand(tmp2));
  __ tst(tmp1, Operand(kIsSymbolMask));
  __ b(eq, &miss);

  // Symbols are compared by identity.
  __ cmp(left, right);
  // Make sure r0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(r0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRINGS);
  Label miss;

  bool equality = Token::IsEqualityOp(op_);

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;
  Register tmp3 = r4;
  Register tmp4 = r5;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ orr(tmp3, tmp1, tmp2);
  __ tst(tmp3, Operand(kIsNotStringMask));
  __ b(ne, &miss);

  // Fast check for identical strings.
  __ cmp(left, right);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret(eq);

  // Handle not identical strings.

  // Check that both strings are symbols. If they are, we're done
  // because we already know they are not identical.
  if (equality) {
    ASSERT(GetCondition() == eq);
    STATIC_ASSERT(kSymbolTag != 0);
    __ and_(tmp3, tmp1, Operand(tmp2));
    __ tst(tmp3, Operand(kIsSymbolMask));
    // Make sure r0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    ASSERT(right.is(r0));
    __ Ret(ne);
  }

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
      tmp1, tmp2, tmp3, tmp4, &runtime);

  // Compare flat ASCII strings. Returns when done.
  if (equality) {
    StringCompareStub::GenerateFlatAsciiStringEquals(
        masm, left, right, tmp1, tmp2, tmp3);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(
        masm, left, right, tmp1, tmp2, tmp3, tmp4);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ Push(left, right);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  Label miss;
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &miss);

  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
  __ b(ne, &miss);
  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
  __ b(ne, &miss);

  ASSERT(GetCondition() == eq);
  __ sub(r0, r0, Operand(r1));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &miss);
  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ cmp(r2, Operand(known_map_));
  __ b(ne, &miss);
  __ cmp(r3, Operand(known_map_));
  __ b(ne, &miss);

  __ sub(r0, r0, Operand(r1));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}

void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    ExternalReference miss =
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());

    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(r1, r0);
    __ push(lr);
    __ Push(r1, r0);
    __ mov(ip, Operand(Smi::FromInt(op_)));
    __ push(ip);
    __ CallExternalReference(miss, 3);
    // Compute the entry point of the rewritten stub.
    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore registers.
    __ pop(lr);
    __ pop(r0);
    __ pop(r1);
  }

  __ Jump(r2);
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  __ ldr(pc, MemOperand(sp, 0));
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    ExternalReference function) {
  __ mov(r2, Operand(function));
  GenerateCall(masm, r2);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                     RelocInfo::CODE_TARGET));
  // Push return address (accessible to GC through exit frame pc).
  // Note that using pc with str is deprecated.
  Label start;
  __ bind(&start);
  __ add(ip, pc, Operand(Assembler::kInstrSize));
  __ str(ip, MemOperand(sp, 0));
  __ Jump(target);  // Call the C++ function.
  ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
            masm->SizeOfCodeGeneratedSince(&start));
}


void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register receiver,
                                                        Register properties,
                                                        Handle<String> name,
                                                        Register scratch0) {
  // If the names of the slots in the range from 1 to kProbes - 1 for the hash
  // value are not equal to the name, and the kProbes-th slot is not used (its
  // name is the undefined value), the hash table is guaranteed not to contain
  // the property. This holds even if some slots represent deleted properties
  // (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
    __ sub(index, index, Operand(1));
    __ and_(index, index, Operand(
        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    Register tmp = properties;
    __ add(tmp, properties, Operand(index, LSL, 1));
    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    ASSERT(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ cmp(entity_name, tmp);
    __ b(eq, done);

    if (i != kInlinedProbes - 1) {
      // Load the hole ready for use below:
      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

      // Stop if we found the property.
      __ cmp(entity_name, Operand(Handle<String>(name)));
      __ b(eq, miss);

      Label the_hole;
      __ cmp(entity_name, tmp);
      __ b(eq, &the_hole);

      // Check if the entry name is not a symbol.
      __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
      __ ldrb(entity_name,
              FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
      __ tst(entity_name, Operand(kIsSymbolMask));
      __ b(eq, miss);

      __ bind(&the_hole);

      // Restore the properties.
      __ ldr(properties,
             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
    }
  }

  const int spill_mask =
      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
       r2.bit() | r1.bit() | r0.bit());

  __ stm(db_w, sp, spill_mask);
  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ mov(r1, Operand(Handle<String>(name)));
  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ cmp(r0, Operand(0));
  __ ldm(ia_w, sp, spill_mask);

  __ b(eq, done);
  __ b(ne, miss);
}


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register scratch1,
                                                        Register scratch2) {
  ASSERT(!elements.is(scratch1));
  ASSERT(!elements.is(scratch2));
  ASSERT(!name.is(scratch1));
  ASSERT(!name.is(scratch2));

  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  // Compute the capacity mask.
  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize));  // convert smi to int
  __ sub(scratch1, scratch1, Operand(1));

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following 'and' instruction.
      ASSERT(StringDictionary::GetProbeOffset(i) <
             1 << (32 - String::kHashFieldOffset));
      __ add(scratch2, scratch2, Operand(
          StringDictionary::GetProbeOffset(i) << String::kHashShift));
    }
    __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift));

    // Scale the index by multiplying by the element size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
    __ cmp(name, Operand(ip));
    __ b(eq, done);
  }

  const int spill_mask =
      (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
       r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
      ~(scratch1.bit() | scratch2.bit());

  __ stm(db_w, sp, spill_mask);
  if (name.is(r0)) {
    ASSERT(!elements.is(r1));
    __ Move(r1, name);
    __ Move(r0, elements);
  } else {
    __ Move(r0, elements);
    __ Move(r1, name);
  }
  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ cmp(r0, Operand(0));
  __ mov(scratch2, Operand(r2));
  __ ldm(ia_w, sp, spill_mask);

  __ b(ne, done);
  __ b(eq, miss);
}
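
// For reference, the probing scheme used above and in Generate() below
// (sketch only): open addressing over a power-of-two table with
// quadratically growing probe offsets, three words per entry:
//
//   for (int i = 0; i < probes; i++) {
//     entry = (hash + StringDictionary::GetProbeOffset(i)) & mask;
//     index = entry * StringDictionary::kEntrySize;  // kEntrySize == 3
//     compare elements[index] against the key;
//   }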
6989
6990
6991void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6992  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
6993  // we cannot call anything that could cause a GC from this stub.
6994  // Registers:
6995  //  result: StringDictionary to probe
6996  //  r1: key
6997  //  : StringDictionary to probe.
6998  //  index_: will hold an index of entry if lookup is successful.
6999  //          might alias with result_.
7000  // Returns:
7001  //  result_ is zero if lookup failed, non zero otherwise.
7002
7003  Register result = r0;
7004  Register dictionary = r0;
7005  Register key = r1;
7006  Register index = r2;
7007  Register mask = r3;
7008  Register hash = r4;
7009  Register undefined = r5;
7010  Register entry_key = r6;
7011
7012  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
7013
7014  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
7015  __ mov(mask, Operand(mask, ASR, kSmiTagSize));
7016  __ sub(mask, mask, Operand(1));
7017
7018  __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
7019
7020  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
7021
7022  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
7023    // Compute the masked index: (hash + i + i * i) & mask.
7024    // Capacity is smi 2^n.
7025    if (i > 0) {
7026      // Add the probe offset (i + i * i) left shifted to avoid right shifting
7027      // the hash in a separate instruction. The value hash + i + i * i is right
7028      // shifted in the following and instruction.
7029      ASSERT(StringDictionary::GetProbeOffset(i) <
7030             1 << (32 - String::kHashFieldOffset));
7031      __ add(index, hash, Operand(
7032          StringDictionary::GetProbeOffset(i) << String::kHashShift));
7033    } else {
7034      __ mov(index, Operand(hash));
7035    }
7036    __ and_(index, mask, Operand(index, LSR, String::kHashShift));
7037
7038    // Scale the index by multiplying by the entry size.
7039    ASSERT(StringDictionary::kEntrySize == 3);
7040    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
7041
7042    ASSERT_EQ(kSmiTagSize, 1);
7043    __ add(index, dictionary, Operand(index, LSL, 2));
7044    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
7045
7046    // Having undefined at this place means the name is not contained.
7047    __ cmp(entry_key, Operand(undefined));
7048    __ b(eq, &not_in_dictionary);
7049
7050    // Stop if found the property.
    __ cmp(entry_key, Operand(key));
    __ b(eq, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a symbol.
      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ ldrb(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ tst(entry_key, Operand(kIsSymbolMask));
      __ b(eq, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup, then a probing failure should be
  // treated as a lookup success.  For a positive lookup, a probing failure
  // should be treated as a lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result, Operand::Zero());
    __ Ret();
  }

  __ bind(&in_dictionary);
  __ mov(result, Operand(1));
  __ Ret();

  __ bind(&not_in_dictionary);
  __ mov(result, Operand::Zero());
  __ Ret();
}


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};

#define REG(Name) { kRegister_ ## Name ## _Code }
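// REG(rN) expands to an aggregate initializer for a Register, so the stub
// list below can be a statically initialized POD table.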

static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
  { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
  // Also used in KeyedStoreIC::GenerateGeneric.
  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
  // Used in CompileStoreGlobal.
  { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
  { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
  { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
  { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
  { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};

#undef REG

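// A RecordWriteStub is pregenerated iff its register assignment and
// remembered-set action match one of the fixed entries above and it does
// not save FP registers.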
bool RecordWriteStub::IsPregenerated() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}


bool StoreBufferOverflowStub::IsPregenerated() {
  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode()->set_is_pregenerated(true);
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode()->set_is_pregenerated(true);
  }
}


// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
// the value has just been written into the object, and now this stub makes
// sure we keep the GC informed.  The word in the object where the value has
// been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call.  We patch them back
  // and forth between a compare instruction (a nop in this position) and the
  // real branch when we start and stop incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ b(&skip_to_incremental_noncompacting);
  __ b(&skip_to_incremental_compacting);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
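  // Both patchable branches must have offsets small enough to survive the
  // round trip through PatchBranchIntoNop and back, hence the 12-bit limit
  // checked here.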
  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, Assembler::kInstrSize);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

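    // The object's page is already scanned on scavenge, so its slots do not
    // need to be recorded individually.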
    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
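  // Arguments: r0 holds the object, r1 the slot address (compaction) or the
  // value in the slot (marking), and r2 the isolate.  If regs_.address() is
  // r0 it would be clobbered when the object is moved into r0, so copy it
  // to a scratch register first.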
  Register address =
      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  ASSERT(!address.is(regs_.object()));
  ASSERT(!address.is(r0));
  __ Move(address, regs_.address());
  __ Move(r0, regs_.object());
  if (mode == INCREMENTAL_COMPACTION) {
    __ Move(r1, address);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ ldr(r1, MemOperand(address, 0));
  }
  __ mov(r2, Operand(ExternalReference::isolate_address()));

  AllowExternalCallThatCantCauseGC scope(masm);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

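    // The slot needs to be recorded only if the value lives on an evacuation
    // candidate page and the object's page does not skip slot recording.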
    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    regs_.address(),  // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : element value to store
  //  -- r1    : array literal
  //  -- r2    : map of array literal
  //  -- r3    : element index as smi
  //  -- r4    : array literal index in function as smi
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  __ CheckFastElements(r2, r5, &double_elements);
  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
  __ JumpIfSmi(r0, &smi_element);
  __ CheckFastSmiOnlyElements(r2, r5, &fast_elements);

  // Storing into the array literal requires an elements transition. Call
  // into the runtime.
  __ bind(&slow_elements);
  __ Push(r1, r3, r0);
  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
  __ Push(r5, r4);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
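  // r3 holds the index as a smi; shifting it left by kPointerSizeLog2 -
  // kSmiTagSize converts the smi directly into a byte offset.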
  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(r0, MemOperand(r6, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
  // FAST_ELEMENTS, and value is Smi.
  __ bind(&smi_element);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
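  // Smis are immediate values, not heap pointers, so no write barrier is
  // needed here.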
  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
  __ Ret();

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
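  // Converts the value (smi or heap number) to a raw double and stores it;
  // bails out to slow_elements if the value is neither.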
  __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
                                 &slow_elements);
  __ Ret();
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM