code-stubs-arm.cc revision e0cee9b3ed82e2391fd85d118aeaa4ea361c687d
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_ARM)
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "regexp-macro-assembler.h"
35
36namespace v8 {
37namespace internal {
38
39
40#define __ ACCESS_MASM(masm)
41
42static void EmitIdenticalObjectComparison(MacroAssembler* masm,
43                                          Label* slow,
44                                          Condition cond,
45                                          bool never_nan_nan);
46static void EmitSmiNonsmiComparison(MacroAssembler* masm,
47                                    Register lhs,
48                                    Register rhs,
49                                    Label* lhs_not_nan,
50                                    Label* slow,
51                                    bool strict);
52static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
53static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
54                                           Register lhs,
55                                           Register rhs);
56
57
58void ToNumberStub::Generate(MacroAssembler* masm) {
59  // The ToNumber stub takes one argument in r0.
60  Label check_heap_number, call_builtin;
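  // If the argument is a Smi it is trivially a number, so just return it.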
61  __ tst(r0, Operand(kSmiTagMask));
62  __ b(ne, &check_heap_number);
63  __ Ret();
64
65  __ bind(&check_heap_number);
66  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
67  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
68  __ cmp(r1, ip);
69  __ b(ne, &call_builtin);
70  __ Ret();
71
72  __ bind(&call_builtin);
73  __ push(r0);
74  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
75}
76
77
78void FastNewClosureStub::Generate(MacroAssembler* masm) {
79  // Create a new closure from the given function info in new
80  // space. Set the context to the current context in cp.
81  Label gc;
82
83  // Pop the function info from the stack.
84  __ pop(r3);
85
86  // Attempt to allocate new JSFunction in new space.
87  __ AllocateInNewSpace(JSFunction::kSize,
88                        r0,
89                        r1,
90                        r2,
91                        &gc,
92                        TAG_OBJECT);
93
94  // Compute the function map in the current global context and set that
95  // as the map of the allocated object.
96  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
97  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
98  __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
99  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
100
101  // Initialize the rest of the function. We don't have to update the
102  // write barrier because the allocated object is in new space.
103  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
104  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
105  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
106  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
107  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
108  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
109  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
110  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
111  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
112  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
113
114
115  // Initialize the code pointer in the function to be the one
116  // found in the shared function info object.
117  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
118  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
119  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
120
121  // Return result. The argument function info has been popped already.
122  __ Ret();
123
124  // Create a new closure through the slower runtime call.
125  __ bind(&gc);
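  // Pass the context, the function info and the false value (presumably the
  // pretenure flag) to the runtime.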
126  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
127  __ Push(cp, r3, r4);
128  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
129}
130
131
132void FastNewContextStub::Generate(MacroAssembler* masm) {
133  // Try to allocate the context in new space.
134  Label gc;
135  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
136
137  // Attempt to allocate the context in new space.
138  __ AllocateInNewSpace(FixedArray::SizeFor(length),
139                        r0,
140                        r1,
141                        r2,
142                        &gc,
143                        TAG_OBJECT);
144
145  // Load the function from the stack.
146  __ ldr(r3, MemOperand(sp, 0));
147
148  // Setup the object header.
149  __ LoadRoot(r2, Heap::kContextMapRootIndex);
150  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
151  __ mov(r2, Operand(Smi::FromInt(length)));
152  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
153
154  // Setup the fixed slots.
155  __ mov(r1, Operand(Smi::FromInt(0)));
156  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
157  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
158  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
159  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
160
161  // Copy the global object from the surrounding context.
162  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
163  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
164
165  // Initialize the rest of the slots to undefined.
166  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
167  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
168    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
169  }
170
171  // Remove the on-stack argument and return.
172  __ mov(cp, r0);
173  __ pop();
174  __ Ret();
175
176  // Need to collect. Call into runtime system.
177  __ bind(&gc);
178  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
179}
180
181
182void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
183  // Stack layout on entry:
184  //
185  // [sp]: constant elements.
186  // [sp + kPointerSize]: literal index.
187  // [sp + (2 * kPointerSize)]: literals array.
188
189  // All sizes here are multiples of kPointerSize.
190  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
191  int size = JSArray::kSize + elements_size;
192
193  // Load boilerplate object into r3 and check if we need to create a
194  // boilerplate.
195  Label slow_case;
196  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
197  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
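  // r3 holds the literals array and r0 the literal index (a Smi). The shift
  // in the following load scales the Smi index to a byte offset.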
198  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
199  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
200  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
201  __ cmp(r3, ip);
202  __ b(eq, &slow_case);
203
204  if (FLAG_debug_code) {
205    const char* message;
206    Heap::RootListIndex expected_map_index;
207    if (mode_ == CLONE_ELEMENTS) {
208      message = "Expected (writable) fixed array";
209      expected_map_index = Heap::kFixedArrayMapRootIndex;
210    } else {
211      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
212      message = "Expected copy-on-write fixed array";
213      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
214    }
215    __ push(r3);
216    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
217    __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
218    __ LoadRoot(ip, expected_map_index);
219    __ cmp(r3, ip);
220    __ Assert(eq, message);
221    __ pop(r3);
222  }
223
224  // Allocate both the JS array and the elements array in one big
225  // allocation. This avoids multiple limit checks.
226  __ AllocateInNewSpace(size,
227                        r0,
228                        r1,
229                        r2,
230                        &slow_case,
231                        TAG_OBJECT);
232
233  // Copy the JS array part.
234  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
235    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
236      __ ldr(r1, FieldMemOperand(r3, i));
237      __ str(r1, FieldMemOperand(r0, i));
238    }
239  }
240
241  if (length_ > 0) {
242    // Get hold of the elements array of the boilerplate and setup the
243    // elements pointer in the resulting object.
244    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
245    __ add(r2, r0, Operand(JSArray::kSize));
246    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
247
248    // Copy the elements array.
249    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
250  }
251
252  // Return and remove the on-stack parameters.
253  __ add(sp, sp, Operand(3 * kPointerSize));
254  __ Ret();
255
256  __ bind(&slow_case);
257  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
258}
259
260
261// Takes a Smi and converts to an IEEE 64 bit floating point value in two
262// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
263// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
264// scratch register.  Destroys the source register.  No GC occurs during this
265// stub so you don't have to set up the frame.
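// For example, the Smi 3 is converted to 0x40080000 (sign, exponent and top
// mantissa bits) and 0x00000000 (low mantissa bits), i.e. the IEEE 754
// encoding of the double 3.0.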
266class ConvertToDoubleStub : public CodeStub {
267 public:
268  ConvertToDoubleStub(Register result_reg_1,
269                      Register result_reg_2,
270                      Register source_reg,
271                      Register scratch_reg)
272      : result1_(result_reg_1),
273        result2_(result_reg_2),
274        source_(source_reg),
275        zeros_(scratch_reg) { }
276
277 private:
278  Register result1_;
279  Register result2_;
280  Register source_;
281  Register zeros_;
282
283  // Minor key encoding in 16 bits.
284  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
285  class OpBits: public BitField<Token::Value, 2, 14> {};
286
287  Major MajorKey() { return ConvertToDouble; }
288  int MinorKey() {
289    // Encode the parameters in a unique 16 bit value.
290    return  result1_.code() +
291           (result2_.code() << 4) +
292           (source_.code() << 8) +
293           (zeros_.code() << 12);
294  }
295
296  void Generate(MacroAssembler* masm);
297
298  const char* GetName() { return "ConvertToDoubleStub"; }
299
300#ifdef DEBUG
301  void Print() { PrintF("ConvertToDoubleStub\n"); }
302#endif
303};
304
305
306void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
307#ifndef BIG_ENDIAN_FLOATING_POINT
308  Register exponent = result1_;
309  Register mantissa = result2_;
310#else
311  Register exponent = result2_;
312  Register mantissa = result1_;
313#endif
314  Label not_special;
315  // Convert from Smi to integer.
316  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
317  // Move sign bit from source to destination.  This works because the sign bit
318  // in the exponent word of the double has the same position and polarity as
319  // the 2's complement sign bit in a Smi.
320  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
321  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
322  // Subtract from 0 if source was negative.
323  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
324
325  // We have -1, 0 or 1, which we treat specially. Register source_ contains
326  // absolute value: it is either equal to 1 (special case of -1 and 1),
327  // greater than 1 (not a special case) or less than 1 (special case of 0).
328  __ cmp(source_, Operand(1));
329  __ b(gt, &not_special);
330
331  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
332  static const uint32_t exponent_word_for_1 =
333      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
334  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
335  // 1, 0 and -1 all have 0 for the second word.
336  __ mov(mantissa, Operand(0, RelocInfo::NONE));
337  __ Ret();
338
339  __ bind(&not_special);
340  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
341  // Gets the wrong answer for 0, but we already checked for that case above.
342  __ CountLeadingZeros(zeros_, source_, mantissa);
343  // Compute exponent and or it into the exponent register.
344  // We use mantissa as a scratch register here.  Use a fudge factor to
345  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
346  // that fit in the ARM's constant field.
347  int fudge = 0x400;
348  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
349  __ add(mantissa, mantissa, Operand(fudge));
350  __ orr(exponent,
351         exponent,
352         Operand(mantissa, LSL, HeapNumber::kExponentShift));
353  // Shift up the source chopping the top bit off.
354  __ add(zeros_, zeros_, Operand(1));
355  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
356  __ mov(source_, Operand(source_, LSL, zeros_));
357  // Compute lower part of fraction (last 12 bits).
358  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
359  // And the top (top 20 bits).
360  __ orr(exponent,
361         exponent,
362         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
363  __ Ret();
364}
365
366
367class FloatingPointHelper : public AllStatic {
368 public:
369
370  enum Destination {
371    kVFPRegisters,
372    kCoreRegisters
373  };
374
375
376  // Loads smis from r0 and r1 (right and left in binary operations) into
377  // floating point registers. Depending on the destination the values end up
378  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
379  // is floating point registers, VFP3 must be supported. If core registers are
380  // requested when VFP3 is supported, d6 and d7 will be scratched.
381  static void LoadSmis(MacroAssembler* masm,
382                       Destination destination,
383                       Register scratch1,
384                       Register scratch2);
385
386  // Loads objects from r0 and r1 (right and left in binary operations) into
387  // floating point registers. Depending on the destination the values end up
388  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
389  // is floating point registers, VFP3 must be supported. If core registers are
390  // requested when VFP3 is supported, d6 and d7 will still be scratched. If
391  // either r0 or r1 is not a number (not a smi and not a heap number object),
392  // the not_number label is jumped to with r0 and r1 intact.
393  static void LoadOperands(MacroAssembler* masm,
394                           FloatingPointHelper::Destination destination,
395                           Register heap_number_map,
396                           Register scratch1,
397                           Register scratch2,
398                           Label* not_number);
399
400  // Loads the number from object into dst as a 32-bit integer if possible. If
401  // the object cannot be converted to a 32-bit integer, control continues at
402  // the label not_int32. If VFP is supported, double_scratch is used
403  // but not scratch2.
404  // Floating point values in the 32-bit integer range will be rounded
405  // to an integer.
406  static void LoadNumberAsInteger(MacroAssembler* masm,
407                                  Register object,
408                                  Register dst,
409                                  Register heap_number_map,
410                                  Register scratch1,
411                                  Register scratch2,
412                                  DwVfpRegister double_scratch,
413                                  Label* not_int32);
414
415  // Load the number from object into double_dst in the double format.
416  // Control will jump to not_int32 if the value cannot be exactly represented
417  // by a 32-bit integer.
418  // Floating point values in the 32-bit integer range that are not exact
419  // integers won't be loaded.
420  static void LoadNumberAsInt32Double(MacroAssembler* masm,
421                                      Register object,
422                                      Destination destination,
423                                      DwVfpRegister double_dst,
424                                      Register dst1,
425                                      Register dst2,
426                                      Register heap_number_map,
427                                      Register scratch1,
428                                      Register scratch2,
429                                      SwVfpRegister single_scratch,
430                                      Label* not_int32);
431
432  // Loads the number from object into dst as a 32-bit integer.
433  // Control will jump to not_int32 if the object cannot be exactly represented
434  // by a 32-bit integer.
435  // Floating point values in the 32-bit integer range that are not exact
436  // integers won't be converted.
437  // scratch3 is not used when VFP3 is supported.
438  static void LoadNumberAsInt32(MacroAssembler* masm,
439                                Register object,
440                                Register dst,
441                                Register heap_number_map,
442                                Register scratch1,
443                                Register scratch2,
444                                Register scratch3,
445                                DwVfpRegister double_scratch,
446                                Label* not_int32);
447
448  // Generate non VFP3 code to check if a double can be exactly represented by a
449  // 32-bit integer. This does not check for 0 or -0, which need
450  // to be checked for separately.
451  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
452  // through otherwise.
453  // src1 and src2 will be clobbered.
454  //
455  // Expected input:
456  // - src1: higher (exponent) part of the double value.
457  // - src2: lower (mantissa) part of the double value.
458  // Output status:
459  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
460  // - src2: contains 1.
461  // - other registers are clobbered.
462  static void DoubleIs32BitInteger(MacroAssembler* masm,
463                                   Register src1,
464                                   Register src2,
465                                   Register dst,
466                                   Register scratch,
467                                   Label* not_int32);
468
469  // Generates code to call a C function to do a double operation using core
470  // registers. (Used when VFP3 is not supported.)
471  // This code never falls through, but returns with a heap number containing
472  // the result in r0.
473  // Register heapnumber_result must be a heap number in which the
474  // result of the operation will be stored.
475  // Requires the following layout on entry:
476  // r0: Left value (least significant part of mantissa).
477  // r1: Left value (sign, exponent, top of mantissa).
478  // r2: Right value (least significant part of mantissa).
479  // r3: Right value (sign, exponent, top of mantissa).
480  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
481                                          Token::Value op,
482                                          Register heap_number_result,
483                                          Register scratch);
484
485 private:
486  static void LoadNumber(MacroAssembler* masm,
487                         FloatingPointHelper::Destination destination,
488                         Register object,
489                         DwVfpRegister dst,
490                         Register dst1,
491                         Register dst2,
492                         Register heap_number_map,
493                         Register scratch1,
494                         Register scratch2,
495                         Label* not_number);
496};
497
498
499void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
500                                   FloatingPointHelper::Destination destination,
501                                   Register scratch1,
502                                   Register scratch2) {
503  if (CpuFeatures::IsSupported(VFP3)) {
504    CpuFeatures::Scope scope(VFP3);
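    // Untag the smis and convert them: r0 goes to d7 and r1 to d6.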
505    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
506    __ vmov(d7.high(), scratch1);
507    __ vcvt_f64_s32(d7, d7.high());
508    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
509    __ vmov(d6.high(), scratch1);
510    __ vcvt_f64_s32(d6, d6.high());
511    if (destination == kCoreRegisters) {
512      __ vmov(r2, r3, d7);
513      __ vmov(r0, r1, d6);
514    }
515  } else {
516    ASSERT(destination == kCoreRegisters);
517    // Write Smi from r0 to r3 and r2 in double format.
518    __ mov(scratch1, Operand(r0));
519    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
520    __ push(lr);
521    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
522    // Write Smi from r1 to r1 and r0 in double format.  scratch2 is scratch.
523    __ mov(scratch1, Operand(r1));
524    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
525    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
526    __ pop(lr);
527  }
528}
529
530
531void FloatingPointHelper::LoadOperands(
532    MacroAssembler* masm,
533    FloatingPointHelper::Destination destination,
534    Register heap_number_map,
535    Register scratch1,
536    Register scratch2,
537    Label* slow) {
538
539  // Load right operand (r0) to d7 or r2/r3.
540  LoadNumber(masm, destination,
541             r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
542
543  // Load left operand (r1) to d6 or r0/r1.
544  LoadNumber(masm, destination,
545             r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
546}
547
548
549void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
550                                     Destination destination,
551                                     Register object,
552                                     DwVfpRegister dst,
553                                     Register dst1,
554                                     Register dst2,
555                                     Register heap_number_map,
556                                     Register scratch1,
557                                     Register scratch2,
558                                     Label* not_number) {
559  if (FLAG_debug_code) {
560    __ AbortIfNotRootValue(heap_number_map,
561                           Heap::kHeapNumberMapRootIndex,
562                           "HeapNumberMap register clobbered.");
563  }
564
565  Label is_smi, done;
566
567  __ JumpIfSmi(object, &is_smi);
568  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
569
570  // Handle loading a double from a heap number.
571  if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
572    CpuFeatures::Scope scope(VFP3);
573    // Load the double from tagged HeapNumber to double register.
574    __ sub(scratch1, object, Operand(kHeapObjectTag));
575    __ vldr(dst, scratch1, HeapNumber::kValueOffset);
576  } else {
577    ASSERT(destination == kCoreRegisters);
578    // Load the double from heap number to dst1 and dst2 in double format.
579    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
580  }
581  __ jmp(&done);
582
583  // Handle loading a double from a smi.
584  __ bind(&is_smi);
585  if (CpuFeatures::IsSupported(VFP3)) {
586    CpuFeatures::Scope scope(VFP3);
587    // Convert smi to double using VFP instructions.
588    __ SmiUntag(scratch1, object);
589    __ vmov(dst.high(), scratch1);
590    __ vcvt_f64_s32(dst, dst.high());
591    if (destination == kCoreRegisters) {
592      // Load the converted smi to dst1 and dst2 in double format.
593      __ vmov(dst1, dst2, dst);
594    }
595  } else {
596    ASSERT(destination == kCoreRegisters);
597    // Write smi to dst1 and dst2 double format.
598    __ mov(scratch1, Operand(object));
599    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
600    __ push(lr);
601    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
602    __ pop(lr);
603  }
604
605  __ bind(&done);
606}
607
608
609void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
610                                              Register object,
611                                              Register dst,
612                                              Register heap_number_map,
613                                              Register scratch1,
614                                              Register scratch2,
615                                              DwVfpRegister double_scratch,
616                                              Label* not_int32) {
617  if (FLAG_debug_code) {
618    __ AbortIfNotRootValue(heap_number_map,
619                           Heap::kHeapNumberMapRootIndex,
620                           "HeapNumberMap register clobbered.");
621  }
622  Label is_smi, done;
623  __ JumpIfSmi(object, &is_smi);
624  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
625  __ cmp(scratch1, heap_number_map);
626  __ b(ne, not_int32);
627  __ ConvertToInt32(
628      object, dst, scratch1, scratch2, double_scratch, not_int32);
629  __ jmp(&done);
630  __ bind(&is_smi);
631  __ SmiUntag(dst, object);
632  __ bind(&done);
633}
634
635
636void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
637                                                  Register object,
638                                                  Destination destination,
639                                                  DwVfpRegister double_dst,
640                                                  Register dst1,
641                                                  Register dst2,
642                                                  Register heap_number_map,
643                                                  Register scratch1,
644                                                  Register scratch2,
645                                                  SwVfpRegister single_scratch,
646                                                  Label* not_int32) {
647  ASSERT(!scratch1.is(object) && !scratch2.is(object));
648  ASSERT(!scratch1.is(scratch2));
649  ASSERT(!heap_number_map.is(object) &&
650         !heap_number_map.is(scratch1) &&
651         !heap_number_map.is(scratch2));
652
653  Label done, obj_is_not_smi;
654
655  __ JumpIfNotSmi(object, &obj_is_not_smi);
656  __ SmiUntag(scratch1, object);
657  if (CpuFeatures::IsSupported(VFP3)) {
658    CpuFeatures::Scope scope(VFP3);
659    __ vmov(single_scratch, scratch1);
660    __ vcvt_f64_s32(double_dst, single_scratch);
661    if (destination == kCoreRegisters) {
662      __ vmov(dst1, dst2, double_dst);
663    }
664  } else {
665    Label fewer_than_20_useful_bits;
666    // Expected output:
667    // |         dst1            |         dst2            |
668    // | s |   exp   |              mantissa               |
669
670    // Check for zero.
671    __ cmp(scratch1, Operand(0));
672    __ mov(dst1, scratch1);
673    __ mov(dst2, scratch1);
674    __ b(eq, &done);
675
676    // Preload the sign of the value.
677    __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
678    // Get the absolute value of the object (as an unsigned integer).
679    __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);
680
681    // Get mantissa[51:20].
682
683    // Get the position of the first set bit.
684    __ CountLeadingZeros(dst2, scratch1, scratch2);
685    __ rsb(dst2, dst2, Operand(31));
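    // dst2 now holds the index of the most significant set bit, which is the
    // unbiased exponent of the value.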
686
687    // Set the exponent.
688    __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
689    __ Bfi(dst1, scratch2, scratch2,
690        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
691
692    // Clear the first non null bit.
693    __ mov(scratch2, Operand(1));
694    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));
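    // The leading 1 is implicit in the IEEE 754 encoding and is not stored.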
695
696    __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
697    // Get the number of bits to set in the lower part of the mantissa.
698    __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
699    __ b(mi, &fewer_than_20_useful_bits);
700    // Set the higher 20 bits of the mantissa.
701    __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
702    __ rsb(scratch2, scratch2, Operand(32));
703    __ mov(dst2, Operand(scratch1, LSL, scratch2));
704    __ b(&done);
705
706    __ bind(&fewer_than_20_useful_bits);
707    __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
708    __ mov(scratch2, Operand(scratch1, LSL, scratch2));
709    __ orr(dst1, dst1, scratch2);
710    // Set dst2 to 0.
711    __ mov(dst2, Operand(0));
712  }
713
714  __ b(&done);
715
716  __ bind(&obj_is_not_smi);
717  if (FLAG_debug_code) {
718    __ AbortIfNotRootValue(heap_number_map,
719                           Heap::kHeapNumberMapRootIndex,
720                           "HeapNumberMap register clobbered.");
721  }
722  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
723
724  // Load the number.
725  if (CpuFeatures::IsSupported(VFP3)) {
726    CpuFeatures::Scope scope(VFP3);
727    // Load the double value.
728    __ sub(scratch1, object, Operand(kHeapObjectTag));
729    __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
730
731    __ EmitVFPTruncate(kRoundToZero,
732                       single_scratch,
733                       double_dst,
734                       scratch1,
735                       scratch2,
736                       kCheckForInexactConversion);
737
738    // Jump to not_int32 if the operation did not succeed.
739    __ b(ne, not_int32);
740
741    if (destination == kCoreRegisters) {
742      __ vmov(dst1, dst2, double_dst);
743    }
744
745  } else {
746    ASSERT(!scratch1.is(object) && !scratch2.is(object));
747    // Load the double value in the destination registers.
748    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
749
750    // Check for 0 and -0.
751    __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
752    __ orr(scratch1, scratch1, Operand(dst2));
753    __ cmp(scratch1, Operand(0));
754    __ b(eq, &done);
755
756    // Check that the value can be exactly represented by a 32-bit integer.
757    // Jump to not_int32 if that's not the case.
758    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
759
760    // dst1 and dst2 were trashed. Reload the double value.
761    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
762  }
763
764  __ bind(&done);
765}
766
767
768void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
769                                            Register object,
770                                            Register dst,
771                                            Register heap_number_map,
772                                            Register scratch1,
773                                            Register scratch2,
774                                            Register scratch3,
775                                            DwVfpRegister double_scratch,
776                                            Label* not_int32) {
777  ASSERT(!dst.is(object));
778  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
779  ASSERT(!scratch1.is(scratch2) &&
780         !scratch1.is(scratch3) &&
781         !scratch2.is(scratch3));
782
783  Label done;
784
785  // Untag the object into the destination register.
786  __ SmiUntag(dst, object);
787  // Just return if the object is a smi.
788  __ JumpIfSmi(object, &done);
789
790  if (FLAG_debug_code) {
791    __ AbortIfNotRootValue(heap_number_map,
792                           Heap::kHeapNumberMapRootIndex,
793                           "HeapNumberMap register clobbered.");
794  }
795  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
796
797  // Object is a heap number.
798  // Convert the floating point value to a 32-bit integer.
799  if (CpuFeatures::IsSupported(VFP3)) {
800    CpuFeatures::Scope scope(VFP3);
801    SwVfpRegister single_scratch = double_scratch.low();
802    // Load the double value.
803    __ sub(scratch1, object, Operand(kHeapObjectTag));
804    __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);
805
806    __ EmitVFPTruncate(kRoundToZero,
807                       single_scratch,
808                       double_scratch,
809                       scratch1,
810                       scratch2,
811                       kCheckForInexactConversion);
812
813    // Jump to not_int32 if the operation did not succeed.
814    __ b(ne, not_int32);
815    // Get the result in the destination register.
816    __ vmov(dst, single_scratch);
817
818  } else {
819    // Load the double value in the destination registers.
820    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
821    __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
822
823    // Check for 0 and -0.
824    __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
825    __ orr(dst, scratch2, Operand(dst));
826    __ cmp(dst, Operand(0));
827    __ b(eq, &done);
828
829    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
830
831    // Registers state after DoubleIs32BitInteger.
832    // dst: mantissa[51:20].
833    // scratch2: 1
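    // scratch3: 32 minus the unbiased exponent (the shift amount used below).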
834
835    // Shift back the higher bits of the mantissa.
836    __ mov(dst, Operand(dst, LSR, scratch3));
837    // Set the implicit first bit.
838    __ rsb(scratch3, scratch3, Operand(32));
839    __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
840    // Set the sign.
841    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
842    __ tst(scratch1, Operand(HeapNumber::kSignMask));
843    __ rsb(dst, dst, Operand(0), LeaveCC, mi);
844  }
845
846  __ bind(&done);
847}
848
849
850void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
851                                               Register src1,
852                                               Register src2,
853                                               Register dst,
854                                               Register scratch,
855                                               Label* not_int32) {
856  // Get exponent alone in scratch.
857  __ Ubfx(scratch,
858          src1,
859          HeapNumber::kExponentShift,
860          HeapNumber::kExponentBits);
861
862  // Subtract the bias from the exponent.
863  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);
864
865  // src1: higher (exponent) part of the double value.
866  // src2: lower (mantissa) part of the double value.
867  // scratch: unbiased exponent.
868
869  // Fast cases. Check for obvious non 32-bit integer values.
870  // Negative exponent cannot yield 32-bit integers.
871  __ b(mi, not_int32);
872  // Exponent greater than 31 cannot yield 32-bit integers.
873  // Also, a positive value with an exponent equal to 31 is outside of the
874  // signed 32-bit integer range.
875  // Another way to put it is that if (exponent - signbit) > 30 then the
876  // number cannot be represented as an int32.
877  Register tmp = dst;
878  __ sub(tmp, scratch, Operand(src1, LSR, 31));
879  __ cmp(tmp, Operand(30));
880  __ b(gt, not_int32);
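  // For example, 2^31 (exponent 31, sign bit 0) gives tmp == 31 and is
  // rejected, while -2^31 (exponent 31, sign 1) gives tmp == 30 and passes.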
881  // - Bits [21:0] in the mantissa are not null.
882  __ tst(src2, Operand(0x3fffff));
883  __ b(ne, not_int32);
884
885  // Otherwise the exponent needs to be big enough to shift all the
886  // non-zero bits left. So we need the (30 - exponent) last bits of the
887  // 31 higher bits of the mantissa to be null.
888  // Because bits [21:0] are null, we can check instead that the
889  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
890
891  // Get the 32 higher bits of the mantissa in dst.
892  __ Ubfx(dst,
893          src2,
894          HeapNumber::kMantissaBitsInTopWord,
895          32 - HeapNumber::kMantissaBitsInTopWord);
896  __ orr(dst,
897         dst,
898         Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
899
900  // Create the mask and test the lower bits (of the higher bits).
901  __ rsb(scratch, scratch, Operand(32));
902  __ mov(src2, Operand(1));
903  __ mov(src1, Operand(src2, LSL, scratch));
904  __ sub(src1, src1, Operand(1));
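  // src1 is now (1 << (32 - exponent)) - 1, a mask of the low bits that must
  // be zero for the value to be a 32-bit integer.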
905  __ tst(dst, src1);
906  __ b(ne, not_int32);
907}
908
909
910void FloatingPointHelper::CallCCodeForDoubleOperation(
911    MacroAssembler* masm,
912    Token::Value op,
913    Register heap_number_result,
914    Register scratch) {
915  // Using core registers:
916  // r0: Left value (least significant part of mantissa).
917  // r1: Left value (sign, exponent, top of mantissa).
918  // r2: Right value (least significant part of mantissa).
919  // r3: Right value (sign, exponent, top of mantissa).
920
921  // Assert that heap_number_result is callee-saved.
922  // We currently always use r5 to pass it.
923  ASSERT(heap_number_result.is(r5));
924
925  // Push the current return address before the C call. Return will be
926  // through pop(pc) below.
927  __ push(lr);
928  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
929  // Call C routine that may not cause GC or other trouble.
930  __ CallCFunction(ExternalReference::double_fp_operation(op), 4);
931  // Store answer in the overwritable heap number.
932#if !defined(USE_ARM_EABI)
933  // Double returned in fp coprocessor register 0 and 1, encoded as
934  // register cr8.  Offsets must be divisible by 4 for coprocessor so we
935  // need to subtract the tag from heap_number_result.
936  __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
937  __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
938#else
939  // Double returned in registers 0 and 1.
940  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
941                                  HeapNumber::kValueOffset));
942#endif
943  // Place heap_number_result in r0 and return to the pushed return address.
944  __ mov(r0, Operand(heap_number_result));
945  __ pop(pc);
946}
947
948
949// See comment for class.
950void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
951  Label max_negative_int;
952  // the_int_ has the answer which is a signed int32 but not a Smi.
953  // We test for the special value that has a different exponent.  This test
954  // has the neat side effect of setting the flags according to the sign.
955  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
956  __ cmp(the_int_, Operand(0x80000000u));
957  __ b(eq, &max_negative_int);
958  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
959  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
960  uint32_t non_smi_exponent =
961      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
962  __ mov(scratch_, Operand(non_smi_exponent));
963  // Set the sign bit in scratch_ if the value was negative.
964  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
965  // Subtract from 0 if the value was negative.
966  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
967  // We should be masking the implicit first digit of the mantissa away here,
968  // but it just ends up combining harmlessly with the last digit of the
969  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
970  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
971  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
972  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
973  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
974  __ str(scratch_, FieldMemOperand(the_heap_number_,
975                                   HeapNumber::kExponentOffset));
976  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
977  __ str(scratch_, FieldMemOperand(the_heap_number_,
978                                   HeapNumber::kMantissaOffset));
979  __ Ret();
980
981  __ bind(&max_negative_int);
982  // The max negative int32 is stored as a positive number in the mantissa of
983  // a double because it uses a sign bit instead of using two's complement.
984  // The actual mantissa bits stored are all 0 because the implicit most
985  // significant 1 bit is not stored.
986  non_smi_exponent += 1 << HeapNumber::kExponentShift;
987  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
988  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
989  __ mov(ip, Operand(0, RelocInfo::NONE));
990  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
991  __ Ret();
992}
993
994
995// Handle the case where the lhs and rhs are the same object.
996// Equality is almost reflexive (everything but NaN), so this is a test
997// for "identity and not NaN".
998static void EmitIdenticalObjectComparison(MacroAssembler* masm,
999                                          Label* slow,
1000                                          Condition cond,
1001                                          bool never_nan_nan) {
1002  Label not_identical;
1003  Label heap_number, return_equal;
1004  __ cmp(r0, r1);
1005  __ b(ne, &not_identical);
1006
1007  // The two objects are identical.  If we know that one of them isn't NaN then
1008  // we now know they test equal.
1009  if (cond != eq || !never_nan_nan) {
1010    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
1011    // so we do the second best thing - test it ourselves.
1012    // They are both equal and they are not both Smis so, being identical,
1013    // neither of them is a Smi.  If it's not a heap number, then return equal.
1014    if (cond == lt || cond == gt) {
1015      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
1016      __ b(ge, slow);
1017    } else {
1018      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
1019      __ b(eq, &heap_number);
1020      // Comparing JS objects with <=, >= is complicated.
1021      if (cond != eq) {
1022        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
1023        __ b(ge, slow);
1024        // Normally here we fall through to return_equal, but undefined is
1025        // special: (undefined == undefined) == true, but
1026        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
1027        if (cond == le || cond == ge) {
1028          __ cmp(r4, Operand(ODDBALL_TYPE));
1029          __ b(ne, &return_equal);
1030          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
1031          __ cmp(r0, r2);
1032          __ b(ne, &return_equal);
1033          if (cond == le) {
1034            // undefined <= undefined should fail.
1035            __ mov(r0, Operand(GREATER));
1036          } else {
1037            // undefined >= undefined should fail.
1038            __ mov(r0, Operand(LESS));
1039          }
1040          __ Ret();
1041        }
1042      }
1043    }
1044  }
1045
1046  __ bind(&return_equal);
1047  if (cond == lt) {
1048    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
1049  } else if (cond == gt) {
1050    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
1051  } else {
1052    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
1053  }
1054  __ Ret();
1055
1056  if (cond != eq || !never_nan_nan) {
1057    // For less and greater we don't have to check for NaN since the result of
1058    // x < x is false regardless.  For the others here is some code to check
1059    // for NaN.
1060    if (cond != lt && cond != gt) {
1061      __ bind(&heap_number);
1062      // It is a heap number, so return non-equal if it's NaN and equal if it's
1063      // not NaN.
1064
1065      // The representation of NaN values has all exponent bits (52..62) set,
1066      // and not all mantissa bits (0..51) clear.
1067      // Read top bits of double representation (second word of value).
1068      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
1069      // Test that exponent bits are all set.
1070      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
1071      // NaNs have all-one exponents so they sign extend to -1.
1072      __ cmp(r3, Operand(-1));
1073      __ b(ne, &return_equal);
1074
1075      // Shift out flag and all exponent bits, retaining only mantissa.
1076      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
1077      // Or with all low-bits of mantissa.
1078      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
1079      __ orr(r0, r3, Operand(r2), SetCC);
1080      // For equal we already have the right value in r0:  Return zero (equal)
1081      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1082      // not (it's a NaN).  For <= and >= we need to load r0 with the failing
1083      // value if it's a NaN.
1084      if (cond != eq) {
1085        // All-zero means Infinity means equal.
1086        __ Ret(eq);
1087        if (cond == le) {
1088          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
1089        } else {
1090          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
1091        }
1092      }
1093      __ Ret();
1094    }
1095    // No fall through here.
1096  }
1097
1098  __ bind(&not_identical);
1099}
1100
1101
1102// See comment at call site.
1103static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1104                                    Register lhs,
1105                                    Register rhs,
1106                                    Label* lhs_not_nan,
1107                                    Label* slow,
1108                                    bool strict) {
1109  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1110         (lhs.is(r1) && rhs.is(r0)));
1111
1112  Label rhs_is_smi;
1113  __ tst(rhs, Operand(kSmiTagMask));
1114  __ b(eq, &rhs_is_smi);
1115
1116  // Lhs is a Smi.  Check whether the rhs is a heap number.
1117  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
1118  if (strict) {
1119    // If rhs is not a number and lhs is a Smi then strict equality cannot
1120    // succeed.  Return non-equal
1121    // If rhs is r0 then there is already a non zero value in it.
1122    if (!rhs.is(r0)) {
1123      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1124    }
1125    __ Ret(ne);
1126  } else {
1127    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
1128    // the runtime.
1129    __ b(ne, slow);
1130  }
1131
1132  // Lhs is a smi, rhs is a number.
1133  if (CpuFeatures::IsSupported(VFP3)) {
1134    // Convert lhs to a double in d7.
1135    CpuFeatures::Scope scope(VFP3);
1136    __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
1137    // Load the double from rhs, tagged HeapNumber r0, to d6.
1138    __ sub(r7, rhs, Operand(kHeapObjectTag));
1139    __ vldr(d6, r7, HeapNumber::kValueOffset);
1140  } else {
1141    __ push(lr);
1142    // Convert lhs to a double in r2, r3.
1143    __ mov(r7, Operand(lhs));
1144    ConvertToDoubleStub stub1(r3, r2, r7, r6);
1145    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
1146    // Load rhs to a double in r0, r1.
1147    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1148    __ pop(lr);
1149  }
1150
1151  // We now have both loaded as doubles but we can skip the lhs nan check
1152  // since it's a smi.
1153  __ jmp(lhs_not_nan);
1154
1155  __ bind(&rhs_is_smi);
1156  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
1157  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
1158  if (strict) {
1159    // If lhs is not a number and rhs is a smi then strict equality cannot
1160    // succeed.  Return non-equal.
1161    // If lhs is r0 then there is already a non zero value in it.
1162    if (!lhs.is(r0)) {
1163      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
1164    }
1165    __ Ret(ne);
1166  } else {
1167    // Smi compared non-strictly with a non-smi non-heap-number.  Call
1168    // the runtime.
1169    __ b(ne, slow);
1170  }
1171
1172  // Rhs is a smi, lhs is a heap number.
1173  if (CpuFeatures::IsSupported(VFP3)) {
1174    CpuFeatures::Scope scope(VFP3);
1175    // Load the double from lhs, tagged HeapNumber r1, to d7.
1176    __ sub(r7, lhs, Operand(kHeapObjectTag));
1177    __ vldr(d7, r7, HeapNumber::kValueOffset);
1178    // Convert rhs to a double in d6.
1179    __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
1180  } else {
1181    __ push(lr);
1182    // Load lhs to a double in r2, r3.
1183    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1184    // Convert rhs to a double in r0, r1.
1185    __ mov(r7, Operand(rhs));
1186    ConvertToDoubleStub stub2(r1, r0, r7, r6);
1187    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
1188    __ pop(lr);
1189  }
1190  // Fall through to both_loaded_as_doubles.
1191}
1192
1193
1194void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
1195  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1196  Register rhs_exponent = exp_first ? r0 : r1;
1197  Register lhs_exponent = exp_first ? r2 : r3;
1198  Register rhs_mantissa = exp_first ? r1 : r0;
1199  Register lhs_mantissa = exp_first ? r3 : r2;
1200  Label one_is_nan, neither_is_nan;
1201
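  // The lhs double is in r2/r3 and the rhs in r0/r1 (one exponent word and
  // one mantissa word each). Check the lhs for NaN first: a NaN has all
  // exponent bits set and a non-zero mantissa.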
1202  __ Sbfx(r4,
1203          lhs_exponent,
1204          HeapNumber::kExponentShift,
1205          HeapNumber::kExponentBits);
1206  // NaNs have all-one exponents so they sign extend to -1.
1207  __ cmp(r4, Operand(-1));
1208  __ b(ne, lhs_not_nan);
1209  __ mov(r4,
1210         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1211         SetCC);
1212  __ b(ne, &one_is_nan);
1213  __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
1214  __ b(ne, &one_is_nan);
1215
1216  __ bind(lhs_not_nan);
1217  __ Sbfx(r4,
1218          rhs_exponent,
1219          HeapNumber::kExponentShift,
1220          HeapNumber::kExponentBits);
1221  // NaNs have all-one exponents so they sign extend to -1.
1222  __ cmp(r4, Operand(-1));
1223  __ b(ne, &neither_is_nan);
1224  __ mov(r4,
1225         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
1226         SetCC);
1227  __ b(ne, &one_is_nan);
1228  __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
1229  __ b(eq, &neither_is_nan);
1230
1231  __ bind(&one_is_nan);
1232  // NaN comparisons always fail.
1233  // Load whatever we need in r0 to make the comparison fail.
1234  if (cond == lt || cond == le) {
1235    __ mov(r0, Operand(GREATER));
1236  } else {
1237    __ mov(r0, Operand(LESS));
1238  }
1239  __ Ret();
1240
1241  __ bind(&neither_is_nan);
1242}
1243
1244
1245// See comment at call site.
1246static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
1247                                          Condition cond) {
1248  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1249  Register rhs_exponent = exp_first ? r0 : r1;
1250  Register lhs_exponent = exp_first ? r2 : r3;
1251  Register rhs_mantissa = exp_first ? r1 : r0;
1252  Register lhs_mantissa = exp_first ? r3 : r2;
1253
1254  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
1255  if (cond == eq) {
1256    // Doubles are not equal unless they have the same bit pattern.
1257    // Exception: 0 and -0.
1258    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
1259    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
1260    // Return non-zero if the numbers are unequal.
1261    __ Ret(ne);
1262
1263    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
1264    // If exponents are equal then return 0.
1265    __ Ret(eq);
1266
1267    // Exponents are unequal.  The only way we can return that the numbers
1268    // are equal is if one is -0 and the other is 0.  We already dealt
1269    // with the case where both are -0 or both are 0.
1270    // We start by seeing if the mantissas (that are equal) or the bottom
1271    // 31 bits of the lhs exponent are non-zero.  If so we return not
1272    // equal.
1273    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
1274    __ mov(r0, Operand(r4), LeaveCC, ne);
1275    __ Ret(ne);
1276    // Now they are equal if and only if the rhs exponent is zero in its
1277    // low 31 bits.
1278    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
1279    __ Ret();
1280  } else {
1281    // Call a native function to do a comparison between two non-NaNs.
1282    // Call C routine that may not cause GC or other trouble.
1283    __ push(lr);
1284    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
1285    __ CallCFunction(ExternalReference::compare_doubles(), 4);
1286    __ pop(pc);  // Return.
1287  }
1288}
1289
1290
1291// See comment at call site.
1292static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1293                                           Register lhs,
1294                                           Register rhs) {
1295  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1296         (lhs.is(r1) && rhs.is(r0)));
1297
1298  // If either operand is a JSObject or an oddball value, then they are
1299  // not equal since their pointers are different.
1300  // There is no test for undetectability in strict equality.
1301  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1302  Label first_non_object;
1303  // Get the type of the first operand into r2 and compare it with
1304  // FIRST_JS_OBJECT_TYPE.
1305  __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
1306  __ b(lt, &first_non_object);
1307
1308  // Return non-zero (r0 is not zero)
1309  Label return_not_equal;
1310  __ bind(&return_not_equal);
1311  __ Ret();
1312
1313  __ bind(&first_non_object);
1314  // Check for oddballs: true, false, null, undefined.
1315  __ cmp(r2, Operand(ODDBALL_TYPE));
1316  __ b(eq, &return_not_equal);
1317
1318  __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
1319  __ b(ge, &return_not_equal);
1320
1321  // Check for oddballs: true, false, null, undefined.
1322  __ cmp(r3, Operand(ODDBALL_TYPE));
1323  __ b(eq, &return_not_equal);
1324
1325  // Now that we have the types we might as well check for symbol-symbol.
1326  // Ensure that no non-strings have the symbol bit set.
1327  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1328  STATIC_ASSERT(kSymbolTag != 0);
1329  __ and_(r2, r2, Operand(r3));
1330  __ tst(r2, Operand(kIsSymbolMask));
1331  __ b(ne, &return_not_equal);
1332}
1333
1334
1335// See comment at call site.
1336static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1337                                       Register lhs,
1338                                       Register rhs,
1339                                       Label* both_loaded_as_doubles,
1340                                       Label* not_heap_numbers,
1341                                       Label* slow) {
1342  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1343         (lhs.is(r1) && rhs.is(r0)));
1344
1345  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
1346  __ b(ne, not_heap_numbers);
1347  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
1348  __ cmp(r2, r3);
1349  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
1350
1351  // Both are heap numbers.  Load them up then jump to the code we have
1352  // for that.
1353  if (CpuFeatures::IsSupported(VFP3)) {
1354    CpuFeatures::Scope scope(VFP3);
1355    __ sub(r7, rhs, Operand(kHeapObjectTag));
1356    __ vldr(d6, r7, HeapNumber::kValueOffset);
1357    __ sub(r7, lhs, Operand(kHeapObjectTag));
1358    __ vldr(d7, r7, HeapNumber::kValueOffset);
1359  } else {
1360    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1361    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1362  }
1363  __ jmp(both_loaded_as_doubles);
1364}
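
// Sketch of the addressing used above: tagged heap object pointers carry a tag
// of kHeapObjectTag, so the untagged address of a HeapNumber's double payload
// is the tagged pointer minus the tag plus HeapNumber::kValueOffset.  The VFP
// path subtracts the tag explicitly because vldr wants a plain base register,
// while FieldMemOperand folds the same subtraction into its offset.
// Illustrative only; assumes 32-bit pointers as on ARM.
static inline uint32_t HeapNumberPayloadAddress(uint32_t tagged_pointer) {
  return tagged_pointer - kHeapObjectTag + HeapNumber::kValueOffset;
}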
1365
1366
1367// Fast negative check for symbol-to-symbol equality.
1368static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1369                                         Register lhs,
1370                                         Register rhs,
1371                                         Label* possible_strings,
1372                                         Label* not_both_strings) {
1373  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
1374         (lhs.is(r1) && rhs.is(r0)));
1375
1376  // r2 is object type of rhs.
1377  // Ensure that no non-strings have the symbol bit set.
1378  Label object_test;
1379  STATIC_ASSERT(kSymbolTag != 0);
1380  __ tst(r2, Operand(kIsNotStringMask));
1381  __ b(ne, &object_test);
1382  __ tst(r2, Operand(kIsSymbolMask));
1383  __ b(eq, possible_strings);
1384  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
1385  __ b(ge, not_both_strings);
1386  __ tst(r3, Operand(kIsSymbolMask));
1387  __ b(eq, possible_strings);
1388
1389  // Both are symbols.  We already checked they weren't the same pointer
1390  // so they are not equal.
1391  __ mov(r0, Operand(NOT_EQUAL));
1392  __ Ret();
1393
1394  __ bind(&object_test);
1395  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
1396  __ b(lt, not_both_strings);
1397  __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
1398  __ b(lt, not_both_strings);
1399  // If both objects are undetectable, they are equal. Otherwise, they
1400  // are not equal, since they are different objects and an object is not
1401  // equal to undefined.
1402  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
1403  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
1404  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
1405  __ and_(r0, r2, Operand(r3));
1406  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
1407  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
1408  __ Ret();
1409}
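
// Sketch of the bit trick in the object case above: each map's bit field has
// the kIsUndetectable bit set for undetectable objects.  AND-ing the two bit
// fields, masking the undetectable bit and XOR-ing with that bit leaves 0
// (EQUAL) exactly when both objects are undetectable, and a non-zero value
// (not equal) otherwise.  Illustrative helper only.
static inline uint32_t UndetectableCompareResult(uint32_t lhs_bit_field,
                                                 uint32_t rhs_bit_field) {
  uint32_t undetectable_bit = 1 << Map::kIsUndetectable;
  uint32_t both = lhs_bit_field & rhs_bit_field & undetectable_bit;
  return both ^ undetectable_bit;  // 0 means the operands compare equal.
}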
1410
1411
1412void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1413                                                         Register object,
1414                                                         Register result,
1415                                                         Register scratch1,
1416                                                         Register scratch2,
1417                                                         Register scratch3,
1418                                                         bool object_is_smi,
1419                                                         Label* not_found) {
1420  // Use of registers. Register result is used as a temporary.
1421  Register number_string_cache = result;
1422  Register mask = scratch3;
1423
1424  // Load the number string cache.
1425  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1426
1427  // Make the hash mask from the length of the number string cache. It
1428  // contains two elements (number and string) for each cache entry.
1429  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1430  // Divide length by two (length is a smi).
1431  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
1432  __ sub(mask, mask, Operand(1));  // Make mask.
1433
1434  // Calculate the entry in the number string cache. The hash value in the
1435  // number string cache for smis is just the smi value, and the hash for
1436  // doubles is the xor of the upper and lower words. See
1437  // Heap::GetNumberStringCache.
1438  Label is_smi;
1439  Label load_result_from_cache;
1440  if (!object_is_smi) {
1441    __ JumpIfSmi(object, &is_smi);
1442    if (CpuFeatures::IsSupported(VFP3)) {
1443      CpuFeatures::Scope scope(VFP3);
1444      __ CheckMap(object,
1445                  scratch1,
1446                  Heap::kHeapNumberMapRootIndex,
1447                  not_found,
1448                  true);
1449
1450      STATIC_ASSERT(8 == kDoubleSize);
1451      __ add(scratch1,
1452             object,
1453             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1454      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
1455      __ eor(scratch1, scratch1, Operand(scratch2));
1456      __ and_(scratch1, scratch1, Operand(mask));
1457
1458      // Calculate address of entry in string cache: each entry consists
1459      // of two pointer sized fields.
1460      __ add(scratch1,
1461             number_string_cache,
1462             Operand(scratch1, LSL, kPointerSizeLog2 + 1));
1463
1464      Register probe = mask;
1465      __ ldr(probe,
1466             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1467      __ JumpIfSmi(probe, not_found);
1468      __ sub(scratch2, object, Operand(kHeapObjectTag));
1469      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
1470      __ sub(probe, probe, Operand(kHeapObjectTag));
1471      __ vldr(d1, probe, HeapNumber::kValueOffset);
1472      __ VFPCompareAndSetFlags(d0, d1);
1473      __ b(ne, not_found);  // The cache did not contain this value.
1474      __ b(&load_result_from_cache);
1475    } else {
1476      __ b(not_found);
1477    }
1478  }
1479
1480  __ bind(&is_smi);
1481  Register scratch = scratch1;
1482  __ and_(scratch, mask, Operand(object, ASR, 1));
1483  // Calculate address of entry in string cache: each entry consists
1484  // of two pointer sized fields.
1485  __ add(scratch,
1486         number_string_cache,
1487         Operand(scratch, LSL, kPointerSizeLog2 + 1));
1488
1489  // Check if the entry is the smi we are looking for.
1490  Register probe = mask;
1491  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1492  __ cmp(object, probe);
1493  __ b(ne, not_found);
1494
1495  // Get the result from the cache.
1496  __ bind(&load_result_from_cache);
1497  __ ldr(result,
1498         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1499  __ IncrementCounter(&Counters::number_to_string_native,
1500                      1,
1501                      scratch1,
1502                      scratch2);
1503}
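
// Sketch of the cache probe above, mirroring Heap::GetNumberStringCache: the
// hash of a smi is its untagged value, the hash of a double is the XOR of its
// two words, and the probe index is the hash masked by half the cache length
// minus one.  Each entry occupies two consecutive slots (number, string),
// hence the final multiplication by two.  Illustrative only; the helper name
// is ours and is not used by the stub.
static inline int NumberStringCacheSlot(uint32_t low_word,
                                        uint32_t high_word,
                                        bool is_smi,
                                        int cache_length) {
  int mask = (cache_length / 2) - 1;
  // For the smi case the tagged value is assumed to be passed in low_word.
  uint32_t hash = is_smi ? (low_word >> kSmiTagSize) : (low_word ^ high_word);
  return static_cast<int>(hash & mask) * 2;  // Slot holding the key.
}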
1504
1505
1506void NumberToStringStub::Generate(MacroAssembler* masm) {
1507  Label runtime;
1508
1509  __ ldr(r1, MemOperand(sp, 0));
1510
1511  // Generate code to lookup number in the number string cache.
1512  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
1513  __ add(sp, sp, Operand(1 * kPointerSize));
1514  __ Ret();
1515
1516  __ bind(&runtime);
1517  // Handle number to string in the runtime system if not found in the cache.
1518  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
1519}
1520
1521
1522// On entry lhs_ and rhs_ are the values to be compared.
1523// On exit r0 is 0, positive or negative to indicate the result of
1524// the comparison.
1525void CompareStub::Generate(MacroAssembler* masm) {
1526  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
1527         (lhs_.is(r1) && rhs_.is(r0)));
1528
1529  Label slow;  // Call builtin.
1530  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
1531
1532  if (include_smi_compare_) {
1533    Label not_two_smis, smi_done;
1534    __ orr(r2, r1, r0);
1535    __ tst(r2, Operand(kSmiTagMask));
1536    __ b(ne, &not_two_smis);
1537    __ mov(r1, Operand(r1, ASR, 1));
1538    __ sub(r0, r1, Operand(r0, ASR, 1));
1539    __ Ret();
1540    __ bind(&not_two_smis);
1541  } else if (FLAG_debug_code) {
1542    __ orr(r2, r1, r0);
1543    __ tst(r2, Operand(kSmiTagMask));
1544    __ Assert(ne, "CompareStub: unexpected smi operands.");
1545  }
1546
1547  // NOTICE! This code is only reached after a smi-fast-case check, so
1548  // it is certain that at least one operand isn't a smi.
1549
1550  // Handle the case where the objects are identical.  Either returns the answer
1551  // or goes to slow.  Only falls through if the objects were not identical.
1552  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1553
1554  // If either is a Smi (we know that not both are), then they can only
1555  // be strictly equal if the other is a HeapNumber.
1556  STATIC_ASSERT(kSmiTag == 0);
1557  ASSERT_EQ(0, Smi::FromInt(0));
1558  __ and_(r2, lhs_, Operand(rhs_));
1559  __ tst(r2, Operand(kSmiTagMask));
1560  __ b(ne, &not_smis);
1561  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
1562  // 1) Return the answer.
1563  // 2) Go to slow.
1564  // 3) Fall through to both_loaded_as_doubles.
1565  // 4) Jump to lhs_not_nan.
1566  // In cases 3 and 4 we have found out we were dealing with a number-number
1567  // comparison.  If VFP3 is supported the double values of the numbers have
1568  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
1569  // into r0, r1, r2, and r3.
1570  EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
1571
1572  __ bind(&both_loaded_as_doubles);
1573  // The arguments have been converted to doubles and stored in d6 and d7, if
1574  // VFP3 is supported, or in r0, r1, r2, and r3.
1575  if (CpuFeatures::IsSupported(VFP3)) {
1576    __ bind(&lhs_not_nan);
1577    CpuFeatures::Scope scope(VFP3);
1578    Label no_nan;
1579    // ARMv7 VFP3 instructions to implement double precision comparison.
1580    __ VFPCompareAndSetFlags(d7, d6);
1581    Label nan;
1582    __ b(vs, &nan);
1583    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1584    __ mov(r0, Operand(LESS), LeaveCC, lt);
1585    __ mov(r0, Operand(GREATER), LeaveCC, gt);
1586    __ Ret();
1587
1588    __ bind(&nan);
1589    // If one of the sides was a NaN then the v flag is set.  Load r0 with
1590    // whatever it takes to make the comparison fail, since comparisons with NaN
1591    // always fail.
1592    if (cc_ == lt || cc_ == le) {
1593      __ mov(r0, Operand(GREATER));
1594    } else {
1595      __ mov(r0, Operand(LESS));
1596    }
1597    __ Ret();
1598  } else {
1599    // Checks for NaN in the doubles we have loaded.  Can return the answer or
1600    // fall through if neither is a NaN.  Also binds lhs_not_nan.
1601    EmitNanCheck(masm, &lhs_not_nan, cc_);
1602    // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the
1603    // answer.  Never falls through.
1604    EmitTwoNonNanDoubleComparison(masm, cc_);
1605  }
1606
1607  __ bind(&not_smis);
1608  // At this point we know we are dealing with two different objects,
1609  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
1610  if (strict_) {
1611    // This returns non-equal for some object types, or falls through if it
1612    // was not lucky.
1613    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1614  }
1615
1616  Label check_for_symbols;
1617  Label flat_string_check;
1618  // Check for heap-number-heap-number comparison.  Can jump to slow case,
1619  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
1620  // that case.  If the inputs are not doubles then jumps to check_for_symbols.
1621  // In this case r2 will contain the type of rhs_.  Never falls through.
1622  EmitCheckForTwoHeapNumbers(masm,
1623                             lhs_,
1624                             rhs_,
1625                             &both_loaded_as_doubles,
1626                             &check_for_symbols,
1627                             &flat_string_check);
1628
1629  __ bind(&check_for_symbols);
1630  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
1631  // symbols.
1632  if (cc_ == eq && !strict_) {
1633    // Returns an answer for two symbols or two detectable objects.
1634    // Otherwise jumps to string case or not both strings case.
1635    // Assumes that r2 is the type of rhs_ on entry.
1636    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1637  }
1638
1639  // Check for both being sequential ASCII strings, and inline if that is the
1640  // case.
1641  __ bind(&flat_string_check);
1642
1643  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
1644
1645  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
1646  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1647                                                     lhs_,
1648                                                     rhs_,
1649                                                     r2,
1650                                                     r3,
1651                                                     r4,
1652                                                     r5);
1653  // Never falls through to here.
1654
1655  __ bind(&slow);
1656
1657  __ Push(lhs_, rhs_);
1658  // Figure out which native to call and setup the arguments.
1659  Builtins::JavaScript native;
1660  if (cc_ == eq) {
1661    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1662  } else {
1663    native = Builtins::COMPARE;
1664    int ncr;  // NaN compare result
1665    if (cc_ == lt || cc_ == le) {
1666      ncr = GREATER;
1667    } else {
1668      ASSERT(cc_ == gt || cc_ == ge);  // remaining cases
1669      ncr = LESS;
1670    }
1671    __ mov(r0, Operand(Smi::FromInt(ncr)));
1672    __ push(r0);
1673  }
1674
1675  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1676  // tagged as a small integer.
1677  __ InvokeBuiltin(native, JUMP_JS);
1678}
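
// Sketch of the smi fast case at the top of CompareStub::Generate above:
// untagging both operands with an arithmetic shift and subtracting them gives
// a value that is negative, zero or positive exactly as the comparison result
// requires, and cannot overflow because smis only use 31 bits.  When a NaN is
// involved the stub instead loads whatever makes the requested relation fail
// (GREATER for lt/le, LESS for gt/ge).  Illustrative helper only.
static inline int SmiCompareResult(int32_t lhs_tagged_smi,
                                   int32_t rhs_tagged_smi) {
  return (lhs_tagged_smi >> kSmiTagSize) - (rhs_tagged_smi >> kSmiTagSize);
}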
1679
1680
1681// This stub does not handle the inlined cases (Smis, Booleans, undefined).
1682// The stub returns zero for false, and a non-zero value for true.
1683void ToBooleanStub::Generate(MacroAssembler* masm) {
1684  // This stub uses VFP3 instructions.
1685  ASSERT(CpuFeatures::IsEnabled(VFP3));
1686
1687  Label false_result;
1688  Label not_heap_number;
1689  Register scratch = r9.is(tos_) ? r7 : r9;
1690
1691  __ LoadRoot(ip, Heap::kNullValueRootIndex);
1692  __ cmp(tos_, ip);
1693  __ b(eq, &false_result);
1694
1695  // HeapNumber => false iff +0, -0, or NaN.
1696  __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
1697  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
1698  __ cmp(scratch, ip);
1699  __ b(&not_heap_number, ne);
1700
1701  __ sub(ip, tos_, Operand(kHeapObjectTag));
1702  __ vldr(d1, ip, HeapNumber::kValueOffset);
1703  __ VFPCompareAndSetFlags(d1, 0.0);
1704  // "tos_" is a register, and contains a non zero value by default.
1705  // Hence we only need to overwrite "tos_" with zero to return false for
1706  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1707  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO
1708  __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN
1709  __ Ret();
1710
1711  __ bind(&not_heap_number);
1712
1713  // Check if the value is 'null'.
1714  // 'null' => false.
1715  __ LoadRoot(ip, Heap::kNullValueRootIndex);
1716  __ cmp(tos_, ip);
1717  __ b(&false_result, eq);
1718
1719  // It can be an undetectable object.
1720  // Undetectable => false.
1721  __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
1722  __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
1723  __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
1724  __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
1725  __ b(&false_result, eq);
1726
1727  // JavaScript object => true.
1728  __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
1729  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1730  __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
1731  // "tos_" is a register and contains a non-zero value.
1732  // Hence we implicitly return true if the greater than
1733  // condition is satisfied.
1734  __ Ret(gt);
1735
1736  // Check for string
1737  __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
1738  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1739  __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
1740  // "tos_" is a register and contains a non-zero value.
1741  // Hence we implicitly return true if the greater than
1742  // condition is satisfied.
1743  __ Ret(gt);
1744
1745  // String value => false iff empty, i.e., length is zero
1746  __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1747  // If length is zero, "tos_" contains zero ==> false.
1748  // If length is not zero, "tos_" contains a non-zero value ==> true.
1749  __ Ret();
1750
1751  // Return 0 in "tos_" for false.
1752  __ bind(&false_result);
1753  __ mov(tos_, Operand(0, RelocInfo::NONE));
1754  __ Ret();
1755}
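
// Rough sketch of the slow-case truthiness rules implemented above (smis,
// booleans and undefined were already handled inline before the stub is
// reached): null, undetectable objects, heap numbers holding +0, -0 or NaN,
// and empty strings are false; other JS objects and non-empty strings are
// true.  Illustrative only; the flags below stand in for the map checks the
// stub performs.
static inline bool SlowToBoolean(bool is_null,
                                 bool is_undetectable,
                                 bool is_heap_number, double number_value,
                                 bool is_string, int string_length) {
  if (is_null || is_undetectable) return false;
  if (is_heap_number) {
    // NaN fails the self-comparison; +0 and -0 both compare equal to 0.0.
    return number_value == number_value && number_value != 0.0;
  }
  if (is_string) return string_length != 0;
  return true;  // Any remaining JS object.
}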
1756
1757
1758// We fall into this code if the operands were Smis, but the result was
1759// not (e.g. overflow).  We branch into this code (to the not_smi label) if
1760// the operands were not both Smi.  The operands are in r0 and r1.  In order
1761// to call the C-implemented binary fp operation routines we need to end up
1762// with the double precision floating point operands in r0 and r1 (for the
1763// value in r1) and r2 and r3 (for the value in r0).
1764void GenericBinaryOpStub::HandleBinaryOpSlowCases(
1765    MacroAssembler* masm,
1766    Label* not_smi,
1767    Register lhs,
1768    Register rhs,
1769    const Builtins::JavaScript& builtin) {
1770  Label slow, slow_reverse, do_the_call;
1771  bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_;
1772
1773  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
1774  Register heap_number_map = r6;
1775
1776  if (ShouldGenerateSmiCode()) {
1777    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1778
1779    // Smi-smi case (overflow).
1780    // Since both are Smis there is no heap number to overwrite, so allocate.
1781    // The new heap number is in r5.  r3 and r7 are scratch.
1782    __ AllocateHeapNumber(
1783        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
1784
1785    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
1786    // using registers d7 and d6 for the double values.
1787    if (CpuFeatures::IsSupported(VFP3)) {
1788      CpuFeatures::Scope scope(VFP3);
1789      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
1790      __ vmov(s15, r7);
1791      __ vcvt_f64_s32(d7, s15);
1792      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
1793      __ vmov(s13, r7);
1794      __ vcvt_f64_s32(d6, s13);
1795      if (!use_fp_registers) {
1796        __ vmov(r2, r3, d7);
1797        __ vmov(r0, r1, d6);
1798      }
1799    } else {
1800      // Write Smi from rhs to r3 and r2 in double format.  r9 is scratch.
1801      __ mov(r7, Operand(rhs));
1802      ConvertToDoubleStub stub1(r3, r2, r7, r9);
1803      __ push(lr);
1804      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
1805      // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch.
1806      __ mov(r7, Operand(lhs));
1807      ConvertToDoubleStub stub2(r1, r0, r7, r9);
1808      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
1809      __ pop(lr);
1810    }
1811    __ jmp(&do_the_call);  // Tail call.  No return.
1812  }
1813
1814  // We branch here if at least one of r0 and r1 is not a Smi.
1815  __ bind(not_smi);
1816  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1817
1818  // After this point we have the left hand side in r1 and the right hand side
1819  // in r0.
1820  if (lhs.is(r0)) {
1821    __ Swap(r0, r1, ip);
1822  }
1823
1824  // The type transition also calculates the answer.
1825  bool generate_code_to_calculate_answer = true;
1826
1827  if (ShouldGenerateFPCode()) {
1828    // DIV has neither SmiSmi fast code nor specialized slow code.
1829    // So don't try to patch a DIV Stub.
1830    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
1831      switch (op_) {
1832        case Token::ADD:
1833        case Token::SUB:
1834        case Token::MUL:
1835          GenerateTypeTransition(masm);  // Tail call.
1836          generate_code_to_calculate_answer = false;
1837          break;
1838
1839        case Token::DIV:
1840          // DIV has neither SmiSmi fast code nor specialized slow code.
1841          // So don't try to patch a DIV Stub.
1842          break;
1843
1844        default:
1845          break;
1846      }
1847    }
1848
1849    if (generate_code_to_calculate_answer) {
1850      Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
1851      if (mode_ == NO_OVERWRITE) {
1852        // In the case where there is no chance of an overwritable float we may
1853        // as well do the allocation immediately while r0 and r1 are untouched.
1854        __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
1855      }
1856
1857      // Move r0 to a double in r2-r3.
1858      __ tst(r0, Operand(kSmiTagMask));
1859      __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
1860      __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
1861      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1862      __ cmp(r4, heap_number_map);
1863      __ b(ne, &slow);
1864      if (mode_ == OVERWRITE_RIGHT) {
1865        __ mov(r5, Operand(r0));  // Overwrite this heap number.
1866      }
1867      if (use_fp_registers) {
1868        CpuFeatures::Scope scope(VFP3);
1869        // Load the double from tagged HeapNumber r0 to d7.
1870        __ sub(r7, r0, Operand(kHeapObjectTag));
1871        __ vldr(d7, r7, HeapNumber::kValueOffset);
1872      } else {
1873        // Calling convention says that second double is in r2 and r3.
1874        __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
1875      }
1876      __ jmp(&finished_loading_r0);
1877      __ bind(&r0_is_smi);
1878      if (mode_ == OVERWRITE_RIGHT) {
1879        // We can't overwrite a Smi so get address of new heap number into r5.
1880        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
1881      }
1882
1883      if (CpuFeatures::IsSupported(VFP3)) {
1884        CpuFeatures::Scope scope(VFP3);
1885        // Convert smi in r0 to double in d7.
1886        __ mov(r7, Operand(r0, ASR, kSmiTagSize));
1887        __ vmov(s15, r7);
1888        __ vcvt_f64_s32(d7, s15);
1889        if (!use_fp_registers) {
1890          __ vmov(r2, r3, d7);
1891        }
1892      } else {
1893        // Write Smi from r0 to r3 and r2 in double format.
1894        __ mov(r7, Operand(r0));
1895        ConvertToDoubleStub stub3(r3, r2, r7, r4);
1896        __ push(lr);
1897        __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
1898        __ pop(lr);
1899      }
1900
1901      // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
1902      // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
1903      Label r1_is_not_smi;
1904      if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
1905          HasSmiSmiFastPath()) {
1906        __ tst(r1, Operand(kSmiTagMask));
1907        __ b(ne, &r1_is_not_smi);
1908        GenerateTypeTransition(masm);  // Tail call.
1909      }
1910
1911      __ bind(&finished_loading_r0);
1912
1913      // Move r1 to a double in r0-r1.
1914      __ tst(r1, Operand(kSmiTagMask));
1915      __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
1916      __ bind(&r1_is_not_smi);
1917      __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
1918      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1919      __ cmp(r4, heap_number_map);
1920      __ b(ne, &slow);
1921      if (mode_ == OVERWRITE_LEFT) {
1922        __ mov(r5, Operand(r1));  // Overwrite this heap number.
1923      }
1924      if (use_fp_registers) {
1925        CpuFeatures::Scope scope(VFP3);
1926        // Load the double from tagged HeapNumber r1 to d6.
1927        __ sub(r7, r1, Operand(kHeapObjectTag));
1928        __ vldr(d6, r7, HeapNumber::kValueOffset);
1929      } else {
1930        // Calling convention says that first double is in r0 and r1.
1931        __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
1932      }
1933      __ jmp(&finished_loading_r1);
1934      __ bind(&r1_is_smi);
1935      if (mode_ == OVERWRITE_LEFT) {
1936        // We can't overwrite a Smi so get address of new heap number into r5.
1937        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
1938      }
1939
1940      if (CpuFeatures::IsSupported(VFP3)) {
1941        CpuFeatures::Scope scope(VFP3);
1942        // Convert smi in r1 to double in d6.
1943        __ mov(r7, Operand(r1, ASR, kSmiTagSize));
1944        __ vmov(s13, r7);
1945        __ vcvt_f64_s32(d6, s13);
1946        if (!use_fp_registers) {
1947          __ vmov(r0, r1, d6);
1948        }
1949      } else {
1950        // Write Smi from r1 to r1 and r0 in double format.
1951        __ mov(r7, Operand(r1));
1952        ConvertToDoubleStub stub4(r1, r0, r7, r9);
1953        __ push(lr);
1954        __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
1955        __ pop(lr);
1956      }
1957
1958      __ bind(&finished_loading_r1);
1959    }
1960
1961    if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
1962      __ bind(&do_the_call);
1963      // If we are inlining the operation using VFP3 instructions for
1964      // add, subtract, multiply, or divide, the arguments are in d6 and d7.
1965      if (use_fp_registers) {
1966        CpuFeatures::Scope scope(VFP3);
1967        // ARMv7 VFP3 instructions to implement
1968        // double precision, add, subtract, multiply, divide.
1969
1970        if (Token::MUL == op_) {
1971          __ vmul(d5, d6, d7);
1972        } else if (Token::DIV == op_) {
1973          __ vdiv(d5, d6, d7);
1974        } else if (Token::ADD == op_) {
1975          __ vadd(d5, d6, d7);
1976        } else if (Token::SUB == op_) {
1977          __ vsub(d5, d6, d7);
1978        } else {
1979          UNREACHABLE();
1980        }
1981        __ sub(r0, r5, Operand(kHeapObjectTag));
1982        __ vstr(d5, r0, HeapNumber::kValueOffset);
1983        __ add(r0, r0, Operand(kHeapObjectTag));
1984        __ Ret();
1985      } else {
1986        // If we did not inline the operation, then the arguments are in:
1987        // r0: Left value (least significant part of mantissa).
1988        // r1: Left value (sign, exponent, top of mantissa).
1989        // r2: Right value (least significant part of mantissa).
1990        // r3: Right value (sign, exponent, top of mantissa).
1991        // r5: Address of heap number for result.
1992
1993        __ push(lr);   // For later.
1994        __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
1995        // Call C routine that may not cause GC or other trouble. r5 is callee
1996        // save.
1997        __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
1998        // Store answer in the overwritable heap number.
1999    #if !defined(USE_ARM_EABI)
2000        // Double returned in fp coprocessor register 0 and 1, encoded as
2001        // register cr8.  Offsets must be divisible by 4 for coprocessor so we
2002        // need to subtract the tag from r5.
2003        __ sub(r4, r5, Operand(kHeapObjectTag));
2004        __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
2005    #else
2006        // Double returned in registers 0 and 1.
2007        __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
2008    #endif
2009        __ mov(r0, Operand(r5));
2010        // And we are done.
2011        __ pop(pc);
2012      }
2013    }
2014  }
2015
2016  if (!generate_code_to_calculate_answer &&
2017      !slow_reverse.is_linked() &&
2018      !slow.is_linked()) {
2019    return;
2020  }
2021
2022  if (lhs.is(r0)) {
2023    __ b(&slow);
2024    __ bind(&slow_reverse);
2025    __ Swap(r0, r1, ip);
2026  }
2027
2028  heap_number_map = no_reg;  // Don't use this any more from here on.
2029
2030  // We jump to here if something goes wrong (one param is not a number of any
2031  // sort or new-space allocation fails).
2032  __ bind(&slow);
2033
2034  // Push arguments to the stack
2035  __ Push(r1, r0);
2036
2037  if (Token::ADD == op_) {
2038    // Test for string arguments before calling runtime.
2039    // r1 : first argument
2040    // r0 : second argument
2041    // sp[0] : second argument
2042    // sp[4] : first argument
2043
2044    Label not_strings, not_string1, string1, string1_smi2;
2045    __ tst(r1, Operand(kSmiTagMask));
2046    __ b(eq, &not_string1);
2047    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
2048    __ b(ge, &not_string1);
2049
2050    // First argument is a string, test second.
2051    __ tst(r0, Operand(kSmiTagMask));
2052    __ b(eq, &string1_smi2);
2053    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
2054    __ b(ge, &string1);
2055
2056    // First and second argument are strings.
2057    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2058    __ TailCallStub(&string_add_stub);
2059
2060    __ bind(&string1_smi2);
2061    // First argument is a string, second is a smi. Try to lookup the number
2062    // string for the smi in the number string cache.
2063    NumberToStringStub::GenerateLookupNumberStringCache(
2064        masm, r0, r2, r4, r5, r6, true, &string1);
2065
2066    // Replace second argument on stack and tailcall string add stub to make
2067    // the result.
2068    __ str(r2, MemOperand(sp, 0));
2069    __ TailCallStub(&string_add_stub);
2070
2071    // Only first argument is a string.
2072    __ bind(&string1);
2073    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
2074
2075    // First argument was not a string, test second.
2076    __ bind(&not_string1);
2077    __ tst(r0, Operand(kSmiTagMask));
2078    __ b(eq, &not_strings);
2079    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
2080    __ b(ge, &not_strings);
2081
2082    // Only second argument is a string.
2083    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
2084
2085    __ bind(&not_strings);
2086  }
2087
2088  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
2089}
2090
2091
2092// For bitwise ops where the inputs are not both Smis, we try here to determine
2093// whether both inputs are either Smis or at least heap numbers that can be
2094// represented by a 32 bit signed value.  We truncate towards zero as required
2095// by the ES spec.  If this is the case we do the bitwise op and see if the
2096// result is a Smi.  If so, great, otherwise we try to find a heap number to
2097// write the answer into (either by allocating or by overwriting).
2098// On entry the operands are in lhs and rhs.  On exit the answer is in r0.
2099void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
2100                                                Register lhs,
2101                                                Register rhs) {
2102  Label slow, result_not_a_smi;
2103  Label rhs_is_smi, lhs_is_smi;
2104  Label done_checking_rhs, done_checking_lhs;
2105
2106  Register heap_number_map = r6;
2107  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2108
2109  __ tst(lhs, Operand(kSmiTagMask));
2110  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
2111  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
2112  __ cmp(r4, heap_number_map);
2113  __ b(ne, &slow);
2114  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
2115  __ jmp(&done_checking_lhs);
2116  __ bind(&lhs_is_smi);
2117  __ mov(r3, Operand(lhs, ASR, 1));
2118  __ bind(&done_checking_lhs);
2119
2120  __ tst(rhs, Operand(kSmiTagMask));
2121  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
2122  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
2123  __ cmp(r4, heap_number_map);
2124  __ b(ne, &slow);
2125  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
2126  __ jmp(&done_checking_rhs);
2127  __ bind(&rhs_is_smi);
2128  __ mov(r2, Operand(rhs, ASR, 1));
2129  __ bind(&done_checking_rhs);
2130
2131  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
2132
2133  // r0 and r1: Original operands (Smi or heap numbers).
2134  // r2 and r3: Signed int32 operands.
2135  switch (op_) {
2136    case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break;
2137    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
2138    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
2139    case Token::SAR:
2140      // Use only the 5 least significant bits of the shift count.
2141      __ and_(r2, r2, Operand(0x1f));
2142      __ mov(r2, Operand(r3, ASR, r2));
2143      break;
2144    case Token::SHR:
2145      // Use only the 5 least significant bits of the shift count.
2146      __ and_(r2, r2, Operand(0x1f));
2147      __ mov(r2, Operand(r3, LSR, r2), SetCC);
2148      // SHR is special because it is required to produce a positive answer.
2149      // The code below for writing into heap numbers isn't capable of writing
2150      // the register as an unsigned int so we go to slow case if we hit this
2151      // case.
2152      if (CpuFeatures::IsSupported(VFP3)) {
2153        __ b(mi, &result_not_a_smi);
2154      } else {
2155        __ b(mi, &slow);
2156      }
2157      break;
2158    case Token::SHL:
2159      // Use only the 5 least significant bits of the shift count.
2160      __ and_(r2, r2, Operand(0x1f));
2161      __ mov(r2, Operand(r3, LSL, r2));
2162      break;
2163    default: UNREACHABLE();
2164  }
2165  // Check that the *signed* result fits in a smi.
2166  __ add(r3, r2, Operand(0x40000000), SetCC);
2167  __ b(mi, &result_not_a_smi);
2168  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
2169  __ Ret();
2170
2171  Label have_to_allocate, got_a_heap_number;
2172  __ bind(&result_not_a_smi);
2173  switch (mode_) {
2174    case OVERWRITE_RIGHT: {
2175      __ tst(rhs, Operand(kSmiTagMask));
2176      __ b(eq, &have_to_allocate);
2177      __ mov(r5, Operand(rhs));
2178      break;
2179    }
2180    case OVERWRITE_LEFT: {
2181      __ tst(lhs, Operand(kSmiTagMask));
2182      __ b(eq, &have_to_allocate);
2183      __ mov(r5, Operand(lhs));
2184      break;
2185    }
2186    case NO_OVERWRITE: {
2187      // Get a new heap number in r5.  r4 and r7 are scratch.
2188      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
2189    }
2190    default: break;
2191  }
2192  __ bind(&got_a_heap_number);
2193  // r2: Answer as signed int32.
2194  // r5: Heap number to write answer into.
2195
2196  // Nothing can go wrong now, so move the heap number to r0, which is the
2197  // result.
2198  __ mov(r0, Operand(r5));
2199
2200  if (CpuFeatures::IsSupported(VFP3)) {
2201    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
2202    CpuFeatures::Scope scope(VFP3);
2203    __ vmov(s0, r2);
2204    if (op_ == Token::SHR) {
2205      __ vcvt_f64_u32(d0, s0);
2206    } else {
2207      __ vcvt_f64_s32(d0, s0);
2208    }
2209    __ sub(r3, r0, Operand(kHeapObjectTag));
2210    __ vstr(d0, r3, HeapNumber::kValueOffset);
2211    __ Ret();
2212  } else {
2213    // Tail call that writes the int32 in r2 to the heap number in r0, using
2214    // r3 as scratch.  r0 is preserved and returned.
2215    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
2216    __ TailCallStub(&stub);
2217  }
2218
2219  if (mode_ != NO_OVERWRITE) {
2220    __ bind(&have_to_allocate);
2221    // Get a new heap number in r5.  r4 and r7 are scratch.
2222    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
2223    __ jmp(&got_a_heap_number);
2224  }
2225
2226  // If all else failed then we go to the runtime system.
2227  __ bind(&slow);
2228  __ Push(lhs, rhs);  // Restore stack.
2229  switch (op_) {
2230    case Token::BIT_OR:
2231      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
2232      break;
2233    case Token::BIT_AND:
2234      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
2235      break;
2236    case Token::BIT_XOR:
2237      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
2238      break;
2239    case Token::SAR:
2240      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
2241      break;
2242    case Token::SHR:
2243      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
2244      break;
2245    case Token::SHL:
2246      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
2247      break;
2248    default:
2249      UNREACHABLE();
2250  }
2251}
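
// Sketch of the smi range check used above: on a 32-bit target a signed value
// fits in a smi exactly when it lies in [-2^30, 2^30), which is the same as
// saying that adding 0x40000000 (with wrap-around) leaves the sign bit clear.
// That is what the add-with-SetCC followed by the branch on 'mi' tests.
// Illustrative only.
static inline bool SignedValueFitsInSmi(int32_t value) {
  uint32_t shifted = static_cast<uint32_t>(value) + 0x40000000u;
  return static_cast<int32_t>(shifted) >= 0;
}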
2252
2253
2254
2255
2256// This function multiplies a value by a known small integer.  It takes the
2257// known int in a register only for the cases where it doesn't know a good
2258// shift-and-add trick, and it may deliver a result that still needs shifting.
2259static void MultiplyByKnownIntInStub(
2260    MacroAssembler* masm,
2261    Register result,
2262    Register source,
2263    Register known_int_register,   // Smi tagged.
2264    int known_int,
2265    int* required_shift) {  // Including Smi tag shift
2266  switch (known_int) {
2267    case 3:
2268      __ add(result, source, Operand(source, LSL, 1));
2269      *required_shift = 1;
2270      break;
2271    case 5:
2272      __ add(result, source, Operand(source, LSL, 2));
2273      *required_shift = 1;
2274      break;
2275    case 6:
2276      __ add(result, source, Operand(source, LSL, 1));
2277      *required_shift = 2;
2278      break;
2279    case 7:
2280      __ rsb(result, source, Operand(source, LSL, 3));
2281      *required_shift = 1;
2282      break;
2283    case 9:
2284      __ add(result, source, Operand(source, LSL, 3));
2285      *required_shift = 1;
2286      break;
2287    case 10:
2288      __ add(result, source, Operand(source, LSL, 2));
2289      *required_shift = 2;
2290      break;
2291    default:
2292      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
2293      __ mul(result, source, known_int_register);
2294      *required_shift = 0;
2295  }
2296}
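
// Sketch of the shift-and-add decompositions above: each constant in the
// table is rewritten as a small multiply that ARM can do in one add/rsb plus
// the shift returned in *required_shift (which also folds in the smi tag
// shift).  Two of the cases, written out on plain integers; the helpers are
// illustrative and not used by the stub.
static inline uint32_t TimesSeven(uint32_t x) {
  return (x << 3) - x;         // Mirrors rsb(result, source, source LSL 3).
}
static inline uint32_t TimesTen(uint32_t x) {
  return (x + (x << 2)) << 1;  // 5 * x, then the extra shift of 1.
}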
2297
2298
2299// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
2300// trick.  See http://en.wikipedia.org/wiki/Divisibility_rule
2301// Takes the sum of the digits base (mask + 1) repeatedly until we have a
2302// number from 0 to mask.  On exit the 'eq' condition flags are set if the
2303// answer is exactly the mask.
2304void IntegerModStub::DigitSum(MacroAssembler* masm,
2305                              Register lhs,
2306                              int mask,
2307                              int shift,
2308                              Label* entry) {
2309  ASSERT(mask > 0);
2310  ASSERT(mask <= 0xff);  // Fits in an immediate, so we don't need ip to hold it.
2311  Label loop;
2312  __ bind(&loop);
2313  __ and_(ip, lhs, Operand(mask));
2314  __ add(lhs, ip, Operand(lhs, LSR, shift));
2315  __ bind(entry);
2316  __ cmp(lhs, Operand(mask));
2317  __ b(gt, &loop);
2318}
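
// Host-side sketch of the reduction above: summing the base-(mask + 1) digits
// of a number preserves its residue modulo any divisor of mask (for example,
// base-4 digit sums preserve the value mod 3), so the loop terminates with a
// value in [0, mask] that is congruent to the input.  The callers then fix up
// the case where the result equals mask.  Illustrative only; assumes a 32-bit
// unsigned input and mask == (1 << shift) - 1.
static inline uint32_t DigitSumReduce(uint32_t value, uint32_t mask, int shift) {
  while (value > mask) {
    value = (value & mask) + (value >> shift);
  }
  return value;  // Congruent to the input modulo any divisor of mask.
}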
2319
2320
2321void IntegerModStub::DigitSum(MacroAssembler* masm,
2322                              Register lhs,
2323                              Register scratch,
2324                              int mask,
2325                              int shift1,
2326                              int shift2,
2327                              Label* entry) {
2328  ASSERT(mask > 0);
2329  ASSERT(mask <= 0xff);  // This ensures we don't need ip to use it.
2330  Label loop;
2331  __ bind(&loop);
2332  __ bic(scratch, lhs, Operand(mask));
2333  __ and_(ip, lhs, Operand(mask));
2334  __ add(lhs, ip, Operand(lhs, LSR, shift1));
2335  __ add(lhs, lhs, Operand(scratch, LSR, shift2));
2336  __ bind(entry);
2337  __ cmp(lhs, Operand(mask));
2338  __ b(gt, &loop);
2339}
2340
2341
2342// Splits the number into two halves (bottom half has shift bits).  The top
2343// half is subtracted from the bottom half.  If the result is negative then
2344// rhs is added.
2345void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
2346                                                Register lhs,
2347                                                int shift,
2348                                                int rhs) {
2349  int mask = (1 << shift) - 1;
2350  __ and_(ip, lhs, Operand(mask));
2351  __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
2352  __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
2353}
2354
2355
2356void IntegerModStub::ModReduce(MacroAssembler* masm,
2357                               Register lhs,
2358                               int max,
2359                               int denominator) {
2360  int limit = denominator;
2361  while (limit * 2 <= max) limit *= 2;
2362  while (limit >= denominator) {
2363    __ cmp(lhs, Operand(limit));
2364    __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
2365    limit >>= 1;
2366  }
2367}
2368
2369
2370void IntegerModStub::ModAnswer(MacroAssembler* masm,
2371                               Register result,
2372                               Register shift_distance,
2373                               Register mask_bits,
2374                               Register sum_of_digits) {
2375  __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
2376  __ Ret();
2377}
2378
2379
2380// See comment for class.
2381void IntegerModStub::Generate(MacroAssembler* masm) {
2382  __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
2383  __ bic(odd_number_, odd_number_, Operand(1));
2384  __ mov(odd_number_, Operand(odd_number_, LSL, 1));
2385  // We now have (odd_number_ - 1) * 2 in the register.
2386  // Build a switch out of branches instead of data because it avoids
2387  // having to teach the assembler about intra-code-object pointers
2388  // that are not in relative branch instructions.
2389  Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
2390  Label mod21, mod23, mod25;
2391  { Assembler::BlockConstPoolScope block_const_pool(masm);
2392    __ add(pc, pc, Operand(odd_number_));
2393    // When you read pc it is always 8 ahead, but when you write it you always
2394    // write the actual value.  So we put in two nops to take up the slack.
2395    __ nop();
2396    __ nop();
2397    __ b(&mod3);
2398    __ b(&mod5);
2399    __ b(&mod7);
2400    __ b(&mod9);
2401    __ b(&mod11);
2402    __ b(&mod13);
2403    __ b(&mod15);
2404    __ b(&mod17);
2405    __ b(&mod19);
2406    __ b(&mod21);
2407    __ b(&mod23);
2408    __ b(&mod25);
2409  }
2410
2411  // For each denominator we find a multiple that is almost only ones
2412  // when expressed in binary.  Then we do the sum-of-digits trick for
2413  // that number.  If the multiple is not 1 then we have to do a little
2414  // more work afterwards to get the answer into the range 0 to
2415  // denominator - 1.
2416  DigitSum(masm, lhs_, 3, 2, &mod3);  // 3 = b11.
2417  __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
2418  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2419
2420  DigitSum(masm, lhs_, 0xf, 4, &mod5);  // 5 * 3 = b1111.
2421  ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
2422  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2423
2424  DigitSum(masm, lhs_, 7, 3, &mod7);  // 7 = b111.
2425  __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
2426  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2427
2428  DigitSum(masm, lhs_, 0x3f, 6, &mod9);  // 7 * 9 = b111111.
2429  ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
2430  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2431
2432  DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11);  // 5 * 11 = b110111.
2433  ModReduce(masm, lhs_, 0x3f, 11);
2434  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2435
2436  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13);  // 19 * 13 = b11110111.
2437  ModReduce(masm, lhs_, 0xff, 13);
2438  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2439
2440  DigitSum(masm, lhs_, 0xf, 4, &mod15);  // 15 = b1111.
2441  __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
2442  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2443
2444  DigitSum(masm, lhs_, 0xff, 8, &mod17);  // 15 * 17 = b11111111.
2445  ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
2446  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2447
2448  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19);  // 13 * 19 = b11110111.
2449  ModReduce(masm, lhs_, 0xff, 19);
2450  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2451
2452  DigitSum(masm, lhs_, 0x3f, 6, &mod21);  // 3 * 21 = b111111.
2453  ModReduce(masm, lhs_, 0x3f, 21);
2454  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2455
2456  DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23);  // 11 * 23 = b11111101.
2457  ModReduce(masm, lhs_, 0xff, 23);
2458  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2459
2460  DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25);  // 5 * 25 = b1111101.
2461  ModReduce(masm, lhs_, 0x7f, 25);
2462  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
2463}
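
// Sketch of the overall reduction driven above: the caller has already split
// the right hand side into odd_number * 2^k, stashed the low bits of the left
// hand side in mask_bits and shifted the rest down, so the per-denominator
// code only has to reduce the shifted value modulo the odd factor; ModAnswer
// then reassembles the pieces.  On untagged 32-bit values (the stub folds the
// smi tag into shift_distance and mask_bits) this is:
static inline uint32_t ModBySmallConstant(uint32_t lhs, uint32_t odd, int k) {
  uint32_t low_bits = lhs & ((1u << k) - 1);
  uint32_t reduced = (lhs >> k) % odd;  // The stub avoids the actual divide.
  return (reduced << k) | low_bits;
}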
2464
2465
2466void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
2467  // lhs_ : x
2468  // rhs_ : y
2469  // r0   : result
2470
2471  Register result = r0;
2472  Register lhs = lhs_;
2473  Register rhs = rhs_;
2474
2475  // This code can't cope with other register allocations yet.
2476  ASSERT(result.is(r0) &&
2477         ((lhs.is(r0) && rhs.is(r1)) ||
2478          (lhs.is(r1) && rhs.is(r0))));
2479
2480  Register smi_test_reg = r7;
2481  Register scratch = r9;
2482
2483  // All ops need to know whether we are dealing with two Smis.  Set up
2484  // smi_test_reg to tell us that.
2485  if (ShouldGenerateSmiCode()) {
2486    __ orr(smi_test_reg, lhs, Operand(rhs));
2487  }
2488
2489  switch (op_) {
2490    case Token::ADD: {
2491      Label not_smi;
2492      // Fast path.
2493      if (ShouldGenerateSmiCode()) {
2494        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
2495        __ tst(smi_test_reg, Operand(kSmiTagMask));
2496        __ b(ne, &not_smi);
2497        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
2498        // Return if no overflow.
2499        __ Ret(vc);
2500        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
2501      }
2502      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
2503      break;
2504    }
2505
2506    case Token::SUB: {
2507      Label not_smi;
2508      // Fast path.
2509      if (ShouldGenerateSmiCode()) {
2510        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
2511        __ tst(smi_test_reg, Operand(kSmiTagMask));
2512        __ b(ne, &not_smi);
2513        if (lhs.is(r1)) {
2514          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
2515          // Return if no overflow.
2516          __ Ret(vc);
2517          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
2518        } else {
2519          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
2520          // Return if no overflow.
2521          __ Ret(vc);
2522          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
2523        }
2524      }
2525      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
2526      break;
2527    }
2528
2529    case Token::MUL: {
2530      Label not_smi, slow;
2531      if (ShouldGenerateSmiCode()) {
2532        STATIC_ASSERT(kSmiTag == 0);  // adjust code below
2533        __ tst(smi_test_reg, Operand(kSmiTagMask));
2534        Register scratch2 = smi_test_reg;
2535        smi_test_reg = no_reg;
2536        __ b(ne, &not_smi);
2537        // Remove tag from one operand (but keep sign), so that result is Smi.
2538        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
2539        // Do multiplication
2540        // scratch = lower 32 bits of ip * lhs.
2541        __ smull(scratch, scratch2, lhs, ip);
2542        // smull does not set the overflow flag, so detect overflow by hand.
2543        __ mov(ip, Operand(scratch, ASR, 31));
2544        // No overflow if higher 33 bits are identical.
2545        __ cmp(ip, Operand(scratch2));
2546        __ b(ne, &slow);
2547        // Go slow on zero result to handle -0.
2548        __ tst(scratch, Operand(scratch));
2549        __ mov(result, Operand(scratch), LeaveCC, ne);
2550        __ Ret(ne);
2551        // We need -0 if we were multiplying a negative number with 0 to get 0.
2552        // We know one of them was zero.
2553        __ add(scratch2, rhs, Operand(lhs), SetCC);
2554        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
2555        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
2556        // Slow case.  We fall through here if we multiplied a negative number
2557        // with 0, because that would mean we should produce -0.
2558        __ bind(&slow);
2559      }
2560      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
2561      break;
2562    }
2563
2564    case Token::DIV:
2565    case Token::MOD: {
2566      Label not_smi;
2567      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
2568        Label lhs_is_unsuitable;
2569        __ JumpIfNotSmi(lhs, &not_smi);
2570        if (IsPowerOf2(constant_rhs_)) {
2571          if (op_ == Token::MOD) {
2572            __ and_(rhs,
2573                    lhs,
2574                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
2575                    SetCC);
2576            // We now have the answer, but if the input was negative we also
2577            // have the sign bit.  Our work is done if the result is
2578            // positive or zero:
2579            if (!rhs.is(r0)) {
2580              __ mov(r0, rhs, LeaveCC, pl);
2581            }
2582            __ Ret(pl);
2583            // A mod of a negative left hand side must return a negative number.
2584            // Unfortunately if the answer is 0 then we must return -0.  And we
2585            // already optimistically trashed rhs so we may need to restore it.
2586            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
2587            // Next two instructions are conditional on the answer being -0.
2588            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
2589            __ b(eq, &lhs_is_unsuitable);
2590            // We need to subtract the dividend.  Eg. -3 % 4 == -3.
2591            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
2592          } else {
2593            ASSERT(op_ == Token::DIV);
2594            __ tst(lhs,
2595                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
2596            __ b(ne, &lhs_is_unsuitable);  // Go slow on negative or remainder.
2597            int shift = 0;
2598            int d = constant_rhs_;
2599            while ((d & 1) == 0) {
2600              d >>= 1;
2601              shift++;
2602            }
2603            __ mov(r0, Operand(lhs, LSR, shift));
2604            __ bic(r0, r0, Operand(kSmiTagMask));
2605          }
2606        } else {
2607          // Not a power of 2.
2608          __ tst(lhs, Operand(0x80000000u));
2609          __ b(ne, &lhs_is_unsuitable);
2610          // Find a fixed point reciprocal of the divisor so we can divide by
2611          // multiplying.
2612          double divisor = 1.0 / constant_rhs_;
2613          int shift = 32;
2614          double scale = 4294967296.0;  // 1 << 32.
2615          uint32_t mul;
2616          // Maximise the precision of the fixed point reciprocal.
2617          while (true) {
2618            mul = static_cast<uint32_t>(scale * divisor);
2619            if (mul >= 0x7fffffff) break;
2620            scale *= 2.0;
2621            shift++;
2622          }
2623          mul++;
2624          Register scratch2 = smi_test_reg;
2625          smi_test_reg = no_reg;
2626          __ mov(scratch2, Operand(mul));
2627          __ umull(scratch, scratch2, scratch2, lhs);
2628          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
2629          // scratch2 is lhs / rhs.  scratch2 is not Smi tagged.
2630          // rhs is still the known rhs.  rhs is Smi tagged.
2631          // lhs is still the unknown lhs.  lhs is Smi tagged.
2632          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
2633          // scratch = scratch2 * rhs.
2634          MultiplyByKnownIntInStub(masm,
2635                                   scratch,
2636                                   scratch2,
2637                                   rhs,
2638                                   constant_rhs_,
2639                                   &required_scratch_shift);
2640          // scratch << required_scratch_shift is now the Smi tagged rhs *
2641          // (lhs / rhs) where / indicates integer division.
2642          if (op_ == Token::DIV) {
2643            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
2644            __ b(ne, &lhs_is_unsuitable);  // There was a remainder.
2645            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
2646          } else {
2647            ASSERT(op_ == Token::MOD);
2648            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
2649          }
2650        }
2651        __ Ret();
2652        __ bind(&lhs_is_unsuitable);
2653      } else if (op_ == Token::MOD &&
2654                 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
2655                 runtime_operands_type_ != BinaryOpIC::STRINGS) {
2656        // Generate a bit of smi code for modulus even though the default for
2657        // modulus is not to do so: the ARM processor has no coprocessor
2658        // support for modulus, so checking for smis makes sense.  We can handle
2659        // 1 to 25 times any power of 2.  This covers over half the numbers from
2660        // 1 to 100 including all of the first 25.  (Actually the constants < 10
2661        // are handled above by reciprocal multiplication.  We only get here for
2662        // those cases if the right hand side is not a constant or for cases
2663        // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
2664        // stub.)
2665        Label slow;
2666        Label not_power_of_2;
2667        ASSERT(!ShouldGenerateSmiCode());
2668        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
2669        // Check for two positive smis.
2670        __ orr(smi_test_reg, lhs, Operand(rhs));
2671        __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
2672        __ b(ne, &slow);
2673        // Check that rhs is a power of two and not zero.
2674        Register mask_bits = r3;
2675        __ sub(scratch, rhs, Operand(1), SetCC);
2676        __ b(mi, &slow);
2677        __ and_(mask_bits, rhs, Operand(scratch), SetCC);
2678        __ b(ne, &not_power_of_2);
2679        // Calculate power of two modulus.
2680        __ and_(result, lhs, Operand(scratch));
2681        __ Ret();
2682
2683        __ bind(&not_power_of_2);
2684        __ eor(scratch, scratch, Operand(mask_bits));
2685        // At least two bits are set in the modulus.  The high one(s) are in
2686        // mask_bits and the low one is scratch + 1.
2687        __ and_(mask_bits, scratch, Operand(lhs));
2688        Register shift_distance = scratch;
2689        scratch = no_reg;
2690
2691        // The rhs consists of a power of 2 multiplied by some odd number.
2692        // The power-of-2 part we handle by putting the corresponding bits
2693        // from the lhs in the mask_bits register, and the power in the
2694        // shift_distance register.  Shift distance is never 0 due to Smi
2695        // tagging.
2696        __ CountLeadingZeros(r4, shift_distance, shift_distance);
2697        __ rsb(shift_distance, r4, Operand(32));
2698
2699        // Now we need to find out what the odd number is. The last bit is
2700        // always 1.
2701        Register odd_number = r4;
2702        __ mov(odd_number, Operand(rhs, LSR, shift_distance));
2703        __ cmp(odd_number, Operand(25));
2704        __ b(gt, &slow);
2705
2706        IntegerModStub stub(
2707            result, shift_distance, odd_number, mask_bits, lhs, r5);
2708        __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);  // Tail call.
2709
2710        __ bind(&slow);
2711      }
2712      HandleBinaryOpSlowCases(
2713          masm,
2714          &not_smi,
2715          lhs,
2716          rhs,
2717          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
2718      break;
2719    }
2720
2721    case Token::BIT_OR:
2722    case Token::BIT_AND:
2723    case Token::BIT_XOR:
2724    case Token::SAR:
2725    case Token::SHR:
2726    case Token::SHL: {
2727      Label slow;
2728      STATIC_ASSERT(kSmiTag == 0);  // adjust code below
2729      __ tst(smi_test_reg, Operand(kSmiTagMask));
2730      __ b(ne, &slow);
2731      Register scratch2 = smi_test_reg;
2732      smi_test_reg = no_reg;
2733      switch (op_) {
2734        case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break;
2735        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
2736        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
2737        case Token::SAR:
2738          // Remove tags from right operand.
2739          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
2740          __ mov(result, Operand(lhs, ASR, scratch2));
2741          // Smi tag result.
2742          __ bic(result, result, Operand(kSmiTagMask));
2743          break;
2744        case Token::SHR:
2745          // Remove tags from operands.  We can't do this on a 31 bit number
2746          // because then the 0s get shifted into bit 30 instead of bit 31.
2747          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
2748          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
2749          __ mov(scratch, Operand(scratch, LSR, scratch2));
2750          // Unsigned shift is not allowed to produce a negative number, so
2751          // check the sign bit and the sign bit after Smi tagging.
2752          __ tst(scratch, Operand(0xc0000000));
2753          __ b(ne, &slow);
2754          // Smi tag result.
2755          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
2756          break;
2757        case Token::SHL:
2758          // Remove tags from operands.
2759          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
2760          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
2761          __ mov(scratch, Operand(scratch, LSL, scratch2));
2762          // Check that the signed result fits in a Smi.
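          // (Adding 0x40000000 maps the valid smi range [-2^30, 2^30 - 1]
          // onto non-negative 32-bit values, so a negative (mi) result means
          // the value cannot be smi-tagged.)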
2763          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
2764          __ b(mi, &slow);
2765          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
2766          break;
2767        default: UNREACHABLE();
2768      }
2769      __ Ret();
2770      __ bind(&slow);
2771      HandleNonSmiBitwiseOp(masm, lhs, rhs);
2772      break;
2773    }
2774
2775    default: UNREACHABLE();
2776  }
2777  // This code should be unreachable.
2778  __ stop("Unreachable");
2779
2780  // Generate an unreachable reference to the DEFAULT stub so that it can be
2781  // found at the end of this stub when clearing ICs at GC.
2782  // TODO(kaznacheev): Check performance impact and get rid of this.
2783  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
2784    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
2785    __ CallStub(&uninit);
2786  }
2787}
2788
2789
2790void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2791  Label get_result;
2792
2793  __ Push(r1, r0);
2794
2795  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
2796  __ mov(r1, Operand(Smi::FromInt(op_)));
2797  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
2798  __ Push(r2, r1, r0);
2799
2800  __ TailCallExternalReference(
2801      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
2802      5,
2803      1);
2804}
2805
2806
2807Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
2808  GenericBinaryOpStub stub(key, type_info);
2809  return stub.GetCode();
2810}
2811
2812
2813Handle<Code> GetTypeRecordingBinaryOpStub(int key,
2814    TRBinaryOpIC::TypeInfo type_info,
2815    TRBinaryOpIC::TypeInfo result_type_info) {
2816  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
2817  return stub.GetCode();
2818}
2819
2820
2821void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2822  Label get_result;
2823
2824  __ Push(r1, r0);
2825
2826  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
2827  __ mov(r1, Operand(Smi::FromInt(op_)));
2828  __ mov(r0, Operand(Smi::FromInt(operands_type_)));
2829  __ Push(r2, r1, r0);
2830
2831  __ TailCallExternalReference(
2832      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
2833      5,
2834      1);
2835}
2836
2837
2838void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2839    MacroAssembler* masm) {
2840  UNIMPLEMENTED();
2841}
2842
2843
2844void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
2845  switch (operands_type_) {
2846    case TRBinaryOpIC::UNINITIALIZED:
2847      GenerateTypeTransition(masm);
2848      break;
2849    case TRBinaryOpIC::SMI:
2850      GenerateSmiStub(masm);
2851      break;
2852    case TRBinaryOpIC::INT32:
2853      GenerateInt32Stub(masm);
2854      break;
2855    case TRBinaryOpIC::HEAP_NUMBER:
2856      GenerateHeapNumberStub(masm);
2857      break;
2858    case TRBinaryOpIC::STRING:
2859      GenerateStringStub(masm);
2860      break;
2861    case TRBinaryOpIC::GENERIC:
2862      GenerateGeneric(masm);
2863      break;
2864    default:
2865      UNREACHABLE();
2866  }
2867}
2868
2869
2870const char* TypeRecordingBinaryOpStub::GetName() {
2871  if (name_ != NULL) return name_;
2872  const int kMaxNameLength = 100;
2873  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
2874  if (name_ == NULL) return "OOM";
2875  const char* op_name = Token::Name(op_);
2876  const char* overwrite_name;
2877  switch (mode_) {
2878    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2879    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2880    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2881    default: overwrite_name = "UnknownOverwrite"; break;
2882  }
2883
2884  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
2885               "TypeRecordingBinaryOpStub_%s_%s_%s",
2886               op_name,
2887               overwrite_name,
2888               TRBinaryOpIC::GetName(operands_type_));
2889  return name_;
2890}
2891
2892
2893void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
2894    MacroAssembler* masm) {
2895  Register left = r1;
2896  Register right = r0;
2897  Register scratch1 = r7;
2898  Register scratch2 = r9;
2899
2900  ASSERT(right.is(r0));
2901  STATIC_ASSERT(kSmiTag == 0);
2902
2903  Label not_smi_result;
2904  switch (op_) {
2905    case Token::ADD:
2906      __ add(right, left, Operand(right), SetCC);  // Add optimistically.
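      // Return if the overflow (V) flag is clear, i.e. the tagged addition
      // stayed within the smi range.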
2907      __ Ret(vc);
2908      __ sub(right, right, Operand(left));  // Revert optimistic add.
2909      break;
2910    case Token::SUB:
2911      __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically.
2912      __ Ret(vc);
2913      __ sub(right, left, Operand(right));  // Revert optimistic subtract.
2914      break;
2915    case Token::MUL:
2916      // Remove tag from one of the operands. This way the multiplication
2917      // result will be a smi if it fits in the smi range.
2918      __ SmiUntag(ip, right);
2919      // Do multiplication
2920      // scratch1 = lower 32 bits of ip * left.
2921      // scratch2 = higher 32 bits of ip * left.
2922      __ smull(scratch1, scratch2, left, ip);
2923      // Check for overflowing the smi range - no overflow if higher 33 bits of
2924      // the result are identical.
2925      __ mov(ip, Operand(scratch1, ASR, 31));
2926      __ cmp(ip, Operand(scratch2));
2927      __ b(ne, &not_smi_result);
2928      // Go slow on zero result to handle -0.
2929      __ tst(scratch1, Operand(scratch1));
2930      __ mov(right, Operand(scratch1), LeaveCC, ne);
2931      __ Ret(ne);
2932      // We need -0 if we were multiplying a negative number with 0 to get 0.
2933      // We know one of them was zero.
2934      __ add(scratch2, right, Operand(left), SetCC);
2935      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
2936      __ Ret(pl);  // Return smi 0 if the non-zero one was positive.
2937      // We fall through here if we multiplied a negative number with 0, because
2938      // that would mean we should produce -0.
2939      break;
2940    case Token::DIV:
2941      // Check for power of two on the right hand side.
2942      __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2943      // Check for positive and no remainder (scratch1 contains right - 1).
2944      __ orr(scratch2, scratch1, Operand(0x80000000u));
2945      __ tst(left, scratch2);
2946      __ b(ne, &not_smi_result);
2947
2948      // Perform division by shifting.
2949      __ CountLeadingZeros(scratch1, scratch1, scratch2);
2950      __ rsb(scratch1, scratch1, Operand(31));
2951      __ mov(right, Operand(left, LSR, scratch1));
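      // Illustrative example: left = smi 24 (raw 48), right = smi 8 (raw 16):
      // scratch1 held 15, CountLeadingZeros gives 28, the shift becomes 3 and
      // 48 >> 3 = 6, which is smi 3 = 24 / 8.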
2952      __ Ret();
2953      break;
2954    case Token::MOD:
2955      // Check for two positive smis.
2956      __ orr(scratch1, left, Operand(right));
2957      __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
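      // (OR-ing the operands lets a single tst catch a set sign bit or a set
      // smi-tag bit in either value.)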
2958      __ b(ne, &not_smi_result);
2959
2960      // Check for power of two on the right hand side.
2961      __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
2962
2963      // Perform modulus by masking.
2964      __ and_(right, left, Operand(scratch1));
2965      __ Ret();
2966      break;
2967    case Token::BIT_OR:
2968      __ orr(right, left, Operand(right));
2969      __ Ret();
2970      break;
2971    case Token::BIT_AND:
2972      __ and_(right, left, Operand(right));
2973      __ Ret();
2974      break;
2975    case Token::BIT_XOR:
2976      __ eor(right, left, Operand(right));
2977      __ Ret();
2978      break;
2979    case Token::SAR:
2980      // Remove tags from right operand.
2981      __ GetLeastBitsFromSmi(scratch1, right, 5);
2982      __ mov(right, Operand(left, ASR, scratch1));
2983      // Smi tag result.
2984      __ bic(right, right, Operand(kSmiTagMask));
2985      __ Ret();
2986      break;
2987    case Token::SHR:
2988      // Remove tags from operands. We can't do this on a 31 bit number
2989      // because then the 0s get shifted into bit 30 instead of bit 31.
2990      __ SmiUntag(scratch1, left);
2991      __ GetLeastBitsFromSmi(scratch2, right, 5);
2992      __ mov(scratch1, Operand(scratch1, LSR, scratch2));
2993      // Unsigned shift is not allowed to produce a negative number, so
2994      // check the sign bit and the sign bit after Smi tagging.
2995      __ tst(scratch1, Operand(0xc0000000));
2996      __ b(ne, &not_smi_result);
2997      // Smi tag result.
2998      __ SmiTag(right, scratch1);
2999      __ Ret();
3000      break;
3001    case Token::SHL:
3002      // Remove tags from operands.
3003      __ SmiUntag(scratch1, left);
3004      __ GetLeastBitsFromSmi(scratch2, right, 5);
3005      __ mov(scratch1, Operand(scratch1, LSL, scratch2));
3006      // Check that the signed result fits in a Smi.
3007      __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
3008      __ b(mi, &not_smi_result);
3009      __ SmiTag(right, scratch1);
3010      __ Ret();
3011      break;
3012    default:
3013      UNREACHABLE();
3014  }
3015  __ bind(&not_smi_result);
3016}
3017
3018
3019void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
3020                                                    bool smi_operands,
3021                                                    Label* not_numbers,
3022                                                    Label* gc_required) {
3023  Register left = r1;
3024  Register right = r0;
3025  Register scratch1 = r7;
3026  Register scratch2 = r9;
3027
3028  ASSERT(smi_operands || (not_numbers != NULL));
3029  if (smi_operands && FLAG_debug_code) {
3030    __ AbortIfNotSmi(left);
3031    __ AbortIfNotSmi(right);
3032  }
3033
3034  Register heap_number_map = r6;
3035  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3036
3037  switch (op_) {
3038    case Token::ADD:
3039    case Token::SUB:
3040    case Token::MUL:
3041    case Token::DIV:
3042    case Token::MOD: {
3043      // Load left and right operands into d6 and d7 or r0/r1 and r2/r3
3044      // depending on whether VFP3 is available or not.
3045      FloatingPointHelper::Destination destination =
3046          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
3047          FloatingPointHelper::kVFPRegisters :
3048          FloatingPointHelper::kCoreRegisters;
3049
3050      // Allocate new heap number for result.
3051      Register result = r5;
3052      GenerateHeapResultAllocation(
3053          masm, result, heap_number_map, scratch1, scratch2, gc_required);
3054
3055      // Load the operands.
3056      if (smi_operands) {
3057        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
3058      } else {
3059        FloatingPointHelper::LoadOperands(masm,
3060                                          destination,
3061                                          heap_number_map,
3062                                          scratch1,
3063                                          scratch2,
3064                                          not_numbers);
3065      }
3066
3067      // Calculate the result.
3068      if (destination == FloatingPointHelper::kVFPRegisters) {
3069        // Using VFP registers:
3070        // d6: Left value
3071        // d7: Right value
3072        CpuFeatures::Scope scope(VFP3);
3073        switch (op_) {
3074          case Token::ADD:
3075            __ vadd(d5, d6, d7);
3076            break;
3077          case Token::SUB:
3078            __ vsub(d5, d6, d7);
3079            break;
3080          case Token::MUL:
3081            __ vmul(d5, d6, d7);
3082            break;
3083          case Token::DIV:
3084            __ vdiv(d5, d6, d7);
3085            break;
3086          default:
3087            UNREACHABLE();
3088        }
3089
3090        __ sub(r0, result, Operand(kHeapObjectTag));
3091        __ vstr(d5, r0, HeapNumber::kValueOffset);
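        // (The pointer is untagged first so that vstr with the plain
        // kValueOffset addresses the value field, as FieldMemOperand would.)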
3092        __ add(r0, r0, Operand(kHeapObjectTag));
3093        __ Ret();
3094      } else {
3095        // Call the C function to handle the double operation.
3096        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
3097                                                         op_,
3098                                                         result,
3099                                                         scratch1);
3100      }
3101      break;
3102    }
3103    case Token::BIT_OR:
3104    case Token::BIT_XOR:
3105    case Token::BIT_AND:
3106    case Token::SAR:
3107    case Token::SHR:
3108    case Token::SHL: {
3109      if (smi_operands) {
3110        __ SmiUntag(r3, left);
3111        __ SmiUntag(r2, right);
3112      } else {
3113        // Convert operands to 32-bit integers. Right in r2 and left in r3.
3114        FloatingPointHelper::LoadNumberAsInteger(masm,
3115                                                 left,
3116                                                 r3,
3117                                                 heap_number_map,
3118                                                 scratch1,
3119                                                 scratch2,
3120                                                 d0,
3121                                                 not_numbers);
3122        FloatingPointHelper::LoadNumberAsInteger(masm,
3123                                                 right,
3124                                                 r2,
3125                                                 heap_number_map,
3126                                                 scratch1,
3127                                                 scratch2,
3128                                                 d0,
3129                                                 not_numbers);
3130      }
3131
3132      Label result_not_a_smi;
3133      switch (op_) {
3134        case Token::BIT_OR:
3135          __ orr(r2, r3, Operand(r2));
3136          break;
3137        case Token::BIT_XOR:
3138          __ eor(r2, r3, Operand(r2));
3139          break;
3140        case Token::BIT_AND:
3141          __ and_(r2, r3, Operand(r2));
3142          break;
3143        case Token::SAR:
3144          // Use only the 5 least significant bits of the shift count.
3145          __ GetLeastBitsFromInt32(r2, r2, 5);
3146          __ mov(r2, Operand(r3, ASR, r2));
3147          break;
3148        case Token::SHR:
3149          // Use only the 5 least significant bits of the shift count.
3150          __ GetLeastBitsFromInt32(r2, r2, 5);
3151          __ mov(r2, Operand(r3, LSR, r2), SetCC);
3152          // SHR is special because it is required to produce a positive answer.
3153          // The code below for writing into heap numbers isn't capable of
3154          // writing the register as an unsigned int, so we go to the slow
3155          // case if we hit this case.
3156          if (CpuFeatures::IsSupported(VFP3)) {
3157            __ b(mi, &result_not_a_smi);
3158          } else {
3159            __ b(mi, not_numbers);
3160          }
3161          break;
3162        case Token::SHL:
3163          // Use only the 5 least significant bits of the shift count.
3164          __ GetLeastBitsFromInt32(r2, r2, 5);
3165          __ mov(r2, Operand(r3, LSL, r2));
3166          break;
3167        default:
3168          UNREACHABLE();
3169      }
3170
3171      // Check that the *signed* result fits in a smi.
3172      __ add(r3, r2, Operand(0x40000000), SetCC);
3173      __ b(mi, &result_not_a_smi);
3174      __ SmiTag(r0, r2);
3175      __ Ret();
3176
3177      // Allocate new heap number for result.
3178      __ bind(&result_not_a_smi);
3179      Register result = r5;
3180      if (smi_operands) {
3181        __ AllocateHeapNumber(
3182            result, scratch1, scratch2, heap_number_map, gc_required);
3183      } else {
3184        GenerateHeapResultAllocation(
3185            masm, result, heap_number_map, scratch1, scratch2, gc_required);
3186      }
3187
3188      // r2: Answer as signed int32.
3189      // r5: Heap number to write answer into.
3190
3191      // Nothing can go wrong now, so move the heap number to r0, which is the
3192      // result.
3193      __ mov(r0, Operand(r5));
3194
3195      if (CpuFeatures::IsSupported(VFP3)) {
3196        // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
3197        // mentioned above SHR needs to always produce a positive result.
3198        CpuFeatures::Scope scope(VFP3);
3199        __ vmov(s0, r2);
3200        if (op_ == Token::SHR) {
3201          __ vcvt_f64_u32(d0, s0);
3202        } else {
3203          __ vcvt_f64_s32(d0, s0);
3204        }
3205        __ sub(r3, r0, Operand(kHeapObjectTag));
3206        __ vstr(d0, r3, HeapNumber::kValueOffset);
3207        __ Ret();
3208      } else {
3209        // Tail call that writes the int32 in r2 to the heap number in r0, using
3210        // r3 as scratch. r0 is preserved and returned.
3211        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
3212        __ TailCallStub(&stub);
3213      }
3214      break;
3215    }
3216    default:
3217      UNREACHABLE();
3218  }
3219}
3220
3221
3222// Generate the smi code. If the operation on smis is successful this return is
3223// generated. If the result is not a smi and heap number allocation is not
3224// requested, the code falls through. If number allocation is requested but a
3225// heap number cannot be allocated, the code jumps to the label gc_required.
3226void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
3227    Label* gc_required,
3228    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
3229  Label not_smis;
3230
3231  Register left = r1;
3232  Register right = r0;
3233  Register scratch1 = r7;
3234  Register scratch2 = r9;
3235
3236  // Perform combined smi check on both operands.
3237  __ orr(scratch1, left, Operand(right));
3238  STATIC_ASSERT(kSmiTag == 0);
3239  __ tst(scratch1, Operand(kSmiTagMask));
3240  __ b(ne, &not_smis);
3241
3242  // If the smi-smi operation results in a smi, the smi return is generated.
3243  GenerateSmiSmiOperation(masm);
3244
3245  // If heap number results are possible generate the result in an allocated
3246  // heap number.
3247  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
3248    GenerateFPOperation(masm, true, NULL, gc_required);
3249  }
3250  __ bind(&not_smis);
3251}
3252
3253
3254void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
3255  Label not_smis, call_runtime;
3256
3257  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
3258      result_type_ == TRBinaryOpIC::SMI) {
3259    // Only allow smi results.
3260    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
3261  } else {
3262    // Allow heap number result and don't make a transition if a heap number
3263    // cannot be allocated.
3264    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3265  }
3266
3267  // Code falls through if the result is not returned as either a smi or heap
3268  // number.
3269  GenerateTypeTransition(masm);
3270
3271  __ bind(&call_runtime);
3272  GenerateCallRuntime(masm);
3273}
3274
3275
3276void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
3277  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
3278  ASSERT(op_ == Token::ADD);
3279  // Try to add arguments as strings, otherwise, transition to the generic
3280  // TRBinaryOpIC type.
3281  GenerateAddStrings(masm);
3282  GenerateTypeTransition(masm);
3283}
3284
3285
3286void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
3287  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
3288
3289  Register left = r1;
3290  Register right = r0;
3291  Register scratch1 = r7;
3292  Register scratch2 = r9;
3293  DwVfpRegister double_scratch = d0;
3294  SwVfpRegister single_scratch = s3;
3295
3296  Register heap_number_result = no_reg;
3297  Register heap_number_map = r6;
3298  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3299
3300  Label call_runtime;
3301  // Labels for type transition, used for wrong input or output types.
3302  // Both labels are currently actually bound to the same position. We use two
3303  // different labels to distinguish the cause leading to the type transition.
3304  Label transition;
3305
3306  // Smi-smi fast case.
3307  Label skip;
3308  __ orr(scratch1, left, right);
3309  __ JumpIfNotSmi(scratch1, &skip);
3310  GenerateSmiSmiOperation(masm);
3311  // Fall through if the result is not a smi.
3312  __ bind(&skip);
3313
3314  switch (op_) {
3315    case Token::ADD:
3316    case Token::SUB:
3317    case Token::MUL:
3318    case Token::DIV:
3319    case Token::MOD: {
3320      // Load both operands and check that they are 32-bit integers.
3321      // Jump to type transition if they are not. The registers r0 and r1
3322      // (right and left) are preserved for the runtime call.
3323      FloatingPointHelper::Destination destination =
3324          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
3325          FloatingPointHelper::kVFPRegisters :
3326          FloatingPointHelper::kCoreRegisters;
3327
3328      FloatingPointHelper::LoadNumberAsInt32Double(masm,
3329                                                   right,
3330                                                   destination,
3331                                                   d7,
3332                                                   r2,
3333                                                   r3,
3334                                                   heap_number_map,
3335                                                   scratch1,
3336                                                   scratch2,
3337                                                   s0,
3338                                                   &transition);
3339      FloatingPointHelper::LoadNumberAsInt32Double(masm,
3340                                                   left,
3341                                                   destination,
3342                                                   d6,
3343                                                   r4,
3344                                                   r5,
3345                                                   heap_number_map,
3346                                                   scratch1,
3347                                                   scratch2,
3348                                                   s0,
3349                                                   &transition);
3350
3351      if (destination == FloatingPointHelper::kVFPRegisters) {
3352        CpuFeatures::Scope scope(VFP3);
3353        Label return_heap_number;
3354        switch (op_) {
3355          case Token::ADD:
3356            __ vadd(d5, d6, d7);
3357            break;
3358          case Token::SUB:
3359            __ vsub(d5, d6, d7);
3360            break;
3361          case Token::MUL:
3362            __ vmul(d5, d6, d7);
3363            break;
3364          case Token::DIV:
3365            __ vdiv(d5, d6, d7);
3366            break;
3367          default:
3368            UNREACHABLE();
3369        }
3370
3371        if (op_ != Token::DIV) {
3372          // These operations produce an integer result.
3373          // Try to return a smi if we can.
3374          // Otherwise return a heap number if allowed, or jump to type
3375          // transition.
3376
3377          __ EmitVFPTruncate(kRoundToZero,
3378                             single_scratch,
3379                             d5,
3380                             scratch1,
3381                             scratch2);
3382
3383          if (result_type_ <= TRBinaryOpIC::INT32) {
3384            // If the ne condition is set, result does
3385            // not fit in a 32-bit integer.
3386            __ b(ne, &transition);
3387          }
3388
3389          // Check if the result fits in a smi.
3390          __ vmov(scratch1, single_scratch);
3391          __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
3392          // If not try to return a heap number.
3393          __ b(mi, &return_heap_number);
3394          // Tag the result and return.
3395          __ SmiTag(r0, scratch1);
3396          __ Ret();
3397        }
3398
3399        if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
3400                                                  : TRBinaryOpIC::INT32)) {
3401          __ bind(&return_heap_number);
3402          // We are using vfp registers so r5 is available.
3403          heap_number_result = r5;
3404          GenerateHeapResultAllocation(masm,
3405                                       heap_number_result,
3406                                       heap_number_map,
3407                                       scratch1,
3408                                       scratch2,
3409                                       &call_runtime);
3410          __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3411          __ vstr(d5, r0, HeapNumber::kValueOffset);
3412          __ mov(r0, heap_number_result);
3413          __ Ret();
3414        }
3415
3416        // A DIV operation expecting an integer result falls through
3417        // to type transition.
3418
3419      } else {
3420        // We preserved r0 and r1 to be able to call runtime.
3421        // Save the left value on the stack.
3422        __ Push(r5, r4);
3423
3424        // Allocate a heap number to store the result.
3425        heap_number_result = r5;
3426        GenerateHeapResultAllocation(masm,
3427                                     heap_number_result,
3428                                     heap_number_map,
3429                                     scratch1,
3430                                     scratch2,
3431                                     &call_runtime);
3432
3433        // Load the left value from the value saved on the stack.
3434        __ Pop(r1, r0);
3435
3436        // Call the C function to handle the double operation.
3437        FloatingPointHelper::CallCCodeForDoubleOperation(
3438            masm, op_, heap_number_result, scratch1);
3439      }
3440
3441      break;
3442    }
3443
3444    case Token::BIT_OR:
3445    case Token::BIT_XOR:
3446    case Token::BIT_AND:
3447    case Token::SAR:
3448    case Token::SHR:
3449    case Token::SHL: {
3450      Label return_heap_number;
3451      Register scratch3 = r5;
3452      // Convert operands to 32-bit integers. Right in r2 and left in r3. The
3453      // registers r0 and r1 (right and left) are preserved for the runtime
3454      // call.
3455      FloatingPointHelper::LoadNumberAsInt32(masm,
3456                                             left,
3457                                             r3,
3458                                             heap_number_map,
3459                                             scratch1,
3460                                             scratch2,
3461                                             scratch3,
3462                                             d0,
3463                                             &transition);
3464      FloatingPointHelper::LoadNumberAsInt32(masm,
3465                                             right,
3466                                             r2,
3467                                             heap_number_map,
3468                                             scratch1,
3469                                             scratch2,
3470                                             scratch3,
3471                                             d0,
3472                                             &transition);
3473
3474      // The ECMA-262 standard specifies that, for shift operations, only the
3475      // 5 least significant bits of the shift value should be used.
3476      switch (op_) {
3477        case Token::BIT_OR:
3478          __ orr(r2, r3, Operand(r2));
3479          break;
3480        case Token::BIT_XOR:
3481          __ eor(r2, r3, Operand(r2));
3482          break;
3483        case Token::BIT_AND:
3484          __ and_(r2, r3, Operand(r2));
3485          break;
3486        case Token::SAR:
3487          __ and_(r2, r2, Operand(0x1f));
3488          __ mov(r2, Operand(r3, ASR, r2));
3489          break;
3490        case Token::SHR:
3491          __ and_(r2, r2, Operand(0x1f));
3492          __ mov(r2, Operand(r3, LSR, r2), SetCC);
3493          // SHR is special because it is required to produce a positive answer.
3494          // We only get a negative result if the shift value (r2) is 0.
3495          // This result cannot be represented as a signed 32-bit integer, so
3496          // we try to return a heap number if we can.
3497          // The non-VFP3 code does not support this special case, so we jump
3498          // to the runtime if VFP3 is not supported.
3499          if (CpuFeatures::IsSupported(VFP3)) {
3500            __ b(mi,
3501                 (result_type_ <= TRBinaryOpIC::INT32) ? &transition
3502                                                       : &return_heap_number);
3503          } else {
3504            __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
3505                                                           : &call_runtime);
3506          }
3507          break;
3508        case Token::SHL:
3509          __ and_(r2, r2, Operand(0x1f));
3510          __ mov(r2, Operand(r3, LSL, r2));
3511          break;
3512        default:
3513          UNREACHABLE();
3514      }
3515
3516      // Check if the result fits in a smi.
3517      __ add(scratch1, r2, Operand(0x40000000), SetCC);
3518      // If not try to return a heap number. (We know the result is an int32.)
3519      __ b(mi, &return_heap_number);
3520      // Tag the result and return.
3521      __ SmiTag(r0, r2);
3522      __ Ret();
3523
3524      __ bind(&return_heap_number);
3525      if (CpuFeatures::IsSupported(VFP3)) {
3526        CpuFeatures::Scope scope(VFP3);
3527        heap_number_result = r5;
3528        GenerateHeapResultAllocation(masm,
3529                                     heap_number_result,
3530                                     heap_number_map,
3531                                     scratch1,
3532                                     scratch2,
3533                                     &call_runtime);
3534
3535        if (op_ != Token::SHR) {
3536          // Convert the result to a floating point value.
3537          __ vmov(double_scratch.low(), r2);
3538          __ vcvt_f64_s32(double_scratch, double_scratch.low());
3539        } else {
3540          // The result must be interpreted as an unsigned 32-bit integer.
3541          __ vmov(double_scratch.low(), r2);
3542          __ vcvt_f64_u32(double_scratch, double_scratch.low());
3543        }
3544
3545        // Store the result.
3546        __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
3547        __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
3548        __ mov(r0, heap_number_result);
3549        __ Ret();
3550      } else {
3551        // Tail call that writes the int32 in r2 to the heap number in r0, using
3552        // r3 as scratch. r0 is preserved and returned.
3553        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
3554        __ TailCallStub(&stub);
3555      }
3556
3557      break;
3558    }
3559
3560    default:
3561      UNREACHABLE();
3562  }
3563
3564  if (transition.is_linked()) {
3565    __ bind(&transition);
3566    GenerateTypeTransition(masm);
3567  }
3568
3569  __ bind(&call_runtime);
3570  GenerateCallRuntime(masm);
3571}
3572
3573
3574void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3575  Label not_numbers, call_runtime;
3576  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
3577
3578  GenerateFPOperation(masm, false, &not_numbers, &call_runtime);
3579
3580  __ bind(&not_numbers);
3581  GenerateTypeTransition(masm);
3582
3583  __ bind(&call_runtime);
3584  GenerateCallRuntime(masm);
3585}
3586
3587
3588void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3589  Label call_runtime, call_string_add_or_runtime;
3590
3591  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3592
3593  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3594
3595  __ bind(&call_string_add_or_runtime);
3596  if (op_ == Token::ADD) {
3597    GenerateAddStrings(masm);
3598  }
3599
3600  __ bind(&call_runtime);
3601  GenerateCallRuntime(masm);
3602}
3603
3604
3605void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3606  ASSERT(op_ == Token::ADD);
3607  Label left_not_string, call_runtime;
3608
3609  Register left = r1;
3610  Register right = r0;
3611
3612  // Check if left argument is a string.
3613  __ JumpIfSmi(left, &left_not_string);
3614  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
3615  __ b(ge, &left_not_string);
3616
3617  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3618  GenerateRegisterArgsPush(masm);
3619  __ TailCallStub(&string_add_left_stub);
3620
3621  // Left operand is not a string, test right.
3622  __ bind(&left_not_string);
3623  __ JumpIfSmi(right, &call_runtime);
3624  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
3625  __ b(ge, &call_runtime);
3626
3627  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3628  GenerateRegisterArgsPush(masm);
3629  __ TailCallStub(&string_add_right_stub);
3630
3631  // At least one argument is not a string.
3632  __ bind(&call_runtime);
3633}
3634
3635
3636void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3637  GenerateRegisterArgsPush(masm);
3638  switch (op_) {
3639    case Token::ADD:
3640      __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
3641      break;
3642    case Token::SUB:
3643      __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
3644      break;
3645    case Token::MUL:
3646      __ InvokeBuiltin(Builtins::MUL, JUMP_JS);
3647      break;
3648    case Token::DIV:
3649      __ InvokeBuiltin(Builtins::DIV, JUMP_JS);
3650      break;
3651    case Token::MOD:
3652      __ InvokeBuiltin(Builtins::MOD, JUMP_JS);
3653      break;
3654    case Token::BIT_OR:
3655      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
3656      break;
3657    case Token::BIT_AND:
3658      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
3659      break;
3660    case Token::BIT_XOR:
3661      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
3662      break;
3663    case Token::SAR:
3664      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
3665      break;
3666    case Token::SHR:
3667      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
3668      break;
3669    case Token::SHL:
3670      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
3671      break;
3672    default:
3673      UNREACHABLE();
3674  }
3675}
3676
3677
3678void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
3679    MacroAssembler* masm,
3680    Register result,
3681    Register heap_number_map,
3682    Register scratch1,
3683    Register scratch2,
3684    Label* gc_required) {
3685
3686  // Code below will scratch result if allocation fails. To keep both arguments
3687  // intact for the runtime call, result cannot be one of these.
3688  ASSERT(!result.is(r0) && !result.is(r1));
3689
3690  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3691    Label skip_allocation, allocated;
3692    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
3693    // If the overwritable operand is already an object, we skip the
3694    // allocation of a heap number.
3695    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3696    // Allocate a heap number for the result.
3697    __ AllocateHeapNumber(
3698        result, scratch1, scratch2, heap_number_map, gc_required);
3699    __ b(&allocated);
3700    __ bind(&skip_allocation);
3701    // Use object holding the overwritable operand for result.
3702    __ mov(result, Operand(overwritable_operand));
3703    __ bind(&allocated);
3704  } else {
3705    ASSERT(mode_ == NO_OVERWRITE);
3706    __ AllocateHeapNumber(
3707        result, scratch1, scratch2, heap_number_map, gc_required);
3708  }
3709}
3710
3711
3712void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3713  __ Push(r1, r0);
3714}
3715
3716
3717void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3718  // Untagged case: double input in d2, double result goes
3719  //   into d2.
3720  // Tagged case: tagged input on top of stack and in r0,
3721  //   tagged result (heap number) goes into r0.
3722
3723  Label input_not_smi;
3724  Label loaded;
3725  Label calculate;
3726  Label invalid_cache;
3727  const Register scratch0 = r9;
3728  const Register scratch1 = r7;
3729  const Register cache_entry = r0;
3730  const bool tagged = (argument_type_ == TAGGED);
3731
3732  if (CpuFeatures::IsSupported(VFP3)) {
3733    CpuFeatures::Scope scope(VFP3);
3734    if (tagged) {
3735      // Argument is a number and is on stack and in r0.
3736      // Load argument and check if it is a smi.
3737      __ JumpIfNotSmi(r0, &input_not_smi);
3738
3739      // Input is a smi. Convert to double and load the low and high words
3740      // of the double into r2, r3.
3741      __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
3742      __ b(&loaded);
3743
3744      __ bind(&input_not_smi);
3745      // Check if input is a HeapNumber.
3746      __ CheckMap(r0,
3747                  r1,
3748                  Heap::kHeapNumberMapRootIndex,
3749                  &calculate,
3750                  true);
3751      // Input is a HeapNumber. Load it to a double register and store the
3752      // low and high words into r2, r3.
3753      __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
3754      __ vmov(r2, r3, d0);
3755    } else {
3756      // Input is untagged double in d2. Output goes to d2.
3757      __ vmov(r2, r3, d2);
3758    }
3759    __ bind(&loaded);
3760    // r2 = low 32 bits of double value
3761    // r3 = high 32 bits of double value
3762    // Compute hash (the shifts are arithmetic):
3763    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3764    __ eor(r1, r2, Operand(r3));
3765    __ eor(r1, r1, Operand(r1, ASR, 16));
3766    __ eor(r1, r1, Operand(r1, ASR, 8));
3767    ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
3768    __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1));
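    // Illustrative example: for the double 1.0 (low 0x00000000, high
    // 0x3FF00000) the xor/shift steps give 0x3FF00000, then 0x3FF03FF0, then
    // 0x3FCFCFCF, which is then masked down to the cache size.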
3769
3770    // r2 = low 32 bits of double value.
3771    // r3 = high 32 bits of double value.
3772    // r1 = TranscendentalCache::hash(double value).
3773    __ mov(cache_entry,
3774           Operand(ExternalReference::transcendental_cache_array_address()));
3775    // r0 points to cache array.
3776    __ ldr(cache_entry, MemOperand(cache_entry,
3777        type_ * sizeof(TranscendentalCache::caches_[0])));
3778    // r0 points to the cache for the type type_.
3779    // If NULL, the cache hasn't been initialized yet, so go through runtime.
3780    __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
3781    __ b(eq, &invalid_cache);
3782
3783#ifdef DEBUG
3784    // Check that the layout of cache elements match expectations.
3785    { TranscendentalCache::Element test_elem[2];
3786      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3787      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3788      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3789      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3790      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3791      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
3792      CHECK_EQ(0, elem_in0 - elem_start);
3793      CHECK_EQ(kIntSize, elem_in1 - elem_start);
3794      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3795    }
3796#endif
3797
3798    // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
3799    __ add(r1, r1, Operand(r1, LSL, 1));
3800    __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
3801    // Check if cache matches: Double value is stored in uint32_t[2] array.
3802    __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
3803    __ cmp(r2, r4);
3804    __ b(ne, &calculate);
3805    __ cmp(r3, r5);
3806    __ b(ne, &calculate);
3807    // Cache hit. Load result, cleanup and return.
3808    if (tagged) {
3809      // Pop input value from stack and load result into r0.
3810      __ pop();
3811      __ mov(r0, Operand(r6));
3812    } else {
3813      // Load result into d2.
3814      __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3815    }
3816    __ Ret();
3817  }  // if (CpuFeatures::IsSupported(VFP3))
3818
3819  __ bind(&calculate);
3820  if (tagged) {
3821    __ bind(&invalid_cache);
3822    __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
3823  } else {
3824    if (!CpuFeatures::IsSupported(VFP3)) UNREACHABLE();
3825    CpuFeatures::Scope scope(VFP3);
3826
3827    Label no_update;
3828    Label skip_cache;
3829    const Register heap_number_map = r5;
3830
3831    // Call C function to calculate the result and update the cache.
3832    // Register r0 holds precalculated cache entry address; preserve
3833    // it on the stack and pop it into register cache_entry after the
3834    // call.
3835    __ push(cache_entry);
3836    GenerateCallCFunction(masm, scratch0);
3837    __ GetCFunctionDoubleResult(d2);
3838
3839    // Try to update the cache. If we cannot allocate a
3840    // heap number, we return the result without updating.
3841    __ pop(cache_entry);
3842    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3843    __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
3844    __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
3845    __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
3846    __ Ret();
3847
3848    __ bind(&invalid_cache);
3849    // The cache is invalid. Call runtime which will recreate the
3850    // cache.
3851    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
3852    __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
3853    __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3854    __ EnterInternalFrame();
3855    __ push(r0);
3856    __ CallRuntime(RuntimeFunction(), 1);
3857    __ LeaveInternalFrame();
3858    __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
3859    __ Ret();
3860
3861    __ bind(&skip_cache);
3862    // Call C function to calculate the result and answer directly
3863    // without updating the cache.
3864    GenerateCallCFunction(masm, scratch0);
3865    __ GetCFunctionDoubleResult(d2);
3866    __ bind(&no_update);
3867
3868    // We return the value in d2 without adding it to the cache, but
3869    // we cause a scavenging GC so that future allocations will succeed.
3870    __ EnterInternalFrame();
3871
3872    // Allocate an aligned object larger than a HeapNumber.
3873    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3874    __ mov(scratch0, Operand(4 * kPointerSize));
3875    __ push(scratch0);
3876    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3877    __ LeaveInternalFrame();
3878    __ Ret();
3879  }
3880}
3881
3882
3883void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3884                                                    Register scratch) {
3885  __ push(lr);
3886  __ PrepareCallCFunction(2, scratch);
3887  __ vmov(r0, r1, d2);
3888  switch (type_) {
3889    case TranscendentalCache::SIN:
3890      __ CallCFunction(ExternalReference::math_sin_double_function(), 2);
3891      break;
3892    case TranscendentalCache::COS:
3893      __ CallCFunction(ExternalReference::math_cos_double_function(), 2);
3894      break;
3895    case TranscendentalCache::LOG:
3896      __ CallCFunction(ExternalReference::math_log_double_function(), 2);
3897      break;
3898    default:
3899      UNIMPLEMENTED();
3900      break;
3901  }
3902  __ pop(lr);
3903}
3904
3905
3906Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3907  switch (type_) {
3908    // Add more cases when necessary.
3909    case TranscendentalCache::SIN: return Runtime::kMath_sin;
3910    case TranscendentalCache::COS: return Runtime::kMath_cos;
3911    case TranscendentalCache::LOG: return Runtime::kMath_log;
3912    default:
3913      UNIMPLEMENTED();
3914      return Runtime::kAbort;
3915  }
3916}
3917
3918
3919void StackCheckStub::Generate(MacroAssembler* masm) {
3920  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3921}
3922
3923
3924void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
3925  Label slow, done;
3926
3927  Register heap_number_map = r6;
3928  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3929
3930  if (op_ == Token::SUB) {
3931    if (include_smi_code_) {
3932      // Check whether the value is a smi.
3933      Label try_float;
3934      __ tst(r0, Operand(kSmiTagMask));
3935      __ b(ne, &try_float);
3936
3937      // Go slow case if the value of the expression is zero
3938      // to make sure that we switch between 0 and -0.
3939      if (negative_zero_ == kStrictNegativeZero) {
3940        // If we have to check for zero, then we can check for the max negative
3941        // smi while we are at it.
3942        __ bic(ip, r0, Operand(0x80000000), SetCC);
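        // (Clearing the sign bit leaves zero only for 0 and 0x80000000, i.e.
        // smi zero and the most negative smi, so one test covers both.)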
3943        __ b(eq, &slow);
3944        __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
3945        __ Ret();
3946      } else {
3947        // The value of the expression is a smi and 0 is OK for -0.  Try
3948        // optimistic subtraction '0 - value'.
3949        __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
3950        __ Ret(vc);
3951        // We don't have to reverse the optimistic neg since the only case
3952        // where we fall through is the minimum negative Smi, which is the case
3953        // where the neg leaves the register unchanged.
3954        __ jmp(&slow);  // Go slow on max negative Smi.
3955      }
3956      __ bind(&try_float);
3957    } else if (FLAG_debug_code) {
3958      __ tst(r0, Operand(kSmiTagMask));
3959      __ Assert(ne, "Unexpected smi operand.");
3960    }
3961
3962    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
3963    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3964    __ cmp(r1, heap_number_map);
3965    __ b(ne, &slow);
3966    // r0 is a heap number.  Get a new heap number in r1.
3967    if (overwrite_ == UNARY_OVERWRITE) {
3968      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
3969      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
3970      __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
3971    } else {
3972      __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
3973      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
3974      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
3975      __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
3976      __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
3977      __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
3978      __ mov(r0, Operand(r1));
3979    }
3980  } else if (op_ == Token::BIT_NOT) {
3981    if (include_smi_code_) {
3982      Label non_smi;
3983      __ JumpIfNotSmi(r0, &non_smi);
3984      __ mvn(r0, Operand(r0));
3985      // Bit-clear inverted smi-tag.
3986      __ bic(r0, r0, Operand(kSmiTagMask));
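      // Illustrative example: r0 = smi 5 (raw 0x0000000A); mvn gives
      // 0xFFFFFFF5 and clearing the tag bit gives 0xFFFFFFF4, which is the
      // raw form of smi -6 = ~5.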
3987      __ Ret();
3988      __ bind(&non_smi);
3989    } else if (FLAG_debug_code) {
3990      __ tst(r0, Operand(kSmiTagMask));
3991      __ Assert(ne, "Unexpected smi operand.");
3992    }
3993
3994    // Check if the operand is a heap number.
3995    __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
3996    __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3997    __ cmp(r1, heap_number_map);
3998    __ b(ne, &slow);
3999
4000    // Convert the heap number in r0 to an untagged integer in r1.
4001    __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
4002
4003    // Do the bitwise operation (move negated) and check if the result
4004    // fits in a smi.
4005    Label try_float;
4006    __ mvn(r1, Operand(r1));
4007    __ add(r2, r1, Operand(0x40000000), SetCC);
4008    __ b(mi, &try_float);
4009    __ mov(r0, Operand(r1, LSL, kSmiTagSize));
4010    __ b(&done);
4011
4012    __ bind(&try_float);
4013    if (overwrite_ != UNARY_OVERWRITE) {
4014      // Allocate a fresh heap number, but don't overwrite r0 until
4015      // we're sure we can do it without going through the slow case
4016      // that needs the value in r0.
4017      __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
4018      __ mov(r0, Operand(r2));
4019    }
4020
4021    if (CpuFeatures::IsSupported(VFP3)) {
4022      // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
4023      CpuFeatures::Scope scope(VFP3);
4024      __ vmov(s0, r1);
4025      __ vcvt_f64_s32(d0, s0);
4026      __ sub(r2, r0, Operand(kHeapObjectTag));
4027      __ vstr(d0, r2, HeapNumber::kValueOffset);
4028    } else {
4029      // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
4030      // have to set up a frame.
4031      WriteInt32ToHeapNumberStub stub(r1, r0, r2);
4032      __ push(lr);
4033      __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
4034      __ pop(lr);
4035    }
4036  } else {
4037    UNIMPLEMENTED();
4038  }
4039
4040  __ bind(&done);
4041  __ Ret();
4042
4043  // Handle the slow case by jumping to the JavaScript builtin.
4044  __ bind(&slow);
4045  __ push(r0);
4046  switch (op_) {
4047    case Token::SUB:
4048      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
4049      break;
4050    case Token::BIT_NOT:
4051      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
4052      break;
4053    default:
4054      UNREACHABLE();
4055  }
4056}
4057
4058
4059void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
4060  __ Throw(r0);
4061}
4062
4063
4064void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
4065                                          UncatchableExceptionType type) {
4066  __ ThrowUncatchable(type, r0);
4067}
4068
4069
4070void CEntryStub::GenerateCore(MacroAssembler* masm,
4071                              Label* throw_normal_exception,
4072                              Label* throw_termination_exception,
4073                              Label* throw_out_of_memory_exception,
4074                              bool do_gc,
4075                              bool always_allocate) {
4076  // r0: result parameter for PerformGC, if any
4077  // r4: number of arguments including receiver  (C callee-saved)
4078  // r5: pointer to builtin function  (C callee-saved)
4079  // r6: pointer to the first argument (C callee-saved)
4080
4081  if (do_gc) {
4082    // Passing r0.
4083    __ PrepareCallCFunction(1, r1);
4084    __ CallCFunction(ExternalReference::perform_gc_function(), 1);
4085  }
4086
4087  ExternalReference scope_depth =
4088      ExternalReference::heap_always_allocate_scope_depth();
4089  if (always_allocate) {
4090    __ mov(r0, Operand(scope_depth));
4091    __ ldr(r1, MemOperand(r0));
4092    __ add(r1, r1, Operand(1));
4093    __ str(r1, MemOperand(r0));
4094  }
4095
4096  // Call C built-in.
4097  // r0 = argc, r1 = argv
4098  __ mov(r0, Operand(r4));
4099  __ mov(r1, Operand(r6));
4100
4101#if defined(V8_HOST_ARCH_ARM)
4102  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4103  int frame_alignment_mask = frame_alignment - 1;
4104  if (FLAG_debug_code) {
4105    if (frame_alignment > kPointerSize) {
4106      Label alignment_as_expected;
4107      ASSERT(IsPowerOf2(frame_alignment));
4108      __ tst(sp, Operand(frame_alignment_mask));
4109      __ b(eq, &alignment_as_expected);
4110      // Don't use Check here, as it will call Runtime_Abort re-entering here.
4111      __ stop("Unexpected alignment");
4112      __ bind(&alignment_as_expected);
4113    }
4114  }
4115#endif
4116
4117  // TODO(1242173): To let the GC traverse the return address of the exit
4118  // frames, we need to know where the return address is. Right now,
4119  // we store it on the stack to be able to find it again, but we never
4120  // restore from it in case of changes, which makes it impossible to
4121  // support moving the C entry code stub. This should be fixed, but currently
4122  // this is OK because the CEntryStub gets generated so early in the V8 boot
4123  // sequence that it is not moving ever.
4124
4125  // Compute the return address in lr to return to after the jump below. Pc is
4126  // already at '+ 8' from the current instruction but return is after three
4127  // instructions so add another 4 to pc to get the return address.
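  // (The add, str and Jump below are 4 bytes each, 12 bytes in total; pc
  // reads as the current instruction + 8, so pc + 4 is the address just past
  // the Jump.)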
4128  masm->add(lr, pc, Operand(4));
4129  __ str(lr, MemOperand(sp, 0));
4130  masm->Jump(r5);
4131
4132  if (always_allocate) {
4133    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
4134    // though (contain the result).
4135    __ mov(r2, Operand(scope_depth));
4136    __ ldr(r3, MemOperand(r2));
4137    __ sub(r3, r3, Operand(1));
4138    __ str(r3, MemOperand(r2));
4139  }
4140
4141  // check for failure result
4142  Label failure_returned;
4143  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4144  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
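  // (The STATIC_ASSERT above guarantees that the failure tag fills the low
  // tag bits, so adding 1 carries out of them exactly when r0 carries the
  // failure tag.)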
4145  __ add(r2, r0, Operand(1));
4146  __ tst(r2, Operand(kFailureTagMask));
4147  __ b(eq, &failure_returned);
4148
4149  // Exit C frame and return.
4150  // r0:r1: result
4151  // sp: stack pointer
4152  // fp: frame pointer
4153  //  Callee-saved register r4 still holds argc.
4154  __ LeaveExitFrame(save_doubles_, r4);
4155  __ mov(pc, lr);
4156
4157  // check if we should retry or throw exception
4158  Label retry;
4159  __ bind(&failure_returned);
4160  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4161  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4162  __ b(eq, &retry);
4163
4164  // Special handling of out of memory exceptions.
4165  Failure* out_of_memory = Failure::OutOfMemoryException();
4166  __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4167  __ b(eq, throw_out_of_memory_exception);
4168
4169  // Retrieve the pending exception and clear the variable.
4170  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
4171  __ ldr(r3, MemOperand(ip));
4172  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
4173  __ ldr(r0, MemOperand(ip));
4174  __ str(r3, MemOperand(ip));
4175
4176  // Special handling of termination exceptions which are uncatchable
4177  // by javascript code.
4178  __ cmp(r0, Operand(Factory::termination_exception()));
4179  __ b(eq, throw_termination_exception);
4180
4181  // Handle normal exception.
4182  __ jmp(throw_normal_exception);
4183
4184  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
4185}
4186
4187
4188void CEntryStub::Generate(MacroAssembler* masm) {
4189  // Called from JavaScript; parameters are on stack as if calling JS function
4190  // r0: number of arguments including receiver
4191  // r1: pointer to builtin function
4192  // fp: frame pointer  (restored after C call)
4193  // sp: stack pointer  (restored as callee's sp after C call)
4194  // cp: current context  (C callee-saved)
4195
4196  // Result returned in r0 or r0+r1 by default.
4197
4198  // NOTE: Invocations of builtins may return failure objects
4199  // instead of a proper result. The builtin entry handles
4200  // this by performing a garbage collection and retrying the
4201  // builtin once.
4202
4203  // Compute the argv pointer in a callee-saved register.
4204  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
4205  __ sub(r6, r6, Operand(kPointerSize));
4206
4207  // Enter the exit frame that transitions from JavaScript to C++.
4208  __ EnterExitFrame(save_doubles_);
4209
4210  // Setup argc and the builtin function in callee-saved registers.
4211  __ mov(r4, Operand(r0));
4212  __ mov(r5, Operand(r1));
4213
4214  // r4: number of arguments (C callee-saved)
4215  // r5: pointer to builtin function (C callee-saved)
4216  // r6: pointer to first argument (C callee-saved)
4217
4218  Label throw_normal_exception;
4219  Label throw_termination_exception;
4220  Label throw_out_of_memory_exception;
4221
4222  // Call into the runtime system.
4223  GenerateCore(masm,
4224               &throw_normal_exception,
4225               &throw_termination_exception,
4226               &throw_out_of_memory_exception,
4227               false,
4228               false);
4229
4230  // Do space-specific GC and retry runtime call.
4231  GenerateCore(masm,
4232               &throw_normal_exception,
4233               &throw_termination_exception,
4234               &throw_out_of_memory_exception,
4235               true,
4236               false);
4237
4238  // Do full GC and retry runtime call one final time.
4239  Failure* failure = Failure::InternalError();
4240  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
4241  GenerateCore(masm,
4242               &throw_normal_exception,
4243               &throw_termination_exception,
4244               &throw_out_of_memory_exception,
4245               true,
4246               true);
4247
4248  __ bind(&throw_out_of_memory_exception);
4249  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
4250
4251  __ bind(&throw_termination_exception);
4252  GenerateThrowUncatchable(masm, TERMINATION);
4253
4254  __ bind(&throw_normal_exception);
4255  GenerateThrowTOS(masm);
4256}
4257
4258
4259void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4260  // r0: code entry
4261  // r1: function
4262  // r2: receiver
4263  // r3: argc
4264  // [sp+0]: argv
4265
4266  Label invoke, exit;
4267
4268  // Called from C, so do not pop argc and args on exit (preserve sp)
4269  // No need to save register-passed args
4270  // Save callee-saved registers (incl. cp and fp), sp, and lr
4271  __ stm(db_w, sp, kCalleeSaved | lr.bit());
4272
4273  // Get address of argv, see stm above.
4274  // r0: code entry
4275  // r1: function
4276  // r2: receiver
4277  // r3: argc
4278  __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize));  // argv
4279
4280  // Push a frame with special values setup to mark it as an entry frame.
4281  // r0: code entry
4282  // r1: function
4283  // r2: receiver
4284  // r3: argc
4285  // r4: argv
4286  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
4287  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4288  __ mov(r7, Operand(Smi::FromInt(marker)));
4289  __ mov(r6, Operand(Smi::FromInt(marker)));
4290  __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
4291  __ ldr(r5, MemOperand(r5));
4292  __ Push(r8, r7, r6, r5);
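  // Illustrative sketch of the entry frame just pushed (lowest address first):
  //   sp[0]:  saved c_entry_fp      (r5)
  //   sp[4]:  frame marker          (r6)
  //   sp[8]:  frame marker          (r7)
  //   sp[12]: bad frame pointer -1  (r8)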
4293
4294  // Setup frame pointer for the frame to be pushed.
4295  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
4296
4297#ifdef ENABLE_LOGGING_AND_PROFILING
4298  // If this is the outermost JS call, set js_entry_sp value.
4299  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
4300  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
4301  __ ldr(r6, MemOperand(r5));
4302  __ cmp(r6, Operand(0, RelocInfo::NONE));
4303  __ str(fp, MemOperand(r5), eq);
4304#endif
4305
4306  // Call a faked try-block that does the invoke.
4307  __ bl(&invoke);
4308
4309  // Caught exception: Store result (exception) in the pending
4310  // exception field in the JSEnv and return a failure sentinel.
4311  // Coming in here the fp will be invalid because the PushTryHandler below
4312  // sets it to 0 to signal the existence of the JSEntry frame.
4313  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
4314  __ str(r0, MemOperand(ip));
4315  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4316  __ b(&exit);
4317
4318  // Invoke: Link this frame into the handler chain.
4319  __ bind(&invoke);
4320  // Must preserve r0-r4, r5-r7 are available.
4321  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
4322  // If an exception not caught by another handler occurs, this handler
4323  // returns control to the code after the bl(&invoke) above, which
4324  // restores all kCalleeSaved registers (including cp and fp) to their
4325  // saved values before returning a failure to C.
4326
4327  // Clear any pending exceptions.
4328  __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
4329  __ ldr(r5, MemOperand(ip));
4330  __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
4331  __ str(r5, MemOperand(ip));
4332
4333  // Invoke the function by calling through JS entry trampoline builtin.
4334  // Notice that we cannot store a reference to the trampoline code directly in
4335  // this stub, because runtime stubs are not traversed when doing GC.
4336
4337  // Expected registers by Builtins::JSEntryTrampoline
4338  // r0: code entry
4339  // r1: function
4340  // r2: receiver
4341  // r3: argc
4342  // r4: argv
4343  if (is_construct) {
4344    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
4345    __ mov(ip, Operand(construct_entry));
4346  } else {
4347    ExternalReference entry(Builtins::JSEntryTrampoline);
4348    __ mov(ip, Operand(entry));
4349  }
4350  __ ldr(ip, MemOperand(ip));  // deref address
4351
4352  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
4353  // macro for the add instruction because we don't want the coverage tool
4354  // inserting instructions here after we read the pc.
4355  __ mov(lr, Operand(pc));
4356  masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
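  // Illustrative: reading pc in the mov above yields the address of the mov
  // plus 8, which is exactly the instruction following the add, so lr ends up
  // holding the correct return address for the trampoline call.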
4357
4358  // Unlink this frame from the handler chain. When reading the
4359  // address of the next handler, there is no need to use the address
4360  // displacement since the current stack pointer (sp) points directly
4361  // to the stack handler.
4362  __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
4363  __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
4364  __ str(r3, MemOperand(ip));
4365  // No need to restore registers
4366  __ add(sp, sp, Operand(StackHandlerConstants::kSize));
4367
4368#ifdef ENABLE_LOGGING_AND_PROFILING
4369  // If current FP value is the same as js_entry_sp value, it means that
4370  // the current function is the outermost.
4371  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
4372  __ ldr(r6, MemOperand(r5));
4373  __ cmp(fp, Operand(r6));
4374  __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq);
4375  __ str(r6, MemOperand(r5), eq);
4376#endif
4377
4378  __ bind(&exit);  // r0 holds result
4379  // Restore the top frame descriptors from the stack.
4380  __ pop(r3);
4381  __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
4382  __ str(r3, MemOperand(ip));
4383
4384  // Reset the stack to the callee saved registers.
4385  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
4386
4387  // Restore callee-saved registers and return.
4388#ifdef DEBUG
4389  if (FLAG_debug_code) {
4390    __ mov(lr, Operand(pc));
4391  }
4392#endif
4393  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
4394}
4395
4396
4397// Uses registers r0 to r4.
4398// Expected input (depending on whether args are in registers or on the stack):
4399// * object: r0 or at sp + 1 * kPointerSize.
4400// * function: r1 or at sp.
4401//
4402// An inlined call site may have been generated before calling this stub.
4403// In this case the offset to the inline site to patch is passed on the stack,
4404// in the safepoint slot for register r4.
4405// (See LCodeGen::DoInstanceOfKnownGlobal)
4406void InstanceofStub::Generate(MacroAssembler* masm) {
4407  // Call site inlining and patching implies arguments in registers.
4408  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4409  // ReturnTrueFalse is only implemented for inlined call sites.
4410  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4411
4412  // Fixed register usage throughout the stub:
4413  const Register object = r0;  // Object (lhs).
4414  Register map = r3;  // Map of the object.
4415  const Register function = r1;  // Function (rhs).
4416  const Register prototype = r4;  // Prototype of the function.
4417  const Register inline_site = r9;
4418  const Register scratch = r2;
4419
4420  const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
4421
4422  Label slow, loop, is_instance, is_not_instance, not_js_object;
4423
4424  if (!HasArgsInRegisters()) {
4425    __ ldr(object, MemOperand(sp, 1 * kPointerSize));
4426    __ ldr(function, MemOperand(sp, 0));
4427  }
4428
4429  // Check that the left hand is a JS object and load map.
4430  __ JumpIfSmi(object, &not_js_object);
4431  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4432
4433  // If there is a call site cache don't look in the global cache, but do the
4434  // real lookup and update the call site cache.
4435  if (!HasCallSiteInlineCheck()) {
4436    Label miss;
4437    __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
4438    __ cmp(function, ip);
4439    __ b(ne, &miss);
4440    __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
4441    __ cmp(map, ip);
4442    __ b(ne, &miss);
4443    __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4444    __ Ret(HasArgsInRegisters() ? 0 : 2);
4445
4446    __ bind(&miss);
4447  }
4448
4449  // Get the prototype of the function.
4450  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
4451
4452  // Check that the function prototype is a JS object.
4453  __ JumpIfSmi(prototype, &slow);
4454  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4455
4456  // Update the global instanceof or call site inlined cache with the current
4457  // map and function. The cached answer will be set when it is known below.
4458  if (!HasCallSiteInlineCheck()) {
4459    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4460    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4461  } else {
4462    ASSERT(HasArgsInRegisters());
4463    // Patch the (relocated) inlined map check.
4464
4465    // The offset was stored in r4 safepoint slot.
4466    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
4467    __ LoadFromSafepointRegisterSlot(scratch, r4);
4468    __ sub(inline_site, lr, scratch);
4469    // Get the map location in scratch and patch it.
4470    __ GetRelocatedValueLocation(inline_site, scratch);
4471    __ str(map, MemOperand(scratch));
4472  }
4473
4474  // Register mapping: r3 is object map and r4 is function prototype.
4475  // Get prototype of object into r2.
4476  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4477
4478  // We don't need map any more. Use it as a scratch register.
4479  Register scratch2 = map;
4480  map = no_reg;
4481
4482  // Loop through the prototype chain looking for the function prototype.
4483  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4484  __ bind(&loop);
4485  __ cmp(scratch, Operand(prototype));
4486  __ b(eq, &is_instance);
4487  __ cmp(scratch, scratch2);
4488  __ b(eq, &is_not_instance);
4489  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4490  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4491  __ jmp(&loop);
4492
4493  __ bind(&is_instance);
4494  if (!HasCallSiteInlineCheck()) {
4495    __ mov(r0, Operand(Smi::FromInt(0)));
4496    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4497  } else {
4498    // Patch the call site to return true.
4499    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
4500    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4501    // Get the boolean result location in scratch and patch it.
4502    __ GetRelocatedValueLocation(inline_site, scratch);
4503    __ str(r0, MemOperand(scratch));
4504
4505    if (!ReturnTrueFalseObject()) {
4506      __ mov(r0, Operand(Smi::FromInt(0)));
4507    }
4508  }
4509  __ Ret(HasArgsInRegisters() ? 0 : 2);
4510
4511  __ bind(&is_not_instance);
4512  if (!HasCallSiteInlineCheck()) {
4513    __ mov(r0, Operand(Smi::FromInt(1)));
4514    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
4515  } else {
4516    // Patch the call site to return false.
4517    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
4518    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4519    // Get the boolean result location in scratch and patch it.
4520    __ GetRelocatedValueLocation(inline_site, scratch);
4521    __ str(r0, MemOperand(scratch));
4522
4523    if (!ReturnTrueFalseObject()) {
4524      __ mov(r0, Operand(Smi::FromInt(1)));
4525    }
4526  }
4527  __ Ret(HasArgsInRegisters() ? 0 : 2);
4528
4529  Label object_not_null, object_not_null_or_smi;
4530  __ bind(&not_js_object);
4531  // Before the null, smi and string value checks, check that the rhs is a
4532  // function, because an exception needs to be thrown for a non-function rhs.
4533  __ JumpIfSmi(function, &slow);
4534  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
4535  __ b(ne, &slow);
4536
4537  // Null is not instance of anything.
4538  __ cmp(scratch, Operand(Factory::null_value()));
4539  __ b(ne, &object_not_null);
4540  __ mov(r0, Operand(Smi::FromInt(1)));
4541  __ Ret(HasArgsInRegisters() ? 0 : 2);
4542
4543  __ bind(&object_not_null);
4544  // Smi values are not instances of anything.
4545  __ JumpIfNotSmi(object, &object_not_null_or_smi);
4546  __ mov(r0, Operand(Smi::FromInt(1)));
4547  __ Ret(HasArgsInRegisters() ? 0 : 2);
4548
4549  __ bind(&object_not_null_or_smi);
4550  // String values are not instances of anything.
4551  __ IsObjectJSStringType(object, scratch, &slow);
4552  __ mov(r0, Operand(Smi::FromInt(1)));
4553  __ Ret(HasArgsInRegisters() ? 0 : 2);
4554
4555  // Slow-case.  Tail call builtin.
4556  __ bind(&slow);
4557  if (!ReturnTrueFalseObject()) {
4558    if (HasArgsInRegisters()) {
4559      __ Push(r0, r1);
4560    }
4561    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
4562  } else {
4563    __ EnterInternalFrame();
4564    __ Push(r0, r1);
4565    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
4566    __ LeaveInternalFrame();
4567    __ cmp(r0, Operand(0));
4568    __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
4569    __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
4570    __ Ret(HasArgsInRegisters() ? 0 : 2);
4571  }
4572}
4573
4574
4575Register InstanceofStub::left() { return r0; }
4576
4577
4578Register InstanceofStub::right() { return r1; }
4579
4580
4581void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4582  // The displacement is the offset of the last parameter (if any)
4583  // relative to the frame pointer.
4584  static const int kDisplacement =
4585      StandardFrameConstants::kCallerSPOffset - kPointerSize;
4586
4587  // Check that the key is a smi.
4588  Label slow;
4589  __ JumpIfNotSmi(r1, &slow);
4590
4591  // Check if the calling frame is an arguments adaptor frame.
4592  Label adaptor;
4593  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4594  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
4595  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4596  __ b(eq, &adaptor);
4597
4598  // Check index against formal parameters count limit passed in
4599  // through register r0. Use unsigned comparison to get negative
4600  // check for free.
4601  __ cmp(r1, r0);
4602  __ b(hs, &slow);
4603
4604  // Read the argument from the stack and return it.
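  // Both r0 (parameter count) and r1 (key) are smis, so their difference is a
  // smi as well; shifting it left by kPointerSizeLog2 - kSmiTagSize converts
  // the tagged element distance into a byte offset from fp.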
4605  __ sub(r3, r0, r1);
4606  __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4607  __ ldr(r0, MemOperand(r3, kDisplacement));
4608  __ Jump(lr);
4609
4610  // Arguments adaptor case: Check index against actual arguments
4611  // limit found in the arguments adaptor frame. Use unsigned
4612  // comparison to get negative check for free.
4613  __ bind(&adaptor);
4614  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4615  __ cmp(r1, r0);
4616  __ b(cs, &slow);
4617
4618  // Read the argument from the adaptor frame and return it.
4619  __ sub(r3, r0, r1);
4620  __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
4621  __ ldr(r0, MemOperand(r3, kDisplacement));
4622  __ Jump(lr);
4623
4624  // Slow-case: Handle non-smi or out-of-bounds access to arguments
4625  // by calling the runtime system.
4626  __ bind(&slow);
4627  __ push(r1);
4628  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4629}
4630
4631
4632void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
4633  // sp[0] : number of parameters
4634  // sp[4] : receiver displacement
4635  // sp[8] : function
4636
4637  // Check if the calling frame is an arguments adaptor frame.
4638  Label adaptor_frame, try_allocate, runtime;
4639  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4640  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
4641  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4642  __ b(eq, &adaptor_frame);
4643
4644  // Get the length from the frame.
4645  __ ldr(r1, MemOperand(sp, 0));
4646  __ b(&try_allocate);
4647
4648  // Patch the arguments.length and the parameters pointer.
4649  __ bind(&adaptor_frame);
4650  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4651  __ str(r1, MemOperand(sp, 0));
4652  __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
4653  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
4654  __ str(r3, MemOperand(sp, 1 * kPointerSize));
4655
4656  // Try the new space allocation. Start out with computing the size
4657  // of the arguments object and the elements array in words.
4658  Label add_arguments_object;
4659  __ bind(&try_allocate);
4660  __ cmp(r1, Operand(0, RelocInfo::NONE));
4661  __ b(eq, &add_arguments_object);
4662  __ mov(r1, Operand(r1, LSR, kSmiTagSize));
4663  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
4664  __ bind(&add_arguments_object);
4665  __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize));
4666
4667  // Do the allocation of both objects in one go.
4668  __ AllocateInNewSpace(
4669      r1,
4670      r0,
4671      r2,
4672      r3,
4673      &runtime,
4674      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
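  // Both objects are carved out of this single allocation: the JSObject part
  // comes first and the elements FixedArray is placed right behind it (see the
  // add of Heap::kArgumentsObjectSize further down).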
4675
4676  // Get the arguments boilerplate from the current (global) context.
4677  int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4678  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4679  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
4680  __ ldr(r4, MemOperand(r4, offset));
4681
4682  // Copy the JS object part.
4683  __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
4684
4685  // Setup the callee in-object property.
4686  STATIC_ASSERT(Heap::arguments_callee_index == 0);
4687  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
4688  __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
4689
4690  // Get the length (smi tagged) and set that as an in-object property too.
4691  STATIC_ASSERT(Heap::arguments_length_index == 1);
4692  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
4693  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
4694
4695  // If there are no actual arguments, we're done.
4696  Label done;
4697  __ cmp(r1, Operand(0, RelocInfo::NONE));
4698  __ b(eq, &done);
4699
4700  // Get the parameters pointer from the stack.
4701  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
4702
4703  // Setup the elements pointer in the allocated arguments object and
4704  // initialize the header in the elements fixed array.
4705  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
4706  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
4707  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
4708  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
4709  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
4710  __ mov(r1, Operand(r1, LSR, kSmiTagSize));  // Untag the length for the loop.
4711
4712  // Copy the fixed array slots.
4713  Label loop;
4714  // Setup r4 to point to the first array slot.
4715  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4716  __ bind(&loop);
4717  // Pre-decrement r2 with kPointerSize on each iteration.
4718  // Pre-decrement in order to skip receiver.
4719  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
4720  // Post-increment r4 with kPointerSize on each iteration.
4721  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
4722  __ sub(r1, r1, Operand(1));
4723  __ cmp(r1, Operand(0, RelocInfo::NONE));
4724  __ b(ne, &loop);
4725
4726  // Return and remove the on-stack parameters.
4727  __ bind(&done);
4728  __ add(sp, sp, Operand(3 * kPointerSize));
4729  __ Ret();
4730
4731  // Do the runtime call to allocate the arguments object.
4732  __ bind(&runtime);
4733  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4734}
4735
4736
4737void RegExpExecStub::Generate(MacroAssembler* masm) {
4738  // Just jump directly to the runtime if native RegExp is not selected at
4739  // compile time, or if regexp entry in generated code is turned off, either
4740  // by the runtime switch or at compilation.
4741#ifdef V8_INTERPRETED_REGEXP
4742  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4743#else  // V8_INTERPRETED_REGEXP
4744  if (!FLAG_regexp_entry_native) {
4745    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4746    return;
4747  }
4748
4749  // Stack frame on entry.
4750  //  sp[0]: last_match_info (expected JSArray)
4751  //  sp[4]: previous index
4752  //  sp[8]: subject string
4753  //  sp[12]: JSRegExp object
4754
4755  static const int kLastMatchInfoOffset = 0 * kPointerSize;
4756  static const int kPreviousIndexOffset = 1 * kPointerSize;
4757  static const int kSubjectOffset = 2 * kPointerSize;
4758  static const int kJSRegExpOffset = 3 * kPointerSize;
4759
4760  Label runtime, invoke_regexp;
4761
4762  // Allocation of registers for this function. These are in callee save
4763  // registers and will be preserved by the call to the native RegExp code, as
4764  // this code is called using the normal C calling convention. When calling
4765  // directly from generated code the native RegExp code will not do a GC and
4766  // therefore the contents of these registers are safe to use after the call.
4767  Register subject = r4;
4768  Register regexp_data = r5;
4769  Register last_match_info_elements = r6;
4770
4771  // Ensure that a RegExp stack is allocated.
4772  ExternalReference address_of_regexp_stack_memory_address =
4773      ExternalReference::address_of_regexp_stack_memory_address();
4774  ExternalReference address_of_regexp_stack_memory_size =
4775      ExternalReference::address_of_regexp_stack_memory_size();
4776  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
4777  __ ldr(r0, MemOperand(r0, 0));
4778  __ tst(r0, Operand(r0));
4779  __ b(eq, &runtime);
4780
4781  // Check that the first argument is a JSRegExp object.
4782  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
4783  STATIC_ASSERT(kSmiTag == 0);
4784  __ tst(r0, Operand(kSmiTagMask));
4785  __ b(eq, &runtime);
4786  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
4787  __ b(ne, &runtime);
4788
4789  // Check that the RegExp has been compiled (data contains a fixed array).
4790  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
4791  if (FLAG_debug_code) {
4792    __ tst(regexp_data, Operand(kSmiTagMask));
4793    __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
4794    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
4795    __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
4796  }
4797
4798  // regexp_data: RegExp data (FixedArray)
4799  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4800  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4801  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4802  __ b(ne, &runtime);
4803
4804  // regexp_data: RegExp data (FixedArray)
4805  // Check that the number of captures fit in the static offsets vector buffer.
4806  __ ldr(r2,
4807         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4808  // Calculate number of capture registers (number_of_captures + 1) * 2. This
4809  // uses the assumption that smis are 2 * their untagged value.
4810  STATIC_ASSERT(kSmiTag == 0);
4811  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4812  __ add(r2, r2, Operand(2));  // r2 was a smi.
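  // Illustrative: for n captures the smi-encoded count is 2 * n, so adding 2
  // yields 2 * (n + 1), i.e. the number of capture registers.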
4813  // Check that the static offsets vector buffer is large enough.
4814  __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4815  __ b(hi, &runtime);
4816
4817  // r2: Number of capture registers
4818  // regexp_data: RegExp data (FixedArray)
4819  // Check that the second argument is a string.
4820  __ ldr(subject, MemOperand(sp, kSubjectOffset));
4821  __ tst(subject, Operand(kSmiTagMask));
4822  __ b(eq, &runtime);
4823  Condition is_string = masm->IsObjectStringType(subject, r0);
4824  __ b(NegateCondition(is_string), &runtime);
4825  // Get the length of the string to r3.
4826  __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
4827
4828  // r2: Number of capture registers
4829  // r3: Length of subject string as a smi
4830  // subject: Subject string
4831  // regexp_data: RegExp data (FixedArray)
4832  // Check that the third argument is a positive smi less than the subject
4833  // string length. A negative value will be greater (unsigned comparison).
4834  __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
4835  __ tst(r0, Operand(kSmiTagMask));
4836  __ b(ne, &runtime);
4837  __ cmp(r3, Operand(r0));
4838  __ b(ls, &runtime);
4839
4840  // r2: Number of capture registers
4841  // subject: Subject string
4842  // regexp_data: RegExp data (FixedArray)
4843  // Check that the fourth object is a JSArray object.
4844  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
4845  __ tst(r0, Operand(kSmiTagMask));
4846  __ b(eq, &runtime);
4847  __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
4848  __ b(ne, &runtime);
4849  // Check that the JSArray is in fast case.
4850  __ ldr(last_match_info_elements,
4851         FieldMemOperand(r0, JSArray::kElementsOffset));
4852  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4853  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
4854  __ cmp(r0, ip);
4855  __ b(ne, &runtime);
4856  // Check that the last match info has space for the capture registers and the
4857  // additional information.
4858  __ ldr(r0,
4859         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4860  __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
4861  __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
4862  __ b(gt, &runtime);
4863
4864  // subject: Subject string
4865  // regexp_data: RegExp data (FixedArray)
4866  // Check the representation and encoding of the subject string.
4867  Label seq_string;
4868  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4869  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
4870  // First check for flat string.
4871  __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
4872  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4873  __ b(eq, &seq_string);
4874
4875  // subject: Subject string
4876  // regexp_data: RegExp data (FixedArray)
4877  // Check for flat cons string.
4878  // A flat cons string is a cons string where the second part is the empty
4879  // string. In that case the subject string is just the first part of the cons
4880  // string. Also in this case the first part of the cons string is known to be
4881  // a sequential string or an external string.
4882  STATIC_ASSERT(kExternalStringTag != 0);
4883  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
4884  __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
4885  __ b(ne, &runtime);
4886  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
4887  __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
4888  __ cmp(r0, r1);
4889  __ b(ne, &runtime);
4890  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4891  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
4892  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
4893  // Is first part a flat string?
4894  STATIC_ASSERT(kSeqStringTag == 0);
4895  __ tst(r0, Operand(kStringRepresentationMask));
4896  __ b(ne, &runtime);
4897
4898  __ bind(&seq_string);
4899  // subject: Subject string
4900  // regexp_data: RegExp data (FixedArray)
4901  // r0: Instance type of subject string
4902  STATIC_ASSERT(4 == kAsciiStringTag);
4903  STATIC_ASSERT(kTwoByteStringTag == 0);
4904  // Find the code object based on the assumptions above.
4905  __ and_(r0, r0, Operand(kStringEncodingMask));
4906  __ mov(r3, Operand(r0, ASR, 2), SetCC);
4907  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
4908  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
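  // Illustrative: kAsciiStringTag is 4 (see the STATIC_ASSERTs above), so after
  // masking with kStringEncodingMask the arithmetic shift right by 2 leaves
  // r3 == 1 for ASCII strings and r3 == 0 for two-byte strings, and the flags
  // from SetCC select the matching code object.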
4909
4910  // Check that the irregexp code has been generated for the actual string
4911  // encoding. If it has, the field contains a code object; otherwise it
4912  // contains the hole.
4913  __ CompareObjectType(r7, r0, r0, CODE_TYPE);
4914  __ b(ne, &runtime);
4915
4916  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
4917  // r7: code
4918  // subject: Subject string
4919  // regexp_data: RegExp data (FixedArray)
4920  // Load used arguments before starting to push arguments for call to native
4921  // RegExp code to avoid handling changing stack height.
4922  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
4923  __ mov(r1, Operand(r1, ASR, kSmiTagSize));
4924
4925  // r1: previous index
4926  // r3: encoding of subject string (1 if ascii, 0 if two_byte);
4927  // r7: code
4928  // subject: Subject string
4929  // regexp_data: RegExp data (FixedArray)
4930  // All checks done. Now push arguments for native regexp code.
4931  __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2);
4932
4933  static const int kRegExpExecuteArguments = 7;
4934  static const int kParameterRegisters = 4;
4935  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4936
4937  // Stack pointer now points to cell where return address is to be written.
4938  // Arguments are before that on the stack or in registers.
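  // In other words, under the ARM calling convention the first four arguments
  // travel in r0-r3, while arguments 5-7 are written to sp[4], sp[8] and
  // sp[12] by the stores below.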
4939
4940  // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript.
4941  __ mov(r0, Operand(1));
4942  __ str(r0, MemOperand(sp, 3 * kPointerSize));
4943
4944  // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area.
4945  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
4946  __ ldr(r0, MemOperand(r0, 0));
4947  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
4948  __ ldr(r2, MemOperand(r2, 0));
4949  __ add(r0, r0, Operand(r2));
4950  __ str(r0, MemOperand(sp, 2 * kPointerSize));
4951
4952  // Argument 5 (sp[4]): static offsets vector buffer.
4953  __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector()));
4954  __ str(r0, MemOperand(sp, 1 * kPointerSize));
4955
4956  // For arguments 4 and 3 get string length, calculate start of string data and
4957  // calculate the shift of the index (0 for ASCII and 1 for two byte).
4958  __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
4959  __ mov(r0, Operand(r0, ASR, kSmiTagSize));
4960  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4961  __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
4962  __ eor(r3, r3, Operand(1));
4963  // Argument 4 (r3): End of string data
4964  // Argument 3 (r2): Start of string data
4965  __ add(r2, r9, Operand(r1, LSL, r3));
4966  __ add(r3, r9, Operand(r0, LSL, r3));
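  // Illustrative: r9 points at the first character and r3 holds the index
  // shift (0 for ASCII, 1 for two-byte), so start = r9 + (previous_index <<
  // shift) and end = r9 + (length << shift).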
4967
4968  // Argument 2 (r1): Previous index.
4969  // Already there
4970
4971  // Argument 1 (r0): Subject string.
4972  __ mov(r0, subject);
4973
4974  // Locate the code entry and call it.
4975  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
4976  DirectCEntryStub stub;
4977  stub.GenerateCall(masm, r7);
4978
4979  __ LeaveExitFrame(false, no_reg);
4980
4981  // r0: result
4982  // subject: subject string (callee saved)
4983  // regexp_data: RegExp data (callee saved)
4984  // last_match_info_elements: Last match info elements (callee saved)
4985
4986  // Check the result.
4987  Label success;
4988
4989  __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS));
4990  __ b(eq, &success);
4991  Label failure;
4992  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
4993  __ b(eq, &failure);
4994  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
4995  // If it is not an exception, it can only be a retry. Handle that in the runtime system.
4996  __ b(ne, &runtime);
4997  // The result must now be an exception. If there is no pending exception
4998  // already, a stack overflow (on the backtrack stack) was detected in RegExp
4999  // code but the exception has not been created yet. Handle that in the runtime system.
5000  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5001  __ mov(r1, Operand(ExternalReference::the_hole_value_location()));
5002  __ ldr(r1, MemOperand(r1, 0));
5003  __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
5004  __ ldr(r0, MemOperand(r2, 0));
5005  __ cmp(r0, r1);
5006  __ b(eq, &runtime);
5007
5008  __ str(r1, MemOperand(r2, 0));  // Clear pending exception.
5009
5010  // Check if the exception is a termination. If so, throw as uncatchable.
5011  __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex);
5012  __ cmp(r0, ip);
5013  Label termination_exception;
5014  __ b(eq, &termination_exception);
5015
5016  __ Throw(r0);  // Expects thrown value in r0.
5017
5018  __ bind(&termination_exception);
5019  __ ThrowUncatchable(TERMINATION, r0);  // Expects thrown value in r0.
5020
5021  __ bind(&failure);
5022  // For failure and exception return null.
5023  __ mov(r0, Operand(Factory::null_value()));
5024  __ add(sp, sp, Operand(4 * kPointerSize));
5025  __ Ret();
5026
5027  // Process the result from the native regexp code.
5028  __ bind(&success);
5029  __ ldr(r1,
5030         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5031  // Calculate number of capture registers (number_of_captures + 1) * 2.
5032  STATIC_ASSERT(kSmiTag == 0);
5033  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5034  __ add(r1, r1, Operand(2));  // r1 was a smi.
5035
5036  // r1: number of capture registers
5037  // r4: subject string
5038  // Store the capture count.
5039  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
5040  __ str(r2, FieldMemOperand(last_match_info_elements,
5041                             RegExpImpl::kLastCaptureCountOffset));
5042  // Store last subject and last input.
5043  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
5044  __ str(subject,
5045         FieldMemOperand(last_match_info_elements,
5046                         RegExpImpl::kLastSubjectOffset));
5047  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
5048  __ str(subject,
5049         FieldMemOperand(last_match_info_elements,
5050                         RegExpImpl::kLastInputOffset));
5051  __ mov(r3, last_match_info_elements);
5052  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
5053
5054  // Get the static offsets vector filled by the native regexp code.
5055  ExternalReference address_of_static_offsets_vector =
5056      ExternalReference::address_of_static_offsets_vector();
5057  __ mov(r2, Operand(address_of_static_offsets_vector));
5058
5059  // r1: number of capture registers
5060  // r2: offsets vector
5061  Label next_capture, done;
5062  // Capture register counter starts from number of capture registers and
5063  // counts down until wrapping after zero.
5064  __ add(r0,
5065         last_match_info_elements,
5066         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5067  __ bind(&next_capture);
5068  __ sub(r1, r1, Operand(1), SetCC);
5069  __ b(mi, &done);
5070  // Read the value from the static offsets vector buffer.
5071  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
5072  // Store the smi value in the last match info.
5073  __ mov(r3, Operand(r3, LSL, kSmiTagSize));
5074  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
5075  __ jmp(&next_capture);
5076  __ bind(&done);
5077
5078  // Return last match info.
5079  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
5080  __ add(sp, sp, Operand(4 * kPointerSize));
5081  __ Ret();
5082
5083  // Do the runtime call to execute the regexp.
5084  __ bind(&runtime);
5085  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5086#endif  // V8_INTERPRETED_REGEXP
5087}
5088
5089
5090void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5091  const int kMaxInlineLength = 100;
5092  Label slowcase;
5093  Label done;
5094  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
5095  STATIC_ASSERT(kSmiTag == 0);
5096  STATIC_ASSERT(kSmiTagSize == 1);
5097  __ tst(r1, Operand(kSmiTagMask));
5098  __ b(ne, &slowcase);
5099  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
5100  __ b(hi, &slowcase);
5101  // Smi-tagging is equivalent to multiplying by 2.
5102  // Allocate RegExpResult followed by FixedArray with size in r2.
5103  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
5104  // Elements:  [Map][Length][..elements..]
5105  // Size of JSArray with two in-object properties and the header of a
5106  // FixedArray.
5107  int objects_size =
5108      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
5109  __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize));
5110  __ add(r2, r5, Operand(objects_size));
5111  __ AllocateInNewSpace(
5112      r2,  // In: Size, in words.
5113      r0,  // Out: Start of allocation (tagged).
5114      r3,  // Scratch register.
5115      r4,  // Scratch register.
5116      &slowcase,
5117      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5118  // r0: Start of allocated area, object-tagged.
5119  // r1: Number of elements in array, as smi.
5120  // r5: Number of elements, untagged.
5121
5122  // Set JSArray map to global.regexp_result_map().
5123  // Set empty properties FixedArray.
5124  // Set elements to point to FixedArray allocated right after the JSArray.
5125  // Interleave operations for better latency.
5126  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
5127  __ add(r3, r0, Operand(JSRegExpResult::kSize));
5128  __ mov(r4, Operand(Factory::empty_fixed_array()));
5129  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
5130  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
5131  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
5132  __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
5133  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5134
5135  // Set input, index and length fields from arguments.
5136  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
5137  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
5138  __ ldr(r1, MemOperand(sp, kPointerSize * 1));
5139  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
5140  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
5141  __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
5142
5143  // Fill out the elements FixedArray.
5144  // r0: JSArray, tagged.
5145  // r3: FixedArray, tagged.
5146  // r5: Number of elements in array, untagged.
5147
5148  // Set map.
5149  __ mov(r2, Operand(Factory::fixed_array_map()));
5150  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
5151  // Set FixedArray length.
5152  __ mov(r6, Operand(r5, LSL, kSmiTagSize));
5153  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
5154  // Fill contents of fixed-array with the-hole.
5155  __ mov(r2, Operand(Factory::the_hole_value()));
5156  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5157  // Fill fixed array elements with hole.
5158  // r0: JSArray, tagged.
5159  // r2: the hole.
5160  // r3: Start of elements in FixedArray.
5161  // r5: Number of elements to fill.
5162  Label loop;
5163  __ tst(r5, Operand(r5));
5164  __ bind(&loop);
5165  __ b(le, &done);  // Jump if r5 is negative or zero.
5166  __ sub(r5, r5, Operand(1), SetCC);
5167  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
5168  __ jmp(&loop);
5169
5170  __ bind(&done);
5171  __ add(sp, sp, Operand(3 * kPointerSize));
5172  __ Ret();
5173
5174  __ bind(&slowcase);
5175  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5176}
5177
5178
5179void CallFunctionStub::Generate(MacroAssembler* masm) {
5180  Label slow;
5181
5182  // If the receiver might be a value (string, number or boolean) check for this
5183  // and box it if it is.
5184  if (ReceiverMightBeValue()) {
5185    // Get the receiver from the stack.
5186    // function, receiver [, arguments]
5187    Label receiver_is_value, receiver_is_js_object;
5188    __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
5189
5190    // Check if receiver is a smi (which is a number value).
5191    __ JumpIfSmi(r1, &receiver_is_value);
5192
5193    // Check if the receiver is a valid JS object.
5194    __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
5195    __ b(ge, &receiver_is_js_object);
5196
5197    // Call the runtime to box the value.
5198    __ bind(&receiver_is_value);
5199    __ EnterInternalFrame();
5200    __ push(r1);
5201    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
5202    __ LeaveInternalFrame();
5203    __ str(r0, MemOperand(sp, argc_ * kPointerSize));
5204
5205    __ bind(&receiver_is_js_object);
5206  }
5207
5208  // Get the function to call from the stack.
5209  // function, receiver [, arguments]
5210  __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
5211
5212  // Check that the function is really a JavaScript function.
5213  // r1: pushed function (to be verified)
5214  __ JumpIfSmi(r1, &slow);
5215  // Get the map of the function object.
5216  __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
5217  __ b(ne, &slow);
5218
5219  // Fast-case: Invoke the function now.
5220  // r1: pushed function
5221  ParameterCount actual(argc_);
5222  __ InvokeFunction(r1, actual, JUMP_FUNCTION);
5223
5224  // Slow-case: Non-function called.
5225  __ bind(&slow);
5226  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5227  // of the original receiver from the call site).
5228  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
5229  __ mov(r0, Operand(argc_));  // Setup the number of arguments.
5230  __ mov(r2, Operand(0, RelocInfo::NONE));
5231  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
5232  __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
5233          RelocInfo::CODE_TARGET);
5234}
5235
5236
5237// Unfortunately you have to run without snapshots to see most of these
5238// names in the profile since most compare stubs end up in the snapshot.
5239const char* CompareStub::GetName() {
5240  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5241         (lhs_.is(r1) && rhs_.is(r0)));
5242
5243  if (name_ != NULL) return name_;
5244  const int kMaxNameLength = 100;
5245  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
5246  if (name_ == NULL) return "OOM";
5247
5248  const char* cc_name;
5249  switch (cc_) {
5250    case lt: cc_name = "LT"; break;
5251    case gt: cc_name = "GT"; break;
5252    case le: cc_name = "LE"; break;
5253    case ge: cc_name = "GE"; break;
5254    case eq: cc_name = "EQ"; break;
5255    case ne: cc_name = "NE"; break;
5256    default: cc_name = "UnknownCondition"; break;
5257  }
5258
5259  const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
5260  const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
5261
5262  const char* strict_name = "";
5263  if (strict_ && (cc_ == eq || cc_ == ne)) {
5264    strict_name = "_STRICT";
5265  }
5266
5267  const char* never_nan_nan_name = "";
5268  if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
5269    never_nan_nan_name = "_NO_NAN";
5270  }
5271
5272  const char* include_number_compare_name = "";
5273  if (!include_number_compare_) {
5274    include_number_compare_name = "_NO_NUMBER";
5275  }
5276
5277  const char* include_smi_compare_name = "";
5278  if (!include_smi_compare_) {
5279    include_smi_compare_name = "_NO_SMI";
5280  }
5281
5282  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
5283               "CompareStub_%s%s%s%s%s%s",
5284               cc_name,
5285               lhs_name,
5286               rhs_name,
5287               strict_name,
5288               never_nan_nan_name,
5289               include_number_compare_name,
5290               include_smi_compare_name);
5291  return name_;
5292}
5293
5294
5295int CompareStub::MinorKey() {
5296  // Encode the parameters in a unique 16 bit value. To avoid duplicate stubs
5297  // the never NaN NaN condition is only taken into account if the condition
5298  // is equals.
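  // On ARM the condition code occupies the top four bits of an instruction
  // word, so cc_ >> 28 below produces a small integer (at most 15) that fits
  // in ConditionField.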
5299  ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
5300  ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
5301         (lhs_.is(r1) && rhs_.is(r0)));
5302  return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
5303         | RegisterField::encode(lhs_.is(r0))
5304         | StrictField::encode(strict_)
5305         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5306         | IncludeNumberCompareField::encode(include_number_compare_)
5307         | IncludeSmiCompareField::encode(include_smi_compare_);
5308}
5309
5310
5311// StringCharCodeAtGenerator
5312void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5313  Label flat_string;
5314  Label ascii_string;
5315  Label got_char_code;
5316
5317  // If the receiver is a smi trigger the non-string case.
5318  __ JumpIfSmi(object_, receiver_not_string_);
5319
5320  // Fetch the instance type of the receiver into result register.
5321  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5322  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5323  // If the receiver is not a string trigger the non-string case.
5324  __ tst(result_, Operand(kIsNotStringMask));
5325  __ b(ne, receiver_not_string_);
5326
5327  // If the index is non-smi trigger the non-smi case.
5328  __ JumpIfNotSmi(index_, &index_not_smi_);
5329
5330  // Put smi-tagged index into scratch register.
5331  __ mov(scratch_, index_);
5332  __ bind(&got_smi_index_);
5333
5334  // Check for index out of range.
5335  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
5336  __ cmp(ip, Operand(scratch_));
5337  __ b(ls, index_out_of_range_);
5338
5339  // We need special handling for non-flat strings.
5340  STATIC_ASSERT(kSeqStringTag == 0);
5341  __ tst(result_, Operand(kStringRepresentationMask));
5342  __ b(eq, &flat_string);
5343
5344  // Handle non-flat strings.
5345  __ tst(result_, Operand(kIsConsStringMask));
5346  __ b(eq, &call_runtime_);
5347
5348  // ConsString.
5349  // Check whether the right hand side is the empty string (i.e. if
5350  // this is really a flat string in a cons string). If that is not
5351  // the case we would rather go to the runtime system now to flatten
5352  // the string.
5353  __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
5354  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
5355  __ cmp(result_, Operand(ip));
5356  __ b(ne, &call_runtime_);
5357  // Get the first of the two strings and load its instance type.
5358  __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
5359  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5360  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5361  // If the first cons component is also non-flat, then go to runtime.
5362  STATIC_ASSERT(kSeqStringTag == 0);
5363  __ tst(result_, Operand(kStringRepresentationMask));
5364  __ b(ne, &call_runtime_);
5365
5366  // Check for 1-byte or 2-byte string.
5367  __ bind(&flat_string);
5368  STATIC_ASSERT(kAsciiStringTag != 0);
5369  __ tst(result_, Operand(kStringEncodingMask));
5370  __ b(ne, &ascii_string);
5371
5372  // 2-byte string.
5373  // Load the 2-byte character code into the result register. We can
5374  // add without shifting since the smi tag size is the log2 of the
5375  // number of bytes in a two-byte character.
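  // Illustrative: the index is still smi-tagged, i.e. 2 * index, which is
  // already the byte offset of a two-byte character, so no extra shift is
  // needed before the add below.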
5376  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
5377  __ add(scratch_, object_, Operand(scratch_));
5378  __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
5379  __ jmp(&got_char_code);
5380
5381  // ASCII string.
5382  // Load the byte into the result register.
5383  __ bind(&ascii_string);
5384  __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
5385  __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
5386
5387  __ bind(&got_char_code);
5388  __ mov(result_, Operand(result_, LSL, kSmiTagSize));
5389  __ bind(&exit_);
5390}
5391
5392
5393void StringCharCodeAtGenerator::GenerateSlow(
5394    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5395  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5396
5397  // Index is not a smi.
5398  __ bind(&index_not_smi_);
5399  // If index is a heap number, try converting it to an integer.
5400  __ CheckMap(index_,
5401              scratch_,
5402              Heap::kHeapNumberMapRootIndex,
5403              index_not_number_,
5404              true);
5405  call_helper.BeforeCall(masm);
5406  __ Push(object_, index_);
5407  __ push(index_);  // Consumed by runtime conversion function.
5408  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5409    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5410  } else {
5411    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5412    // NumberToSmi discards numbers that are not exact integers.
5413    __ CallRuntime(Runtime::kNumberToSmi, 1);
5414  }
5415  // Save the conversion result before the pop instructions below
5416  // have a chance to overwrite it.
5417  __ Move(scratch_, r0);
5418  __ pop(index_);
5419  __ pop(object_);
5420  // Reload the instance type.
5421  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5422  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5423  call_helper.AfterCall(masm);
5424  // If index is still not a smi, it must be out of range.
5425  __ JumpIfNotSmi(scratch_, index_out_of_range_);
5426  // Otherwise, return to the fast path.
5427  __ jmp(&got_smi_index_);
5428
5429  // Call runtime. We get here when the receiver is a string and the
5430  // index is a number, but the code of getting the actual character
5431  // is too complex (e.g., when the string needs to be flattened).
5432  __ bind(&call_runtime_);
5433  call_helper.BeforeCall(masm);
5434  __ Push(object_, index_);
5435  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5436  __ Move(result_, r0);
5437  call_helper.AfterCall(masm);
5438  __ jmp(&exit_);
5439
5440  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5441}
5442
5443
5444// -------------------------------------------------------------------------
5445// StringCharFromCodeGenerator
5446
5447void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5448  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5449  STATIC_ASSERT(kSmiTag == 0);
5450  STATIC_ASSERT(kSmiShiftSize == 0);
5451  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5452  __ tst(code_,
5453         Operand(kSmiTagMask |
5454                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5455  __ b(ne, &slow_case_);
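  // The single tst above checks two things at once: that code_ is a smi and
  // that its untagged value does not exceed String::kMaxAsciiCharCode. This
  // works because kMaxAsciiCharCode + 1 is a power of two (asserted above), so
  // all disallowed high bits form one contiguous mask.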
5456
5457  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5458  // At this point code register contains smi tagged ascii char code.
5459  STATIC_ASSERT(kSmiTag == 0);
5460  __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
5461  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5462  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5463  __ cmp(result_, Operand(ip));
5464  __ b(eq, &slow_case_);
5465  __ bind(&exit_);
5466}
5467
5468
5469void StringCharFromCodeGenerator::GenerateSlow(
5470    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5471  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5472
5473  __ bind(&slow_case_);
5474  call_helper.BeforeCall(masm);
5475  __ push(code_);
5476  __ CallRuntime(Runtime::kCharFromCode, 1);
5477  __ Move(result_, r0);
5478  call_helper.AfterCall(masm);
5479  __ jmp(&exit_);
5480
5481  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5482}
5483
5484
5485// -------------------------------------------------------------------------
5486// StringCharAtGenerator
5487
5488void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5489  char_code_at_generator_.GenerateFast(masm);
5490  char_from_code_generator_.GenerateFast(masm);
5491}
5492
5493
5494void StringCharAtGenerator::GenerateSlow(
5495    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5496  char_code_at_generator_.GenerateSlow(masm, call_helper);
5497  char_from_code_generator_.GenerateSlow(masm, call_helper);
5498}
5499
5500
5501class StringHelper : public AllStatic {
5502 public:
5503  // Generate code for copying characters using a simple loop. This should only
5504  // be used in places where the number of characters is small and the
5505  // additional setup and checking in GenerateCopyCharactersLong adds too much
5506  // overhead. Copying of overlapping regions is not supported.
5507  // Dest register ends at the position after the last character written.
5508  static void GenerateCopyCharacters(MacroAssembler* masm,
5509                                     Register dest,
5510                                     Register src,
5511                                     Register count,
5512                                     Register scratch,
5513                                     bool ascii);
5514
5515  // Generate code for copying a large number of characters. This function
5516  // is allowed to spend extra time setting up conditions to make copying
5517  // faster. Copying of overlapping regions is not supported.
5518  // Dest register ends at the position after the last character written.
5519  static void GenerateCopyCharactersLong(MacroAssembler* masm,
5520                                         Register dest,
5521                                         Register src,
5522                                         Register count,
5523                                         Register scratch1,
5524                                         Register scratch2,
5525                                         Register scratch3,
5526                                         Register scratch4,
5527                                         Register scratch5,
5528                                         int flags);
5529
5530
5531  // Probe the symbol table for a two character string. If the string is
5532  // not found by probing, a jump to the label not_found is performed. This jump
5533  // does not guarantee that the string is not in the symbol table. If the
5534  // string is found, the code falls through with the string in register r0.
5535  // Contents of both c1 and c2 registers are modified. At the exit c1 is
5536  // guaranteed to contain a halfword with low and high bytes equal to the
5537  // initial contents of c1 and c2 respectively.
5538  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5539                                                   Register c1,
5540                                                   Register c2,
5541                                                   Register scratch1,
5542                                                   Register scratch2,
5543                                                   Register scratch3,
5544                                                   Register scratch4,
5545                                                   Register scratch5,
5546                                                   Label* not_found);
5547
5548  // Generate string hash.
5549  static void GenerateHashInit(MacroAssembler* masm,
5550                               Register hash,
5551                               Register character);
5552
5553  static void GenerateHashAddCharacter(MacroAssembler* masm,
5554                                       Register hash,
5555                                       Register character);
5556
5557  static void GenerateHashGetHash(MacroAssembler* masm,
5558                                  Register hash);
5559
5560 private:
5561  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
5562};
5563
5564
5565void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5566                                          Register dest,
5567                                          Register src,
5568                                          Register count,
5569                                          Register scratch,
5570                                          bool ascii) {
5571  Label loop;
5572  Label done;
5573  // This loop just copies one character at a time, as it is only used for very
5574  // short strings.
5575  if (!ascii) {
5576    __ add(count, count, Operand(count), SetCC);
5577  } else {
5578    __ cmp(count, Operand(0, RelocInfo::NONE));
5579  }
5580  __ b(eq, &done);
5581
5582  __ bind(&loop);
5583  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
5584  // Perform the sub between the load and the dependent store to give the load
5585  // time to complete.
5586  __ sub(count, count, Operand(1), SetCC);
5587  __ strb(scratch, MemOperand(dest, 1, PostIndex));
5588  // Loop while count > 0; falls through after the last iteration.
5589  __ b(gt, &loop);
5590
5591  __ bind(&done);
5592}
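
// For orientation, a rough C sketch of what the code emitted above does
// (illustrative only, not part of the stub; 'count' is a character count on
// entry and is doubled for two-byte strings so the loop can copy plain bytes):
//
//   void CopyCharacters(uint8_t* dest, const uint8_t* src, int count,
//                       bool ascii) {
//     if (!ascii) count *= 2;              // two-byte characters -> byte count
//     while (count-- > 0) *dest++ = *src++;
//   }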
5593
5594
5595enum CopyCharactersFlags {
5596  COPY_ASCII = 1,
5597  DEST_ALWAYS_ALIGNED = 2
5598};
5599
5600
5601void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5602                                              Register dest,
5603                                              Register src,
5604                                              Register count,
5605                                              Register scratch1,
5606                                              Register scratch2,
5607                                              Register scratch3,
5608                                              Register scratch4,
5609                                              Register scratch5,
5610                                              int flags) {
5611  bool ascii = (flags & COPY_ASCII) != 0;
5612  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5613
5614  if (dest_always_aligned && FLAG_debug_code) {
5615    // Check that destination is actually word aligned if the flag says
5616    // that it is.
5617    __ tst(dest, Operand(kPointerAlignmentMask));
5618    __ Check(eq, "Destination of copy not aligned.");
5619  }
5620
5621  const int kReadAlignment = 4;
5622  const int kReadAlignmentMask = kReadAlignment - 1;
5623  // Ensure that reading an entire aligned word containing the last character
5624  // of a string will not read outside the allocated area (because we pad up
5625  // to kObjectAlignment).
5626  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5627  // Assumes word reads and writes are little endian.
5628  // Nothing to do for zero characters.
5629  Label done;
5630  if (!ascii) {
5631    __ add(count, count, Operand(count), SetCC);
5632  } else {
5633    __ cmp(count, Operand(0, RelocInfo::NONE));
5634  }
5635  __ b(eq, &done);
5636
5637  // Assume that you cannot read (or write) unaligned.
5638  Label byte_loop;
5639  // Must copy at least eight bytes, otherwise just do it one byte at a time.
5640  __ cmp(count, Operand(8));
5641  __ add(count, dest, Operand(count));
5642  Register limit = count;  // Read until src equals this.
5643  __ b(lt, &byte_loop);
5644
5645  if (!dest_always_aligned) {
5646    // Align dest by byte copying. Copies between zero and three bytes.
5647    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
5648    Label dest_aligned;
5649    __ b(eq, &dest_aligned);
5650    __ cmp(scratch4, Operand(2));
5651    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
5652    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
5653    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
5654    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5655    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
5656    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
5657    __ bind(&dest_aligned);
5658  }
5659
5660  Label simple_loop;
5661
5662  __ sub(scratch4, dest, Operand(src));
5663  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
5664  __ b(eq, &simple_loop);
5665  // Shift register is number of bits in a source word that
5666  // must be combined with bits in the next source word in order
5667  // to create a destination word.
5668
5669  // Complex loop for src/dst that are not aligned the same way.
5670  {
5671    Label loop;
5672    __ mov(scratch4, Operand(scratch4, LSL, 3));
5673    Register left_shift = scratch4;
5674    __ and_(src, src, Operand(~3));  // Round down to load previous word.
5675    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
5676    // Store the "shift" most significant bits of scratch in the least
5677    // significant bits (i.e., shift down by (32-shift)).
5678    __ rsb(scratch2, left_shift, Operand(32));
5679    Register right_shift = scratch2;
5680    __ mov(scratch1, Operand(scratch1, LSR, right_shift));
5681
5682    __ bind(&loop);
5683    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
5684    __ sub(scratch5, limit, Operand(dest));
5685    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
5686    __ str(scratch1, MemOperand(dest, 4, PostIndex));
5687    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
5688    // Loop if four or more bytes left to copy.
5689    // Compare to eight, because we did the subtract before increasing dst.
5690    __ sub(scratch5, scratch5, Operand(8), SetCC);
5691    __ b(ge, &loop);
5692  }
5693  // There is now between zero and three bytes left to copy (the negative of
5694  // that number is in scratch5), and between one and three bytes already read into
5695  // scratch1 (eight times that number in scratch4). We may have read past
5696  // the end of the string, but because objects are aligned, we have not read
5697  // past the end of the object.
5698  // Find the minimum of remaining characters to move and preloaded characters
5699  // and write those as bytes.
5700  __ add(scratch5, scratch5, Operand(4), SetCC);
5701  __ b(eq, &done);
5702  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
5703  // Move minimum of bytes read and bytes left to copy to scratch5.
5704  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
5705  // Between one and three (value in scratch5) characters already read into
5706  // scratch ready to write.
5707  __ cmp(scratch5, Operand(2));
5708  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5709  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
5710  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
5711  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
5712  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
5713  // Copy any remaining bytes.
5714  __ b(&byte_loop);
5715
5716  // Simple loop.
5717  // Copy words from src to dst, until less than four bytes left.
5718  // Both src and dest are word aligned.
5719  __ bind(&simple_loop);
5720  {
5721    Label loop;
5722    __ bind(&loop);
5723    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
5724    __ sub(scratch3, limit, Operand(dest));
5725    __ str(scratch1, MemOperand(dest, 4, PostIndex));
5726    // Compare to 8, not 4, because we do the subtraction before increasing
5727    // dest.
5728    __ cmp(scratch3, Operand(8));
5729    __ b(ge, &loop);
5730  }
5731
5732  // Copy bytes from src to dst until dst hits limit.
5733  __ bind(&byte_loop);
5734  __ cmp(dest, Operand(limit));
5735  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
5736  __ b(ge, &done);
5737  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
5738  __ b(&byte_loop);
5739
5740  __ bind(&done);
5741}
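
// The interesting part above is the misaligned case: when src and dest do not
// share the same word alignment, each destination word is assembled from two
// consecutive source words. A hedged C sketch of that inner loop (assuming a
// little endian machine, as the stub itself does; the variable names are
// illustrative only):
//
//   // left_shift = 8 * (alignment difference), right_shift = 32 - left_shift.
//   uint32_t carry = *src_word++ >> right_shift;
//   while (bytes_left >= 4) {
//     uint32_t next = *src_word++;
//     *dest_word++ = carry | (next << left_shift);
//     carry = next >> right_shift;
//     bytes_left -= 4;
//   }
//   // The 0-3 leftover bytes (already sitting in 'carry') are stored bytewise.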
5742
5743
5744void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5745                                                        Register c1,
5746                                                        Register c2,
5747                                                        Register scratch1,
5748                                                        Register scratch2,
5749                                                        Register scratch3,
5750                                                        Register scratch4,
5751                                                        Register scratch5,
5752                                                        Label* not_found) {
5753  // Register scratch3 is the general scratch register in this function.
5754  Register scratch = scratch3;
5755
5756  // Make sure that both characters are not digits, as such strings have a
5757  // different hash algorithm. Don't try to look for these in the symbol table.
5758  Label not_array_index;
5759  __ sub(scratch, c1, Operand(static_cast<int>('0')));
5760  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5761  __ b(hi, &not_array_index);
5762  __ sub(scratch, c2, Operand(static_cast<int>('0')));
5763  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
5764
5765  // If the check failed, combine both characters into a single halfword.
5766  // This is required by the contract of the method: code at the
5767  // not_found branch expects this combination in the c1 register.
5768  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
5769  __ b(ls, not_found);
5770
5771  __ bind(&not_array_index);
5772  // Calculate the two character string hash.
5773  Register hash = scratch1;
5774  StringHelper::GenerateHashInit(masm, hash, c1);
5775  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5776  StringHelper::GenerateHashGetHash(masm, hash);
5777
5778  // Collect the two characters in a register.
5779  Register chars = c1;
5780  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
5781
5782  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5783  // hash:  hash of two character string.
5784
5785  // Load the symbol table.
5786  // (The address of its first element is computed further below.)
5787  Register symbol_table = c2;
5788  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5789
5790  // Load undefined value
5791  Register undefined = scratch4;
5792  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5793
5794  // Calculate capacity mask from the symbol table capacity.
5795  Register mask = scratch2;
5796  __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5797  __ mov(mask, Operand(mask, ASR, 1));
5798  __ sub(mask, mask, Operand(1));
5799
5800  // Calculate untagged address of the first element of the symbol table.
5801  Register first_symbol_table_element = symbol_table;
5802  __ add(first_symbol_table_element, symbol_table,
5803         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5804
5805  // Registers
5806  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5807  // hash:  hash of two character string
5808  // mask:  capacity mask
5809  // first_symbol_table_element: address of the first element of
5810  //                             the symbol table
5811  // scratch: -
5812
5813  // Perform a number of probes in the symbol table.
5814  static const int kProbes = 4;
5815  Label found_in_symbol_table;
5816  Label next_probe[kProbes];
5817  for (int i = 0; i < kProbes; i++) {
5818    Register candidate = scratch5;  // Scratch register contains candidate.
5819
5820    // Calculate entry in symbol table.
5821    if (i > 0) {
5822      __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5823    } else {
5824      __ mov(candidate, hash);
5825    }
5826
5827    __ and_(candidate, candidate, Operand(mask));
5828
5829    // Load the entry from the symbol table.
5830    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5831    __ ldr(candidate,
5832           MemOperand(first_symbol_table_element,
5833                      candidate,
5834                      LSL,
5835                      kPointerSizeLog2));
5836
5837    // If entry is undefined no string with this hash can be found.
5838    __ cmp(candidate, undefined);
5839    __ b(eq, not_found);
5840
5841    // If length is not 2 the string is not a candidate.
5842    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5843    __ cmp(scratch, Operand(Smi::FromInt(2)));
5844    __ b(ne, &next_probe[i]);
5845
5846    // Check that the candidate is a non-external ascii string.
5847    __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset));
5848    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5849    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch,
5850                                              &next_probe[i]);
5851
5852    // Check if the two characters match.
5853    // Assumes that word load is little endian.
5854    __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5855    __ cmp(chars, scratch);
5856    __ b(eq, &found_in_symbol_table);
5857    __ bind(&next_probe[i]);
5858  }
5859
5860  // No matching 2 character string found by probing.
5861  __ jmp(not_found);
5862
5863  // Scratch register contains result when we fall through to here.
5864  Register result = scratch;
5865  __ bind(&found_in_symbol_table);
5866  __ Move(r0, result);
5867}
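
// In pseudo-C++, the probe above does roughly the following (a sketch under
// the assumption that each table entry is a single pointer, as the
// STATIC_ASSERT on kEntrySize implies; the helper names are illustrative,
// not real V8 API):
//
//   uint32_t hash = HashOfTwoCharacterString(c1, c2);
//   for (int i = 0; i < 4; i++) {
//     int entry = (hash + SymbolTable::GetProbeOffset(i)) & capacity_mask;
//     Object* candidate = table_elements[entry];
//     if (candidate == undefined) goto not_found;
//     if (IsSeqAsciiString(candidate) && Length(candidate) == 2 &&
//         FirstTwoCharacters(candidate) == (c1 | (c2 << 8))) {
//       return candidate;  // falls through with the symbol in r0
//     }
//   }
//   goto not_found;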
5868
5869
5870void StringHelper::GenerateHashInit(MacroAssembler* masm,
5871                                    Register hash,
5872                                    Register character) {
5873  // hash = character + (character << 10);
5874  __ add(hash, character, Operand(character, LSL, 10));
5875  // hash ^= hash >> 6;
5876  __ eor(hash, hash, Operand(hash, ASR, 6));
5877}
5878
5879
5880void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5881                                            Register hash,
5882                                            Register character) {
5883  // hash += character;
5884  __ add(hash, hash, Operand(character));
5885  // hash += hash << 10;
5886  __ add(hash, hash, Operand(hash, LSL, 10));
5887  // hash ^= hash >> 6;
5888  __ eor(hash, hash, Operand(hash, ASR, 6));
5889}
5890
5891
5892void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5893                                       Register hash) {
5894  // hash += hash << 3;
5895  __ add(hash, hash, Operand(hash, LSL, 3));
5896  // hash ^= hash >> 11;
5897  __ eor(hash, hash, Operand(hash, ASR, 11));
5898  // hash += hash << 15;
5899  __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
5900
5901  // if (hash == 0) hash = 27;
5902  __ mov(hash, Operand(27), LeaveCC, ne);
5903}
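
// Taken together, GenerateHashInit/GenerateHashAddCharacter/GenerateHashGetHash
// emit the string hash in a one-at-a-time mixing style. As a plain C++ sketch
// (note that the emitted code uses ASR, an arithmetic shift, where this sketch
// uses an unsigned shift):
//
//   uint32_t hash = first_char + (first_char << 10);
//   hash ^= hash >> 6;
//   for (each following character c) {
//     hash += c;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (hash == 0) hash = 27;  // never produce a zero hash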
5904
5905
5906void SubStringStub::Generate(MacroAssembler* masm) {
5907  Label runtime;
5908
5909  // Stack frame on entry.
5910  //  lr: return address
5911  //  sp[0]: to
5912  //  sp[4]: from
5913  //  sp[8]: string
5914
5915  // This stub is called from the native-call %_SubString(...), so
5916  // nothing can be assumed about the arguments. It is tested that:
5917  //  "string" is a sequential string,
5918  //  both "from" and "to" are smis, and
5919  //  0 <= from <= to <= string.length.
5920  // If any of these assumptions fail, we call the runtime system.
5921
5922  static const int kToOffset = 0 * kPointerSize;
5923  static const int kFromOffset = 1 * kPointerSize;
5924  static const int kStringOffset = 2 * kPointerSize;
5925
5926
5927  // Check bounds and smi-ness.
5928  Register to = r6;
5929  Register from = r7;
5930  __ Ldrd(to, from, MemOperand(sp, kToOffset));
5931  STATIC_ASSERT(kFromOffset == kToOffset + 4);
5932  STATIC_ASSERT(kSmiTag == 0);
5933  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5934  // I.e., arithmetic shift right by one un-smi-tags.
5935  __ mov(r2, Operand(to, ASR, 1), SetCC);
5936  __ mov(r3, Operand(from, ASR, 1), SetCC, cc);
5937  // If either to or from had the smi tag bit set, then carry is set now.
5938  __ b(cs, &runtime);  // Either "from" or "to" is not a smi.
5939  __ b(mi, &runtime);  // From is negative.
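  // Note on the two conditional shifts above: the first ASR #1 moves the smi
  // tag bit of "to" into the carry flag; the second shift executes only while
  // that carry is still clear, so afterwards C is set iff either tag bit was
  // set. Roughly, as a sketch (not the emitted code):
  //   if ((to & kSmiTagMask) != 0 || (from & kSmiTagMask) != 0) goto runtime;
  //   if ((from >> 1) < 0) goto runtime;  // negative "from"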
5940
5941  // Both to and from are smis.
5942
5943  __ sub(r2, r2, Operand(r3), SetCC);
5944  __ b(mi, &runtime);  // Fail if from > to.
5945  // Special handling of sub-strings of length 1 and 2. One character strings
5946  // are handled in the runtime system (looked up in the single character
5947  // cache). Two character strings are looked up in the symbol table.
5948  __ cmp(r2, Operand(2));
5949  __ b(lt, &runtime);
5950
5951  // r2: length
5952  // r3: from index (untagged smi)
5953  // r6 (a.k.a. to): to (smi)
5954  // r7 (a.k.a. from): from offset (smi)
5955
5956  // Make sure first argument is a sequential (or flat) string.
5957  __ ldr(r5, MemOperand(sp, kStringOffset));
5958  STATIC_ASSERT(kSmiTag == 0);
5959  __ tst(r5, Operand(kSmiTagMask));
5960  __ b(eq, &runtime);
5961  Condition is_string = masm->IsObjectStringType(r5, r1);
5962  __ b(NegateCondition(is_string), &runtime);
5963
5964  // r1: instance type
5965  // r2: length
5966  // r3: from index (untagged smi)
5967  // r5: string
5968  // r6 (a.k.a. to): to (smi)
5969  // r7 (a.k.a. from): from offset (smi)
5970  Label seq_string;
5971  __ and_(r4, r1, Operand(kStringRepresentationMask));
5972  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
5973  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5974  __ cmp(r4, Operand(kConsStringTag));
5975  __ b(gt, &runtime);  // External strings go to runtime.
5976  __ b(lt, &seq_string);  // Sequential strings are handled directly.
5977
5978  // Cons string. Try to recurse (once) on the first substring.
5979  // (This adds a little more generality than necessary to handle flattened
5980  // cons strings, but not much).
5981  __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset));
5982  __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
5983  __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
5984  __ tst(r1, Operand(kStringRepresentationMask));
5985  STATIC_ASSERT(kSeqStringTag == 0);
5986  __ b(ne, &runtime);  // Cons and External strings go to runtime.
5987
5988  // Definitely a sequential string.
5989  __ bind(&seq_string);
5990
5991  // r1: instance type.
5992  // r2: length
5993  // r3: from index (untagged smi)
5994  // r5: string
5995  // r6 (a.k.a. to): to (smi)
5996  // r7 (a.k.a. from): from offset (smi)
5997  __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset));
5998  __ cmp(r4, Operand(to));
5999  __ b(lt, &runtime);  // Fail if to > length.
6000  to = no_reg;
6001
6002  // r1: instance type.
6003  // r2: result string length.
6004  // r3: from index (untagged smi)
6005  // r5: string.
6006  // r7 (a.k.a. from): from offset (smi)
6007  // Check for flat ascii string.
6008  Label non_ascii_flat;
6009  __ tst(r1, Operand(kStringEncodingMask));
6010  STATIC_ASSERT(kTwoByteStringTag == 0);
6011  __ b(eq, &non_ascii_flat);
6012
6013  Label result_longer_than_two;
6014  __ cmp(r2, Operand(2));
6015  __ b(gt, &result_longer_than_two);
6016
6017  // Sub string of length 2 requested.
6018  // Get the two characters forming the sub string.
6019  __ add(r5, r5, Operand(r3));
6020  __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize));
6021  __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1));
6022
6023  // Try to lookup two character string in symbol table.
6024  Label make_two_character_string;
6025  StringHelper::GenerateTwoCharacterSymbolTableProbe(
6026      masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string);
6027  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
6028  __ add(sp, sp, Operand(3 * kPointerSize));
6029  __ Ret();
6030
6031  // r2: result string length.
6032  // r3: two characters combined into halfword in little endian byte order.
6033  __ bind(&make_two_character_string);
6034  __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime);
6035  __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
6036  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
6037  __ add(sp, sp, Operand(3 * kPointerSize));
6038  __ Ret();
6039
6040  __ bind(&result_longer_than_two);
6041
6042  // Allocate the result.
6043  __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime);
6044
6045  // r0: result string.
6046  // r2: result string length.
6047  // r5: string.
6048  // r7 (a.k.a. from): from offset (smi)
6049  // Locate first character of result.
6050  __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6051  // Locate 'from' character of string.
6052  __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6053  __ add(r5, r5, Operand(from, ASR, 1));
6054
6055  // r0: result string.
6056  // r1: first character of result string.
6057  // r2: result string length.
6058  // r5: first character of sub string to copy.
6059  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6060  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
6061                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
6062  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
6063  __ add(sp, sp, Operand(3 * kPointerSize));
6064  __ Ret();
6065
6066  __ bind(&non_ascii_flat);
6067  // r2: result string length.
6068  // r5: string.
6069  // r7 (a.k.a. from): from offset (smi)
6070  // Check for flat two byte string.
6071
6072  // Allocate the result.
6073  __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime);
6074
6075  // r0: result string.
6076  // r2: result string length.
6077  // r5: string.
6078  // Locate first character of result.
6079  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6080  // Locate 'from' character of string.
6081  __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6082  // As "from" is a smi it is 2 times the value which matches the size of a two
6083  // byte character.
6084  __ add(r5, r5, Operand(from));
6085  from = no_reg;
6086
6087  // r0: result string.
6088  // r1: first character of result.
6089  // r2: result length.
6090  // r5: first character of string to copy.
6091  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6092  StringHelper::GenerateCopyCharactersLong(
6093      masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED);
6094  __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
6095  __ add(sp, sp, Operand(3 * kPointerSize));
6096  __ Ret();
6097
6098  // Just jump to runtime to create the sub string.
6099  __ bind(&runtime);
6100  __ TailCallRuntime(Runtime::kSubString, 3, 1);
6101}
6102
6103
6104void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6105                                                        Register left,
6106                                                        Register right,
6107                                                        Register scratch1,
6108                                                        Register scratch2,
6109                                                        Register scratch3,
6110                                                        Register scratch4) {
6111  Label compare_lengths;
6112  // Find minimum length and length difference.
6113  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
6114  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
6115  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
6116  Register length_delta = scratch3;
6117  __ mov(scratch1, scratch2, LeaveCC, gt);
6118  Register min_length = scratch1;
6119  STATIC_ASSERT(kSmiTag == 0);
6120  __ tst(min_length, Operand(min_length));
6121  __ b(eq, &compare_lengths);
6122
6123  // Untag smi.
6124  __ mov(min_length, Operand(min_length, ASR, kSmiTagSize));
6125
6126  // Set up the registers so that we only need to increment one register
6127  // in the loop.
6128  __ add(scratch2, min_length,
6129         Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6130  __ add(left, left, Operand(scratch2));
6131  __ add(right, right, Operand(scratch2));
6132  // Registers left and right point to the min_length character of the strings.
6133  __ rsb(min_length, min_length, Operand(-1));
6134  Register index = min_length;
6135  // Index starts at -min_length.
6136
6137  {
6138    // Compare loop.
6139    Label loop;
6140    __ bind(&loop);
6141    // Compare characters.
6142    __ add(index, index, Operand(1), SetCC);
6143    __ ldrb(scratch2, MemOperand(left, index), ne);
6144    __ ldrb(scratch4, MemOperand(right, index), ne);
6145    // Skip to compare lengths with eq condition true.
6146    __ b(eq, &compare_lengths);
6147    __ cmp(scratch2, scratch4);
6148    __ b(eq, &loop);
6149    // Fallthrough with eq condition false.
6150  }
6151  // Compare lengths - strings up to min_length are equal.
6152  __ bind(&compare_lengths);
6153  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6154  // Use zero length_delta as result.
6155  __ mov(r0, Operand(length_delta), SetCC, eq);
6156  // Fall through to here if characters compare not-equal.
6157  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
6158  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
6159  __ Ret();
6160}
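
// A hedged C++ sketch of the flat ASCII comparison emitted above (the stub
// keeps the lengths as smis and returns smi-tagged LESS/EQUAL/GREATER; the
// variable names below are illustrative):
//
//   int min_length = Min(left_length, right_length);
//   for (int i = 0; i < min_length; i++) {
//     if (left_chars[i] != right_chars[i]) {
//       return left_chars[i] < right_chars[i] ? Smi::FromInt(LESS)
//                                             : Smi::FromInt(GREATER);
//     }
//   }
//   // Equal up to min_length: the shorter string compares as smaller.
//   if (left_length == right_length) return Smi::FromInt(EQUAL);
//   return left_length < right_length ? Smi::FromInt(LESS)
//                                     : Smi::FromInt(GREATER);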
6161
6162
6163void StringCompareStub::Generate(MacroAssembler* masm) {
6164  Label runtime;
6165
6166  // Stack frame on entry.
6167  //  sp[0]: right string
6168  //  sp[4]: left string
6169  __ Ldrd(r0, r1, MemOperand(sp));  // Load right in r0, left in r1.
6170
6171  Label not_same;
6172  __ cmp(r0, r1);
6173  __ b(ne, &not_same);
6174  STATIC_ASSERT(EQUAL == 0);
6175  STATIC_ASSERT(kSmiTag == 0);
6176  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
6177  __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
6178  __ add(sp, sp, Operand(2 * kPointerSize));
6179  __ Ret();
6180
6181  __ bind(&not_same);
6182
6183  // Check that both objects are sequential ascii strings.
6184  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
6185
6186  // Compare flat ascii strings natively. Remove arguments from stack first.
6187  __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3);
6188  __ add(sp, sp, Operand(2 * kPointerSize));
6189  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
6190
6191  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6192  // tagged as a small integer.
6193  __ bind(&runtime);
6194  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6195}
6196
6197
6198void StringAddStub::Generate(MacroAssembler* masm) {
6199  Label string_add_runtime, call_builtin;
6200  Builtins::JavaScript builtin_id = Builtins::ADD;
6201
6202  // Stack on entry:
6203  // sp[0]: second argument (right).
6204  // sp[4]: first argument (left).
6205
6206  // Load the two arguments.
6207  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
6208  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
6209
6210  // Make sure that both arguments are strings if not known in advance.
6211  if (flags_ == NO_STRING_ADD_FLAGS) {
6212    __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
6213    // Load instance types.
6214    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6215    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6216    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6217    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6218    STATIC_ASSERT(kStringTag == 0);
6219    // If either is not a string, go to runtime.
6220    __ tst(r4, Operand(kIsNotStringMask));
6221    __ tst(r5, Operand(kIsNotStringMask), eq);
6222    __ b(ne, &string_add_runtime);
6223  } else {
6224    // Here at least one of the arguments is definitely a string.
6225    // We convert the one that is not known to be a string.
6226    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6227      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6228      GenerateConvertArgument(
6229          masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
6230      builtin_id = Builtins::STRING_ADD_RIGHT;
6231    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6232      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6233      GenerateConvertArgument(
6234          masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
6235      builtin_id = Builtins::STRING_ADD_LEFT;
6236    }
6237  }
6238
6239  // Both arguments are strings.
6240  // r0: first string
6241  // r1: second string
6242  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6243  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6244  {
6245    Label strings_not_empty;
6246    // Check if either of the strings are empty. In that case return the other.
6247    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
6248    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
6249    STATIC_ASSERT(kSmiTag == 0);
6250    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
6251    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
6252    STATIC_ASSERT(kSmiTag == 0);
6253    // Else test if second string is empty.
6254    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
6255    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
6256
6257    __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
6258    __ add(sp, sp, Operand(2 * kPointerSize));
6259    __ Ret();
6260
6261    __ bind(&strings_not_empty);
6262  }
6263
6264  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
6265  __ mov(r3, Operand(r3, ASR, kSmiTagSize));
6266  // Both strings are non-empty.
6267  // r0: first string
6268  // r1: second string
6269  // r2: length of first string
6270  // r3: length of second string
6271  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6272  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6273  // Look at the length of the result of adding the two strings.
6274  Label string_add_flat_result, longer_than_two;
6275  // Adding two lengths can't overflow.
6276  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6277  __ add(r6, r2, Operand(r3));
6278  // Use the runtime system when adding two one character strings, as it
6279  // contains optimizations for this specific case using the symbol table.
6280  __ cmp(r6, Operand(2));
6281  __ b(ne, &longer_than_two);
6282
6283  // Check that both strings are non-external ascii strings.
6284  if (flags_ != NO_STRING_ADD_FLAGS) {
6285    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6286    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6287    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6288    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6289  }
6290  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7,
6291                                                  &string_add_runtime);
6292
6293  // Get the two characters forming the sub string.
6294  __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
6295  __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
6296
6297  // Try to lookup two character string in symbol table. If it is not found
6298  // just allocate a new one.
6299  Label make_two_character_string;
6300  StringHelper::GenerateTwoCharacterSymbolTableProbe(
6301      masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
6302  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
6303  __ add(sp, sp, Operand(2 * kPointerSize));
6304  __ Ret();
6305
6306  __ bind(&make_two_character_string);
6307  // The resulting string has length 2, and the first characters of the two
6308  // strings are combined into a single halfword in the r2 register.
6309  // So we can fill the resulting string with a single halfword store
6310  // instruction instead of two loops (which assumes that the processor is
6311  // in little endian mode).
6312  __ mov(r6, Operand(2));
6313  __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime);
6314  __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
6315  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
6316  __ add(sp, sp, Operand(2 * kPointerSize));
6317  __ Ret();
6318
6319  __ bind(&longer_than_two);
6320  // Check if resulting string will be flat.
6321  __ cmp(r6, Operand(String::kMinNonFlatLength));
6322  __ b(lt, &string_add_flat_result);
6323  // Handle exceptionally long strings in the runtime system.
6324  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6325  ASSERT(IsPowerOf2(String::kMaxLength + 1));
6326  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
6327  __ cmp(r6, Operand(String::kMaxLength + 1));
6328  __ b(hs, &string_add_runtime);
6329
6330  // If result is not supposed to be flat, allocate a cons string object.
6331  // If both strings are ascii the result is an ascii cons string.
6332  if (flags_ != NO_STRING_ADD_FLAGS) {
6333    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6334    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6335    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6336    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6337  }
6338  Label non_ascii, allocated, ascii_data;
6339  STATIC_ASSERT(kTwoByteStringTag == 0);
6340  __ tst(r4, Operand(kStringEncodingMask));
6341  __ tst(r5, Operand(kStringEncodingMask), ne);
6342  __ b(eq, &non_ascii);
6343
6344  // Allocate an ASCII cons string.
6345  __ bind(&ascii_data);
6346  __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime);
6347  __ bind(&allocated);
6348  // Fill the fields of the cons string.
6349  __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset));
6350  __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset));
6351  __ mov(r0, Operand(r7));
6352  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
6353  __ add(sp, sp, Operand(2 * kPointerSize));
6354  __ Ret();
6355
6356  __ bind(&non_ascii);
6357  // At least one of the strings is two-byte. Check whether it happens
6358  // to contain only ascii characters.
6359  // r4: first instance type.
6360  // r5: second instance type.
6361  __ tst(r4, Operand(kAsciiDataHintMask));
6362  __ tst(r5, Operand(kAsciiDataHintMask), ne);
6363  __ b(ne, &ascii_data);
6364  __ eor(r4, r4, Operand(r5));
6365  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6366  __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6367  __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
6368  __ b(eq, &ascii_data);
6369
6370  // Allocate a two byte cons string.
6371  __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime);
6372  __ jmp(&allocated);
6373
6374  // Handle creating a flat result. First check that both strings are
6375  // sequential and that they have the same encoding.
6376  // r0: first string
6377  // r1: second string
6378  // r2: length of first string
6379  // r3: length of second string
6380  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6381  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6382  // r6: sum of lengths.
6383  __ bind(&string_add_flat_result);
6384  if (flags_ != NO_STRING_ADD_FLAGS) {
6385    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
6386    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
6387    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
6388    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
6389  }
6390  // Check that both strings are sequential.
6391  STATIC_ASSERT(kSeqStringTag == 0);
6392  __ tst(r4, Operand(kStringRepresentationMask));
6393  __ tst(r5, Operand(kStringRepresentationMask), eq);
6394  __ b(ne, &string_add_runtime);
6395  // Now check if both strings have the same encoding (ASCII/Two-byte).
6396  // r0: first string.
6397  // r1: second string.
6398  // r2: length of first string.
6399  // r3: length of second string.
6400  // r6: sum of lengths.
6401  Label non_ascii_string_add_flat_result;
6402  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
6403  __ eor(r7, r4, Operand(r5));
6404  __ tst(r7, Operand(kStringEncodingMask));
6405  __ b(ne, &string_add_runtime);
6406  // And see if it's ASCII or two-byte.
6407  __ tst(r4, Operand(kStringEncodingMask));
6408  __ b(eq, &non_ascii_string_add_flat_result);
6409
6410  // Both strings are sequential ASCII strings. We also know that they are
6411  // short (since the sum of the lengths is less than kMinNonFlatLength).
6412  // r6: length of resulting flat string
6413  __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime);
6414  // Locate first character of result.
6415  __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6416  // Locate first character of first argument.
6417  __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6418  // r0: first character of first string.
6419  // r1: second string.
6420  // r2: length of first string.
6421  // r3: length of second string.
6422  // r6: first character of result.
6423  // r7: result string.
6424  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true);
6425
6426  // Load second argument and locate first character.
6427  __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6428  // r1: first character of second string.
6429  // r3: length of second string.
6430  // r6: next character of result.
6431  // r7: result string.
6432  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true);
6433  __ mov(r0, Operand(r7));
6434  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
6435  __ add(sp, sp, Operand(2 * kPointerSize));
6436  __ Ret();
6437
6438  __ bind(&non_ascii_string_add_flat_result);
6439  // Both strings are sequential two byte strings.
6440  // r0: first string.
6441  // r1: second string.
6442  // r2: length of first string.
6443  // r3: length of second string.
6444  // r6: sum of length of strings.
6445  __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime);
6446  // r0: first string.
6447  // r1: second string.
6448  // r2: length of first string.
6449  // r3: length of second string.
6450  // r7: result string.
6451
6452  // Locate first character of result.
6453  __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6454  // Locate first character of first argument.
6455  __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6456
6457  // r0: first character of first string.
6458  // r1: second string.
6459  // r2: length of first string.
6460  // r3: length of second string.
6461  // r6: first character of result.
6462  // r7: result string.
6463  StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false);
6464
6465  // Locate first character of second argument.
6466  __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6467
6468  // r1: first character of second string.
6469  // r3: length of second string.
6470  // r6: next character of result (after copy of first string).
6471  // r7: result string.
6472  StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false);
6473
6474  __ mov(r0, Operand(r7));
6475  __ IncrementCounter(&Counters::string_add_native, 1, r2, r3);
6476  __ add(sp, sp, Operand(2 * kPointerSize));
6477  __ Ret();
6478
6479  // Just jump to runtime to add the two strings.
6480  __ bind(&string_add_runtime);
6481  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6482
6483  if (call_builtin.is_linked()) {
6484    __ bind(&call_builtin);
6485    __ InvokeBuiltin(builtin_id, JUMP_JS);
6486  }
6487}
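
// Summarizing the paths above (a descriptive sketch, not emitted code):
//   if either operand is the empty string, return the other one;
//   if the combined length is 2, try the two character symbol table probe,
//       otherwise allocate a fresh two character flat string;
//   if the combined length is below String::kMinNonFlatLength, allocate a
//       flat sequential string and copy both operands into it;
//   otherwise allocate a ConsString pointing at the two operands.
// Overlong results, non-sequential inputs and mixed encodings on the flat
// path fall back to Runtime::kStringAdd.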
6488
6489
6490void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6491                                            int stack_offset,
6492                                            Register arg,
6493                                            Register scratch1,
6494                                            Register scratch2,
6495                                            Register scratch3,
6496                                            Register scratch4,
6497                                            Label* slow) {
6498  // First check if the argument is already a string.
6499  Label not_string, done;
6500  __ JumpIfSmi(arg, &not_string);
6501  __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
6502  __ b(lt, &done);
6503
6504  // Check the number to string cache.
6505  Label not_cached;
6506  __ bind(&not_string);
6507  // Puts the cached result into scratch1.
6508  NumberToStringStub::GenerateLookupNumberStringCache(masm,
6509                                                      arg,
6510                                                      scratch1,
6511                                                      scratch2,
6512                                                      scratch3,
6513                                                      scratch4,
6514                                                      false,
6515                                                      &not_cached);
6516  __ mov(arg, scratch1);
6517  __ str(arg, MemOperand(sp, stack_offset));
6518  __ jmp(&done);
6519
6520  // Check if the argument is a safe string wrapper.
6521  __ bind(&not_cached);
6522  __ JumpIfSmi(arg, slow);
6523  __ CompareObjectType(
6524      arg, scratch1, scratch2, JS_VALUE_TYPE);  // map -> scratch1.
6525  __ b(ne, slow);
6526  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6527  __ and_(scratch2,
6528          scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6529  __ cmp(scratch2,
6530         Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
6531  __ b(ne, slow);
6532  __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6533  __ str(arg, MemOperand(sp, stack_offset));
6534
6535  __ bind(&done);
6536}
6537
6538
6539void StringCharAtStub::Generate(MacroAssembler* masm) {
6540  // Expects two arguments (object, index) on the stack:
6541  //  lr: return address
6542  //  sp[0]: index
6543  //  sp[4]: object
6544  Register object = r1;
6545  Register index = r0;
6546  Register scratch1 = r2;
6547  Register scratch2 = r3;
6548  Register result = r0;
6549
6550  // Get object and index from the stack.
6551  __ pop(index);
6552  __ pop(object);
6553
6554  Label need_conversion;
6555  Label index_out_of_range;
6556  Label done;
6557  StringCharAtGenerator generator(object,
6558                                  index,
6559                                  scratch1,
6560                                  scratch2,
6561                                  result,
6562                                  &need_conversion,
6563                                  &need_conversion,
6564                                  &index_out_of_range,
6565                                  STRING_INDEX_IS_NUMBER);
6566  generator.GenerateFast(masm);
6567  __ b(&done);
6568
6569  __ bind(&index_out_of_range);
6570  // When the index is out of range, the spec requires us to return
6571  // the empty string.
6572  __ LoadRoot(result, Heap::kEmptyStringRootIndex);
6573  __ jmp(&done);
6574
6575  __ bind(&need_conversion);
6576  // Move smi zero into the result register, which will trigger
6577  // conversion.
6578  __ mov(result, Operand(Smi::FromInt(0)));
6579  __ b(&done);
6580
6581  StubRuntimeCallHelper call_helper;
6582  generator.GenerateSlow(masm, call_helper);
6583
6584  __ bind(&done);
6585  __ Ret();
6586}
6587
6588
6589void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6590  ASSERT(state_ == CompareIC::SMIS);
6591  Label miss;
6592  __ orr(r2, r1, r0);
6593  __ tst(r2, Operand(kSmiTagMask));
6594  __ b(ne, &miss);
6595
6596  if (GetCondition() == eq) {
6597    // For equality we do not care about the sign of the result.
6598    __ sub(r0, r0, r1, SetCC);
6599  } else {
6600    // Untag before subtracting to avoid handling overflow.
6601    __ SmiUntag(r1);
6602    __ sub(r0, r1, SmiUntagOperand(r0));
6603  }
6604  __ Ret();
6605
6606  __ bind(&miss);
6607  GenerateMiss(masm);
6608}
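
// The smi fast case above amounts to the following (a sketch; it assumes r1
// holds the left operand and r0 the right one, matching the convention used
// by the miss handler below):
//
//   if (either operand is not a smi) goto miss;
//   if (the comparison is an equality) result = right - left;  // zero iff equal
//   else result = Smi::value(left) - Smi::value(right);        // sign gives order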
6609
6610
6611void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6612  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6613
6614  Label generic_stub;
6615  Label unordered;
6616  Label miss;
6617  __ and_(r2, r1, Operand(r0));
6618  __ tst(r2, Operand(kSmiTagMask));
6619  __ b(eq, &generic_stub);
6620
6621  __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
6622  __ b(ne, &miss);
6623  __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
6624  __ b(ne, &miss);
6625
6626  // Inlining the double comparison and falling back to the general compare
6627  // stub if NaN is involved or VFP3 is unsupported.
6628  if (CpuFeatures::IsSupported(VFP3)) {
6629    CpuFeatures::Scope scope(VFP3);
6630
6631    // Load left and right operand
6632    __ sub(r2, r1, Operand(kHeapObjectTag));
6633    __ vldr(d0, r2, HeapNumber::kValueOffset);
6634    __ sub(r2, r0, Operand(kHeapObjectTag));
6635    __ vldr(d1, r2, HeapNumber::kValueOffset);
6636
6637    // Compare operands
6638    __ VFPCompareAndSetFlags(d0, d1);
6639
6640    // Don't base result on status bits when a NaN is involved.
6641    __ b(vs, &unordered);
6642
6643    // Return a result of -1, 0, or 1, based on status bits.
6644    __ mov(r0, Operand(EQUAL), LeaveCC, eq);
6645    __ mov(r0, Operand(LESS), LeaveCC, lt);
6646    __ mov(r0, Operand(GREATER), LeaveCC, gt);
6647    __ Ret();
6648
6649    __ bind(&unordered);
6650  }
6651
6652  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
6653  __ bind(&generic_stub);
6654  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6655
6656  __ bind(&miss);
6657  GenerateMiss(masm);
6658}
6659
6660
6661void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6662  ASSERT(state_ == CompareIC::OBJECTS);
6663  Label miss;
6664  __ and_(r2, r1, Operand(r0));
6665  __ tst(r2, Operand(kSmiTagMask));
6666  __ b(eq, &miss);
6667
6668  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
6669  __ b(ne, &miss);
6670  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
6671  __ b(ne, &miss);
6672
6673  ASSERT(GetCondition() == eq);
6674  __ sub(r0, r0, Operand(r1));
6675  __ Ret();
6676
6677  __ bind(&miss);
6678  GenerateMiss(masm);
6679}
6680
6681
6682void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6683  __ Push(r1, r0);
6684  __ push(lr);
6685
6686  // Call the runtime system in a fresh internal frame.
6687  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
6688  __ EnterInternalFrame();
6689  __ Push(r1, r0);
6690  __ mov(ip, Operand(Smi::FromInt(op_)));
6691  __ push(ip);
6692  __ CallExternalReference(miss, 3);
6693  __ LeaveInternalFrame();
6694  // Compute the entry point of the rewritten stub.
6695  __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
6696  // Restore registers.
6697  __ pop(lr);
6698  __ pop(r0);
6699  __ pop(r1);
6700  __ Jump(r2);
6701}
6702
6703
6704void DirectCEntryStub::Generate(MacroAssembler* masm) {
6705  __ ldr(pc, MemOperand(sp, 0));
6706}
6707
6708
6709void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6710                                    ExternalReference function) {
6711  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6712                     RelocInfo::CODE_TARGET));
6713  __ mov(r2, Operand(function));
6714  // Push return address (accessible to GC through exit frame pc).
6715  __ str(pc, MemOperand(sp, 0));
6716  __ Jump(r2);  // Call the api function.
6717}
6718
6719
6720void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6721                                    Register target) {
6722  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6723                     RelocInfo::CODE_TARGET));
6724  // Push return address (accessible to GC through exit frame pc).
6725  __ str(pc, MemOperand(sp, 0));
6726  __ Jump(target);  // Call the C++ function.
6727}
6728
6729
6730void GenerateFastPixelArrayLoad(MacroAssembler* masm,
6731                                Register receiver,
6732                                Register key,
6733                                Register elements_map,
6734                                Register elements,
6735                                Register scratch1,
6736                                Register scratch2,
6737                                Register result,
6738                                Label* not_pixel_array,
6739                                Label* key_not_smi,
6740                                Label* out_of_range) {
6741  // Register use:
6742  //
6743  // receiver - holds the receiver on entry.
6744  //            Unchanged unless 'result' is the same register.
6745  //
6746  // key      - holds the smi key on entry.
6747  //            Unchanged unless 'result' is the same register.
6748  //
6749  // elements - set to be the receiver's elements on exit.
6750  //
6751  // elements_map - set to be the map of the receiver's elements
6752  //            on exit.
6753  //
6754  // result   - holds the result of the pixel array load on exit,
6755  //            tagged as a smi if successful.
6756  //
6757  // Scratch registers:
6758  //
6759  // scratch1 - used a scratch register in map check, if map
6760  //            check is successful, contains the length of the
6761  //            pixel array, the pointer to external elements and
6762  //            the untagged result.
6763  //
6764  // scratch2 - holds the untagged key.
6765
6766  // Some callers have already verified that the key is a smi.  key_not_smi is
6767  // set to NULL as a sentinel for that case.  Otherwise, an explicit check
6768  // ensuring the key is a smi must be added.
6769  if (key_not_smi != NULL) {
6770    __ JumpIfNotSmi(key, key_not_smi);
6771  } else {
6772    if (FLAG_debug_code) {
6773      __ AbortIfNotSmi(key);
6774    }
6775  }
6776  __ SmiUntag(scratch2, key);
6777
6778  // Verify that the receiver has pixel array elements.
6779  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
6780  __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex,
6781              not_pixel_array, true);
6782
6783  // Key must be in range of the pixel array.
6784  __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset));
6785  __ cmp(scratch2, scratch1);
6786  __ b(hs, out_of_range);  // unsigned check handles negative keys.
6787
6788  // Perform the indexed load and tag the result as a smi.
6789  __ ldr(scratch1,
6790         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
6791  __ ldrb(scratch1, MemOperand(scratch1, scratch2));
6792  __ SmiTag(r0, scratch1);
6793  __ Ret();
6794}
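
// Condensed into C-like pseudocode, the fast pixel array load above is
// roughly (a sketch only; the bounds check is unsigned, so negative keys
// also fail it):
//
//   if (key is not a smi) goto key_not_smi;          // unless caller checked
//   elements = receiver->elements;
//   if (elements->map != pixel_array_map) goto not_pixel_array;
//   uint32_t index = Smi::value(key);
//   if (index >= elements->length) goto out_of_range;
//   return Smi::FromInt(elements->external_pointer[index]);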
6795
6796
6797void GenerateFastPixelArrayStore(MacroAssembler* masm,
6798                                 Register receiver,
6799                                 Register key,
6800                                 Register value,
6801                                 Register elements,
6802                                 Register elements_map,
6803                                 Register scratch1,
6804                                 Register scratch2,
6805                                 bool load_elements_from_receiver,
6806                                 bool load_elements_map_from_elements,
6807                                 Label* key_not_smi,
6808                                 Label* value_not_smi,
6809                                 Label* not_pixel_array,
6810                                 Label* out_of_range) {
6811  // Register use:
6812  //   receiver - holds the receiver and is unchanged unless the
6813  //              store succeeds.
6814  //   key - holds the key (must be a smi) and is unchanged.
6815  //   value - holds the value (must be a smi) and is unchanged.
6816  //   elements - holds the element object of the receiver on entry if
6817  //              load_elements_from_receiver is false, otherwise used
6818  //              internally to store the pixel array's elements and
6819  //              external array pointer.
6820  //   elements_map - holds the map of the element object if
6821  //              load_elements_map_from_elements is false, otherwise
6822  //              loaded with the element map.
6823  //
6824  Register external_pointer = elements;
6825  Register untagged_key = scratch1;
6826  Register untagged_value = scratch2;
6827
6828  if (load_elements_from_receiver) {
6829    __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
6830  }
6831
6832  // By passing NULL as not_pixel_array, callers signal that they have already
6833  // verified that the receiver has pixel array elements.
6834  if (not_pixel_array != NULL) {
6835    if (load_elements_map_from_elements) {
6836      __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
6837    }
6838    __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
6839    __ cmp(elements_map, ip);
6840    __ b(ne, not_pixel_array);
6841  } else {
6842    if (FLAG_debug_code) {
6843      // Map check should have already made sure that elements is a pixel array.
6844      __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
6845      __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
6846      __ cmp(elements_map, ip);
6847      __ Assert(eq, "Elements isn't a pixel array");
6848    }
6849  }
6850
6851  // Some callers have already verified that the key is a smi.  key_not_smi is
6852  // set to NULL as a sentinel for that case.  Otherwise, an explicit check
6853  // ensuring the key is a smi must be added.
6854  if (key_not_smi != NULL) {
6855    __ JumpIfNotSmi(key, key_not_smi);
6856  } else {
6857    if (FLAG_debug_code) {
6858      __ AbortIfNotSmi(key);
6859    }
6860  }
6861
6862  __ SmiUntag(untagged_key, key);
6863
6864  // Perform bounds check.
6865  __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset));
6866  __ cmp(untagged_key, scratch2);
6867  __ b(hs, out_of_range);  // unsigned check handles negative keys.
6868
6869  __ JumpIfNotSmi(value, value_not_smi);
6870  __ SmiUntag(untagged_value, value);
6871
6872  // Clamp the value to [0..255].
6873  __ Usat(untagged_value, 8, Operand(untagged_value));
6874  // Get the pointer to the external array. This clobbers elements.
6875  __ ldr(external_pointer,
6876         FieldMemOperand(elements, PixelArray::kExternalPointerOffset));
6877  __ strb(untagged_value, MemOperand(external_pointer, untagged_key));
6878  __ Ret();
6879}
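
// And the corresponding fast store, sketched the same way (the Usat above
// clamps the untagged value to the byte range 0..255):
//
//   if (key is not a smi) goto key_not_smi;
//   if (value is not a smi) goto value_not_smi;
//   uint32_t index = Smi::value(key);
//   if (index >= elements->length) goto out_of_range;
//   int v = Smi::value(value);
//   elements->external_pointer[index] = v < 0 ? 0 : (v > 255 ? 255 : v);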
6880
6881
6882#undef __
6883
6884} }  // namespace v8::internal
6885
6886#endif  // V8_TARGET_ARCH_ARM
6887