1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if V8_TARGET_ARCH_ARM
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "regexp-macro-assembler.h"
35#include "stub-cache.h"
36
37namespace v8 {
38namespace internal {
39
40
41void FastNewClosureStub::InitializeInterfaceDescriptor(
42    Isolate* isolate,
43    CodeStubInterfaceDescriptor* descriptor) {
44  static Register registers[] = { r2 };
45  descriptor->register_param_count_ = 1;
46  descriptor->register_params_ = registers;
47  descriptor->deoptimization_handler_ =
48      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
49}
50
51
52void ToNumberStub::InitializeInterfaceDescriptor(
53    Isolate* isolate,
54    CodeStubInterfaceDescriptor* descriptor) {
55  static Register registers[] = { r0 };
56  descriptor->register_param_count_ = 1;
57  descriptor->register_params_ = registers;
58  descriptor->deoptimization_handler_ = NULL;
59}
60
61
62void NumberToStringStub::InitializeInterfaceDescriptor(
63    Isolate* isolate,
64    CodeStubInterfaceDescriptor* descriptor) {
65  static Register registers[] = { r0 };
66  descriptor->register_param_count_ = 1;
67  descriptor->register_params_ = registers;
68  descriptor->deoptimization_handler_ =
69      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
70}
71
72
73void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
74    Isolate* isolate,
75    CodeStubInterfaceDescriptor* descriptor) {
76  static Register registers[] = { r3, r2, r1 };
77  descriptor->register_param_count_ = 3;
78  descriptor->register_params_ = registers;
79  descriptor->deoptimization_handler_ =
80      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
81}
82
83
84void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
85    Isolate* isolate,
86    CodeStubInterfaceDescriptor* descriptor) {
87  static Register registers[] = { r3, r2, r1, r0 };
88  descriptor->register_param_count_ = 4;
89  descriptor->register_params_ = registers;
90  descriptor->deoptimization_handler_ =
91      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
92}
93
94
95void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
96    Isolate* isolate,
97    CodeStubInterfaceDescriptor* descriptor) {
98  static Register registers[] = { r2 };
99  descriptor->register_param_count_ = 1;
100  descriptor->register_params_ = registers;
101  descriptor->deoptimization_handler_ = NULL;
102}
103
104
105void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
106    Isolate* isolate,
107    CodeStubInterfaceDescriptor* descriptor) {
108  static Register registers[] = { r1, r0 };
109  descriptor->register_param_count_ = 2;
110  descriptor->register_params_ = registers;
111  descriptor->deoptimization_handler_ =
112      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
113}
114
115
116void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
117    Isolate* isolate,
118    CodeStubInterfaceDescriptor* descriptor) {
119  static Register registers[] = { r1, r0 };
120  descriptor->register_param_count_ = 2;
121  descriptor->register_params_ = registers;
122  descriptor->deoptimization_handler_ =
123      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
124}
125
126
127void LoadFieldStub::InitializeInterfaceDescriptor(
128    Isolate* isolate,
129    CodeStubInterfaceDescriptor* descriptor) {
130  static Register registers[] = { r0 };
131  descriptor->register_param_count_ = 1;
132  descriptor->register_params_ = registers;
133  descriptor->deoptimization_handler_ = NULL;
134}
135
136
137void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
138    Isolate* isolate,
139    CodeStubInterfaceDescriptor* descriptor) {
140  static Register registers[] = { r1 };
141  descriptor->register_param_count_ = 1;
142  descriptor->register_params_ = registers;
143  descriptor->deoptimization_handler_ = NULL;
144}
145
146
147void KeyedArrayCallStub::InitializeInterfaceDescriptor(
148    Isolate* isolate,
149    CodeStubInterfaceDescriptor* descriptor) {
150  static Register registers[] = { r2 };
151  descriptor->register_param_count_ = 1;
152  descriptor->register_params_ = registers;
153  descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
154  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
155  descriptor->deoptimization_handler_ =
156      FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
157}
158
159
160void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
161    Isolate* isolate,
162    CodeStubInterfaceDescriptor* descriptor) {
163  static Register registers[] = { r2, r1, r0 };
164  descriptor->register_param_count_ = 3;
165  descriptor->register_params_ = registers;
166  descriptor->deoptimization_handler_ =
167      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
168}
169
170
171void TransitionElementsKindStub::InitializeInterfaceDescriptor(
172    Isolate* isolate,
173    CodeStubInterfaceDescriptor* descriptor) {
174  static Register registers[] = { r0, r1 };
175  descriptor->register_param_count_ = 2;
176  descriptor->register_params_ = registers;
177  Address entry =
178      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
179  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
180}
181
182
183void CompareNilICStub::InitializeInterfaceDescriptor(
184    Isolate* isolate,
185    CodeStubInterfaceDescriptor* descriptor) {
186  static Register registers[] = { r0 };
187  descriptor->register_param_count_ = 1;
188  descriptor->register_params_ = registers;
189  descriptor->deoptimization_handler_ =
190      FUNCTION_ADDR(CompareNilIC_Miss);
191  descriptor->SetMissHandler(
192      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
193}
194
195
196void BinaryOpICStub::InitializeInterfaceDescriptor(
197    Isolate* isolate,
198    CodeStubInterfaceDescriptor* descriptor) {
199  static Register registers[] = { r1, r0 };
200  descriptor->register_param_count_ = 2;
201  descriptor->register_params_ = registers;
202  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
203  descriptor->SetMissHandler(
204      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
205}
206
207
208static void InitializeArrayConstructorDescriptor(
209    Isolate* isolate,
210    CodeStubInterfaceDescriptor* descriptor,
211    int constant_stack_parameter_count) {
212  // register state
213  // r0 -- number of arguments
214  // r1 -- function
215  // r2 -- type info cell with elements kind
216  static Register registers_variable_args[] = { r1, r2, r0 };
217  static Register registers_no_args[] = { r1, r2 };
218
219  if (constant_stack_parameter_count == 0) {
220    descriptor->register_param_count_ = 2;
221    descriptor->register_params_ = registers_no_args;
222  } else {
223    // stack param count needs (constructor pointer, and single argument)
224    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
225    descriptor->stack_parameter_count_ = r0;
226    descriptor->register_param_count_ = 3;
227    descriptor->register_params_ = registers_variable_args;
228  }
229
230  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
231  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
232  descriptor->deoptimization_handler_ =
233      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
234}
235
236
237static void InitializeInternalArrayConstructorDescriptor(
238    Isolate* isolate,
239    CodeStubInterfaceDescriptor* descriptor,
240    int constant_stack_parameter_count) {
241  // register state
242  // r0 -- number of arguments
243  // r1 -- constructor function
244  static Register registers_variable_args[] = { r1, r0 };
245  static Register registers_no_args[] = { r1 };
246
247  if (constant_stack_parameter_count == 0) {
248    descriptor->register_param_count_ = 1;
249    descriptor->register_params_ = registers_no_args;
250  } else {
251    // stack param count needs (constructor pointer, and single argument)
252    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
253    descriptor->stack_parameter_count_ = r0;
254    descriptor->register_param_count_ = 2;
255    descriptor->register_params_ = registers_variable_args;
256  }
257
258  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
259  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
260  descriptor->deoptimization_handler_ =
261      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
262}
263
264
265void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
266    Isolate* isolate,
267    CodeStubInterfaceDescriptor* descriptor) {
268  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
269}
270
271
272void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
273    Isolate* isolate,
274    CodeStubInterfaceDescriptor* descriptor) {
275  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
276}
277
278
279void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
280    Isolate* isolate,
281    CodeStubInterfaceDescriptor* descriptor) {
282  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
283}
284
285
286void ToBooleanStub::InitializeInterfaceDescriptor(
287    Isolate* isolate,
288    CodeStubInterfaceDescriptor* descriptor) {
289  static Register registers[] = { r0 };
290  descriptor->register_param_count_ = 1;
291  descriptor->register_params_ = registers;
292  descriptor->deoptimization_handler_ =
293      FUNCTION_ADDR(ToBooleanIC_Miss);
294  descriptor->SetMissHandler(
295      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
296}
297
298
299void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
300    Isolate* isolate,
301    CodeStubInterfaceDescriptor* descriptor) {
302  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
303}
304
305
306void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
307    Isolate* isolate,
308    CodeStubInterfaceDescriptor* descriptor) {
309  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
310}
311
312
313void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
314    Isolate* isolate,
315    CodeStubInterfaceDescriptor* descriptor) {
316  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
317}
318
319
320void StoreGlobalStub::InitializeInterfaceDescriptor(
321    Isolate* isolate,
322    CodeStubInterfaceDescriptor* descriptor) {
323  static Register registers[] = { r1, r2, r0 };
324  descriptor->register_param_count_ = 3;
325  descriptor->register_params_ = registers;
326  descriptor->deoptimization_handler_ =
327      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
328}
329
330
331void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
332    Isolate* isolate,
333    CodeStubInterfaceDescriptor* descriptor) {
334  static Register registers[] = { r0, r3, r1, r2 };
335  descriptor->register_param_count_ = 4;
336  descriptor->register_params_ = registers;
337  descriptor->deoptimization_handler_ =
338      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
339}
340
341
342void NewStringAddStub::InitializeInterfaceDescriptor(
343    Isolate* isolate,
344    CodeStubInterfaceDescriptor* descriptor) {
345  static Register registers[] = { r1, r0 };
346  descriptor->register_param_count_ = 2;
347  descriptor->register_params_ = registers;
348  descriptor->deoptimization_handler_ =
349      Runtime::FunctionForId(Runtime::kStringAdd)->entry;
350}
351
352
353#define __ ACCESS_MASM(masm)
354
355
356static void EmitIdenticalObjectComparison(MacroAssembler* masm,
357                                          Label* slow,
358                                          Condition cond);
359static void EmitSmiNonsmiComparison(MacroAssembler* masm,
360                                    Register lhs,
361                                    Register rhs,
362                                    Label* lhs_not_nan,
363                                    Label* slow,
364                                    bool strict);
365static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
366                                           Register lhs,
367                                           Register rhs);
368
369
370void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
371  // Update the static counter each time a new code stub is generated.
372  Isolate* isolate = masm->isolate();
373  isolate->counters()->code_stubs()->Increment();
374
375  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
376  int param_count = descriptor->register_param_count_;
377  {
378    // Call the runtime system in a fresh internal frame.
379    FrameScope scope(masm, StackFrame::INTERNAL);
380    ASSERT(descriptor->register_param_count_ == 0 ||
381           r0.is(descriptor->register_params_[param_count - 1]));
382    // Push arguments
383    for (int i = 0; i < param_count; ++i) {
384      __ push(descriptor->register_params_[i]);
385    }
386    ExternalReference miss = descriptor->miss_handler();
387    __ CallExternalReference(miss, descriptor->register_param_count_);
388  }
389
390  __ Ret();
391}
392
393
394void FastNewContextStub::Generate(MacroAssembler* masm) {
395  // Try to allocate the context in new space.
396  Label gc;
397  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
398
399  // Attempt to allocate the context in new space.
400  __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
401
402  // Load the function from the stack.
403  __ ldr(r3, MemOperand(sp, 0));
404
405  // Set up the object header.
406  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
407  __ mov(r2, Operand(Smi::FromInt(length)));
408  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
409  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
410
411  // Set up the fixed slots, copy the global object from the previous context.
412  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
413  __ mov(r1, Operand(Smi::FromInt(0)));
414  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
415  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
416  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
417  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
418
419  // Initialize the rest of the slots to undefined.
420  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
421  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
422    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
423  }
424
425  // Remove the on-stack argument and return.
426  __ mov(cp, r0);
427  __ pop();
428  __ Ret();
429
430  // Need to collect. Call into runtime system.
431  __ bind(&gc);
432  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
433}
434
435
436void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
437  // Stack layout on entry:
438  //
439  // [sp]: function.
440  // [sp + kPointerSize]: serialized scope info
441
442  // Try to allocate the context in new space.
443  Label gc;
444  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
445  __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);
446
447  // Load the function from the stack.
448  __ ldr(r3, MemOperand(sp, 0));
449
450  // Load the serialized scope info from the stack.
451  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
452
453  // Set up the object header.
454  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
455  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
456  __ mov(r2, Operand(Smi::FromInt(length)));
457  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
458
459  // If this block context is nested in the native context we get a smi
460  // sentinel instead of a function. The block context should get the
461  // canonical empty function of the native context as its closure which
462  // we still have to look up.
463  Label after_sentinel;
464  __ JumpIfNotSmi(r3, &after_sentinel);
465  if (FLAG_debug_code) {
466    __ cmp(r3, Operand::Zero());
467    __ Assert(eq, kExpected0AsASmiSentinel);
468  }
469  __ ldr(r3, GlobalObjectOperand());
470  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
471  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
472  __ bind(&after_sentinel);
473
474  // Set up the fixed slots, copy the global object from the previous context.
475  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
476  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
477  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
478  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
479  __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));
480
481  // Initialize the rest of the slots to the hole value.
482  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
483  for (int i = 0; i < slots_; i++) {
484    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
485  }
486
487  // Remove the on-stack argument and return.
488  __ mov(cp, r0);
489  __ add(sp, sp, Operand(2 * kPointerSize));
490  __ Ret();
491
492  // Need to collect. Call into runtime system.
493  __ bind(&gc);
494  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
495}
496
497
498// Takes a Smi and converts to an IEEE 64 bit floating point value in two
499// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
500// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
501// scratch register.  Destroys the source register.  No GC occurs during this
502// stub so you don't have to set up the frame.
503class ConvertToDoubleStub : public PlatformCodeStub {
504 public:
505  ConvertToDoubleStub(Register result_reg_1,
506                      Register result_reg_2,
507                      Register source_reg,
508                      Register scratch_reg)
509      : result1_(result_reg_1),
510        result2_(result_reg_2),
511        source_(source_reg),
512        zeros_(scratch_reg) { }
513
514 private:
515  Register result1_;
516  Register result2_;
517  Register source_;
518  Register zeros_;
519
520  // Minor key encoding in 16 bits.
521  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
522  class OpBits: public BitField<Token::Value, 2, 14> {};
523
524  Major MajorKey() { return ConvertToDouble; }
525  int MinorKey() {
526    // Encode the parameters in a unique 16 bit value.
527    return  result1_.code() +
528           (result2_.code() << 4) +
529           (source_.code() << 8) +
530           (zeros_.code() << 12);
531  }
532
533  void Generate(MacroAssembler* masm);
534};
535
536
537void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
538  Register exponent = result1_;
539  Register mantissa = result2_;
540
541  Label not_special;
542  __ SmiUntag(source_);
543  // Move sign bit from source to destination.  This works because the sign bit
544  // in the exponent word of the double has the same position and polarity as
545  // the 2's complement sign bit in a Smi.
546  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
547  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
548  // Subtract from 0 if source was negative.
549  __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
550
551  // We have -1, 0 or 1, which we treat specially. Register source_ contains
552  // absolute value: it is either equal to 1 (special case of -1 and 1),
553  // greater than 1 (not a special case) or less than 1 (special case of 0).
554  __ cmp(source_, Operand(1));
555  __ b(gt, &not_special);
556
557  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
558  const uint32_t exponent_word_for_1 =
559      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
560  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
561  // 1, 0 and -1 all have 0 for the second word.
562  __ mov(mantissa, Operand::Zero());
563  __ Ret();
564
565  __ bind(&not_special);
566  __ clz(zeros_, source_);
567  // Compute exponent and or it into the exponent register.
568  // We use mantissa as a scratch register here.  Use a fudge factor to
569  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
570  // that fit in the ARM's constant field.
571  int fudge = 0x400;
572  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
573  __ add(mantissa, mantissa, Operand(fudge));
574  __ orr(exponent,
575         exponent,
576         Operand(mantissa, LSL, HeapNumber::kExponentShift));
577  // Shift up the source chopping the top bit off.
578  __ add(zeros_, zeros_, Operand(1));
579  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
580  __ mov(source_, Operand(source_, LSL, zeros_));
581  // Compute lower part of fraction (last 12 bits).
582  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
583  // And the top (top 20 bits).
584  __ orr(exponent,
585         exponent,
586         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
587  __ Ret();
588}
589
590
591void DoubleToIStub::Generate(MacroAssembler* masm) {
592  Label out_of_range, only_low, negate, done;
593  Register input_reg = source();
594  Register result_reg = destination();
595
596  int double_offset = offset();
597  // Account for saved regs if input is sp.
598  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
599
600  // Immediate values for this stub fit in instructions, so it's safe to use ip.
601  Register scratch = ip;
602  Register scratch_low =
603      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
604  Register scratch_high =
605      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
606  LowDwVfpRegister double_scratch = kScratchDoubleReg;
607
608  __ Push(scratch_high, scratch_low);
609
610  if (!skip_fastpath()) {
611    // Load double input.
612    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
613    __ vmov(scratch_low, scratch_high, double_scratch);
614
615    // Do fast-path convert from double to int.
616    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
617    __ vmov(result_reg, double_scratch.low());
618
619    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
620    __ sub(scratch, result_reg, Operand(1));
621    __ cmp(scratch, Operand(0x7ffffffe));
622    __ b(lt, &done);
623  } else {
624    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
625    // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
626    if (double_offset == 0) {
627      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
628    } else {
629      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
630      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
631    }
632  }
633
634  __ Ubfx(scratch, scratch_high,
635         HeapNumber::kExponentShift, HeapNumber::kExponentBits);
636  // Load scratch with exponent - 1. This is faster than loading
637  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
638  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
639  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
640  // If exponent is greater than or equal to 84, the 32 less significant
641  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
642  // the result is 0.
643  // Compare exponent with 84 (compare exponent - 1 with 83).
644  __ cmp(scratch, Operand(83));
645  __ b(ge, &out_of_range);
646
647  // If we reach this code, 31 <= exponent <= 83.
648  // So, we don't have to handle cases where 0 <= exponent <= 20 for
649  // which we would need to shift right the high part of the mantissa.
650  // Scratch contains exponent - 1.
651  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
652  __ rsb(scratch, scratch, Operand(51), SetCC);
653  __ b(ls, &only_low);
654  // 21 <= exponent <= 51, shift scratch_low and scratch_high
655  // to generate the result.
656  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
657  // Scratch contains: 52 - exponent.
658  // We needs: exponent - 20.
659  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
660  __ rsb(scratch, scratch, Operand(32));
661  __ Ubfx(result_reg, scratch_high,
662          0, HeapNumber::kMantissaBitsInTopWord);
663  // Set the implicit 1 before the mantissa part in scratch_high.
664  __ orr(result_reg, result_reg,
665         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
666  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
667  __ b(&negate);
668
669  __ bind(&out_of_range);
670  __ mov(result_reg, Operand::Zero());
671  __ b(&done);
672
673  __ bind(&only_low);
674  // 52 <= exponent <= 83, shift only scratch_low.
675  // On entry, scratch contains: 52 - exponent.
676  __ rsb(scratch, scratch, Operand::Zero());
677  __ mov(result_reg, Operand(scratch_low, LSL, scratch));
678
679  __ bind(&negate);
680  // If input was positive, scratch_high ASR 31 equals 0 and
681  // scratch_high LSR 31 equals zero.
682  // New result = (result eor 0) + 0 = result.
683  // If the input was negative, we have to negate the result.
684  // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
685  // New result = (result eor 0xffffffff) + 1 = 0 - result.
686  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
687  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
688
689  __ bind(&done);
690
691  __ Pop(scratch_high, scratch_low);
692  __ Ret();
693}
694
695
696void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
697    Isolate* isolate) {
698  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
699  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
700  stub1.GetCode(isolate);
701  stub2.GetCode(isolate);
702}
703
704
705// See comment for class.
706void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
707  Label max_negative_int;
708  // the_int_ has the answer which is a signed int32 but not a Smi.
709  // We test for the special value that has a different exponent.  This test
710  // has the neat side effect of setting the flags according to the sign.
711  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
712  __ cmp(the_int_, Operand(0x80000000u));
713  __ b(eq, &max_negative_int);
714  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
715  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
716  uint32_t non_smi_exponent =
717      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
718  __ mov(scratch_, Operand(non_smi_exponent));
719  // Set the sign bit in scratch_ if the value was negative.
720  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
721  // Subtract from 0 if the value was negative.
722  __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
723  // We should be masking the implict first digit of the mantissa away here,
724  // but it just ends up combining harmlessly with the last digit of the
725  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
726  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
727  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
728  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
729  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
730  __ str(scratch_, FieldMemOperand(the_heap_number_,
731                                   HeapNumber::kExponentOffset));
732  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
733  __ str(scratch_, FieldMemOperand(the_heap_number_,
734                                   HeapNumber::kMantissaOffset));
735  __ Ret();
736
737  __ bind(&max_negative_int);
738  // The max negative int32 is stored as a positive number in the mantissa of
739  // a double because it uses a sign bit instead of using two's complement.
740  // The actual mantissa bits stored are all 0 because the implicit most
741  // significant 1 bit is not stored.
742  non_smi_exponent += 1 << HeapNumber::kExponentShift;
743  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
744  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
745  __ mov(ip, Operand::Zero());
746  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
747  __ Ret();
748}
749
750
751// Handle the case where the lhs and rhs are the same object.
752// Equality is almost reflexive (everything but NaN), so this is a test
753// for "identity and not NaN".
754static void EmitIdenticalObjectComparison(MacroAssembler* masm,
755                                          Label* slow,
756                                          Condition cond) {
757  Label not_identical;
758  Label heap_number, return_equal;
759  __ cmp(r0, r1);
760  __ b(ne, &not_identical);
761
762  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
763  // so we do the second best thing - test it ourselves.
764  // They are both equal and they are not both Smis so both of them are not
765  // Smis.  If it's not a heap number, then return equal.
766  if (cond == lt || cond == gt) {
767    __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
768    __ b(ge, slow);
769  } else {
770    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
771    __ b(eq, &heap_number);
772    // Comparing JS objects with <=, >= is complicated.
773    if (cond != eq) {
774      __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
775      __ b(ge, slow);
776      // Normally here we fall through to return_equal, but undefined is
777      // special: (undefined == undefined) == true, but
778      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
779      if (cond == le || cond == ge) {
780        __ cmp(r4, Operand(ODDBALL_TYPE));
781        __ b(ne, &return_equal);
782        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
783        __ cmp(r0, r2);
784        __ b(ne, &return_equal);
785        if (cond == le) {
786          // undefined <= undefined should fail.
787          __ mov(r0, Operand(GREATER));
788        } else  {
789          // undefined >= undefined should fail.
790          __ mov(r0, Operand(LESS));
791        }
792        __ Ret();
793      }
794    }
795  }
796
797  __ bind(&return_equal);
798  if (cond == lt) {
799    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
800  } else if (cond == gt) {
801    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
802  } else {
803    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
804  }
805  __ Ret();
806
807  // For less and greater we don't have to check for NaN since the result of
808  // x < x is false regardless.  For the others here is some code to check
809  // for NaN.
810  if (cond != lt && cond != gt) {
811    __ bind(&heap_number);
812    // It is a heap number, so return non-equal if it's NaN and equal if it's
813    // not NaN.
814
815    // The representation of NaN values has all exponent bits (52..62) set,
816    // and not all mantissa bits (0..51) clear.
817    // Read top bits of double representation (second word of value).
818    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
819    // Test that exponent bits are all set.
820    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
821    // NaNs have all-one exponents so they sign extend to -1.
822    __ cmp(r3, Operand(-1));
823    __ b(ne, &return_equal);
824
825    // Shift out flag and all exponent bits, retaining only mantissa.
826    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
827    // Or with all low-bits of mantissa.
828    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
829    __ orr(r0, r3, Operand(r2), SetCC);
830    // For equal we already have the right value in r0:  Return zero (equal)
831    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
832    // not (it's a NaN).  For <= and >= we need to load r0 with the failing
833    // value if it's a NaN.
834    if (cond != eq) {
835      // All-zero means Infinity means equal.
836      __ Ret(eq);
837      if (cond == le) {
838        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
839      } else {
840        __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
841      }
842    }
843    __ Ret();
844  }
845  // No fall through here.
846
847  __ bind(&not_identical);
848}
849
850
851// See comment at call site.
852static void EmitSmiNonsmiComparison(MacroAssembler* masm,
853                                    Register lhs,
854                                    Register rhs,
855                                    Label* lhs_not_nan,
856                                    Label* slow,
857                                    bool strict) {
858  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
859         (lhs.is(r1) && rhs.is(r0)));
860
861  Label rhs_is_smi;
862  __ JumpIfSmi(rhs, &rhs_is_smi);
863
864  // Lhs is a Smi.  Check whether the rhs is a heap number.
865  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
866  if (strict) {
867    // If rhs is not a number and lhs is a Smi then strict equality cannot
868    // succeed.  Return non-equal
869    // If rhs is r0 then there is already a non zero value in it.
870    if (!rhs.is(r0)) {
871      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
872    }
873    __ Ret(ne);
874  } else {
875    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
876    // the runtime.
877    __ b(ne, slow);
878  }
879
880  // Lhs is a smi, rhs is a number.
881  // Convert lhs to a double in d7.
882  __ SmiToDouble(d7, lhs);
883  // Load the double from rhs, tagged HeapNumber r0, to d6.
884  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
885
886  // We now have both loaded as doubles but we can skip the lhs nan check
887  // since it's a smi.
888  __ jmp(lhs_not_nan);
889
890  __ bind(&rhs_is_smi);
891  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
892  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
893  if (strict) {
894    // If lhs is not a number and rhs is a smi then strict equality cannot
895    // succeed.  Return non-equal.
896    // If lhs is r0 then there is already a non zero value in it.
897    if (!lhs.is(r0)) {
898      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
899    }
900    __ Ret(ne);
901  } else {
902    // Smi compared non-strictly with a non-smi non-heap-number.  Call
903    // the runtime.
904    __ b(ne, slow);
905  }
906
907  // Rhs is a smi, lhs is a heap number.
908  // Load the double from lhs, tagged HeapNumber r1, to d7.
909  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
910  // Convert rhs to a double in d6              .
911  __ SmiToDouble(d6, rhs);
912  // Fall through to both_loaded_as_doubles.
913}
914
915
916// See comment at call site.
917static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
918                                           Register lhs,
919                                           Register rhs) {
920    ASSERT((lhs.is(r0) && rhs.is(r1)) ||
921           (lhs.is(r1) && rhs.is(r0)));
922
923    // If either operand is a JS object or an oddball value, then they are
924    // not equal since their pointers are different.
925    // There is no test for undetectability in strict equality.
926    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
927    Label first_non_object;
928    // Get the type of the first operand into r2 and compare it with
929    // FIRST_SPEC_OBJECT_TYPE.
930    __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
931    __ b(lt, &first_non_object);
932
933    // Return non-zero (r0 is not zero)
934    Label return_not_equal;
935    __ bind(&return_not_equal);
936    __ Ret();
937
938    __ bind(&first_non_object);
939    // Check for oddballs: true, false, null, undefined.
940    __ cmp(r2, Operand(ODDBALL_TYPE));
941    __ b(eq, &return_not_equal);
942
943    __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
944    __ b(ge, &return_not_equal);
945
946    // Check for oddballs: true, false, null, undefined.
947    __ cmp(r3, Operand(ODDBALL_TYPE));
948    __ b(eq, &return_not_equal);
949
950    // Now that we have the types we might as well check for
951    // internalized-internalized.
952    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
953    __ orr(r2, r2, Operand(r3));
954    __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
955    __ b(eq, &return_not_equal);
956}
957
958
959// See comment at call site.
960static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
961                                       Register lhs,
962                                       Register rhs,
963                                       Label* both_loaded_as_doubles,
964                                       Label* not_heap_numbers,
965                                       Label* slow) {
966  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
967         (lhs.is(r1) && rhs.is(r0)));
968
969  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
970  __ b(ne, not_heap_numbers);
971  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
972  __ cmp(r2, r3);
973  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
974
975  // Both are heap numbers.  Load them up then jump to the code we have
976  // for that.
977  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
978  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
979  __ jmp(both_loaded_as_doubles);
980}
981
982
983// Fast negative check for internalized-to-internalized equality.
984static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
985                                                     Register lhs,
986                                                     Register rhs,
987                                                     Label* possible_strings,
988                                                     Label* not_both_strings) {
989  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
990         (lhs.is(r1) && rhs.is(r0)));
991
992  // r2 is object type of rhs.
993  Label object_test;
994  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
995  __ tst(r2, Operand(kIsNotStringMask));
996  __ b(ne, &object_test);
997  __ tst(r2, Operand(kIsNotInternalizedMask));
998  __ b(ne, possible_strings);
999  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
1000  __ b(ge, not_both_strings);
1001  __ tst(r3, Operand(kIsNotInternalizedMask));
1002  __ b(ne, possible_strings);
1003
1004  // Both are internalized.  We already checked they weren't the same pointer
1005  // so they are not equal.
1006  __ mov(r0, Operand(NOT_EQUAL));
1007  __ Ret();
1008
1009  __ bind(&object_test);
1010  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
1011  __ b(lt, not_both_strings);
1012  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
1013  __ b(lt, not_both_strings);
1014  // If both objects are undetectable, they are equal. Otherwise, they
1015  // are not equal, since they are different objects and an object is not
1016  // equal to undefined.
1017  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
1018  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
1019  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
1020  __ and_(r0, r2, Operand(r3));
1021  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
1022  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
1023  __ Ret();
1024}
1025
1026
1027static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1028                                         Register input,
1029                                         Register scratch,
1030                                         CompareIC::State expected,
1031                                         Label* fail) {
1032  Label ok;
1033  if (expected == CompareIC::SMI) {
1034    __ JumpIfNotSmi(input, fail);
1035  } else if (expected == CompareIC::NUMBER) {
1036    __ JumpIfSmi(input, &ok);
1037    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1038                DONT_DO_SMI_CHECK);
1039  }
1040  // We could be strict about internalized/non-internalized here, but as long as
1041  // hydrogen doesn't care, the stub doesn't have to care either.
1042  __ bind(&ok);
1043}
1044
1045
1046// On entry r1 and r2 are the values to be compared.
1047// On exit r0 is 0, positive or negative to indicate the result of
1048// the comparison.
1049void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1050  Register lhs = r1;
1051  Register rhs = r0;
1052  Condition cc = GetCondition();
1053
1054  Label miss;
1055  ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
1056  ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
1057
1058  Label slow;  // Call builtin.
1059  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
1060
1061  Label not_two_smis, smi_done;
1062  __ orr(r2, r1, r0);
1063  __ JumpIfNotSmi(r2, &not_two_smis);
1064  __ mov(r1, Operand(r1, ASR, 1));
1065  __ sub(r0, r1, Operand(r0, ASR, 1));
1066  __ Ret();
1067  __ bind(&not_two_smis);
1068
1069  // NOTICE! This code is only reached after a smi-fast-case check, so
1070  // it is certain that at least one operand isn't a smi.
1071
1072  // Handle the case where the objects are identical.  Either returns the answer
1073  // or goes to slow.  Only falls through if the objects were not identical.
1074  EmitIdenticalObjectComparison(masm, &slow, cc);
1075
1076  // If either is a Smi (we know that not both are), then they can only
1077  // be strictly equal if the other is a HeapNumber.
1078  STATIC_ASSERT(kSmiTag == 0);
1079  ASSERT_EQ(0, Smi::FromInt(0));
1080  __ and_(r2, lhs, Operand(rhs));
1081  __ JumpIfNotSmi(r2, &not_smis);
1082  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
1083  // 1) Return the answer.
1084  // 2) Go to slow.
1085  // 3) Fall through to both_loaded_as_doubles.
1086  // 4) Jump to lhs_not_nan.
1087  // In cases 3 and 4 we have found out we were dealing with a number-number
1088  // comparison.  If VFP3 is supported the double values of the numbers have
1089  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
1090  // into r0, r1, r2, and r3.
1091  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
1092
1093  __ bind(&both_loaded_as_doubles);
1094  // The arguments have been converted to doubles and stored in d6 and d7, if
1095  // VFP3 is supported, or in r0, r1, r2, and r3.
1096  Isolate* isolate = masm->isolate();
1097  __ bind(&lhs_not_nan);
1098  Label no_nan;
1099  // ARMv7 VFP3 instructions to implement double precision comparison.
1100  __ VFPCompareAndSetFlags(d7, d6);
1101  Label nan;
1102  __ b(vs, &nan);
1103  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
1104  __ mov(r0, Operand(LESS), LeaveCC, lt);
1105  __ mov(r0, Operand(GREATER), LeaveCC, gt);
1106  __ Ret();
1107
1108  __ bind(&nan);
1109  // If one of the sides was a NaN then the v flag is set.  Load r0 with
1110  // whatever it takes to make the comparison fail, since comparisons with NaN
1111  // always fail.
1112  if (cc == lt || cc == le) {
1113    __ mov(r0, Operand(GREATER));
1114  } else {
1115    __ mov(r0, Operand(LESS));
1116  }
1117  __ Ret();
1118
1119  __ bind(&not_smis);
1120  // At this point we know we are dealing with two different objects,
1121  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
1122  if (strict()) {
1123    // This returns non-equal for some object types, or falls through if it
1124    // was not lucky.
1125    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1126  }
1127
1128  Label check_for_internalized_strings;
1129  Label flat_string_check;
1130  // Check for heap-number-heap-number comparison.  Can jump to slow case,
1131  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
1132  // that case.  If the inputs are not doubles then jumps to
1133  // check_for_internalized_strings.
1134  // In this case r2 will contain the type of rhs_.  Never falls through.
1135  EmitCheckForTwoHeapNumbers(masm,
1136                             lhs,
1137                             rhs,
1138                             &both_loaded_as_doubles,
1139                             &check_for_internalized_strings,
1140                             &flat_string_check);
1141
1142  __ bind(&check_for_internalized_strings);
1143  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
1144  // internalized strings.
1145  if (cc == eq && !strict()) {
1146    // Returns an answer for two internalized strings or two detectable objects.
1147    // Otherwise jumps to string case or not both strings case.
1148    // Assumes that r2 is the type of rhs_ on entry.
1149    EmitCheckForInternalizedStringsOrObjects(
1150        masm, lhs, rhs, &flat_string_check, &slow);
1151  }
1152
1153  // Check for both being sequential ASCII strings, and inline if that is the
1154  // case.
1155  __ bind(&flat_string_check);
1156
1157  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
1158
1159  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
1160  if (cc == eq) {
1161    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1162                                                     lhs,
1163                                                     rhs,
1164                                                     r2,
1165                                                     r3,
1166                                                     r4);
1167  } else {
1168    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1169                                                       lhs,
1170                                                       rhs,
1171                                                       r2,
1172                                                       r3,
1173                                                       r4,
1174                                                       r5);
1175  }
1176  // Never falls through to here.
1177
1178  __ bind(&slow);
1179
1180  __ Push(lhs, rhs);
1181  // Figure out which native to call and setup the arguments.
1182  Builtins::JavaScript native;
1183  if (cc == eq) {
1184    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1185  } else {
1186    native = Builtins::COMPARE;
1187    int ncr;  // NaN compare result
1188    if (cc == lt || cc == le) {
1189      ncr = GREATER;
1190    } else {
1191      ASSERT(cc == gt || cc == ge);  // remaining cases
1192      ncr = LESS;
1193    }
1194    __ mov(r0, Operand(Smi::FromInt(ncr)));
1195    __ push(r0);
1196  }
1197
1198  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1199  // tagged as a small integer.
1200  __ InvokeBuiltin(native, JUMP_FUNCTION);
1201
1202  __ bind(&miss);
1203  GenerateMiss(masm);
1204}
1205
1206
1207void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1208  // We don't allow a GC during a store buffer overflow so there is no need to
1209  // store the registers in any particular way, but we do have to store and
1210  // restore them.
1211  __ stm(db_w, sp, kCallerSaved | lr.bit());
1212
1213  const Register scratch = r1;
1214
1215  if (save_doubles_ == kSaveFPRegs) {
1216    __ SaveFPRegs(sp, scratch);
1217  }
1218  const int argument_count = 1;
1219  const int fp_argument_count = 0;
1220
1221  AllowExternalCallThatCantCauseGC scope(masm);
1222  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1223  __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
1224  __ CallCFunction(
1225      ExternalReference::store_buffer_overflow_function(masm->isolate()),
1226      argument_count);
1227  if (save_doubles_ == kSaveFPRegs) {
1228    __ RestoreFPRegs(sp, scratch);
1229  }
1230  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
1231}
1232
1233
1234void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1235  // Untagged case: double input in d2, double result goes
1236  //   into d2.
1237  // Tagged case: tagged input on top of stack and in r0,
1238  //   tagged result (heap number) goes into r0.
1239
1240  Label input_not_smi;
1241  Label loaded;
1242  Label calculate;
1243  Label invalid_cache;
1244  const Register scratch0 = r9;
1245  Register scratch1 = no_reg;  // will be r4
1246  const Register cache_entry = r0;
1247  const bool tagged = (argument_type_ == TAGGED);
1248
1249  if (tagged) {
1250    // Argument is a number and is on stack and in r0.
1251    // Load argument and check if it is a smi.
1252    __ JumpIfNotSmi(r0, &input_not_smi);
1253
1254    // Input is a smi. Convert to double and load the low and high words
1255    // of the double into r2, r3.
1256    __ SmiToDouble(d7, r0);
1257    __ vmov(r2, r3, d7);
1258    __ b(&loaded);
1259
1260    __ bind(&input_not_smi);
1261    // Check if input is a HeapNumber.
1262    __ CheckMap(r0,
1263                r1,
1264                Heap::kHeapNumberMapRootIndex,
1265                &calculate,
1266                DONT_DO_SMI_CHECK);
1267    // Input is a HeapNumber. Load it to a double register and store the
1268    // low and high words into r2, r3.
1269    __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
1270    __ vmov(r2, r3, d0);
1271  } else {
1272    // Input is untagged double in d2. Output goes to d2.
1273    __ vmov(r2, r3, d2);
1274  }
1275  __ bind(&loaded);
1276  // r2 = low 32 bits of double value
1277  // r3 = high 32 bits of double value
1278  // Compute hash (the shifts are arithmetic):
1279  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
1280  __ eor(r1, r2, Operand(r3));
1281  __ eor(r1, r1, Operand(r1, ASR, 16));
1282  __ eor(r1, r1, Operand(r1, ASR, 8));
1283  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
1284  __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
1285
1286  // r2 = low 32 bits of double value.
1287  // r3 = high 32 bits of double value.
1288  // r1 = TranscendentalCache::hash(double value).
1289  Isolate* isolate = masm->isolate();
1290  ExternalReference cache_array =
1291      ExternalReference::transcendental_cache_array_address(isolate);
1292  __ mov(cache_entry, Operand(cache_array));
1293  // cache_entry points to cache array.
1294  int cache_array_index
1295      = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
1296  __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
1297  // r0 points to the cache for the type type_.
1298  // If NULL, the cache hasn't been initialized yet, so go through runtime.
1299  __ cmp(cache_entry, Operand::Zero());
1300  __ b(eq, &invalid_cache);
1301
1302#ifdef DEBUG
1303  // Check that the layout of cache elements match expectations.
1304  { TranscendentalCache::SubCache::Element test_elem[2];
1305    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
1306    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
1307    char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
1308    char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
1309    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
1310    CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
1311    CHECK_EQ(0, elem_in0 - elem_start);
1312    CHECK_EQ(kIntSize, elem_in1 - elem_start);
1313    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
1314  }
1315#endif
1316
1317  // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
1318  __ add(r1, r1, Operand(r1, LSL, 1));
1319  __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
1320  // Check if cache matches: Double value is stored in uint32_t[2] array.
1321  __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
1322  __ cmp(r2, r4);
1323  __ cmp(r3, r5, eq);
1324  __ b(ne, &calculate);
1325
1326  scratch1 = r4;  // Start of scratch1 range.
1327
1328  // Cache hit. Load result, cleanup and return.
1329  Counters* counters = masm->isolate()->counters();
1330  __ IncrementCounter(
1331      counters->transcendental_cache_hit(), 1, scratch0, scratch1);
1332  if (tagged) {
1333    // Pop input value from stack and load result into r0.
1334    __ pop();
1335    __ mov(r0, Operand(r6));
1336  } else {
1337    // Load result into d2.
1338    __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
1339  }
1340  __ Ret();
1341
1342  __ bind(&calculate);
1343  __ IncrementCounter(
1344      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
1345  if (tagged) {
1346    __ bind(&invalid_cache);
1347    ExternalReference runtime_function =
1348        ExternalReference(RuntimeFunction(), masm->isolate());
1349    __ TailCallExternalReference(runtime_function, 1, 1);
1350  } else {
1351    Label no_update;
1352    Label skip_cache;
1353
1354    // Call C function to calculate the result and update the cache.
1355    // r0: precalculated cache entry address.
1356    // r2 and r3: parts of the double value.
1357    // Store r0, r2 and r3 on stack for later before calling C function.
1358    __ Push(r3, r2, cache_entry);
1359    GenerateCallCFunction(masm, scratch0);
1360    __ GetCFunctionDoubleResult(d2);
1361
1362    // Try to update the cache. If we cannot allocate a
1363    // heap number, we return the result without updating.
1364    __ Pop(r3, r2, cache_entry);
1365    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
1366    __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
1367    __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
1368    __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
1369    __ Ret();
1370
1371    __ bind(&invalid_cache);
1372    // The cache is invalid. Call runtime which will recreate the
1373    // cache.
1374    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
1375    __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
1376    __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
1377    {
1378      FrameScope scope(masm, StackFrame::INTERNAL);
1379      __ push(r0);
1380      __ CallRuntime(RuntimeFunction(), 1);
1381    }
1382    __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
1383    __ Ret();
1384
1385    __ bind(&skip_cache);
1386    // Call C function to calculate the result and answer directly
1387    // without updating the cache.
1388    GenerateCallCFunction(masm, scratch0);
1389    __ GetCFunctionDoubleResult(d2);
1390    __ bind(&no_update);
1391
1392    // We return the value in d2 without adding it to the cache, but
1393    // we cause a scavenging GC so that future allocations will succeed.
1394    {
1395      FrameScope scope(masm, StackFrame::INTERNAL);
1396
1397      // Allocate an aligned object larger than a HeapNumber.
1398      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
1399      __ mov(scratch0, Operand(4 * kPointerSize));
1400      __ push(scratch0);
1401      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
1402    }
1403    __ Ret();
1404  }
1405}
1406
1407
1408void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
1409                                                    Register scratch) {
1410  Isolate* isolate = masm->isolate();
1411
1412  __ push(lr);
1413  __ PrepareCallCFunction(0, 1, scratch);
1414  if (masm->use_eabi_hardfloat()) {
1415    __ vmov(d0, d2);
1416  } else {
1417    __ vmov(r0, r1, d2);
1418  }
1419  AllowExternalCallThatCantCauseGC scope(masm);
1420  switch (type_) {
1421    case TranscendentalCache::SIN:
1422      __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
1423          0, 1);
1424      break;
1425    case TranscendentalCache::COS:
1426      __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
1427          0, 1);
1428      break;
1429    case TranscendentalCache::TAN:
1430      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
1431          0, 1);
1432      break;
1433    case TranscendentalCache::LOG:
1434      __ CallCFunction(ExternalReference::math_log_double_function(isolate),
1435          0, 1);
1436      break;
1437    default:
1438      UNIMPLEMENTED();
1439      break;
1440  }
1441  __ pop(lr);
1442}
1443
1444
1445Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1446  switch (type_) {
1447    // Add more cases when necessary.
1448    case TranscendentalCache::SIN: return Runtime::kMath_sin;
1449    case TranscendentalCache::COS: return Runtime::kMath_cos;
1450    case TranscendentalCache::TAN: return Runtime::kMath_tan;
1451    case TranscendentalCache::LOG: return Runtime::kMath_log;
1452    default:
1453      UNIMPLEMENTED();
1454      return Runtime::kAbort;
1455  }
1456}
1457
1458
1459void MathPowStub::Generate(MacroAssembler* masm) {
1460  const Register base = r1;
1461  const Register exponent = r2;
1462  const Register heapnumbermap = r5;
1463  const Register heapnumber = r0;
1464  const DwVfpRegister double_base = d0;
1465  const DwVfpRegister double_exponent = d1;
1466  const DwVfpRegister double_result = d2;
1467  const DwVfpRegister double_scratch = d3;
1468  const SwVfpRegister single_scratch = s6;
1469  const Register scratch = r9;
1470  const Register scratch2 = r4;
1471
1472  Label call_runtime, done, int_exponent;
1473  if (exponent_type_ == ON_STACK) {
1474    Label base_is_smi, unpack_exponent;
1475    // The exponent and base are supplied as arguments on the stack.
1476    // This can only happen if the stub is called from non-optimized code.
1477    // Load input parameters from stack to double registers.
1478    __ ldr(base, MemOperand(sp, 1 * kPointerSize));
1479    __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
1480
1481    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1482
1483    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1484    __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1485    __ cmp(scratch, heapnumbermap);
1486    __ b(ne, &call_runtime);
1487
1488    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1489    __ jmp(&unpack_exponent);
1490
1491    __ bind(&base_is_smi);
1492    __ vmov(single_scratch, scratch);
1493    __ vcvt_f64_s32(double_base, single_scratch);
1494    __ bind(&unpack_exponent);
1495
1496    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1497
1498    __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1499    __ cmp(scratch, heapnumbermap);
1500    __ b(ne, &call_runtime);
1501    __ vldr(double_exponent,
1502            FieldMemOperand(exponent, HeapNumber::kValueOffset));
1503  } else if (exponent_type_ == TAGGED) {
1504    // Base is already in double_base.
1505    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1506
1507    __ vldr(double_exponent,
1508            FieldMemOperand(exponent, HeapNumber::kValueOffset));
1509  }
1510
1511  if (exponent_type_ != INTEGER) {
1512    Label int_exponent_convert;
1513    // Detect integer exponents stored as double.
1514    __ vcvt_u32_f64(single_scratch, double_exponent);
1515    // We do not check for NaN or Infinity here because comparing numbers on
1516    // ARM correctly distinguishes NaNs.  We end up calling the built-in.
1517    __ vcvt_f64_u32(double_scratch, single_scratch);
1518    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
1519    __ b(eq, &int_exponent_convert);
1520
1521    if (exponent_type_ == ON_STACK) {
1522      // Detect square root case.  Crankshaft detects constant +/-0.5 at
1523      // compile time and uses DoMathPowHalf instead.  We then skip this check
1524      // for non-constant cases of +/-0.5 as these hardly occur.
1525      Label not_plus_half;
1526
1527      // Test for 0.5.
1528      __ vmov(double_scratch, 0.5, scratch);
1529      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1530      __ b(ne, &not_plus_half);
1531
1532      // Calculates square root of base.  Check for the special case of
1533      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1534      __ vmov(double_scratch, -V8_INFINITY, scratch);
1535      __ VFPCompareAndSetFlags(double_base, double_scratch);
1536      __ vneg(double_result, double_scratch, eq);
1537      __ b(eq, &done);
1538
1539      // Add +0 to convert -0 to +0.
1540      __ vadd(double_scratch, double_base, kDoubleRegZero);
1541      __ vsqrt(double_result, double_scratch);
1542      __ jmp(&done);
1543
1544      __ bind(&not_plus_half);
1545      __ vmov(double_scratch, -0.5, scratch);
1546      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
1547      __ b(ne, &call_runtime);
1548
1549      // Calculates square root of base.  Check for the special case of
1550      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1551      __ vmov(double_scratch, -V8_INFINITY, scratch);
1552      __ VFPCompareAndSetFlags(double_base, double_scratch);
1553      __ vmov(double_result, kDoubleRegZero, eq);
1554      __ b(eq, &done);
1555
1556      // Add +0 to convert -0 to +0.
1557      __ vadd(double_scratch, double_base, kDoubleRegZero);
1558      __ vmov(double_result, 1.0, scratch);
1559      __ vsqrt(double_scratch, double_scratch);
1560      __ vdiv(double_result, double_result, double_scratch);
1561      __ jmp(&done);
1562    }
1563
1564    __ push(lr);
1565    {
1566      AllowExternalCallThatCantCauseGC scope(masm);
1567      __ PrepareCallCFunction(0, 2, scratch);
1568      __ SetCallCDoubleArguments(double_base, double_exponent);
1569      __ CallCFunction(
1570          ExternalReference::power_double_double_function(masm->isolate()),
1571          0, 2);
1572    }
1573    __ pop(lr);
1574    __ GetCFunctionDoubleResult(double_result);
1575    __ jmp(&done);
1576
1577    __ bind(&int_exponent_convert);
1578    __ vcvt_u32_f64(single_scratch, double_exponent);
1579    __ vmov(scratch, single_scratch);
1580  }
1581
1582  // Calculate power with integer exponent.
1583  __ bind(&int_exponent);
1584
1585  // Get two copies of exponent in the registers scratch and exponent.
1586  if (exponent_type_ == INTEGER) {
1587    __ mov(scratch, exponent);
1588  } else {
1589    // Exponent has previously been stored into scratch as untagged integer.
1590    __ mov(exponent, scratch);
1591  }
1592  __ vmov(double_scratch, double_base);  // Back up base.
1593  __ vmov(double_result, 1.0, scratch2);
1594
1595  // Get absolute value of exponent.
1596  __ cmp(scratch, Operand::Zero());
1597  __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
1598  __ sub(scratch, scratch2, scratch, LeaveCC, mi);
1599
1600  Label while_true;
1601  __ bind(&while_true);
1602  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
1603  __ vmul(double_result, double_result, double_scratch, cs);
1604  __ vmul(double_scratch, double_scratch, double_scratch, ne);
1605  __ b(ne, &while_true);
1606
1607  __ cmp(exponent, Operand::Zero());
1608  __ b(ge, &done);
1609  __ vmov(double_scratch, 1.0, scratch);
1610  __ vdiv(double_result, double_scratch, double_result);
1611  // Test whether result is zero.  Bail out to check for subnormal result.
1612  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1613  __ VFPCompareAndSetFlags(double_result, 0.0);
1614  __ b(ne, &done);
1615  // double_exponent may not containe the exponent value if the input was a
1616  // smi.  We set it with exponent value before bailing out.
1617  __ vmov(single_scratch, exponent);
1618  __ vcvt_f64_s32(double_exponent, single_scratch);
1619
1620  // Returning or bailing out.
1621  Counters* counters = masm->isolate()->counters();
1622  if (exponent_type_ == ON_STACK) {
1623    // The arguments are still on the stack.
1624    __ bind(&call_runtime);
1625    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1626
1627    // The stub is called from non-optimized code, which expects the result
1628    // as heap number in exponent.
1629    __ bind(&done);
1630    __ AllocateHeapNumber(
1631        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1632    __ vstr(double_result,
1633            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1634    ASSERT(heapnumber.is(r0));
1635    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1636    __ Ret(2);
1637  } else {
1638    __ push(lr);
1639    {
1640      AllowExternalCallThatCantCauseGC scope(masm);
1641      __ PrepareCallCFunction(0, 2, scratch);
1642      __ SetCallCDoubleArguments(double_base, double_exponent);
1643      __ CallCFunction(
1644          ExternalReference::power_double_double_function(masm->isolate()),
1645          0, 2);
1646    }
1647    __ pop(lr);
1648    __ GetCFunctionDoubleResult(double_result);
1649
1650    __ bind(&done);
1651    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1652    __ Ret();
1653  }
1654}
1655
1656
1657bool CEntryStub::NeedsImmovableCode() {
1658  return true;
1659}
1660
1661
1662void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1663  CEntryStub::GenerateAheadOfTime(isolate);
1664  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1665  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1666  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1667  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1668  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1669  BinaryOpICStub::GenerateAheadOfTime(isolate);
1670}
1671
1672
1673void CodeStub::GenerateFPStubs(Isolate* isolate) {
1674  SaveFPRegsMode mode = kSaveFPRegs;
1675  CEntryStub save_doubles(1, mode);
1676  StoreBufferOverflowStub stub(mode);
1677  // These stubs might already be in the snapshot, detect that and don't
1678  // regenerate, which would lead to code stub initialization state being messed
1679  // up.
1680  Code* save_doubles_code;
1681  if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
1682    save_doubles_code = *save_doubles.GetCode(isolate);
1683  }
1684  Code* store_buffer_overflow_code;
1685  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
1686      store_buffer_overflow_code = *stub.GetCode(isolate);
1687  }
1688  isolate->set_fp_stubs_generated(true);
1689}
1690
1691
1692void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1693  CEntryStub stub(1, kDontSaveFPRegs);
1694  stub.GetCode(isolate);
1695}
1696
1697
1698static void JumpIfOOM(MacroAssembler* masm,
1699                      Register value,
1700                      Register scratch,
1701                      Label* oom_label) {
1702  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
1703  STATIC_ASSERT(kFailureTag == 3);
1704  __ and_(scratch, value, Operand(0xf));
1705  __ cmp(scratch, Operand(0xf));
1706  __ b(eq, oom_label);
1707}
1708
1709
1710void CEntryStub::GenerateCore(MacroAssembler* masm,
1711                              Label* throw_normal_exception,
1712                              Label* throw_termination_exception,
1713                              Label* throw_out_of_memory_exception,
1714                              bool do_gc,
1715                              bool always_allocate) {
1716  // r0: result parameter for PerformGC, if any
1717  // r4: number of arguments including receiver  (C callee-saved)
1718  // r5: pointer to builtin function  (C callee-saved)
1719  // r6: pointer to the first argument (C callee-saved)
1720  Isolate* isolate = masm->isolate();
1721
1722  if (do_gc) {
1723    // Passing r0.
1724    __ PrepareCallCFunction(2, 0, r1);
1725    __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
1726    __ CallCFunction(ExternalReference::perform_gc_function(isolate),
1727        2, 0);
1728  }
1729
1730  ExternalReference scope_depth =
1731      ExternalReference::heap_always_allocate_scope_depth(isolate);
1732  if (always_allocate) {
1733    __ mov(r0, Operand(scope_depth));
1734    __ ldr(r1, MemOperand(r0));
1735    __ add(r1, r1, Operand(1));
1736    __ str(r1, MemOperand(r0));
1737  }
1738
1739  // Call C built-in.
1740  // r0 = argc, r1 = argv
1741  __ mov(r0, Operand(r4));
1742  __ mov(r1, Operand(r6));
1743
1744#if V8_HOST_ARCH_ARM
1745  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1746  int frame_alignment_mask = frame_alignment - 1;
1747  if (FLAG_debug_code) {
1748    if (frame_alignment > kPointerSize) {
1749      Label alignment_as_expected;
1750      ASSERT(IsPowerOf2(frame_alignment));
1751      __ tst(sp, Operand(frame_alignment_mask));
1752      __ b(eq, &alignment_as_expected);
1753      // Don't use Check here, as it will call Runtime_Abort re-entering here.
1754      __ stop("Unexpected alignment");
1755      __ bind(&alignment_as_expected);
1756    }
1757  }
1758#endif
1759
1760  __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));
1761
1762  // To let the GC traverse the return address of the exit frames, we need to
1763  // know where the return address is. The CEntryStub is unmovable, so
1764  // we can store the address on the stack to be able to find it again and
1765  // we never have to restore it, because it will not change.
1766  // Compute the return address in lr to return to after the jump below. Pc is
1767  // already at '+ 8' from the current instruction but return is after three
1768  // instructions so add another 4 to pc to get the return address.
1769  {
1770    // Prevent literal pool emission before return address.
1771    Assembler::BlockConstPoolScope block_const_pool(masm);
1772    masm->add(lr, pc, Operand(4));
1773    __ str(lr, MemOperand(sp, 0));
1774    masm->Jump(r5);
1775  }
1776
1777  __ VFPEnsureFPSCRState(r2);
1778
1779  if (always_allocate) {
1780    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
1781    // though (contain the result).
1782    __ mov(r2, Operand(scope_depth));
1783    __ ldr(r3, MemOperand(r2));
1784    __ sub(r3, r3, Operand(1));
1785    __ str(r3, MemOperand(r2));
1786  }
1787
1788  // check for failure result
1789  Label failure_returned;
1790  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
1791  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
1792  __ add(r2, r0, Operand(1));
1793  __ tst(r2, Operand(kFailureTagMask));
1794  __ b(eq, &failure_returned);
1795
1796  // Exit C frame and return.
1797  // r0:r1: result
1798  // sp: stack pointer
1799  // fp: frame pointer
1800  //  Callee-saved register r4 still holds argc.
1801  __ LeaveExitFrame(save_doubles_, r4, true);
1802  __ mov(pc, lr);
1803
1804  // check if we should retry or throw exception
1805  Label retry;
1806  __ bind(&failure_returned);
1807  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
1808  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
1809  __ b(eq, &retry);
1810
1811  // Special handling of out of memory exceptions.
1812  JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
1813
1814  // Retrieve the pending exception.
1815  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1816                                       isolate)));
1817  __ ldr(r0, MemOperand(ip));
1818
1819  // See if we just retrieved an OOM exception.
1820  JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
1821
1822  // Clear the pending exception.
1823  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
1824  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1825                                       isolate)));
1826  __ str(r3, MemOperand(ip));
1827
1828  // Special handling of termination exceptions which are uncatchable
1829  // by javascript code.
1830  __ cmp(r0, Operand(isolate->factory()->termination_exception()));
1831  __ b(eq, throw_termination_exception);
1832
1833  // Handle normal exception.
1834  __ jmp(throw_normal_exception);
1835
1836  __ bind(&retry);  // pass last failure (r0) as parameter (r0) when retrying
1837}
1838
1839
1840void CEntryStub::Generate(MacroAssembler* masm) {
1841  // Called from JavaScript; parameters are on stack as if calling JS function
1842  // r0: number of arguments including receiver
1843  // r1: pointer to builtin function
1844  // fp: frame pointer  (restored after C call)
1845  // sp: stack pointer  (restored as callee's sp after C call)
1846  // cp: current context  (C callee-saved)
1847
1848  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1849
1850  // Result returned in r0 or r0+r1 by default.
1851
1852  // NOTE: Invocations of builtins may return failure objects
1853  // instead of a proper result. The builtin entry handles
1854  // this by performing a garbage collection and retrying the
1855  // builtin once.
1856
1857  // Compute the argv pointer in a callee-saved register.
1858  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
1859  __ sub(r6, r6, Operand(kPointerSize));
1860
1861  // Enter the exit frame that transitions from JavaScript to C++.
1862  FrameScope scope(masm, StackFrame::MANUAL);
1863  __ EnterExitFrame(save_doubles_);
1864
1865  // Set up argc and the builtin function in callee-saved registers.
1866  __ mov(r4, Operand(r0));
1867  __ mov(r5, Operand(r1));
1868
1869  // r4: number of arguments (C callee-saved)
1870  // r5: pointer to builtin function (C callee-saved)
1871  // r6: pointer to first argument (C callee-saved)
1872
1873  Label throw_normal_exception;
1874  Label throw_termination_exception;
1875  Label throw_out_of_memory_exception;
1876
1877  // Call into the runtime system.
1878  GenerateCore(masm,
1879               &throw_normal_exception,
1880               &throw_termination_exception,
1881               &throw_out_of_memory_exception,
1882               false,
1883               false);
1884
1885  // Do space-specific GC and retry runtime call.
1886  GenerateCore(masm,
1887               &throw_normal_exception,
1888               &throw_termination_exception,
1889               &throw_out_of_memory_exception,
1890               true,
1891               false);
1892
1893  // Do full GC and retry runtime call one final time.
1894  Failure* failure = Failure::InternalError();
1895  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
1896  GenerateCore(masm,
1897               &throw_normal_exception,
1898               &throw_termination_exception,
1899               &throw_out_of_memory_exception,
1900               true,
1901               true);
1902
1903  __ bind(&throw_out_of_memory_exception);
1904  // Set external caught exception to false.
1905  Isolate* isolate = masm->isolate();
1906  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
1907                                    isolate);
1908  __ mov(r0, Operand(false, RelocInfo::NONE32));
1909  __ mov(r2, Operand(external_caught));
1910  __ str(r0, MemOperand(r2));
1911
1912  // Set pending exception and r0 to out of memory exception.
1913  Label already_have_failure;
1914  JumpIfOOM(masm, r0, ip, &already_have_failure);
1915  Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
1916  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
1917  __ bind(&already_have_failure);
1918  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1919                                       isolate)));
1920  __ str(r0, MemOperand(r2));
1921  // Fall through to the next label.
1922
1923  __ bind(&throw_termination_exception);
1924  __ ThrowUncatchable(r0);
1925
1926  __ bind(&throw_normal_exception);
1927  __ Throw(r0);
1928}
1929
1930
1931void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1932  // r0: code entry
1933  // r1: function
1934  // r2: receiver
1935  // r3: argc
1936  // [sp+0]: argv
1937
1938  Label invoke, handler_entry, exit;
1939
1940  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1941
1942  // Called from C, so do not pop argc and args on exit (preserve sp)
1943  // No need to save register-passed args
1944  // Save callee-saved registers (incl. cp and fp), sp, and lr
1945  __ stm(db_w, sp, kCalleeSaved | lr.bit());
1946
1947  // Save callee-saved vfp registers.
1948  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
1949  // Set up the reserved register for 0.0.
1950  __ vmov(kDoubleRegZero, 0.0);
1951  __ VFPEnsureFPSCRState(r4);
1952
1953  // Get address of argv, see stm above.
1954  // r0: code entry
1955  // r1: function
1956  // r2: receiver
1957  // r3: argc
1958
1959  // Set up argv in r4.
1960  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
1961  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
1962  __ ldr(r4, MemOperand(sp, offset_to_argv));
1963
1964  // Push a frame with special values setup to mark it as an entry frame.
1965  // r0: code entry
1966  // r1: function
1967  // r2: receiver
1968  // r3: argc
1969  // r4: argv
1970  Isolate* isolate = masm->isolate();
1971  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
1972  __ mov(r8, Operand(Smi::FromInt(marker)));
1973  __ mov(r6, Operand(Smi::FromInt(marker)));
1974  __ mov(r5,
1975         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
1976  __ ldr(r5, MemOperand(r5));
1977  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
1978  __ Push(ip, r8, r6, r5);
1979
1980  // Set up frame pointer for the frame to be pushed.
1981  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1982
1983  // If this is the outermost JS call, set js_entry_sp value.
1984  Label non_outermost_js;
1985  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
1986  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
1987  __ ldr(r6, MemOperand(r5));
1988  __ cmp(r6, Operand::Zero());
1989  __ b(ne, &non_outermost_js);
1990  __ str(fp, MemOperand(r5));
1991  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
1992  Label cont;
1993  __ b(&cont);
1994  __ bind(&non_outermost_js);
1995  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
1996  __ bind(&cont);
1997  __ push(ip);
1998
1999  // Jump to a faked try block that does the invoke, with a faked catch
2000  // block that sets the pending exception.
2001  __ jmp(&invoke);
2002
2003  // Block literal pool emission whilst taking the position of the handler
2004  // entry. This avoids making the assumption that literal pools are always
2005  // emitted after an instruction is emitted, rather than before.
2006  {
2007    Assembler::BlockConstPoolScope block_const_pool(masm);
2008    __ bind(&handler_entry);
2009    handler_offset_ = handler_entry.pos();
2010    // Caught exception: Store result (exception) in the pending exception
2011    // field in the JSEnv and return a failure sentinel.  Coming in here the
2012    // fp will be invalid because the PushTryHandler below sets it to 0 to
2013    // signal the existence of the JSEntry frame.
2014    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2015                                         isolate)));
2016  }
2017  __ str(r0, MemOperand(ip));
2018  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
2019  __ b(&exit);
2020
2021  // Invoke: Link this frame into the handler chain.  There's only one
2022  // handler block in this code object, so its index is 0.
2023  __ bind(&invoke);
2024  // Must preserve r0-r4, r5-r6 are available.
2025  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
2026  // If an exception not caught by another handler occurs, this handler
2027  // returns control to the code after the bl(&invoke) above, which
2028  // restores all kCalleeSaved registers (including cp and fp) to their
2029  // saved values before returning a failure to C.
2030
2031  // Clear any pending exceptions.
2032  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
2033  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2034                                       isolate)));
2035  __ str(r5, MemOperand(ip));
2036
2037  // Invoke the function by calling through JS entry trampoline builtin.
2038  // Notice that we cannot store a reference to the trampoline code directly in
2039  // this stub, because runtime stubs are not traversed when doing GC.
2040
2041  // Expected registers by Builtins::JSEntryTrampoline
2042  // r0: code entry
2043  // r1: function
2044  // r2: receiver
2045  // r3: argc
2046  // r4: argv
2047  if (is_construct) {
2048    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
2049                                      isolate);
2050    __ mov(ip, Operand(construct_entry));
2051  } else {
2052    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
2053    __ mov(ip, Operand(entry));
2054  }
2055  __ ldr(ip, MemOperand(ip));  // deref address
2056
2057  // Branch and link to JSEntryTrampoline.  We don't use the double underscore
2058  // macro for the add instruction because we don't want the coverage tool
2059  // inserting instructions here after we read the pc. We block literal pool
2060  // emission for the same reason.
2061  {
2062    Assembler::BlockConstPoolScope block_const_pool(masm);
2063    __ mov(lr, Operand(pc));
2064    masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
2065  }
2066
2067  // Unlink this frame from the handler chain.
2068  __ PopTryHandler();
2069
2070  __ bind(&exit);  // r0 holds result
2071  // Check if the current stack frame is marked as the outermost JS frame.
2072  Label non_outermost_js_2;
2073  __ pop(r5);
2074  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
2075  __ b(ne, &non_outermost_js_2);
2076  __ mov(r6, Operand::Zero());
2077  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
2078  __ str(r6, MemOperand(r5));
2079  __ bind(&non_outermost_js_2);
2080
2081  // Restore the top frame descriptors from the stack.
2082  __ pop(r3);
2083  __ mov(ip,
2084         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
2085  __ str(r3, MemOperand(ip));
2086
2087  // Reset the stack to the callee saved registers.
2088  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
2089
2090  // Restore callee-saved registers and return.
2091#ifdef DEBUG
2092  if (FLAG_debug_code) {
2093    __ mov(lr, Operand(pc));
2094  }
2095#endif
2096
2097  // Restore callee-saved vfp registers.
2098  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
2099
2100  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
2101}
2102
2103
2104// Uses registers r0 to r4.
2105// Expected input (depending on whether args are in registers or on the stack):
2106// * object: r0 or at sp + 1 * kPointerSize.
2107// * function: r1 or at sp.
2108//
2109// An inlined call site may have been generated before calling this stub.
2110// In this case the offset to the inline site to patch is passed on the stack,
2111// in the safepoint slot for register r4.
2112// (See LCodeGen::DoInstanceOfKnownGlobal)
2113void InstanceofStub::Generate(MacroAssembler* masm) {
2114  // Call site inlining and patching implies arguments in registers.
2115  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
2116  // ReturnTrueFalse is only implemented for inlined call sites.
2117  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
2118
2119  // Fixed register usage throughout the stub:
2120  const Register object = r0;  // Object (lhs).
2121  Register map = r3;  // Map of the object.
2122  const Register function = r1;  // Function (rhs).
2123  const Register prototype = r4;  // Prototype of the function.
2124  const Register inline_site = r9;
2125  const Register scratch = r2;
2126
2127  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
2128
2129  Label slow, loop, is_instance, is_not_instance, not_js_object;
2130
2131  if (!HasArgsInRegisters()) {
2132    __ ldr(object, MemOperand(sp, 1 * kPointerSize));
2133    __ ldr(function, MemOperand(sp, 0));
2134  }
2135
2136  // Check that the left hand is a JS object and load map.
2137  __ JumpIfSmi(object, &not_js_object);
2138  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
2139
2140  // If there is a call site cache don't look in the global cache, but do the
2141  // real lookup and update the call site cache.
2142  if (!HasCallSiteInlineCheck()) {
2143    Label miss;
2144    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2145    __ b(ne, &miss);
2146    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
2147    __ b(ne, &miss);
2148    __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
2149    __ Ret(HasArgsInRegisters() ? 0 : 2);
2150
2151    __ bind(&miss);
2152  }
2153
2154  // Get the prototype of the function.
2155  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
2156
2157  // Check that the function prototype is a JS object.
2158  __ JumpIfSmi(prototype, &slow);
2159  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
2160
2161  // Update the global instanceof or call site inlined cache with the current
2162  // map and function. The cached answer will be set when it is known below.
2163  if (!HasCallSiteInlineCheck()) {
2164    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2165    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2166  } else {
2167    ASSERT(HasArgsInRegisters());
2168    // Patch the (relocated) inlined map check.
2169
2170    // The offset was stored in r4 safepoint slot.
2171    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
2172    __ LoadFromSafepointRegisterSlot(scratch, r4);
2173    __ sub(inline_site, lr, scratch);
2174    // Get the map location in scratch and patch it.
2175    __ GetRelocatedValueLocation(inline_site, scratch);
2176    __ ldr(scratch, MemOperand(scratch));
2177    __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
2178  }
2179
2180  // Register mapping: r3 is object map and r4 is function prototype.
2181  // Get prototype of object into r2.
2182  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
2183
2184  // We don't need map any more. Use it as a scratch register.
2185  Register scratch2 = map;
2186  map = no_reg;
2187
2188  // Loop through the prototype chain looking for the function prototype.
2189  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
2190  __ bind(&loop);
2191  __ cmp(scratch, Operand(prototype));
2192  __ b(eq, &is_instance);
2193  __ cmp(scratch, scratch2);
2194  __ b(eq, &is_not_instance);
2195  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
2196  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
2197  __ jmp(&loop);
2198
2199  __ bind(&is_instance);
2200  if (!HasCallSiteInlineCheck()) {
2201    __ mov(r0, Operand(Smi::FromInt(0)));
2202    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
2203  } else {
2204    // Patch the call site to return true.
2205    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
2206    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2207    // Get the boolean result location in scratch and patch it.
2208    __ GetRelocatedValueLocation(inline_site, scratch);
2209    __ str(r0, MemOperand(scratch));
2210
2211    if (!ReturnTrueFalseObject()) {
2212      __ mov(r0, Operand(Smi::FromInt(0)));
2213    }
2214  }
2215  __ Ret(HasArgsInRegisters() ? 0 : 2);
2216
2217  __ bind(&is_not_instance);
2218  if (!HasCallSiteInlineCheck()) {
2219    __ mov(r0, Operand(Smi::FromInt(1)));
2220    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
2221  } else {
2222    // Patch the call site to return false.
2223    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
2224    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2225    // Get the boolean result location in scratch and patch it.
2226    __ GetRelocatedValueLocation(inline_site, scratch);
2227    __ str(r0, MemOperand(scratch));
2228
2229    if (!ReturnTrueFalseObject()) {
2230      __ mov(r0, Operand(Smi::FromInt(1)));
2231    }
2232  }
2233  __ Ret(HasArgsInRegisters() ? 0 : 2);
2234
2235  Label object_not_null, object_not_null_or_smi;
2236  __ bind(&not_js_object);
2237  // Before null, smi and string value checks, check that the rhs is a function
2238  // as for a non-function rhs an exception needs to be thrown.
2239  __ JumpIfSmi(function, &slow);
2240  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
2241  __ b(ne, &slow);
2242
2243  // Null is not instance of anything.
2244  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
2245  __ b(ne, &object_not_null);
2246  __ mov(r0, Operand(Smi::FromInt(1)));
2247  __ Ret(HasArgsInRegisters() ? 0 : 2);
2248
2249  __ bind(&object_not_null);
2250  // Smi values are not instances of anything.
2251  __ JumpIfNotSmi(object, &object_not_null_or_smi);
2252  __ mov(r0, Operand(Smi::FromInt(1)));
2253  __ Ret(HasArgsInRegisters() ? 0 : 2);
2254
2255  __ bind(&object_not_null_or_smi);
2256  // String values are not instances of anything.
2257  __ IsObjectJSStringType(object, scratch, &slow);
2258  __ mov(r0, Operand(Smi::FromInt(1)));
2259  __ Ret(HasArgsInRegisters() ? 0 : 2);
2260
2261  // Slow-case.  Tail call builtin.
2262  __ bind(&slow);
2263  if (!ReturnTrueFalseObject()) {
2264    if (HasArgsInRegisters()) {
2265      __ Push(r0, r1);
2266    }
2267  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2268  } else {
2269    {
2270      FrameScope scope(masm, StackFrame::INTERNAL);
2271      __ Push(r0, r1);
2272      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2273    }
2274    __ cmp(r0, Operand::Zero());
2275    __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
2276    __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
2277    __ Ret(HasArgsInRegisters() ? 0 : 2);
2278  }
2279}
2280
2281
2282void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2283  Label miss;
2284  Register receiver;
2285  if (kind() == Code::KEYED_LOAD_IC) {
2286    // ----------- S t a t e -------------
2287    //  -- lr    : return address
2288    //  -- r0    : key
2289    //  -- r1    : receiver
2290    // -----------------------------------
2291    __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
2292    __ b(ne, &miss);
2293    receiver = r1;
2294  } else {
2295    ASSERT(kind() == Code::LOAD_IC);
2296    // ----------- S t a t e -------------
2297    //  -- r2    : name
2298    //  -- lr    : return address
2299    //  -- r0    : receiver
2300    //  -- sp[0] : receiver
2301    // -----------------------------------
2302    receiver = r0;
2303  }
2304
2305  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
2306  __ bind(&miss);
2307  StubCompiler::TailCallBuiltin(
2308      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2309}
2310
2311
2312void StringLengthStub::Generate(MacroAssembler* masm) {
2313  Label miss;
2314  Register receiver;
2315  if (kind() == Code::KEYED_LOAD_IC) {
2316    // ----------- S t a t e -------------
2317    //  -- lr    : return address
2318    //  -- r0    : key
2319    //  -- r1    : receiver
2320    // -----------------------------------
2321    __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
2322    __ b(ne, &miss);
2323    receiver = r1;
2324  } else {
2325    ASSERT(kind() == Code::LOAD_IC);
2326    // ----------- S t a t e -------------
2327    //  -- r2    : name
2328    //  -- lr    : return address
2329    //  -- r0    : receiver
2330    //  -- sp[0] : receiver
2331    // -----------------------------------
2332    receiver = r0;
2333  }
2334
2335  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
2336
2337  __ bind(&miss);
2338  StubCompiler::TailCallBuiltin(
2339      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2340}
2341
2342
2343void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
2344  // This accepts as a receiver anything JSArray::SetElementsLength accepts
2345  // (currently anything except for external arrays which means anything with
2346  // elements of FixedArray type).  Value must be a number, but only smis are
2347  // accepted as the most common case.
2348  Label miss;
2349
2350  Register receiver;
2351  Register value;
2352  if (kind() == Code::KEYED_STORE_IC) {
2353    // ----------- S t a t e -------------
2354    //  -- lr    : return address
2355    //  -- r0    : value
2356    //  -- r1    : key
2357    //  -- r2    : receiver
2358    // -----------------------------------
2359    __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
2360    __ b(ne, &miss);
2361    receiver = r2;
2362    value = r0;
2363  } else {
2364    ASSERT(kind() == Code::STORE_IC);
2365    // ----------- S t a t e -------------
2366    //  -- lr    : return address
2367    //  -- r0    : value
2368    //  -- r1    : receiver
2369    //  -- r2    : key
2370    // -----------------------------------
2371    receiver = r1;
2372    value = r0;
2373  }
2374  Register scratch = r3;
2375
2376  // Check that the receiver isn't a smi.
2377  __ JumpIfSmi(receiver, &miss);
2378
2379  // Check that the object is a JS array.
2380  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
2381  __ b(ne, &miss);
2382
2383  // Check that elements are FixedArray.
2384  // We rely on StoreIC_ArrayLength below to deal with all types of
2385  // fast elements (including COW).
2386  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
2387  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
2388  __ b(ne, &miss);
2389
2390  // Check that the array has fast properties, otherwise the length
2391  // property might have been redefined.
2392  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
2393  __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
2394  __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
2395  __ b(eq, &miss);
2396
2397  // Check that value is a smi.
2398  __ JumpIfNotSmi(value, &miss);
2399
2400  // Prepare tail call to StoreIC_ArrayLength.
2401  __ Push(receiver, value);
2402
2403  ExternalReference ref =
2404      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
2405  __ TailCallExternalReference(ref, 2, 1);
2406
2407  __ bind(&miss);
2408
2409  StubCompiler::TailCallBuiltin(
2410      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2411}
2412
2413
2414Register InstanceofStub::left() { return r0; }
2415
2416
2417Register InstanceofStub::right() { return r1; }
2418
2419
2420void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2421  // The displacement is the offset of the last parameter (if any)
2422  // relative to the frame pointer.
2423  const int kDisplacement =
2424      StandardFrameConstants::kCallerSPOffset - kPointerSize;
2425
2426  // Check that the key is a smi.
2427  Label slow;
2428  __ JumpIfNotSmi(r1, &slow);
2429
2430  // Check if the calling frame is an arguments adaptor frame.
2431  Label adaptor;
2432  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2433  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
2434  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2435  __ b(eq, &adaptor);
2436
2437  // Check index against formal parameters count limit passed in
2438  // through register r0. Use unsigned comparison to get negative
2439  // check for free.
2440  __ cmp(r1, r0);
2441  __ b(hs, &slow);
2442
2443  // Read the argument from the stack and return it.
2444  __ sub(r3, r0, r1);
2445  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
2446  __ ldr(r0, MemOperand(r3, kDisplacement));
2447  __ Jump(lr);
2448
2449  // Arguments adaptor case: Check index against actual arguments
2450  // limit found in the arguments adaptor frame. Use unsigned
2451  // comparison to get negative check for free.
2452  __ bind(&adaptor);
2453  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2454  __ cmp(r1, r0);
2455  __ b(cs, &slow);
2456
2457  // Read the argument from the adaptor frame and return it.
2458  __ sub(r3, r0, r1);
2459  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
2460  __ ldr(r0, MemOperand(r3, kDisplacement));
2461  __ Jump(lr);
2462
2463  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2464  // by calling the runtime system.
2465  __ bind(&slow);
2466  __ push(r1);
2467  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2468}
2469
2470
2471void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2472  // sp[0] : number of parameters
2473  // sp[4] : receiver displacement
2474  // sp[8] : function
2475
2476  // Check if the calling frame is an arguments adaptor frame.
2477  Label runtime;
2478  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2479  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
2480  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2481  __ b(ne, &runtime);
2482
2483  // Patch the arguments.length and the parameters pointer in the current frame.
2484  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2485  __ str(r2, MemOperand(sp, 0 * kPointerSize));
2486  __ add(r3, r3, Operand(r2, LSL, 1));
2487  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2488  __ str(r3, MemOperand(sp, 1 * kPointerSize));
2489
2490  __ bind(&runtime);
2491  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2492}
2493
2494
2495void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2496  // Stack layout:
2497  //  sp[0] : number of parameters (tagged)
2498  //  sp[4] : address of receiver argument
2499  //  sp[8] : function
2500  // Registers used over whole function:
2501  //  r6 : allocated object (tagged)
2502  //  r9 : mapped parameter count (tagged)
2503
2504  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2505  // r1 = parameter count (tagged)
2506
2507  // Check if the calling frame is an arguments adaptor frame.
2508  Label runtime;
2509  Label adaptor_frame, try_allocate;
2510  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2511  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
2512  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2513  __ b(eq, &adaptor_frame);
2514
2515  // No adaptor, parameter count = argument count.
2516  __ mov(r2, r1);
2517  __ b(&try_allocate);
2518
2519  // We have an adaptor frame. Patch the parameters pointer.
2520  __ bind(&adaptor_frame);
2521  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2522  __ add(r3, r3, Operand(r2, LSL, 1));
2523  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2524  __ str(r3, MemOperand(sp, 1 * kPointerSize));
2525
2526  // r1 = parameter count (tagged)
2527  // r2 = argument count (tagged)
2528  // Compute the mapped parameter count = min(r1, r2) in r1.
2529  __ cmp(r1, Operand(r2));
2530  __ mov(r1, Operand(r2), LeaveCC, gt);
2531
2532  __ bind(&try_allocate);
2533
2534  // Compute the sizes of backing store, parameter map, and arguments object.
2535  // 1. Parameter map, has 2 extra words containing context and backing store.
2536  const int kParameterMapHeaderSize =
2537      FixedArray::kHeaderSize + 2 * kPointerSize;
2538  // If there are no mapped parameters, we do not need the parameter_map.
2539  __ cmp(r1, Operand(Smi::FromInt(0)));
2540  __ mov(r9, Operand::Zero(), LeaveCC, eq);
2541  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
2542  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
2543
2544  // 2. Backing store.
2545  __ add(r9, r9, Operand(r2, LSL, 1));
2546  __ add(r9, r9, Operand(FixedArray::kHeaderSize));
2547
2548  // 3. Arguments object.
2549  __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
2550
2551  // Do the allocation of all three objects in one go.
2552  __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
2553
2554  // r0 = address of new object(s) (tagged)
2555  // r2 = argument count (tagged)
2556  // Get the arguments boilerplate from the current native context into r4.
2557  const int kNormalOffset =
2558      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
2559  const int kAliasedOffset =
2560      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
2561
2562  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2563  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2564  __ cmp(r1, Operand::Zero());
2565  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
2566  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
2567
2568  // r0 = address of new object (tagged)
2569  // r1 = mapped parameter count (tagged)
2570  // r2 = argument count (tagged)
2571  // r4 = address of boilerplate object (tagged)
2572  // Copy the JS object part.
2573  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2574    __ ldr(r3, FieldMemOperand(r4, i));
2575    __ str(r3, FieldMemOperand(r0, i));
2576  }
2577
2578  // Set up the callee in-object property.
2579  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2580  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
2581  const int kCalleeOffset = JSObject::kHeaderSize +
2582      Heap::kArgumentsCalleeIndex * kPointerSize;
2583  __ str(r3, FieldMemOperand(r0, kCalleeOffset));
2584
2585  // Use the length (smi tagged) and set that as an in-object property too.
2586  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2587  const int kLengthOffset = JSObject::kHeaderSize +
2588      Heap::kArgumentsLengthIndex * kPointerSize;
2589  __ str(r2, FieldMemOperand(r0, kLengthOffset));
2590
2591  // Set up the elements pointer in the allocated arguments object.
2592  // If we allocated a parameter map, r4 will point there, otherwise
2593  // it will point to the backing store.
2594  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
2595  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2596
2597  // r0 = address of new object (tagged)
2598  // r1 = mapped parameter count (tagged)
2599  // r2 = argument count (tagged)
2600  // r4 = address of parameter map or backing store (tagged)
2601  // Initialize parameter map. If there are no mapped arguments, we're done.
2602  Label skip_parameter_map;
2603  __ cmp(r1, Operand(Smi::FromInt(0)));
2604  // Move backing store address to r3, because it is
2605  // expected there when filling in the unmapped arguments.
2606  __ mov(r3, r4, LeaveCC, eq);
2607  __ b(eq, &skip_parameter_map);
2608
2609  __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
2610  __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
2611  __ add(r6, r1, Operand(Smi::FromInt(2)));
2612  __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
2613  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
2614  __ add(r6, r4, Operand(r1, LSL, 1));
2615  __ add(r6, r6, Operand(kParameterMapHeaderSize));
2616  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
2617
2618  // Copy the parameter slots and the holes in the arguments.
2619  // We need to fill in mapped_parameter_count slots. They index the context,
2620  // where parameters are stored in reverse order, at
2621  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2622  // The mapped parameter thus need to get indices
2623  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
2624  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2625  // We loop from right to left.
2626  Label parameters_loop, parameters_test;
2627  __ mov(r6, r1);
2628  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
2629  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2630  __ sub(r9, r9, Operand(r1));
2631  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
2632  __ add(r3, r4, Operand(r6, LSL, 1));
2633  __ add(r3, r3, Operand(kParameterMapHeaderSize));
2634
2635  // r6 = loop variable (tagged)
2636  // r1 = mapping index (tagged)
2637  // r3 = address of backing store (tagged)
2638  // r4 = address of parameter map (tagged), which is also the address of new
2639  //      object + Heap::kArgumentsObjectSize (tagged)
2640  // r0 = temporary scratch (a.o., for address calculation)
2641  // r5 = the hole value
2642  __ jmp(&parameters_test);
2643
2644  __ bind(&parameters_loop);
2645  __ sub(r6, r6, Operand(Smi::FromInt(1)));
2646  __ mov(r0, Operand(r6, LSL, 1));
2647  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2648  __ str(r9, MemOperand(r4, r0));
2649  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2650  __ str(r5, MemOperand(r3, r0));
2651  __ add(r9, r9, Operand(Smi::FromInt(1)));
2652  __ bind(&parameters_test);
2653  __ cmp(r6, Operand(Smi::FromInt(0)));
2654  __ b(ne, &parameters_loop);
2655
2656  // Restore r0 = new object (tagged)
2657  __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
2658
2659  __ bind(&skip_parameter_map);
2660  // r0 = address of new object (tagged)
2661  // r2 = argument count (tagged)
2662  // r3 = address of backing store (tagged)
2663  // r5 = scratch
2664  // Copy arguments header and remaining slots (if there are any).
2665  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
2666  __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
2667  __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
2668
2669  Label arguments_loop, arguments_test;
2670  __ mov(r9, r1);
2671  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
2672  __ sub(r4, r4, Operand(r9, LSL, 1));
2673  __ jmp(&arguments_test);
2674
2675  __ bind(&arguments_loop);
2676  __ sub(r4, r4, Operand(kPointerSize));
2677  __ ldr(r6, MemOperand(r4, 0));
2678  __ add(r5, r3, Operand(r9, LSL, 1));
2679  __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
2680  __ add(r9, r9, Operand(Smi::FromInt(1)));
2681
2682  __ bind(&arguments_test);
2683  __ cmp(r9, Operand(r2));
2684  __ b(lt, &arguments_loop);
2685
2686  // Return and remove the on-stack parameters.
2687  __ add(sp, sp, Operand(3 * kPointerSize));
2688  __ Ret();
2689
2690  // Do the runtime call to allocate the arguments object.
2691  // r0 = address of new object (tagged)
2692  // r2 = argument count (tagged)
2693  __ bind(&runtime);
2694  __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
2695  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2696}
2697
2698
2699void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2700  // sp[0] : number of parameters
2701  // sp[4] : receiver displacement
2702  // sp[8] : function
2703  // Check if the calling frame is an arguments adaptor frame.
2704  Label adaptor_frame, try_allocate, runtime;
2705  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2706  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
2707  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2708  __ b(eq, &adaptor_frame);
2709
2710  // Get the length from the frame.
2711  __ ldr(r1, MemOperand(sp, 0));
2712  __ b(&try_allocate);
2713
2714  // Patch the arguments.length and the parameters pointer.
2715  __ bind(&adaptor_frame);
2716  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2717  __ str(r1, MemOperand(sp, 0));
2718  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
2719  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
2720  __ str(r3, MemOperand(sp, 1 * kPointerSize));
2721
2722  // Try the new space allocation. Start out with computing the size
2723  // of the arguments object and the elements array in words.
2724  Label add_arguments_object;
2725  __ bind(&try_allocate);
2726  __ SmiUntag(r1, SetCC);
2727  __ b(eq, &add_arguments_object);
2728  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
2729  __ bind(&add_arguments_object);
2730  __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
2731
2732  // Do the allocation of both objects in one go.
2733  __ Allocate(r1, r0, r2, r3, &runtime,
2734              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2735
2736  // Get the arguments boilerplate from the current native context.
2737  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2738  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
2739  __ ldr(r4, MemOperand(r4, Context::SlotOffset(
2740      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
2741
2742  // Copy the JS object part.
2743  __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
2744
2745  // Get the length (smi tagged) and set that as an in-object property too.
2746  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2747  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
2748  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
2749      Heap::kArgumentsLengthIndex * kPointerSize));
2750
2751  // If there are no actual arguments, we're done.
2752  Label done;
2753  __ cmp(r1, Operand::Zero());
2754  __ b(eq, &done);
2755
2756  // Get the parameters pointer from the stack.
2757  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
2758
2759  // Set up the elements pointer in the allocated arguments object and
2760  // initialize the header in the elements fixed array.
2761  __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
2762  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
2763  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
2764  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
2765  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
2766  __ SmiUntag(r1);
2767
2768  // Copy the fixed array slots.
2769  Label loop;
2770  // Set up r4 to point to the first array slot.
2771  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2772  __ bind(&loop);
2773  // Pre-decrement r2 with kPointerSize on each iteration.
2774  // Pre-decrement in order to skip receiver.
2775  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
2776  // Post-increment r4 with kPointerSize on each iteration.
2777  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
2778  __ sub(r1, r1, Operand(1));
2779  __ cmp(r1, Operand::Zero());
2780  __ b(ne, &loop);
2781
2782  // Return and remove the on-stack parameters.
2783  __ bind(&done);
2784  __ add(sp, sp, Operand(3 * kPointerSize));
2785  __ Ret();
2786
2787  // Do the runtime call to allocate the arguments object.
2788  __ bind(&runtime);
2789  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2790}
2791
2792
2793void RegExpExecStub::Generate(MacroAssembler* masm) {
2794  // Just jump directly to runtime if native RegExp is not selected at compile
2795  // time or if regexp entry in generated code is turned off runtime switch or
2796  // at compilation.
2797#ifdef V8_INTERPRETED_REGEXP
2798  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2799#else  // V8_INTERPRETED_REGEXP
2800
2801  // Stack frame on entry.
2802  //  sp[0]: last_match_info (expected JSArray)
2803  //  sp[4]: previous index
2804  //  sp[8]: subject string
2805  //  sp[12]: JSRegExp object
2806
2807  const int kLastMatchInfoOffset = 0 * kPointerSize;
2808  const int kPreviousIndexOffset = 1 * kPointerSize;
2809  const int kSubjectOffset = 2 * kPointerSize;
2810  const int kJSRegExpOffset = 3 * kPointerSize;
2811
2812  Label runtime;
2813  // Allocation of registers for this function. These are in callee save
2814  // registers and will be preserved by the call to the native RegExp code, as
2815  // this code is called using the normal C calling convention. When calling
2816  // directly from generated code the native RegExp code will not do a GC and
2817  // therefore the content of these registers are safe to use after the call.
2818  Register subject = r4;
2819  Register regexp_data = r5;
2820  Register last_match_info_elements = no_reg;  // will be r6;
2821
2822  // Ensure that a RegExp stack is allocated.
2823  Isolate* isolate = masm->isolate();
2824  ExternalReference address_of_regexp_stack_memory_address =
2825      ExternalReference::address_of_regexp_stack_memory_address(isolate);
2826  ExternalReference address_of_regexp_stack_memory_size =
2827      ExternalReference::address_of_regexp_stack_memory_size(isolate);
2828  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
2829  __ ldr(r0, MemOperand(r0, 0));
2830  __ cmp(r0, Operand::Zero());
2831  __ b(eq, &runtime);
2832
2833  // Check that the first argument is a JSRegExp object.
2834  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
2835  __ JumpIfSmi(r0, &runtime);
2836  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
2837  __ b(ne, &runtime);
2838
2839  // Check that the RegExp has been compiled (data contains a fixed array).
2840  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
2841  if (FLAG_debug_code) {
2842    __ SmiTst(regexp_data);
2843    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2844    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
2845    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2846  }
2847
2848  // regexp_data: RegExp data (FixedArray)
2849  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2850  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2851  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2852  __ b(ne, &runtime);
2853
2854  // regexp_data: RegExp data (FixedArray)
2855  // Check that the number of captures fit in the static offsets vector buffer.
2856  __ ldr(r2,
2857         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2858  // Check (number_of_captures + 1) * 2 <= offsets vector size
2859  // Or          number_of_captures * 2 <= offsets vector size - 2
2860  // Multiplying by 2 comes for free since r2 is smi-tagged.
2861  STATIC_ASSERT(kSmiTag == 0);
2862  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2863  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2864  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2865  __ b(hi, &runtime);
2866
2867  // Reset offset for possibly sliced string.
2868  __ mov(r9, Operand::Zero());
2869  __ ldr(subject, MemOperand(sp, kSubjectOffset));
2870  __ JumpIfSmi(subject, &runtime);
2871  __ mov(r3, subject);  // Make a copy of the original subject string.
2872  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2873  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2874  // subject: subject string
2875  // r3: subject string
2876  // r0: subject string instance type
2877  // regexp_data: RegExp data (FixedArray)
2878  // Handle subject string according to its encoding and representation:
2879  // (1) Sequential string?  If yes, go to (5).
2880  // (2) Anything but sequential or cons?  If yes, go to (6).
2881  // (3) Cons string.  If the string is flat, replace subject with first string.
2882  //     Otherwise bailout.
2883  // (4) Is subject external?  If yes, go to (7).
2884  // (5) Sequential string.  Load regexp code according to encoding.
2885  // (E) Carry on.
2886  /// [...]
2887
2888  // Deferred code at the end of the stub:
2889  // (6) Not a long external string?  If yes, go to (8).
2890  // (7) External string.  Make it, offset-wise, look like a sequential string.
2891  //     Go to (5).
2892  // (8) Short external string or not a string?  If yes, bail out to runtime.
2893  // (9) Sliced string.  Replace subject with parent.  Go to (4).
2894
2895  Label seq_string /* 5 */, external_string /* 7 */,
2896        check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
2897        not_long_external /* 8 */;
2898
2899  // (1) Sequential string?  If yes, go to (5).
2900  __ and_(r1,
2901          r0,
2902          Operand(kIsNotStringMask |
2903                  kStringRepresentationMask |
2904                  kShortExternalStringMask),
2905          SetCC);
2906  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2907  __ b(eq, &seq_string);  // Go to (5).
2908
2909  // (2) Anything but sequential or cons?  If yes, go to (6).
2910  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2911  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2912  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2913  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2914  __ cmp(r1, Operand(kExternalStringTag));
2915  __ b(ge, &not_seq_nor_cons);  // Go to (6).
2916
2917  // (3) Cons string.  Check that it's flat.
2918  // Replace subject with first string and reload instance type.
2919  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
2920  __ CompareRoot(r0, Heap::kempty_stringRootIndex);
2921  __ b(ne, &runtime);
2922  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2923
2924  // (4) Is subject external?  If yes, go to (7).
2925  __ bind(&check_underlying);
2926  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
2927  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
2928  STATIC_ASSERT(kSeqStringTag == 0);
2929  __ tst(r0, Operand(kStringRepresentationMask));
2930  // The underlying external string is never a short external string.
2931  STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
2932  STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2933  __ b(ne, &external_string);  // Go to (7).
2934
2935  // (5) Sequential string.  Load regexp code according to encoding.
2936  __ bind(&seq_string);
2937  // subject: sequential subject string (or look-alike, external string)
2938  // r3: original subject string
2939  // Load previous index and check range before r3 is overwritten.  We have to
2940  // use r3 instead of subject here because subject might have been only made
2941  // to look like a sequential string when it actually is an external string.
2942  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
2943  __ JumpIfNotSmi(r1, &runtime);
2944  __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
2945  __ cmp(r3, Operand(r1));
2946  __ b(ls, &runtime);
2947  __ SmiUntag(r1);
2948
2949  STATIC_ASSERT(4 == kOneByteStringTag);
2950  STATIC_ASSERT(kTwoByteStringTag == 0);
2951  __ and_(r0, r0, Operand(kStringEncodingMask));
2952  __ mov(r3, Operand(r0, ASR, 2), SetCC);
2953  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
2954  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
2955
2956  // (E) Carry on.  String handling is done.
2957  // r6: irregexp code
2958  // Check that the irregexp code has been generated for the actual string
2959  // encoding. If it has, the field contains a code object otherwise it contains
2960  // a smi (code flushing support).
2961  __ JumpIfSmi(r6, &runtime);
2962
2963  // r1: previous index
2964  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
2965  // r6: code
2966  // subject: Subject string
2967  // regexp_data: RegExp data (FixedArray)
2968  // All checks done. Now push arguments for native regexp code.
2969  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);
2970
2971  // Isolates: note we add an additional parameter here (isolate pointer).
2972  const int kRegExpExecuteArguments = 9;
2973  const int kParameterRegisters = 4;
2974  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2975
2976  // Stack pointer now points to cell where return address is to be written.
2977  // Arguments are before that on the stack or in registers.
2978
2979  // Argument 9 (sp[20]): Pass current isolate address.
2980  __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
2981  __ str(r0, MemOperand(sp, 5 * kPointerSize));
2982
2983  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
2984  __ mov(r0, Operand(1));
2985  __ str(r0, MemOperand(sp, 4 * kPointerSize));
2986
2987  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
2988  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
2989  __ ldr(r0, MemOperand(r0, 0));
2990  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
2991  __ ldr(r2, MemOperand(r2, 0));
2992  __ add(r0, r0, Operand(r2));
2993  __ str(r0, MemOperand(sp, 3 * kPointerSize));
2994
2995  // Argument 6: Set the number of capture registers to zero to force global
2996  // regexps to behave as non-global.  This does not affect non-global regexps.
2997  __ mov(r0, Operand::Zero());
2998  __ str(r0, MemOperand(sp, 2 * kPointerSize));
2999
3000  // Argument 5 (sp[4]): static offsets vector buffer.
3001  __ mov(r0,
3002         Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
3003  __ str(r0, MemOperand(sp, 1 * kPointerSize));
3004
3005  // For arguments 4 and 3 get string length, calculate start of string data and
3006  // calculate the shift of the index (0 for ASCII and 1 for two byte).
3007  __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
3008  __ eor(r3, r3, Operand(1));
3009  // Load the length from the original subject string from the previous stack
3010  // frame. Therefore we have to use fp, which points exactly to two pointer
3011  // sizes below the previous sp. (Because creating a new stack frame pushes
3012  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
3013  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
3014  // If slice offset is not 0, load the length from the original sliced string.
3015  // Argument 4, r3: End of string data
3016  // Argument 3, r2: Start of string data
3017  // Prepare start and end index of the input.
3018  __ add(r9, r7, Operand(r9, LSL, r3));
3019  __ add(r2, r9, Operand(r1, LSL, r3));
3020
3021  __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset));
3022  __ SmiUntag(r7);
3023  __ add(r3, r9, Operand(r7, LSL, r3));
3024
3025  // Argument 2 (r1): Previous index.
3026  // Already there
3027
3028  // Argument 1 (r0): Subject string.
3029  __ mov(r0, subject);
3030
3031  // Locate the code entry and call it.
3032  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
3033  DirectCEntryStub stub;
3034  stub.GenerateCall(masm, r6);
3035
3036  __ LeaveExitFrame(false, no_reg, true);
3037
3038  last_match_info_elements = r6;
3039
3040  // r0: result
3041  // subject: subject string (callee saved)
3042  // regexp_data: RegExp data (callee saved)
3043  // last_match_info_elements: Last match info elements (callee saved)
3044  // Check the result.
3045  Label success;
3046  __ cmp(r0, Operand(1));
3047  // We expect exactly one result since we force the called regexp to behave
3048  // as non-global.
3049  __ b(eq, &success);
3050  Label failure;
3051  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
3052  __ b(eq, &failure);
3053  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
3054  // If not exception it can only be retry. Handle that in the runtime system.
3055  __ b(ne, &runtime);
3056  // Result must now be exception. If there is no pending exception already a
3057  // stack overflow (on the backtrack stack) was detected in RegExp code but
3058  // haven't created the exception yet. Handle that in the runtime system.
3059  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3060  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
3061  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3062                                       isolate)));
3063  __ ldr(r0, MemOperand(r2, 0));
3064  __ cmp(r0, r1);
3065  __ b(eq, &runtime);
3066
3067  __ str(r1, MemOperand(r2, 0));  // Clear pending exception.
3068
3069  // Check if the exception is a termination. If so, throw as uncatchable.
3070  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
3071
3072  Label termination_exception;
3073  __ b(eq, &termination_exception);
3074
3075  __ Throw(r0);
3076
3077  __ bind(&termination_exception);
3078  __ ThrowUncatchable(r0);
3079
3080  __ bind(&failure);
3081  // For failure and exception return null.
3082  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
3083  __ add(sp, sp, Operand(4 * kPointerSize));
3084  __ Ret();
3085
3086  // Process the result from the native regexp code.
3087  __ bind(&success);
3088  __ ldr(r1,
3089         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
3090  // Calculate number of capture registers (number_of_captures + 1) * 2.
3091  // Multiplying by 2 comes for free since r1 is smi-tagged.
3092  STATIC_ASSERT(kSmiTag == 0);
3093  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3094  __ add(r1, r1, Operand(2));  // r1 was a smi.
3095
3096  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
3097  __ JumpIfSmi(r0, &runtime);
3098  __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
3099  __ b(ne, &runtime);
3100  // Check that the JSArray is in fast case.
3101  __ ldr(last_match_info_elements,
3102         FieldMemOperand(r0, JSArray::kElementsOffset));
3103  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
3104  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
3105  __ b(ne, &runtime);
3106  // Check that the last match info has space for the capture registers and the
3107  // additional information.
3108  __ ldr(r0,
3109         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
3110  __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
3111  __ cmp(r2, Operand::SmiUntag(r0));
3112  __ b(gt, &runtime);
3113
3114  // r1: number of capture registers
3115  // r4: subject string
3116  // Store the capture count.
3117  __ SmiTag(r2, r1);
3118  __ str(r2, FieldMemOperand(last_match_info_elements,
3119                             RegExpImpl::kLastCaptureCountOffset));
3120  // Store last subject and last input.
3121  __ str(subject,
3122         FieldMemOperand(last_match_info_elements,
3123                         RegExpImpl::kLastSubjectOffset));
3124  __ mov(r2, subject);
3125  __ RecordWriteField(last_match_info_elements,
3126                      RegExpImpl::kLastSubjectOffset,
3127                      subject,
3128                      r3,
3129                      kLRHasNotBeenSaved,
3130                      kDontSaveFPRegs);
3131  __ mov(subject, r2);
3132  __ str(subject,
3133         FieldMemOperand(last_match_info_elements,
3134                         RegExpImpl::kLastInputOffset));
3135  __ RecordWriteField(last_match_info_elements,
3136                      RegExpImpl::kLastInputOffset,
3137                      subject,
3138                      r3,
3139                      kLRHasNotBeenSaved,
3140                      kDontSaveFPRegs);
3141
3142  // Get the static offsets vector filled by the native regexp code.
3143  ExternalReference address_of_static_offsets_vector =
3144      ExternalReference::address_of_static_offsets_vector(isolate);
3145  __ mov(r2, Operand(address_of_static_offsets_vector));
3146
3147  // r1: number of capture registers
3148  // r2: offsets vector
3149  Label next_capture, done;
3150  // Capture register counter starts from number of capture registers and
3151  // counts down until wraping after zero.
3152  __ add(r0,
3153         last_match_info_elements,
3154         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
3155  __ bind(&next_capture);
3156  __ sub(r1, r1, Operand(1), SetCC);
3157  __ b(mi, &done);
3158  // Read the value from the static offsets vector buffer.
3159  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
3160  // Store the smi value in the last match info.
3161  __ SmiTag(r3);
3162  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
3163  __ jmp(&next_capture);
3164  __ bind(&done);
3165
3166  // Return last match info.
3167  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
3168  __ add(sp, sp, Operand(4 * kPointerSize));
3169  __ Ret();
3170
3171  // Do the runtime call to execute the regexp.
3172  __ bind(&runtime);
3173  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3174
3175  // Deferred code for string handling.
3176  // (6) Not a long external string?  If yes, go to (8).
3177  __ bind(&not_seq_nor_cons);
3178  // Compare flags are still set.
3179  __ b(gt, &not_long_external);  // Go to (8).
3180
3181  // (7) External string.  Make it, offset-wise, look like a sequential string.
3182  __ bind(&external_string);
3183  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
3184  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
3185  if (FLAG_debug_code) {
3186    // Assert that we do not have a cons or slice (indirect strings) here.
3187    // Sequential strings have already been ruled out.
3188    __ tst(r0, Operand(kIsIndirectStringMask));
3189    __ Assert(eq, kExternalStringExpectedButNotFound);
3190  }
3191  __ ldr(subject,
3192         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
3193  // Move the pointer so that offset-wise, it looks like a sequential string.
3194  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3195  __ sub(subject,
3196         subject,
3197         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3198  __ jmp(&seq_string);    // Go to (5).
3199
3200  // (8) Short external string or not a string?  If yes, bail out to runtime.
3201  __ bind(&not_long_external);
3202  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
3203  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
3204  __ b(ne, &runtime);
3205
3206  // (9) Sliced string.  Replace subject with parent.  Go to (4).
3207  // Load offset into r9 and replace subject string with parent.
3208  __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
3209  __ SmiUntag(r9);
3210  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3211  __ jmp(&check_underlying);  // Go to (4).
3212#endif  // V8_INTERPRETED_REGEXP
3213}
3214
3215
3216void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3217  const int kMaxInlineLength = 100;
3218  Label slowcase;
3219  Label done;
3220  Factory* factory = masm->isolate()->factory();
3221
3222  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
3223  STATIC_ASSERT(kSmiTag == 0);
3224  STATIC_ASSERT(kSmiTagSize == 1);
3225  __ JumpIfNotSmi(r1, &slowcase);
3226  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
3227  __ b(hi, &slowcase);
3228  // Smi-tagging is equivalent to multiplying by 2.
3229  // Allocate RegExpResult followed by FixedArray with size in ebx.
3230  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
3231  // Elements:  [Map][Length][..elements..]
3232  // Size of JSArray with two in-object properties and the header of a
3233  // FixedArray.
3234  int objects_size =
3235      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
3236  __ SmiUntag(r5, r1);
3237  __ add(r2, r5, Operand(objects_size));
3238  __ Allocate(
3239      r2,  // In: Size, in words.
3240      r0,  // Out: Start of allocation (tagged).
3241      r3,  // Scratch register.
3242      r4,  // Scratch register.
3243      &slowcase,
3244      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
3245  // r0: Start of allocated area, object-tagged.
3246  // r1: Number of elements in array, as smi.
3247  // r5: Number of elements, untagged.
3248
3249  // Set JSArray map to global.regexp_result_map().
3250  // Set empty properties FixedArray.
3251  // Set elements to point to FixedArray allocated right after the JSArray.
3252  // Interleave operations for better latency.
3253  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
3254  __ add(r3, r0, Operand(JSRegExpResult::kSize));
3255  __ mov(r4, Operand(factory->empty_fixed_array()));
3256  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
3257  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
3258  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
3259  __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
3260  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
3261
3262  // Set input, index and length fields from arguments.
3263  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
3264  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
3265  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
3266  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
3267  __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
3268  __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
3269
3270  // Fill out the elements FixedArray.
3271  // r0: JSArray, tagged.
3272  // r3: FixedArray, tagged.
3273  // r5: Number of elements in array, untagged.
3274
3275  // Set map.
3276  __ mov(r2, Operand(factory->fixed_array_map()));
3277  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
3278  // Set FixedArray length.
3279  __ SmiTag(r6, r5);
3280  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
3281  // Fill contents of fixed-array with undefined.
3282  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
3283  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3284  // Fill fixed array elements with undefined.
3285  // r0: JSArray, tagged.
3286  // r2: undefined.
3287  // r3: Start of elements in FixedArray.
3288  // r5: Number of elements to fill.
3289  Label loop;
3290  __ cmp(r5, Operand::Zero());
3291  __ bind(&loop);
3292  __ b(le, &done);  // Jump if r5 is negative or zero.
3293  __ sub(r5, r5, Operand(1), SetCC);
3294  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
3295  __ jmp(&loop);
3296
3297  __ bind(&done);
3298  __ add(sp, sp, Operand(3 * kPointerSize));
3299  __ Ret();
3300
3301  __ bind(&slowcase);
3302  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
3303}
3304
3305
3306static void GenerateRecordCallTarget(MacroAssembler* masm) {
3307  // Cache the called function in a global property cell.  Cache states
3308  // are uninitialized, monomorphic (indicated by a JSFunction), and
3309  // megamorphic.
3310  // r0 : number of arguments to the construct function
3311  // r1 : the function to call
3312  // r2 : cache cell for call target
3313  Label initialize, done, miss, megamorphic, not_array_function;
3314
3315  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3316            masm->isolate()->heap()->undefined_value());
3317  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
3318            masm->isolate()->heap()->the_hole_value());
3319
3320  // Load the cache state into r3.
3321  __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
3322
3323  // A monomorphic cache hit or an already megamorphic state: invoke the
3324  // function without changing the state.
3325  __ cmp(r3, r1);
3326  __ b(eq, &done);
3327
3328  // If we came here, we need to see if we are the array function.
3329  // If we didn't have a matching function, and we didn't find the megamorph
3330  // sentinel, then we have in the cell either some other function or an
3331  // AllocationSite. Do a map check on the object in ecx.
3332  __ ldr(r5, FieldMemOperand(r3, 0));
3333  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
3334  __ b(ne, &miss);
3335
3336  // Make sure the function is the Array() function
3337  __ LoadArrayFunction(r3);
3338  __ cmp(r1, r3);
3339  __ b(ne, &megamorphic);
3340  __ jmp(&done);
3341
3342  __ bind(&miss);
3343
3344  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
3345  // megamorphic.
3346  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
3347  __ b(eq, &initialize);
3348  // MegamorphicSentinel is an immortal immovable object (undefined) so no
3349  // write-barrier is needed.
3350  __ bind(&megamorphic);
3351  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3352  __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
3353  __ jmp(&done);
3354
3355  // An uninitialized cache is patched with the function or sentinel to
3356  // indicate the ElementsKind if function is the Array constructor.
3357  __ bind(&initialize);
3358  // Make sure the function is the Array() function
3359  __ LoadArrayFunction(r3);
3360  __ cmp(r1, r3);
3361  __ b(ne, &not_array_function);
3362
3363  // The target function is the Array constructor,
3364  // Create an AllocationSite if we don't already have it, store it in the cell
3365  {
3366    FrameScope scope(masm, StackFrame::INTERNAL);
3367
3368    // Arguments register must be smi-tagged to call out.
3369    __ SmiTag(r0);
3370    __ Push(r2, r1, r0);
3371
3372    CreateAllocationSiteStub create_stub;
3373    __ CallStub(&create_stub);
3374
3375    __ Pop(r2, r1, r0);
3376    __ SmiUntag(r0);
3377  }
3378  __ b(&done);
3379
3380  __ bind(&not_array_function);
3381  __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
3382  // No need for a write barrier here - cells are rescanned.
3383
3384  __ bind(&done);
3385}
3386
3387
3388void CallFunctionStub::Generate(MacroAssembler* masm) {
3389  // r1 : the function to call
3390  // r2 : cache cell for call target
3391  Label slow, non_function;
3392
3393  // The receiver might implicitly be the global object. This is
3394  // indicated by passing the hole as the receiver to the call
3395  // function stub.
3396  if (ReceiverMightBeImplicit()) {
3397    Label call;
3398    // Get the receiver from the stack.
3399    // function, receiver [, arguments]
3400    __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
3401    // Call as function is indicated with the hole.
3402    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
3403    __ b(ne, &call);
3404    // Patch the receiver on the stack with the global receiver object.
3405    __ ldr(r3,
3406           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3407    __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
3408    __ str(r3, MemOperand(sp, argc_ * kPointerSize));
3409    __ bind(&call);
3410  }
3411
3412  // Check that the function is really a JavaScript function.
3413  // r1: pushed function (to be verified)
3414  __ JumpIfSmi(r1, &non_function);
3415  // Get the map of the function object.
3416  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
3417  __ b(ne, &slow);
3418
3419  if (RecordCallTarget()) {
3420    GenerateRecordCallTarget(masm);
3421  }
3422
3423  // Fast-case: Invoke the function now.
3424  // r1: pushed function
3425  ParameterCount actual(argc_);
3426
3427  if (ReceiverMightBeImplicit()) {
3428    Label call_as_function;
3429    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
3430    __ b(eq, &call_as_function);
3431    __ InvokeFunction(r1,
3432                      actual,
3433                      JUMP_FUNCTION,
3434                      NullCallWrapper(),
3435                      CALL_AS_METHOD);
3436    __ bind(&call_as_function);
3437  }
3438  __ InvokeFunction(r1,
3439                    actual,
3440                    JUMP_FUNCTION,
3441                    NullCallWrapper(),
3442                    CALL_AS_FUNCTION);
3443
3444  // Slow-case: Non-function called.
3445  __ bind(&slow);
3446  if (RecordCallTarget()) {
3447    // If there is a call target cache, mark it megamorphic in the
3448    // non-function case.  MegamorphicSentinel is an immortal immovable
3449    // object (undefined) so no write barrier is needed.
3450    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3451              masm->isolate()->heap()->undefined_value());
3452    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3453    __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
3454  }
3455  // Check for function proxy.
3456  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
3457  __ b(ne, &non_function);
3458  __ push(r1);  // put proxy as additional argument
3459  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
3460  __ mov(r2, Operand::Zero());
3461  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
3462  __ SetCallKind(r5, CALL_AS_METHOD);
3463  {
3464    Handle<Code> adaptor =
3465      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3466    __ Jump(adaptor, RelocInfo::CODE_TARGET);
3467  }
3468
3469  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3470  // of the original receiver from the call site).
3471  __ bind(&non_function);
3472  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
3473  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
3474  __ mov(r2, Operand::Zero());
3475  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
3476  __ SetCallKind(r5, CALL_AS_METHOD);
3477  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3478          RelocInfo::CODE_TARGET);
3479}
3480
3481
3482void CallConstructStub::Generate(MacroAssembler* masm) {
3483  // r0 : number of arguments
3484  // r1 : the function to call
3485  // r2 : cache cell for call target
3486  Label slow, non_function_call;
3487
3488  // Check that the function is not a smi.
3489  __ JumpIfSmi(r1, &non_function_call);
3490  // Check that the function is a JSFunction.
3491  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
3492  __ b(ne, &slow);
3493
3494  if (RecordCallTarget()) {
3495    GenerateRecordCallTarget(masm);
3496  }
3497
3498  // Jump to the function-specific construct stub.
3499  Register jmp_reg = r3;
3500  __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
3501  __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
3502                                  SharedFunctionInfo::kConstructStubOffset));
3503  __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3504
3505  // r0: number of arguments
3506  // r1: called object
3507  // r3: object type
3508  Label do_call;
3509  __ bind(&slow);
3510  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
3511  __ b(ne, &non_function_call);
3512  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3513  __ jmp(&do_call);
3514
3515  __ bind(&non_function_call);
3516  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3517  __ bind(&do_call);
3518  // Set expected number of arguments to zero (not changing r0).
3519  __ mov(r2, Operand::Zero());
3520  __ SetCallKind(r5, CALL_AS_METHOD);
3521  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3522          RelocInfo::CODE_TARGET);
3523}
3524
3525
3526// StringCharCodeAtGenerator
3527void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3528  Label flat_string;
3529  Label ascii_string;
3530  Label got_char_code;
3531  Label sliced_string;
3532
3533  // If the receiver is a smi trigger the non-string case.
3534  __ JumpIfSmi(object_, receiver_not_string_);
3535
3536  // Fetch the instance type of the receiver into result register.
3537  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3538  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3539  // If the receiver is not a string trigger the non-string case.
3540  __ tst(result_, Operand(kIsNotStringMask));
3541  __ b(ne, receiver_not_string_);
3542
3543  // If the index is non-smi trigger the non-smi case.
3544  __ JumpIfNotSmi(index_, &index_not_smi_);
3545  __ bind(&got_smi_index_);
3546
3547  // Check for index out of range.
3548  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
3549  __ cmp(ip, Operand(index_));
3550  __ b(ls, index_out_of_range_);
3551
3552  __ SmiUntag(index_);
3553
3554  StringCharLoadGenerator::Generate(masm,
3555                                    object_,
3556                                    index_,
3557                                    result_,
3558                                    &call_runtime_);
3559
3560  __ SmiTag(result_);
3561  __ bind(&exit_);
3562}
3563
3564
3565void StringCharCodeAtGenerator::GenerateSlow(
3566    MacroAssembler* masm,
3567    const RuntimeCallHelper& call_helper) {
3568  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3569
3570  // Index is not a smi.
3571  __ bind(&index_not_smi_);
3572  // If index is a heap number, try converting it to an integer.
3573  __ CheckMap(index_,
3574              result_,
3575              Heap::kHeapNumberMapRootIndex,
3576              index_not_number_,
3577              DONT_DO_SMI_CHECK);
3578  call_helper.BeforeCall(masm);
3579  __ push(object_);
3580  __ push(index_);  // Consumed by runtime conversion function.
3581  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3582    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3583  } else {
3584    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3585    // NumberToSmi discards numbers that are not exact integers.
3586    __ CallRuntime(Runtime::kNumberToSmi, 1);
3587  }
3588  // Save the conversion result before the pop instructions below
3589  // have a chance to overwrite it.
3590  __ Move(index_, r0);
3591  __ pop(object_);
3592  // Reload the instance type.
3593  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3594  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3595  call_helper.AfterCall(masm);
3596  // If index is still not a smi, it must be out of range.
3597  __ JumpIfNotSmi(index_, index_out_of_range_);
3598  // Otherwise, return to the fast path.
3599  __ jmp(&got_smi_index_);
3600
3601  // Call runtime. We get here when the receiver is a string and the
3602  // index is a number, but the code of getting the actual character
3603  // is too complex (e.g., when the string needs to be flattened).
3604  __ bind(&call_runtime_);
3605  call_helper.BeforeCall(masm);
3606  __ SmiTag(index_);
3607  __ Push(object_, index_);
3608  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3609  __ Move(result_, r0);
3610  call_helper.AfterCall(masm);
3611  __ jmp(&exit_);
3612
3613  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3614}
3615
3616
3617// -------------------------------------------------------------------------
3618// StringCharFromCodeGenerator
3619
3620void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3621  // Fast case of Heap::LookupSingleCharacterStringFromCode.
3622  STATIC_ASSERT(kSmiTag == 0);
3623  STATIC_ASSERT(kSmiShiftSize == 0);
3624  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3625  __ tst(code_,
3626         Operand(kSmiTagMask |
3627                 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3628  __ b(ne, &slow_case_);
3629
3630  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3631  // At this point code register contains smi tagged ASCII char code.
3632  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
3633  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3634  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3635  __ b(eq, &slow_case_);
3636  __ bind(&exit_);
3637}
3638
3639
3640void StringCharFromCodeGenerator::GenerateSlow(
3641    MacroAssembler* masm,
3642    const RuntimeCallHelper& call_helper) {
3643  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3644
3645  __ bind(&slow_case_);
3646  call_helper.BeforeCall(masm);
3647  __ push(code_);
3648  __ CallRuntime(Runtime::kCharFromCode, 1);
3649  __ Move(result_, r0);
3650  call_helper.AfterCall(masm);
3651  __ jmp(&exit_);
3652
3653  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3654}
3655
3656
3657void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3658                                          Register dest,
3659                                          Register src,
3660                                          Register count,
3661                                          Register scratch,
3662                                          bool ascii) {
3663  Label loop;
3664  Label done;
3665  // This loop just copies one character at a time, as it is only used for very
3666  // short strings.
3667  if (!ascii) {
3668    __ add(count, count, Operand(count), SetCC);
3669  } else {
3670    __ cmp(count, Operand::Zero());
3671  }
3672  __ b(eq, &done);
3673
3674  __ bind(&loop);
3675  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
3676  // Perform sub between load and dependent store to get the load time to
3677  // complete.
3678  __ sub(count, count, Operand(1), SetCC);
3679  __ strb(scratch, MemOperand(dest, 1, PostIndex));
3680  // last iteration.
3681  __ b(gt, &loop);
3682
3683  __ bind(&done);
3684}
3685
3686
3687enum CopyCharactersFlags {
3688  COPY_ASCII = 1,
3689  DEST_ALWAYS_ALIGNED = 2
3690};
3691
3692
3693void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
3694                                              Register dest,
3695                                              Register src,
3696                                              Register count,
3697                                              Register scratch1,
3698                                              Register scratch2,
3699                                              Register scratch3,
3700                                              Register scratch4,
3701                                              int flags) {
3702  bool ascii = (flags & COPY_ASCII) != 0;
3703  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
3704
3705  if (dest_always_aligned && FLAG_debug_code) {
3706    // Check that destination is actually word aligned if the flag says
3707    // that it is.
3708    __ tst(dest, Operand(kPointerAlignmentMask));
3709    __ Check(eq, kDestinationOfCopyNotAligned);
3710  }
3711
3712  const int kReadAlignment = 4;
3713  const int kReadAlignmentMask = kReadAlignment - 1;
3714  // Ensure that reading an entire aligned word containing the last character
3715  // of a string will not read outside the allocated area (because we pad up
3716  // to kObjectAlignment).
3717  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
3718  // Assumes word reads and writes are little endian.
3719  // Nothing to do for zero characters.
3720  Label done;
3721  if (!ascii) {
3722    __ add(count, count, Operand(count), SetCC);
3723  } else {
3724    __ cmp(count, Operand::Zero());
3725  }
3726  __ b(eq, &done);
3727
3728  // Assume that you cannot read (or write) unaligned.
3729  Label byte_loop;
3730  // Must copy at least eight bytes, otherwise just do it one byte at a time.
3731  __ cmp(count, Operand(8));
3732  __ add(count, dest, Operand(count));
3733  Register limit = count;  // Read until src equals this.
3734  __ b(lt, &byte_loop);
3735
3736  if (!dest_always_aligned) {
3737    // Align dest by byte copying. Copies between zero and three bytes.
3738    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
3739    Label dest_aligned;
3740    __ b(eq, &dest_aligned);
3741    __ cmp(scratch4, Operand(2));
3742    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
3743    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
3744    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
3745    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3746    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
3747    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
3748    __ bind(&dest_aligned);
3749  }
3750
3751  Label simple_loop;
3752
3753  __ sub(scratch4, dest, Operand(src));
3754  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
3755  __ b(eq, &simple_loop);
3756  // Shift register is number of bits in a source word that
3757  // must be combined with bits in the next source word in order
3758  // to create a destination word.
3759
3760  // Complex loop for src/dst that are not aligned the same way.
3761  {
3762    Label loop;
3763    __ mov(scratch4, Operand(scratch4, LSL, 3));
3764    Register left_shift = scratch4;
3765    __ and_(src, src, Operand(~3));  // Round down to load previous word.
3766    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
3767    // Store the "shift" most significant bits of scratch in the least
3768    // signficant bits (i.e., shift down by (32-shift)).
3769    __ rsb(scratch2, left_shift, Operand(32));
3770    Register right_shift = scratch2;
3771    __ mov(scratch1, Operand(scratch1, LSR, right_shift));
3772
3773    __ bind(&loop);
3774    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
3775    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
3776    __ str(scratch1, MemOperand(dest, 4, PostIndex));
3777    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
3778    // Loop if four or more bytes left to copy.
3779    __ sub(scratch3, limit, Operand(dest));
3780    __ sub(scratch3, scratch3, Operand(4), SetCC);
3781    __ b(ge, &loop);
3782  }
3783  // There is now between zero and three bytes left to copy (negative that
3784  // number is in scratch3), and between one and three bytes already read into
3785  // scratch1 (eight times that number in scratch4). We may have read past
3786  // the end of the string, but because objects are aligned, we have not read
3787  // past the end of the object.
3788  // Find the minimum of remaining characters to move and preloaded characters
3789  // and write those as bytes.
3790  __ add(scratch3, scratch3, Operand(4), SetCC);
3791  __ b(eq, &done);
3792  __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
3793  // Move minimum of bytes read and bytes left to copy to scratch4.
3794  __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
3795  // Between one and three (value in scratch3) characters already read into
3796  // scratch ready to write.
3797  __ cmp(scratch3, Operand(2));
3798  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3799  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
3800  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
3801  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
3802  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
3803  // Copy any remaining bytes.
3804  __ b(&byte_loop);
3805
3806  // Simple loop.
3807  // Copy words from src to dst, until less than four bytes left.
3808  // Both src and dest are word aligned.
3809  __ bind(&simple_loop);
3810  {
3811    Label loop;
3812    __ bind(&loop);
3813    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
3814    __ sub(scratch3, limit, Operand(dest));
3815    __ str(scratch1, MemOperand(dest, 4, PostIndex));
3816    // Compare to 8, not 4, because we do the substraction before increasing
3817    // dest.
3818    __ cmp(scratch3, Operand(8));
3819    __ b(ge, &loop);
3820  }
3821
3822  // Copy bytes from src to dst until dst hits limit.
3823  __ bind(&byte_loop);
3824  __ cmp(dest, Operand(limit));
3825  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
3826  __ b(ge, &done);
3827  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
3828  __ b(&byte_loop);
3829
3830  __ bind(&done);
3831}
3832
3833
3834void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
3835                                                        Register c1,
3836                                                        Register c2,
3837                                                        Register scratch1,
3838                                                        Register scratch2,
3839                                                        Register scratch3,
3840                                                        Register scratch4,
3841                                                        Register scratch5,
3842                                                        Label* not_found) {
3843  // Register scratch3 is the general scratch register in this function.
3844  Register scratch = scratch3;
3845
3846  // Make sure that both characters are not digits as such strings has a
3847  // different hash algorithm. Don't try to look for these in the string table.
3848  Label not_array_index;
3849  __ sub(scratch, c1, Operand(static_cast<int>('0')));
3850  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
3851  __ b(hi, &not_array_index);
3852  __ sub(scratch, c2, Operand(static_cast<int>('0')));
3853  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
3854
3855  // If check failed combine both characters into single halfword.
3856  // This is required by the contract of the method: code at the
3857  // not_found branch expects this combination in c1 register
3858  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
3859  __ b(ls, not_found);
3860
3861  __ bind(&not_array_index);
3862  // Calculate the two character string hash.
3863  Register hash = scratch1;
3864  StringHelper::GenerateHashInit(masm, hash, c1);
3865  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
3866  StringHelper::GenerateHashGetHash(masm, hash);
3867
3868  // Collect the two characters in a register.
3869  Register chars = c1;
3870  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));
3871
3872  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3873  // hash:  hash of two character string.
3874
3875  // Load string table
3876  // Load address of first element of the string table.
3877  Register string_table = c2;
3878  __ LoadRoot(string_table, Heap::kStringTableRootIndex);
3879
3880  Register undefined = scratch4;
3881  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3882
3883  // Calculate capacity mask from the string table capacity.
3884  Register mask = scratch2;
3885  __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
3886  __ mov(mask, Operand(mask, ASR, 1));
3887  __ sub(mask, mask, Operand(1));
3888
3889  // Calculate untagged address of the first element of the string table.
3890  Register first_string_table_element = string_table;
3891  __ add(first_string_table_element, string_table,
3892         Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
3893
3894  // Registers
3895  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3896  // hash:  hash of two character string
3897  // mask:  capacity mask
3898  // first_string_table_element: address of the first element of
3899  //                             the string table
3900  // undefined: the undefined object
3901  // scratch: -
3902
3903  // Perform a number of probes in the string table.
3904  const int kProbes = 4;
3905  Label found_in_string_table;
3906  Label next_probe[kProbes];
3907  Register candidate = scratch5;  // Scratch register contains candidate.
3908  for (int i = 0; i < kProbes; i++) {
3909    // Calculate entry in string table.
3910    if (i > 0) {
3911      __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
3912    } else {
3913      __ mov(candidate, hash);
3914    }
3915
3916    __ and_(candidate, candidate, Operand(mask));
3917
3918    // Load the entry from the symble table.
3919    STATIC_ASSERT(StringTable::kEntrySize == 1);
3920    __ ldr(candidate,
3921           MemOperand(first_string_table_element,
3922                      candidate,
3923                      LSL,
3924                      kPointerSizeLog2));
3925
3926    // If entry is undefined no string with this hash can be found.
3927    Label is_string;
3928    __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
3929    __ b(ne, &is_string);
3930
3931    __ cmp(undefined, candidate);
3932    __ b(eq, not_found);
3933    // Must be the hole (deleted entry).
3934    if (FLAG_debug_code) {
3935      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3936      __ cmp(ip, candidate);
3937      __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
3938    }
3939    __ jmp(&next_probe[i]);
3940
3941    __ bind(&is_string);
3942
3943    // Check that the candidate is a non-external ASCII string.  The instance
3944    // type is still in the scratch register from the CompareObjectType
3945    // operation.
3946    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
3947
3948    // If length is not 2 the string is not a candidate.
3949    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
3950    __ cmp(scratch, Operand(Smi::FromInt(2)));
3951    __ b(ne, &next_probe[i]);
3952
3953    // Check if the two characters match.
3954    // Assumes that word load is little endian.
3955    __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
3956    __ cmp(chars, scratch);
3957    __ b(eq, &found_in_string_table);
3958    __ bind(&next_probe[i]);
3959  }
3960
3961  // No matching 2 character string found by probing.
3962  __ jmp(not_found);
3963
3964  // Scratch register contains result when we fall through to here.
3965  Register result = candidate;
3966  __ bind(&found_in_string_table);
3967  __ Move(r0, result);
3968}
3969
3970
3971void StringHelper::GenerateHashInit(MacroAssembler* masm,
3972                                    Register hash,
3973                                    Register character) {
3974  // hash = character + (character << 10);
3975  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
3976  // Untag smi seed and add the character.
3977  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
3978  // hash += hash << 10;
3979  __ add(hash, hash, Operand(hash, LSL, 10));
3980  // hash ^= hash >> 6;
3981  __ eor(hash, hash, Operand(hash, LSR, 6));
3982}
3983
3984
3985void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
3986                                            Register hash,
3987                                            Register character) {
3988  // hash += character;
3989  __ add(hash, hash, Operand(character));
3990  // hash += hash << 10;
3991  __ add(hash, hash, Operand(hash, LSL, 10));
3992  // hash ^= hash >> 6;
3993  __ eor(hash, hash, Operand(hash, LSR, 6));
3994}
3995
3996
3997void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
3998                                       Register hash) {
3999  // hash += hash << 3;
4000  __ add(hash, hash, Operand(hash, LSL, 3));
4001  // hash ^= hash >> 11;
4002  __ eor(hash, hash, Operand(hash, LSR, 11));
4003  // hash += hash << 15;
4004  __ add(hash, hash, Operand(hash, LSL, 15));
4005
4006  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
4007
4008  // if (hash == 0) hash = 27;
4009  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
4010}
4011
4012
4013void SubStringStub::Generate(MacroAssembler* masm) {
4014  Label runtime;
4015
4016  // Stack frame on entry.
4017  //  lr: return address
4018  //  sp[0]: to
4019  //  sp[4]: from
4020  //  sp[8]: string
4021
4022  // This stub is called from the native-call %_SubString(...), so
4023  // nothing can be assumed about the arguments. It is tested that:
4024  //  "string" is a sequential string,
4025  //  both "from" and "to" are smis, and
4026  //  0 <= from <= to <= string.length.
4027  // If any of these assumptions fail, we call the runtime system.
4028
4029  const int kToOffset = 0 * kPointerSize;
4030  const int kFromOffset = 1 * kPointerSize;
4031  const int kStringOffset = 2 * kPointerSize;
4032
4033  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
4034  STATIC_ASSERT(kFromOffset == kToOffset + 4);
4035  STATIC_ASSERT(kSmiTag == 0);
4036  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4037
4038  // Arithmetic shift right by one un-smi-tags. In this case we rotate right
4039  // instead because we bail out on non-smi values: ROR and ASR are equivalent
4040  // for smis but they set the flags in a way that's easier to optimize.
4041  __ mov(r2, Operand(r2, ROR, 1), SetCC);
4042  __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
4043  // If either to or from had the smi tag bit set, then C is set now, and N
4044  // has the same value: we rotated by 1, so the bottom bit is now the top bit.
4045  // We want to bailout to runtime here if From is negative.  In that case, the
4046  // next instruction is not executed and we fall through to bailing out to
4047  // runtime.
4048  // Executed if both r2 and r3 are untagged integers.
4049  __ sub(r2, r2, Operand(r3), SetCC, cc);
4050  // One of the above un-smis or the above SUB could have set N==1.
4051  __ b(mi, &runtime);  // Either "from" or "to" is not an smi, or from > to.
4052
4053  // Make sure first argument is a string.
4054  __ ldr(r0, MemOperand(sp, kStringOffset));
4055  // Do a JumpIfSmi, but fold its jump into the subsequent string test.
4056  __ SmiTst(r0);
4057  Condition is_string = masm->IsObjectStringType(r0, r1, ne);
4058  ASSERT(is_string == eq);
4059  __ b(NegateCondition(is_string), &runtime);
4060
4061  Label single_char;
4062  __ cmp(r2, Operand(1));
4063  __ b(eq, &single_char);
4064
4065  // Short-cut for the case of trivial substring.
4066  Label return_r0;
4067  // r0: original string
4068  // r2: result string length
4069  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
4070  __ cmp(r2, Operand(r4, ASR, 1));
4071  // Return original string.
4072  __ b(eq, &return_r0);
4073  // Longer than original string's length or negative: unsafe arguments.
4074  __ b(hi, &runtime);
4075  // Shorter than original string's length: an actual substring.
4076
4077  // Deal with different string types: update the index if necessary
4078  // and put the underlying string into r5.
4079  // r0: original string
4080  // r1: instance type
4081  // r2: length
4082  // r3: from index (untagged)
4083  Label underlying_unpacked, sliced_string, seq_or_external_string;
4084  // If the string is not indirect, it can only be sequential or external.
4085  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
4086  STATIC_ASSERT(kIsIndirectStringMask != 0);
4087  __ tst(r1, Operand(kIsIndirectStringMask));
4088  __ b(eq, &seq_or_external_string);
4089
4090  __ tst(r1, Operand(kSlicedNotConsMask));
4091  __ b(ne, &sliced_string);
4092  // Cons string.  Check whether it is flat, then fetch first part.
4093  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
4094  __ CompareRoot(r5, Heap::kempty_stringRootIndex);
4095  __ b(ne, &runtime);
4096  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
4097  // Update instance type.
4098  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
4099  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
4100  __ jmp(&underlying_unpacked);
4101
4102  __ bind(&sliced_string);
4103  // Sliced string.  Fetch parent and correct start index by offset.
4104  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
4105  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
4106  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index.
4107  // Update instance type.
4108  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
4109  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
4110  __ jmp(&underlying_unpacked);
4111
4112  __ bind(&seq_or_external_string);
4113  // Sequential or external string.  Just move string to the expected register.
4114  __ mov(r5, r0);
4115
4116  __ bind(&underlying_unpacked);
4117
4118  if (FLAG_string_slices) {
4119    Label copy_routine;
4120    // r5: underlying subject string
4121    // r1: instance type of underlying subject string
4122    // r2: length
4123    // r3: adjusted start index (untagged)
4124    __ cmp(r2, Operand(SlicedString::kMinLength));
4125    // Short slice.  Copy instead of slicing.
4126    __ b(lt, &copy_routine);
4127    // Allocate new sliced string.  At this point we do not reload the instance
4128    // type including the string encoding because we simply rely on the info
4129    // provided by the original string.  It does not matter if the original
4130    // string's encoding is wrong because we always have to recheck encoding of
4131    // the newly created string's parent anyways due to externalized strings.
4132    Label two_byte_slice, set_slice_header;
4133    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
4134    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4135    __ tst(r1, Operand(kStringEncodingMask));
4136    __ b(eq, &two_byte_slice);
4137    __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
4138    __ jmp(&set_slice_header);
4139    __ bind(&two_byte_slice);
4140    __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
4141    __ bind(&set_slice_header);
4142    __ mov(r3, Operand(r3, LSL, 1));
4143    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
4144    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset));
4145    __ jmp(&return_r0);
4146
4147    __ bind(&copy_routine);
4148  }
4149
4150  // r5: underlying subject string
4151  // r1: instance type of underlying subject string
4152  // r2: length
4153  // r3: adjusted start index (untagged)
4154  Label two_byte_sequential, sequential_string, allocate_result;
4155  STATIC_ASSERT(kExternalStringTag != 0);
4156  STATIC_ASSERT(kSeqStringTag == 0);
4157  __ tst(r1, Operand(kExternalStringTag));
4158  __ b(eq, &sequential_string);
4159
4160  // Handle external string.
4161  // Rule out short external strings.
4162  STATIC_CHECK(kShortExternalStringTag != 0);
4163  __ tst(r1, Operand(kShortExternalStringTag));
4164  __ b(ne, &runtime);
4165  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset));
4166  // r5 already points to the first character of underlying string.
4167  __ jmp(&allocate_result);
4168
4169  __ bind(&sequential_string);
4170  // Locate first character of underlying subject string.
4171  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
4172  __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4173
4174  __ bind(&allocate_result);
4175  // Sequential acii string.  Allocate the result.
4176  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
4177  __ tst(r1, Operand(kStringEncodingMask));
4178  __ b(eq, &two_byte_sequential);
4179
4180  // Allocate and copy the resulting ASCII string.
4181  __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
4182
4183  // Locate first character of substring to copy.
4184  __ add(r5, r5, r3);
4185  // Locate first character of result.
4186  __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4187
4188  // r0: result string
4189  // r1: first character of result string
4190  // r2: result string length
4191  // r5: first character of substring to copy
4192  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4193  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
4194                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
4195  __ jmp(&return_r0);
4196
4197  // Allocate and copy the resulting two-byte string.
4198  __ bind(&two_byte_sequential);
4199  __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);
4200
4201  // Locate first character of substring to copy.
4202  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
4203  __ add(r5, r5, Operand(r3, LSL, 1));
4204  // Locate first character of result.
4205  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4206
4207  // r0: result string.
4208  // r1: first character of result.
4209  // r2: result length.
4210  // r5: first character of substring to copy.
4211  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4212  StringHelper::GenerateCopyCharactersLong(
4213      masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);
4214
4215  __ bind(&return_r0);
4216  Counters* counters = masm->isolate()->counters();
4217  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
4218  __ Drop(3);
4219  __ Ret();
4220
4221  // Just jump to runtime to create the sub string.
4222  __ bind(&runtime);
4223  __ TailCallRuntime(Runtime::kSubString, 3, 1);
4224
4225  __ bind(&single_char);
4226  // r0: original string
4227  // r1: instance type
4228  // r2: length
4229  // r3: from index (untagged)
4230  __ SmiTag(r3, r3);
4231  StringCharAtGenerator generator(
4232      r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
4233  generator.GenerateFast(masm);
4234  __ Drop(3);
4235  __ Ret();
4236  generator.SkipSlow(masm, &runtime);
4237}
4238
4239
4240void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
4241                                                      Register left,
4242                                                      Register right,
4243                                                      Register scratch1,
4244                                                      Register scratch2,
4245                                                      Register scratch3) {
4246  Register length = scratch1;
4247
4248  // Compare lengths.
4249  Label strings_not_equal, check_zero_length;
4250  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
4251  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
4252  __ cmp(length, scratch2);
4253  __ b(eq, &check_zero_length);
4254  __ bind(&strings_not_equal);
4255  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
4256  __ Ret();
4257
4258  // Check if the length is zero.
4259  Label compare_chars;
4260  __ bind(&check_zero_length);
4261  STATIC_ASSERT(kSmiTag == 0);
4262  __ cmp(length, Operand::Zero());
4263  __ b(ne, &compare_chars);
4264  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
4265  __ Ret();
4266
4267  // Compare characters.
4268  __ bind(&compare_chars);
4269  GenerateAsciiCharsCompareLoop(masm,
4270                                left, right, length, scratch2, scratch3,
4271                                &strings_not_equal);
4272
4273  // Characters are equal.
4274  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
4275  __ Ret();
4276}
4277
4278
4279void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
4280                                                        Register left,
4281                                                        Register right,
4282                                                        Register scratch1,
4283                                                        Register scratch2,
4284                                                        Register scratch3,
4285                                                        Register scratch4) {
4286  Label result_not_equal, compare_lengths;
4287  // Find minimum length and length difference.
4288  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
4289  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
4290  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
4291  Register length_delta = scratch3;
4292  __ mov(scratch1, scratch2, LeaveCC, gt);
4293  Register min_length = scratch1;
4294  STATIC_ASSERT(kSmiTag == 0);
4295  __ cmp(min_length, Operand::Zero());
4296  __ b(eq, &compare_lengths);
4297
4298  // Compare loop.
4299  GenerateAsciiCharsCompareLoop(masm,
4300                                left, right, min_length, scratch2, scratch4,
4301                                &result_not_equal);
4302
4303  // Compare lengths - strings up to min-length are equal.
4304  __ bind(&compare_lengths);
4305  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4306  // Use length_delta as result if it's zero.
4307  __ mov(r0, Operand(length_delta), SetCC);
4308  __ bind(&result_not_equal);
4309  // Conditionally update the result based either on length_delta or
4310  // the last comparion performed in the loop above.
4311  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
4312  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
4313  __ Ret();
4314}
4315
4316
4317void StringCompareStub::GenerateAsciiCharsCompareLoop(
4318    MacroAssembler* masm,
4319    Register left,
4320    Register right,
4321    Register length,
4322    Register scratch1,
4323    Register scratch2,
4324    Label* chars_not_equal) {
4325  // Change index to run from -length to -1 by adding length to string
4326  // start. This means that loop ends when index reaches zero, which
4327  // doesn't need an additional compare.
4328  __ SmiUntag(length);
4329  __ add(scratch1, length,
4330         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4331  __ add(left, left, Operand(scratch1));
4332  __ add(right, right, Operand(scratch1));
4333  __ rsb(length, length, Operand::Zero());
4334  Register index = length;  // index = -length;
4335
4336  // Compare loop.
4337  Label loop;
4338  __ bind(&loop);
4339  __ ldrb(scratch1, MemOperand(left, index));
4340  __ ldrb(scratch2, MemOperand(right, index));
4341  __ cmp(scratch1, scratch2);
4342  __ b(ne, chars_not_equal);
4343  __ add(index, index, Operand(1), SetCC);
4344  __ b(ne, &loop);
4345}
4346
4347
4348void StringCompareStub::Generate(MacroAssembler* masm) {
4349  Label runtime;
4350
4351  Counters* counters = masm->isolate()->counters();
4352
4353  // Stack frame on entry.
4354  //  sp[0]: right string
4355  //  sp[4]: left string
4356  __ Ldrd(r0 , r1, MemOperand(sp));  // Load right in r0, left in r1.
4357
4358  Label not_same;
4359  __ cmp(r0, r1);
4360  __ b(ne, &not_same);
4361  STATIC_ASSERT(EQUAL == 0);
4362  STATIC_ASSERT(kSmiTag == 0);
4363  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
4364  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
4365  __ add(sp, sp, Operand(2 * kPointerSize));
4366  __ Ret();
4367
4368  __ bind(&not_same);
4369
4370  // Check that both objects are sequential ASCII strings.
4371  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
4372
4373  // Compare flat ASCII strings natively. Remove arguments from stack first.
4374  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
4375  __ add(sp, sp, Operand(2 * kPointerSize));
4376  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
4377
4378  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
4379  // tagged as a small integer.
4380  __ bind(&runtime);
4381  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4382}
4383
4384
4385void StringAddStub::Generate(MacroAssembler* masm) {
4386  Label call_runtime, call_builtin;
4387  Builtins::JavaScript builtin_id = Builtins::ADD;
4388
4389  Counters* counters = masm->isolate()->counters();
4390
4391  // Stack on entry:
4392  // sp[0]: second argument (right).
4393  // sp[4]: first argument (left).
4394
4395  // Load the two arguments.
4396  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
4397  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
4398
4399  // Make sure that both arguments are strings if not known in advance.
4400  // Otherwise, at least one of the arguments is definitely a string,
4401  // and we convert the one that is not known to be a string.
4402  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
4403    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
4404    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
4405    __ JumpIfEitherSmi(r0, r1, &call_runtime);
4406    // Load instance types.
4407    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
4408    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
4409    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
4410    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
4411    STATIC_ASSERT(kStringTag == 0);
4412    // If either is not a string, go to runtime.
4413    __ tst(r4, Operand(kIsNotStringMask));
4414    __ tst(r5, Operand(kIsNotStringMask), eq);
4415    __ b(ne, &call_runtime);
4416  } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
4417    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
4418    GenerateConvertArgument(
4419        masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
4420    builtin_id = Builtins::STRING_ADD_RIGHT;
4421  } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
4422    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
4423    GenerateConvertArgument(
4424        masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
4425    builtin_id = Builtins::STRING_ADD_LEFT;
4426  }
4427
4428  // Both arguments are strings.
4429  // r0: first string
4430  // r1: second string
4431  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4432  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4433  {
4434    Label strings_not_empty;
4435    // Check if either of the strings are empty. In that case return the other.
4436    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
4437    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
4438    STATIC_ASSERT(kSmiTag == 0);
4439    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
4440    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
4441    STATIC_ASSERT(kSmiTag == 0);
4442     // Else test if second string is empty.
4443    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
4444    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
4445
4446    __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
4447    __ add(sp, sp, Operand(2 * kPointerSize));
4448    __ Ret();
4449
4450    __ bind(&strings_not_empty);
4451  }
4452
4453  __ SmiUntag(r2);
4454  __ SmiUntag(r3);
4455  // Both strings are non-empty.
4456  // r0: first string
4457  // r1: second string
4458  // r2: length of first string
4459  // r3: length of second string
4460  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4461  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4462  // Look at the length of the result of adding the two strings.
4463  Label string_add_flat_result, longer_than_two;
4464  // Adding two lengths can't overflow.
4465  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
4466  __ add(r6, r2, Operand(r3));
4467  // Use the string table when adding two one character strings, as it
4468  // helps later optimizations to return a string here.
4469  __ cmp(r6, Operand(2));
4470  __ b(ne, &longer_than_two);
4471
4472  // Check that both strings are non-external ASCII strings.
4473  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4474    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
4475    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
4476    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
4477    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
4478  }
4479  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
4480                                                  &call_runtime);
4481
4482  // Get the two characters forming the sub string.
4483  __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
4484  __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
4485
4486  // Try to lookup two character string in string table. If it is not found
4487  // just allocate a new one.
4488  Label make_two_character_string;
4489  StringHelper::GenerateTwoCharacterStringTableProbe(
4490      masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
4491  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
4492  __ add(sp, sp, Operand(2 * kPointerSize));
4493  __ Ret();
4494
4495  __ bind(&make_two_character_string);
4496  // Resulting string has length 2 and first chars of two strings
4497  // are combined into single halfword in r2 register.
4498  // So we can fill resulting string without two loops by a single
4499  // halfword store instruction (which assumes that processor is
4500  // in a little endian mode)
4501  __ mov(r6, Operand(2));
4502  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
4503  __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
4504  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
4505  __ add(sp, sp, Operand(2 * kPointerSize));
4506  __ Ret();
4507
4508  __ bind(&longer_than_two);
4509  // Check if resulting string will be flat.
4510  __ cmp(r6, Operand(ConsString::kMinLength));
4511  __ b(lt, &string_add_flat_result);
4512  // Handle exceptionally long strings in the runtime system.
4513  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4514  ASSERT(IsPowerOf2(String::kMaxLength + 1));
4515  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
4516  __ cmp(r6, Operand(String::kMaxLength + 1));
4517  __ b(hs, &call_runtime);
4518
4519  // If result is not supposed to be flat, allocate a cons string object.
4520  // If both strings are ASCII the result is an ASCII cons string.
4521  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4522    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
4523    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
4524    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
4525    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
4526  }
4527  Label non_ascii, allocated, ascii_data;
4528  STATIC_ASSERT(kTwoByteStringTag == 0);
4529  __ tst(r4, Operand(kStringEncodingMask));
4530  __ tst(r5, Operand(kStringEncodingMask), ne);
4531  __ b(eq, &non_ascii);
4532
4533  // Allocate an ASCII cons string.
4534  __ bind(&ascii_data);
4535  __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
4536  __ bind(&allocated);
4537  // Fill the fields of the cons string.
4538  Label skip_write_barrier, after_writing;
4539  ExternalReference high_promotion_mode = ExternalReference::
4540      new_space_high_promotion_mode_active_address(masm->isolate());
4541  __ mov(r4, Operand(high_promotion_mode));
4542  __ ldr(r4, MemOperand(r4, 0));
4543  __ cmp(r4, Operand::Zero());
4544  __ b(eq, &skip_write_barrier);
4545
4546  __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
4547  __ RecordWriteField(r3,
4548                      ConsString::kFirstOffset,
4549                      r0,
4550                      r4,
4551                      kLRHasNotBeenSaved,
4552                      kDontSaveFPRegs);
4553  __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
4554  __ RecordWriteField(r3,
4555                      ConsString::kSecondOffset,
4556                      r1,
4557                      r4,
4558                      kLRHasNotBeenSaved,
4559                      kDontSaveFPRegs);
4560  __ jmp(&after_writing);
4561
4562  __ bind(&skip_write_barrier);
4563  __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
4564  __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
4565
4566  __ bind(&after_writing);
4567
4568  __ mov(r0, Operand(r3));
4569  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
4570  __ add(sp, sp, Operand(2 * kPointerSize));
4571  __ Ret();
4572
4573  __ bind(&non_ascii);
4574  // At least one of the strings is two-byte. Check whether it happens
4575  // to contain only one byte characters.
4576  // r4: first instance type.
4577  // r5: second instance type.
4578  __ tst(r4, Operand(kOneByteDataHintMask));
4579  __ tst(r5, Operand(kOneByteDataHintMask), ne);
4580  __ b(ne, &ascii_data);
4581  __ eor(r4, r4, Operand(r5));
4582  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
4583  __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
4584  __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
4585  __ b(eq, &ascii_data);
4586
4587  // Allocate a two byte cons string.
4588  __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
4589  __ jmp(&allocated);
4590
4591  // We cannot encounter sliced strings or cons strings here since:
4592  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
4593  // Handle creating a flat result from either external or sequential strings.
4594  // Locate the first characters' locations.
4595  // r0: first string
4596  // r1: second string
4597  // r2: length of first string
4598  // r3: length of second string
4599  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4600  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4601  // r6: sum of lengths.
4602  Label first_prepared, second_prepared;
4603  __ bind(&string_add_flat_result);
4604  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4605    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
4606    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
4607    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
4608    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
4609  }
4610
4611  // Check whether both strings have same encoding
4612  __ eor(ip, r4, Operand(r5));
4613  ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
4614  __ tst(ip, Operand(kStringEncodingMask));
4615  __ b(ne, &call_runtime);
4616
4617  STATIC_ASSERT(kSeqStringTag == 0);
4618  __ tst(r4, Operand(kStringRepresentationMask));
4619  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4620  __ add(r6,
4621         r0,
4622         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
4623         LeaveCC,
4624         eq);
4625  __ b(eq, &first_prepared);
4626  // External string: rule out short external string and load string resource.
4627  STATIC_ASSERT(kShortExternalStringTag != 0);
4628  __ tst(r4, Operand(kShortExternalStringMask));
4629  __ b(ne, &call_runtime);
4630  __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
4631  __ bind(&first_prepared);
4632
4633  STATIC_ASSERT(kSeqStringTag == 0);
4634  __ tst(r5, Operand(kStringRepresentationMask));
4635  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4636  __ add(r1,
4637         r1,
4638         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
4639         LeaveCC,
4640         eq);
4641  __ b(eq, &second_prepared);
4642  // External string: rule out short external string and load string resource.
4643  STATIC_ASSERT(kShortExternalStringTag != 0);
4644  __ tst(r5, Operand(kShortExternalStringMask));
4645  __ b(ne, &call_runtime);
4646  __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
4647  __ bind(&second_prepared);
4648
4649  Label non_ascii_string_add_flat_result;
4650  // r6: first character of first string
4651  // r1: first character of second string
4652  // r2: length of first string.
4653  // r3: length of second string.
4654  // Both strings have the same encoding.
4655  STATIC_ASSERT(kTwoByteStringTag == 0);
4656  __ tst(r5, Operand(kStringEncodingMask));
4657  __ b(eq, &non_ascii_string_add_flat_result);
4658
4659  __ add(r2, r2, Operand(r3));
4660  __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
4661  __ sub(r2, r2, Operand(r3));
4662  __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4663  // r0: result string.
4664  // r6: first character of first string.
4665  // r1: first character of second string.
4666  // r2: length of first string.
4667  // r3: length of second string.
4668  // r5: first character of result.
4669  StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
4670  // r5: next character of result.
4671  StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
4672  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
4673  __ add(sp, sp, Operand(2 * kPointerSize));
4674  __ Ret();
4675
4676  __ bind(&non_ascii_string_add_flat_result);
4677  __ add(r2, r2, Operand(r3));
4678  __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
4679  __ sub(r2, r2, Operand(r3));
4680  __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4681  // r0: result string.
4682  // r6: first character of first string.
4683  // r1: first character of second string.
4684  // r2: length of first string.
4685  // r3: length of second string.
4686  // r5: first character of result.
4687  StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
4688  // r5: next character of result.
4689  StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
4690  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
4691  __ add(sp, sp, Operand(2 * kPointerSize));
4692  __ Ret();
4693
4694  // Just jump to runtime to add the two strings.
4695  __ bind(&call_runtime);
4696  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4697
4698  if (call_builtin.is_linked()) {
4699    __ bind(&call_builtin);
4700    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4701  }
4702}
4703
4704
4705void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
4706  __ push(r0);
4707  __ push(r1);
4708}
4709
4710
4711void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
4712  __ pop(r1);
4713  __ pop(r0);
4714}
4715
4716
4717void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4718                                            int stack_offset,
4719                                            Register arg,
4720                                            Register scratch1,
4721                                            Register scratch2,
4722                                            Register scratch3,
4723                                            Register scratch4,
4724                                            Label* slow) {
4725  // First check if the argument is already a string.
4726  Label not_string, done;
4727  __ JumpIfSmi(arg, &not_string);
4728  __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
4729  __ b(lt, &done);
4730
4731  // Check the number to string cache.
4732  __ bind(&not_string);
4733  // Puts the cached result into scratch1.
4734  __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
4735  __ mov(arg, scratch1);
4736  __ str(arg, MemOperand(sp, stack_offset));
4737  __ bind(&done);
4738}
4739
4740
4741void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4742  ASSERT(state_ == CompareIC::SMI);
4743  Label miss;
4744  __ orr(r2, r1, r0);
4745  __ JumpIfNotSmi(r2, &miss);
4746
4747  if (GetCondition() == eq) {
4748    // For equality we do not care about the sign of the result.
4749    __ sub(r0, r0, r1, SetCC);
4750  } else {
4751    // Untag before subtracting to avoid handling overflow.
4752    __ SmiUntag(r1);
4753    __ sub(r0, r1, Operand::SmiUntag(r0));
4754  }
4755  __ Ret();
4756
4757  __ bind(&miss);
4758  GenerateMiss(masm);
4759}
4760
4761
4762void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4763  ASSERT(state_ == CompareIC::NUMBER);
4764
4765  Label generic_stub;
4766  Label unordered, maybe_undefined1, maybe_undefined2;
4767  Label miss;
4768
4769  if (left_ == CompareIC::SMI) {
4770    __ JumpIfNotSmi(r1, &miss);
4771  }
4772  if (right_ == CompareIC::SMI) {
4773    __ JumpIfNotSmi(r0, &miss);
4774  }
4775
4776  // Inlining the double comparison and falling back to the general compare
4777  // stub if NaN is involved.
4778  // Load left and right operand.
4779  Label done, left, left_smi, right_smi;
4780  __ JumpIfSmi(r0, &right_smi);
4781  __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4782              DONT_DO_SMI_CHECK);
4783  __ sub(r2, r0, Operand(kHeapObjectTag));
4784  __ vldr(d1, r2, HeapNumber::kValueOffset);
4785  __ b(&left);
4786  __ bind(&right_smi);
4787  __ SmiToDouble(d1, r0);
4788
4789  __ bind(&left);
4790  __ JumpIfSmi(r1, &left_smi);
4791  __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4792              DONT_DO_SMI_CHECK);
4793  __ sub(r2, r1, Operand(kHeapObjectTag));
4794  __ vldr(d0, r2, HeapNumber::kValueOffset);
4795  __ b(&done);
4796  __ bind(&left_smi);
4797  __ SmiToDouble(d0, r1);
4798
4799  __ bind(&done);
4800  // Compare operands.
4801  __ VFPCompareAndSetFlags(d0, d1);
4802
4803  // Don't base result on status bits when a NaN is involved.
4804  __ b(vs, &unordered);
4805
4806  // Return a result of -1, 0, or 1, based on status bits.
4807  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
4808  __ mov(r0, Operand(LESS), LeaveCC, lt);
4809  __ mov(r0, Operand(GREATER), LeaveCC, gt);
4810  __ Ret();
4811
4812  __ bind(&unordered);
4813  __ bind(&generic_stub);
4814  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
4815                     CompareIC::GENERIC);
4816  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4817
4818  __ bind(&maybe_undefined1);
4819  if (Token::IsOrderedRelationalCompareOp(op_)) {
4820    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
4821    __ b(ne, &miss);
4822    __ JumpIfSmi(r1, &unordered);
4823    __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
4824    __ b(ne, &maybe_undefined2);
4825    __ jmp(&unordered);
4826  }
4827
4828  __ bind(&maybe_undefined2);
4829  if (Token::IsOrderedRelationalCompareOp(op_)) {
4830    __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
4831    __ b(eq, &unordered);
4832  }
4833
4834  __ bind(&miss);
4835  GenerateMiss(masm);
4836}
4837
4838
4839void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4840  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4841  Label miss;
4842
4843  // Registers containing left and right operands respectively.
4844  Register left = r1;
4845  Register right = r0;
4846  Register tmp1 = r2;
4847  Register tmp2 = r3;
4848
4849  // Check that both operands are heap objects.
4850  __ JumpIfEitherSmi(left, right, &miss);
4851
4852  // Check that both operands are internalized strings.
4853  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4854  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4855  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4856  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4857  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4858  __ orr(tmp1, tmp1, Operand(tmp2));
4859  __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4860  __ b(ne, &miss);
4861
4862  // Internalized strings are compared by identity.
4863  __ cmp(left, right);
4864  // Make sure r0 is non-zero. At this point input operands are
4865  // guaranteed to be non-zero.
4866  ASSERT(right.is(r0));
4867  STATIC_ASSERT(EQUAL == 0);
4868  STATIC_ASSERT(kSmiTag == 0);
4869  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4870  __ Ret();
4871
4872  __ bind(&miss);
4873  GenerateMiss(masm);
4874}
4875
4876
4877void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4878  ASSERT(state_ == CompareIC::UNIQUE_NAME);
4879  ASSERT(GetCondition() == eq);
4880  Label miss;
4881
4882  // Registers containing left and right operands respectively.
4883  Register left = r1;
4884  Register right = r0;
4885  Register tmp1 = r2;
4886  Register tmp2 = r3;
4887
4888  // Check that both operands are heap objects.
4889  __ JumpIfEitherSmi(left, right, &miss);
4890
4891  // Check that both operands are unique names. This leaves the instance
4892  // types loaded in tmp1 and tmp2.
4893  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4894  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4895  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4896  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4897
4898  __ JumpIfNotUniqueName(tmp1, &miss);
4899  __ JumpIfNotUniqueName(tmp2, &miss);
4900
4901  // Unique names are compared by identity.
4902  __ cmp(left, right);
4903  // Make sure r0 is non-zero. At this point input operands are
4904  // guaranteed to be non-zero.
4905  ASSERT(right.is(r0));
4906  STATIC_ASSERT(EQUAL == 0);
4907  STATIC_ASSERT(kSmiTag == 0);
4908  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4909  __ Ret();
4910
4911  __ bind(&miss);
4912  GenerateMiss(masm);
4913}
4914
4915
4916void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
4917  ASSERT(state_ == CompareIC::STRING);
4918  Label miss;
4919
4920  bool equality = Token::IsEqualityOp(op_);
4921
4922  // Registers containing left and right operands respectively.
4923  Register left = r1;
4924  Register right = r0;
4925  Register tmp1 = r2;
4926  Register tmp2 = r3;
4927  Register tmp3 = r4;
4928  Register tmp4 = r5;
4929
4930  // Check that both operands are heap objects.
4931  __ JumpIfEitherSmi(left, right, &miss);
4932
4933  // Check that both operands are strings. This leaves the instance
4934  // types loaded in tmp1 and tmp2.
4935  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4936  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4937  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4938  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4939  STATIC_ASSERT(kNotStringTag != 0);
4940  __ orr(tmp3, tmp1, tmp2);
4941  __ tst(tmp3, Operand(kIsNotStringMask));
4942  __ b(ne, &miss);
4943
4944  // Fast check for identical strings.
4945  __ cmp(left, right);
4946  STATIC_ASSERT(EQUAL == 0);
4947  STATIC_ASSERT(kSmiTag == 0);
4948  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
4949  __ Ret(eq);
4950
4951  // Handle not identical strings.
4952
4953  // Check that both strings are internalized strings. If they are, we're done
4954  // because we already know they are not identical. We know they are both
4955  // strings.
4956  if (equality) {
4957    ASSERT(GetCondition() == eq);
4958    STATIC_ASSERT(kInternalizedTag == 0);
4959    __ orr(tmp3, tmp1, Operand(tmp2));
4960    __ tst(tmp3, Operand(kIsNotInternalizedMask));
4961    // Make sure r0 is non-zero. At this point input operands are
4962    // guaranteed to be non-zero.
4963    ASSERT(right.is(r0));
4964    __ Ret(eq);
4965  }
4966
4967  // Check that both strings are sequential ASCII.
4968  Label runtime;
4969  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
4970      tmp1, tmp2, tmp3, tmp4, &runtime);
4971
4972  // Compare flat ASCII strings. Returns when done.
4973  if (equality) {
4974    StringCompareStub::GenerateFlatAsciiStringEquals(
4975        masm, left, right, tmp1, tmp2, tmp3);
4976  } else {
4977    StringCompareStub::GenerateCompareFlatAsciiStrings(
4978        masm, left, right, tmp1, tmp2, tmp3, tmp4);
4979  }
4980
4981  // Handle more complex cases in runtime.
4982  __ bind(&runtime);
4983  __ Push(left, right);
4984  if (equality) {
4985    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
4986  } else {
4987    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4988  }
4989
4990  __ bind(&miss);
4991  GenerateMiss(masm);
4992}
4993
4994
4995void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
4996  ASSERT(state_ == CompareIC::OBJECT);
4997  Label miss;
4998  __ and_(r2, r1, Operand(r0));
4999  __ JumpIfSmi(r2, &miss);
5000
5001  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
5002  __ b(ne, &miss);
5003  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
5004  __ b(ne, &miss);
5005
5006  ASSERT(GetCondition() == eq);
5007  __ sub(r0, r0, Operand(r1));
5008  __ Ret();
5009
5010  __ bind(&miss);
5011  GenerateMiss(masm);
5012}
5013
5014
5015void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
5016  Label miss;
5017  __ and_(r2, r1, Operand(r0));
5018  __ JumpIfSmi(r2, &miss);
5019  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
5020  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
5021  __ cmp(r2, Operand(known_map_));
5022  __ b(ne, &miss);
5023  __ cmp(r3, Operand(known_map_));
5024  __ b(ne, &miss);
5025
5026  __ sub(r0, r0, Operand(r1));
5027  __ Ret();
5028
5029  __ bind(&miss);
5030  GenerateMiss(masm);
5031}
5032
5033
5034
5035void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
5036  {
5037    // Call the runtime system in a fresh internal frame.
5038    ExternalReference miss =
5039        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
5040
5041    FrameScope scope(masm, StackFrame::INTERNAL);
5042    __ Push(r1, r0);
5043    __ Push(lr, r1, r0);
5044    __ mov(ip, Operand(Smi::FromInt(op_)));
5045    __ push(ip);
5046    __ CallExternalReference(miss, 3);
5047    // Compute the entry point of the rewritten stub.
5048    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
5049    // Restore registers.
5050    __ pop(lr);
5051    __ Pop(r1, r0);
5052  }
5053
5054  __ Jump(r2);
5055}
5056
5057
5058void DirectCEntryStub::Generate(MacroAssembler* masm) {
5059  // Place the return address on the stack, making the call
5060  // GC safe. The RegExp backend also relies on this.
5061  __ str(lr, MemOperand(sp, 0));
5062  __ blx(ip);  // Call the C++ function.
5063  __ VFPEnsureFPSCRState(r2);
5064  __ ldr(pc, MemOperand(sp, 0));
5065}
5066
5067
5068void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
5069                                    Register target) {
5070  intptr_t code =
5071      reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
5072  __ Move(ip, target);
5073  __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
5074  __ blx(lr);  // Call the stub.
5075}
5076
5077
5078void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5079                                                      Label* miss,
5080                                                      Label* done,
5081                                                      Register receiver,
5082                                                      Register properties,
5083                                                      Handle<Name> name,
5084                                                      Register scratch0) {
5085  ASSERT(name->IsUniqueName());
5086  // If names of slots in range from 1 to kProbes - 1 for the hash value are
5087  // not equal to the name and kProbes-th slot is not used (its name is the
5088  // undefined value), it guarantees the hash table doesn't contain the
5089  // property. It's true even if some slots represent deleted properties
5090  // (their names are the hole value).
5091  for (int i = 0; i < kInlinedProbes; i++) {
5092    // scratch0 points to properties hash.
5093    // Compute the masked index: (hash + i + i * i) & mask.
5094    Register index = scratch0;
5095    // Capacity is smi 2^n.
5096    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
5097    __ sub(index, index, Operand(1));
5098    __ and_(index, index, Operand(
5099        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
5100
5101    // Scale the index by multiplying by the entry size.
5102    ASSERT(NameDictionary::kEntrySize == 3);
5103    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
5104
5105    Register entity_name = scratch0;
5106    // Having undefined at this place means the name is not contained.
5107    ASSERT_EQ(kSmiTagSize, 1);
5108    Register tmp = properties;
5109    __ add(tmp, properties, Operand(index, LSL, 1));
5110    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
5111
5112    ASSERT(!tmp.is(entity_name));
5113    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
5114    __ cmp(entity_name, tmp);
5115    __ b(eq, done);
5116
5117    // Load the hole ready for use below:
5118    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
5119
5120    // Stop if found the property.
5121    __ cmp(entity_name, Operand(Handle<Name>(name)));
5122    __ b(eq, miss);
5123
5124    Label good;
5125    __ cmp(entity_name, tmp);
5126    __ b(eq, &good);
5127
5128    // Check if the entry name is not a unique name.
5129    __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
5130    __ ldrb(entity_name,
5131            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
5132    __ JumpIfNotUniqueName(entity_name, miss);
5133    __ bind(&good);
5134
5135    // Restore the properties.
5136    __ ldr(properties,
5137           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5138  }
5139
5140  const int spill_mask =
5141      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
5142       r2.bit() | r1.bit() | r0.bit());
5143
5144  __ stm(db_w, sp, spill_mask);
5145  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5146  __ mov(r1, Operand(Handle<Name>(name)));
5147  NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
5148  __ CallStub(&stub);
5149  __ cmp(r0, Operand::Zero());
5150  __ ldm(ia_w, sp, spill_mask);
5151
5152  __ b(eq, done);
5153  __ b(ne, miss);
5154}
5155
5156
5157// Probe the name dictionary in the |elements| register. Jump to the
5158// |done| label if a property with the given name is found. Jump to
5159// the |miss| label otherwise.
5160// If lookup was successful |scratch2| will be equal to elements + 4 * index.
5161void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
5162                                                      Label* miss,
5163                                                      Label* done,
5164                                                      Register elements,
5165                                                      Register name,
5166                                                      Register scratch1,
5167                                                      Register scratch2) {
5168  ASSERT(!elements.is(scratch1));
5169  ASSERT(!elements.is(scratch2));
5170  ASSERT(!name.is(scratch1));
5171  ASSERT(!name.is(scratch2));
5172
5173  __ AssertName(name);
5174
5175  // Compute the capacity mask.
5176  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
5177  __ SmiUntag(scratch1);
5178  __ sub(scratch1, scratch1, Operand(1));
5179
5180  // Generate an unrolled loop that performs a few probes before
5181  // giving up. Measurements done on Gmail indicate that 2 probes
5182  // cover ~93% of loads from dictionaries.
5183  for (int i = 0; i < kInlinedProbes; i++) {
5184    // Compute the masked index: (hash + i + i * i) & mask.
5185    __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
5186    if (i > 0) {
5187      // Add the probe offset (i + i * i) left shifted to avoid right shifting
5188      // the hash in a separate instruction. The value hash + i + i * i is right
5189      // shifted in the following and instruction.
5190      ASSERT(NameDictionary::GetProbeOffset(i) <
5191             1 << (32 - Name::kHashFieldOffset));
5192      __ add(scratch2, scratch2, Operand(
5193          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5194    }
5195    __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
5196
5197    // Scale the index by multiplying by the element size.
5198    ASSERT(NameDictionary::kEntrySize == 3);
5199    // scratch2 = scratch2 * 3.
5200    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
5201
5202    // Check if the key is identical to the name.
5203    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
5204    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
5205    __ cmp(name, Operand(ip));
5206    __ b(eq, done);
5207  }
5208
5209  const int spill_mask =
5210      (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
5211       r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
5212      ~(scratch1.bit() | scratch2.bit());
5213
5214  __ stm(db_w, sp, spill_mask);
5215  if (name.is(r0)) {
5216    ASSERT(!elements.is(r1));
5217    __ Move(r1, name);
5218    __ Move(r0, elements);
5219  } else {
5220    __ Move(r0, elements);
5221    __ Move(r1, name);
5222  }
5223  NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
5224  __ CallStub(&stub);
5225  __ cmp(r0, Operand::Zero());
5226  __ mov(scratch2, Operand(r2));
5227  __ ldm(ia_w, sp, spill_mask);
5228
5229  __ b(ne, done);
5230  __ b(eq, miss);
5231}
5232
5233
5234void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
5235  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
5236  // we cannot call anything that could cause a GC from this stub.
5237  // Registers:
5238  //  result: NameDictionary to probe
5239  //  r1: key
5240  //  dictionary: NameDictionary to probe.
5241  //  index: will hold an index of entry if lookup is successful.
5242  //         might alias with result_.
5243  // Returns:
5244  //  result_ is zero if lookup failed, non zero otherwise.
5245
5246  Register result = r0;
5247  Register dictionary = r0;
5248  Register key = r1;
5249  Register index = r2;
5250  Register mask = r3;
5251  Register hash = r4;
5252  Register undefined = r5;
5253  Register entry_key = r6;
5254
5255  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
5256
5257  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
5258  __ SmiUntag(mask);
5259  __ sub(mask, mask, Operand(1));
5260
5261  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
5262
5263  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5264
5265  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
5266    // Compute the masked index: (hash + i + i * i) & mask.
5267    // Capacity is smi 2^n.
5268    if (i > 0) {
5269      // Add the probe offset (i + i * i) left shifted to avoid right shifting
5270      // the hash in a separate instruction. The value hash + i + i * i is right
5271      // shifted in the following and instruction.
5272      ASSERT(NameDictionary::GetProbeOffset(i) <
5273             1 << (32 - Name::kHashFieldOffset));
5274      __ add(index, hash, Operand(
5275          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5276    } else {
5277      __ mov(index, Operand(hash));
5278    }
5279    __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
5280
5281    // Scale the index by multiplying by the entry size.
5282    ASSERT(NameDictionary::kEntrySize == 3);
5283    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
5284
5285    ASSERT_EQ(kSmiTagSize, 1);
5286    __ add(index, dictionary, Operand(index, LSL, 2));
5287    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
5288
5289    // Having undefined at this place means the name is not contained.
5290    __ cmp(entry_key, Operand(undefined));
5291    __ b(eq, &not_in_dictionary);
5292
5293    // Stop if found the property.
5294    __ cmp(entry_key, Operand(key));
5295    __ b(eq, &in_dictionary);
5296
5297    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
5298      // Check if the entry name is not a unique name.
5299      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
5300      __ ldrb(entry_key,
5301              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
5302      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
5303    }
5304  }
5305
5306  __ bind(&maybe_in_dictionary);
5307  // If we are doing negative lookup then probing failure should be
5308  // treated as a lookup success. For positive lookup probing failure
5309  // should be treated as lookup failure.
5310  if (mode_ == POSITIVE_LOOKUP) {
5311    __ mov(result, Operand::Zero());
5312    __ Ret();
5313  }
5314
5315  __ bind(&in_dictionary);
5316  __ mov(result, Operand(1));
5317  __ Ret();
5318
5319  __ bind(&not_in_dictionary);
5320  __ mov(result, Operand::Zero());
5321  __ Ret();
5322}
5323
5324
5325void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
5326    Isolate* isolate) {
5327  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
5328  stub1.GetCode(isolate);
5329  // Hydrogen code stubs need stub2 at snapshot time.
5330  StoreBufferOverflowStub stub2(kSaveFPRegs);
5331  stub2.GetCode(isolate);
5332}
5333
5334
5335bool CodeStub::CanUseFPRegisters() {
5336  return true;  // VFP2 is a base requirement for V8
5337}
5338
5339
5340// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
5341// the value has just been written into the object, now this stub makes sure
5342// we keep the GC informed.  The word in the object where the value has been
5343// written is in the address register.
5344void RecordWriteStub::Generate(MacroAssembler* masm) {
5345  Label skip_to_incremental_noncompacting;
5346  Label skip_to_incremental_compacting;
5347
5348  // The first two instructions are generated with labels so as to get the
5349  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
5350  // forth between a compare instructions (a nop in this position) and the
5351  // real branch when we start and stop incremental heap marking.
5352  // See RecordWriteStub::Patch for details.
5353  {
5354    // Block literal pool emission, as the position of these two instructions
5355    // is assumed by the patching code.
5356    Assembler::BlockConstPoolScope block_const_pool(masm);
5357    __ b(&skip_to_incremental_noncompacting);
5358    __ b(&skip_to_incremental_compacting);
5359  }
5360
5361  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
5362    __ RememberedSetHelper(object_,
5363                           address_,
5364                           value_,
5365                           save_fp_regs_mode_,
5366                           MacroAssembler::kReturnAtEnd);
5367  }
5368  __ Ret();
5369
5370  __ bind(&skip_to_incremental_noncompacting);
5371  GenerateIncremental(masm, INCREMENTAL);
5372
5373  __ bind(&skip_to_incremental_compacting);
5374  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
5375
5376  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
5377  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
5378  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
5379  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
5380  PatchBranchIntoNop(masm, 0);
5381  PatchBranchIntoNop(masm, Assembler::kInstrSize);
5382}
5383
5384
5385void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
5386  regs_.Save(masm);
5387
5388  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
5389    Label dont_need_remembered_set;
5390
5391    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
5392    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
5393                           regs_.scratch0(),
5394                           &dont_need_remembered_set);
5395
5396    __ CheckPageFlag(regs_.object(),
5397                     regs_.scratch0(),
5398                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
5399                     ne,
5400                     &dont_need_remembered_set);
5401
5402    // First notify the incremental marker if necessary, then update the
5403    // remembered set.
5404    CheckNeedsToInformIncrementalMarker(
5405        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
5406    InformIncrementalMarker(masm, mode);
5407    regs_.Restore(masm);
5408    __ RememberedSetHelper(object_,
5409                           address_,
5410                           value_,
5411                           save_fp_regs_mode_,
5412                           MacroAssembler::kReturnAtEnd);
5413
5414    __ bind(&dont_need_remembered_set);
5415  }
5416
5417  CheckNeedsToInformIncrementalMarker(
5418      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
5419  InformIncrementalMarker(masm, mode);
5420  regs_.Restore(masm);
5421  __ Ret();
5422}
5423
5424
5425void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
5426  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
5427  int argument_count = 3;
5428  __ PrepareCallCFunction(argument_count, regs_.scratch0());
5429  Register address =
5430      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
5431  ASSERT(!address.is(regs_.object()));
5432  ASSERT(!address.is(r0));
5433  __ Move(address, regs_.address());
5434  __ Move(r0, regs_.object());
5435  __ Move(r1, address);
5436  __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
5437
5438  AllowExternalCallThatCantCauseGC scope(masm);
5439  if (mode == INCREMENTAL_COMPACTION) {
5440    __ CallCFunction(
5441        ExternalReference::incremental_evacuation_record_write_function(
5442            masm->isolate()),
5443        argument_count);
5444  } else {
5445    ASSERT(mode == INCREMENTAL);
5446    __ CallCFunction(
5447        ExternalReference::incremental_marking_record_write_function(
5448            masm->isolate()),
5449        argument_count);
5450  }
5451  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
5452}
5453
5454
5455void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
5456    MacroAssembler* masm,
5457    OnNoNeedToInformIncrementalMarker on_no_need,
5458    Mode mode) {
5459  Label on_black;
5460  Label need_incremental;
5461  Label need_incremental_pop_scratch;
5462
5463  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
5464  __ ldr(regs_.scratch1(),
5465         MemOperand(regs_.scratch0(),
5466                    MemoryChunk::kWriteBarrierCounterOffset));
5467  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
5468  __ str(regs_.scratch1(),
5469         MemOperand(regs_.scratch0(),
5470                    MemoryChunk::kWriteBarrierCounterOffset));
5471  __ b(mi, &need_incremental);
5472
5473  // Let's look at the color of the object:  If it is not black we don't have
5474  // to inform the incremental marker.
5475  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
5476
5477  regs_.Restore(masm);
5478  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5479    __ RememberedSetHelper(object_,
5480                           address_,
5481                           value_,
5482                           save_fp_regs_mode_,
5483                           MacroAssembler::kReturnAtEnd);
5484  } else {
5485    __ Ret();
5486  }
5487
5488  __ bind(&on_black);
5489
5490  // Get the value from the slot.
5491  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
5492
5493  if (mode == INCREMENTAL_COMPACTION) {
5494    Label ensure_not_white;
5495
5496    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
5497                     regs_.scratch1(),  // Scratch.
5498                     MemoryChunk::kEvacuationCandidateMask,
5499                     eq,
5500                     &ensure_not_white);
5501
5502    __ CheckPageFlag(regs_.object(),
5503                     regs_.scratch1(),  // Scratch.
5504                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
5505                     eq,
5506                     &need_incremental);
5507
5508    __ bind(&ensure_not_white);
5509  }
5510
5511  // We need extra registers for this, so we push the object and the address
5512  // register temporarily.
5513  __ Push(regs_.object(), regs_.address());
5514  __ EnsureNotWhite(regs_.scratch0(),  // The value.
5515                    regs_.scratch1(),  // Scratch.
5516                    regs_.object(),  // Scratch.
5517                    regs_.address(),  // Scratch.
5518                    &need_incremental_pop_scratch);
5519  __ Pop(regs_.object(), regs_.address());
5520
5521  regs_.Restore(masm);
5522  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5523    __ RememberedSetHelper(object_,
5524                           address_,
5525                           value_,
5526                           save_fp_regs_mode_,
5527                           MacroAssembler::kReturnAtEnd);
5528  } else {
5529    __ Ret();
5530  }
5531
5532  __ bind(&need_incremental_pop_scratch);
5533  __ Pop(regs_.object(), regs_.address());
5534
5535  __ bind(&need_incremental);
5536
5537  // Fall through when we need to inform the incremental marker.
5538}
5539
5540
5541void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
5542  // ----------- S t a t e -------------
5543  //  -- r0    : element value to store
5544  //  -- r3    : element index as smi
5545  //  -- sp[0] : array literal index in function as smi
5546  //  -- sp[4] : array literal
5547  // clobbers r1, r2, r4
5548  // -----------------------------------
5549
5550  Label element_done;
5551  Label double_elements;
5552  Label smi_element;
5553  Label slow_elements;
5554  Label fast_elements;
5555
5556  // Get array literal index, array literal and its map.
5557  __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
5558  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
5559  __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));
5560
5561  __ CheckFastElements(r2, r5, &double_elements);
5562  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
5563  __ JumpIfSmi(r0, &smi_element);
5564  __ CheckFastSmiElements(r2, r5, &fast_elements);
5565
5566  // Store into the array literal requires a elements transition. Call into
5567  // the runtime.
5568  __ bind(&slow_elements);
5569  // call.
5570  __ Push(r1, r3, r0);
5571  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5572  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
5573  __ Push(r5, r4);
5574  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
5575
5576  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
5577  __ bind(&fast_elements);
5578  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
5579  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
5580  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5581  __ str(r0, MemOperand(r6, 0));
5582  // Update the write barrier for the array store.
5583  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
5584                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
5585  __ Ret();
5586
5587  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
5588  // and value is Smi.
5589  __ bind(&smi_element);
5590  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
5591  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
5592  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
5593  __ Ret();
5594
5595  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
5596  __ bind(&double_elements);
5597  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
5598  __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
5599  __ Ret();
5600}
5601
5602
5603void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
5604  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5605  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5606  int parameter_count_offset =
5607      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5608  __ ldr(r1, MemOperand(fp, parameter_count_offset));
5609  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
5610    __ add(r1, r1, Operand(1));
5611  }
5612  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5613  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
5614  __ add(sp, sp, r1);
5615  __ Ret();
5616}
5617
5618
5619void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
5620  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5621  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5622  __ mov(r1, r0);
5623  int parameter_count_offset =
5624      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5625  __ ldr(r0, MemOperand(fp, parameter_count_offset));
5626  // The parameter count above includes the receiver for the arguments passed to
5627  // the deoptimization handler. Subtract the receiver for the parameter count
5628  // for the call.
5629  __ sub(r0, r0, Operand(1));
5630  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5631  ParameterCount argument_count(r0);
5632  __ InvokeFunction(
5633      r1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
5634}
5635
5636
5637void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
5638  if (masm->isolate()->function_entry_hook() != NULL) {
5639    PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
5640    ProfileEntryHookStub stub;
5641    __ push(lr);
5642    __ CallStub(&stub);
5643    __ pop(lr);
5644  }
5645}
5646
5647
5648void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
5649  // The entry hook is a "push lr" instruction, followed by a call.
5650  const int32_t kReturnAddressDistanceFromFunctionStart =
5651      3 * Assembler::kInstrSize;
5652
5653  // This should contain all kCallerSaved registers.
5654  const RegList kSavedRegs =
5655      1 <<  0 |  // r0
5656      1 <<  1 |  // r1
5657      1 <<  2 |  // r2
5658      1 <<  3 |  // r3
5659      1 <<  5 |  // r5
5660      1 <<  9;   // r9
5661  // We also save lr, so the count here is one higher than the mask indicates.
5662  const int32_t kNumSavedRegs = 7;
5663
5664  ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
5665
5666  // Save all caller-save registers as this may be called from anywhere.
5667  __ stm(db_w, sp, kSavedRegs | lr.bit());
5668
5669  // Compute the function's address for the first argument.
5670  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));
5671
5672  // The caller's return address is above the saved temporaries.
5673  // Grab that for the second argument to the hook.
5674  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));
5675
5676  // Align the stack if necessary.
5677  int frame_alignment = masm->ActivationFrameAlignment();
5678  if (frame_alignment > kPointerSize) {
5679    __ mov(r5, sp);
5680    ASSERT(IsPowerOf2(frame_alignment));
5681    __ and_(sp, sp, Operand(-frame_alignment));
5682  }
5683
5684#if V8_HOST_ARCH_ARM
5685  int32_t entry_hook =
5686      reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
5687  __ mov(ip, Operand(entry_hook));
5688#else
5689  // Under the simulator we need to indirect the entry hook through a
5690  // trampoline function at a known address.
5691  // It additionally takes an isolate as a third parameter
5692  __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));
5693
5694  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
5695  __ mov(ip, Operand(ExternalReference(&dispatcher,
5696                                       ExternalReference::BUILTIN_CALL,
5697                                       masm->isolate())));
5698#endif
5699  __ Call(ip);
5700
5701  // Restore the stack pointer if needed.
5702  if (frame_alignment > kPointerSize) {
5703    __ mov(sp, r5);
5704  }
5705
5706  // Also pop pc to get Ret(0).
5707  __ ldm(ia_w, sp, kSavedRegs | pc.bit());
5708}
5709
5710
5711template<class T>
5712static void CreateArrayDispatch(MacroAssembler* masm,
5713                                AllocationSiteOverrideMode mode) {
5714  if (mode == DISABLE_ALLOCATION_SITES) {
5715    T stub(GetInitialFastElementsKind(),
5716           CONTEXT_CHECK_REQUIRED,
5717           mode);
5718    __ TailCallStub(&stub);
5719  } else if (mode == DONT_OVERRIDE) {
5720    int last_index = GetSequenceIndexFromFastElementsKind(
5721        TERMINAL_FAST_ELEMENTS_KIND);
5722    for (int i = 0; i <= last_index; ++i) {
5723      Label next;
5724      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5725      __ cmp(r3, Operand(kind));
5726      __ b(ne, &next);
5727      T stub(kind);
5728      __ TailCallStub(&stub);
5729      __ bind(&next);
5730    }
5731
5732    // If we reached this point there is a problem.
5733    __ Abort(kUnexpectedElementsKindInArrayConstructor);
5734  } else {
5735    UNREACHABLE();
5736  }
5737}
5738
5739
5740static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5741                                           AllocationSiteOverrideMode mode) {
5742  // r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
5743  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5744  // r0 - number of arguments
5745  // r1 - constructor?
5746  // sp[0] - last argument
5747  Label normal_sequence;
5748  if (mode == DONT_OVERRIDE) {
5749    ASSERT(FAST_SMI_ELEMENTS == 0);
5750    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5751    ASSERT(FAST_ELEMENTS == 2);
5752    ASSERT(FAST_HOLEY_ELEMENTS == 3);
5753    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5754    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5755
5756    // is the low bit set? If so, we are holey and that is good.
5757    __ tst(r3, Operand(1));
5758    __ b(ne, &normal_sequence);
5759  }
5760
5761  // look at the first argument
5762  __ ldr(r5, MemOperand(sp, 0));
5763  __ cmp(r5, Operand::Zero());
5764  __ b(eq, &normal_sequence);
5765
5766  if (mode == DISABLE_ALLOCATION_SITES) {
5767    ElementsKind initial = GetInitialFastElementsKind();
5768    ElementsKind holey_initial = GetHoleyElementsKind(initial);
5769
5770    ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5771                                                  CONTEXT_CHECK_REQUIRED,
5772                                                  DISABLE_ALLOCATION_SITES);
5773    __ TailCallStub(&stub_holey);
5774
5775    __ bind(&normal_sequence);
5776    ArraySingleArgumentConstructorStub stub(initial,
5777                                            CONTEXT_CHECK_REQUIRED,
5778                                            DISABLE_ALLOCATION_SITES);
5779    __ TailCallStub(&stub);
5780  } else if (mode == DONT_OVERRIDE) {
5781    // We are going to create a holey array, but our kind is non-holey.
5782    // Fix kind and retry (only if we have an allocation site in the cell).
5783    __ add(r3, r3, Operand(1));
5784    __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
5785
5786    if (FLAG_debug_code) {
5787      __ ldr(r5, FieldMemOperand(r5, 0));
5788      __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
5789      __ Assert(eq, kExpectedAllocationSiteInCell);
5790      __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
5791    }
5792
5793    // Save the resulting elements kind in type info. We can't just store r3
5794    // in the AllocationSite::transition_info field because elements kind is
5795    // restricted to a portion of the field...upper bits need to be left alone.
5796    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5797    __ ldr(r4, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
5798    __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5799    __ str(r4, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
5800
5801    __ bind(&normal_sequence);
5802    int last_index = GetSequenceIndexFromFastElementsKind(
5803        TERMINAL_FAST_ELEMENTS_KIND);
5804    for (int i = 0; i <= last_index; ++i) {
5805      Label next;
5806      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5807      __ cmp(r3, Operand(kind));
5808      __ b(ne, &next);
5809      ArraySingleArgumentConstructorStub stub(kind);
5810      __ TailCallStub(&stub);
5811      __ bind(&next);
5812    }
5813
5814    // If we reached this point there is a problem.
5815    __ Abort(kUnexpectedElementsKindInArrayConstructor);
5816  } else {
5817    UNREACHABLE();
5818  }
5819}
5820
5821
5822template<class T>
5823static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5824  ElementsKind initial_kind = GetInitialFastElementsKind();
5825  ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
5826
5827  int to_index = GetSequenceIndexFromFastElementsKind(
5828      TERMINAL_FAST_ELEMENTS_KIND);
5829  for (int i = 0; i <= to_index; ++i) {
5830    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5831    T stub(kind);
5832    stub.GetCode(isolate);
5833    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
5834        (!FLAG_track_allocation_sites &&
5835         (kind == initial_kind || kind == initial_holey_kind))) {
5836      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
5837      stub1.GetCode(isolate);
5838    }
5839  }
5840}
5841
5842
5843void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5844  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5845      isolate);
5846  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5847      isolate);
5848  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5849      isolate);
5850}
5851
5852
5853void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
5854    Isolate* isolate) {
5855  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
5856  for (int i = 0; i < 2; i++) {
5857    // For internal arrays we only need a few things
5858    InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
5859    stubh1.GetCode(isolate);
5860    InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
5861    stubh2.GetCode(isolate);
5862    InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
5863    stubh3.GetCode(isolate);
5864  }
5865}
5866
5867
5868void ArrayConstructorStub::GenerateDispatchToArrayStub(
5869    MacroAssembler* masm,
5870    AllocationSiteOverrideMode mode) {
5871  if (argument_count_ == ANY) {
5872    Label not_zero_case, not_one_case;
5873    __ tst(r0, r0);
5874    __ b(ne, &not_zero_case);
5875    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5876
5877    __ bind(&not_zero_case);
5878    __ cmp(r0, Operand(1));
5879    __ b(gt, &not_one_case);
5880    CreateArrayDispatchOneArgument(masm, mode);
5881
5882    __ bind(&not_one_case);
5883    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5884  } else if (argument_count_ == NONE) {
5885    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
5886  } else if (argument_count_ == ONE) {
5887    CreateArrayDispatchOneArgument(masm, mode);
5888  } else if (argument_count_ == MORE_THAN_ONE) {
5889    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
5890  } else {
5891    UNREACHABLE();
5892  }
5893}
5894
5895
5896void ArrayConstructorStub::Generate(MacroAssembler* masm) {
5897  // ----------- S t a t e -------------
5898  //  -- r0 : argc (only if argument_count_ == ANY)
5899  //  -- r1 : constructor
5900  //  -- r2 : type info cell
5901  //  -- sp[0] : return address
5902  //  -- sp[4] : last argument
5903  // -----------------------------------
5904  if (FLAG_debug_code) {
5905    // The array construct code is only set for the global and natives
5906    // builtin Array functions which always have maps.
5907
5908    // Initial map for the builtin Array function should be a map.
5909    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
5910    // Will both indicate a NULL and a Smi.
5911    __ tst(r3, Operand(kSmiTagMask));
5912    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
5913    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
5914    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
5915
5916    // We should either have undefined in ebx or a valid cell
5917    Label okay_here;
5918    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
5919    __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
5920    __ b(eq, &okay_here);
5921    __ ldr(r3, FieldMemOperand(r2, 0));
5922    __ cmp(r3, Operand(cell_map));
5923    __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
5924    __ bind(&okay_here);
5925  }
5926
5927  Label no_info;
5928  // Get the elements kind and case on that.
5929  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
5930  __ b(eq, &no_info);
5931  __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
5932
5933  // If the type cell is undefined, or contains anything other than an
5934  // AllocationSite, call an array constructor that doesn't use AllocationSites.
5935  __ ldr(r4, FieldMemOperand(r3, 0));
5936  __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
5937  __ b(ne, &no_info);
5938
5939  __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
5940  __ SmiUntag(r3);
5941  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5942  __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
5943  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
5944
5945  __ bind(&no_info);
5946  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
5947}
5948
5949
5950void InternalArrayConstructorStub::GenerateCase(
5951    MacroAssembler* masm, ElementsKind kind) {
5952  Label not_zero_case, not_one_case;
5953  Label normal_sequence;
5954
5955  __ tst(r0, r0);
5956  __ b(ne, &not_zero_case);
5957  InternalArrayNoArgumentConstructorStub stub0(kind);
5958  __ TailCallStub(&stub0);
5959
5960  __ bind(&not_zero_case);
5961  __ cmp(r0, Operand(1));
5962  __ b(gt, &not_one_case);
5963
5964  if (IsFastPackedElementsKind(kind)) {
5965    // We might need to create a holey array
5966    // look at the first argument
5967    __ ldr(r3, MemOperand(sp, 0));
5968    __ cmp(r3, Operand::Zero());
5969    __ b(eq, &normal_sequence);
5970
5971    InternalArraySingleArgumentConstructorStub
5972        stub1_holey(GetHoleyElementsKind(kind));
5973    __ TailCallStub(&stub1_holey);
5974  }
5975
5976  __ bind(&normal_sequence);
5977  InternalArraySingleArgumentConstructorStub stub1(kind);
5978  __ TailCallStub(&stub1);
5979
5980  __ bind(&not_one_case);
5981  InternalArrayNArgumentsConstructorStub stubN(kind);
5982  __ TailCallStub(&stubN);
5983}
5984
5985
5986void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
5987  // ----------- S t a t e -------------
5988  //  -- r0 : argc
5989  //  -- r1 : constructor
5990  //  -- sp[0] : return address
5991  //  -- sp[4] : last argument
5992  // -----------------------------------
5993
5994  if (FLAG_debug_code) {
5995    // The array construct code is only set for the global and natives
5996    // builtin Array functions which always have maps.
5997
5998    // Initial map for the builtin Array function should be a map.
5999    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
6000    // Will both indicate a NULL and a Smi.
6001    __ tst(r3, Operand(kSmiTagMask));
6002    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
6003    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
6004    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
6005  }
6006
6007  // Figure out the right elements kind
6008  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
6009  // Load the map's "bit field 2" into |result|. We only need the first byte,
6010  // but the following bit field extraction takes care of that anyway.
6011  __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
6012  // Retrieve elements_kind from bit field 2.
6013  __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);
6014
6015  if (FLAG_debug_code) {
6016    Label done;
6017    __ cmp(r3, Operand(FAST_ELEMENTS));
6018    __ b(eq, &done);
6019    __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
6020    __ Assert(eq,
6021              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
6022    __ bind(&done);
6023  }
6024
6025  Label fast_elements_case;
6026  __ cmp(r3, Operand(FAST_ELEMENTS));
6027  __ b(eq, &fast_elements_case);
6028  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
6029
6030  __ bind(&fast_elements_case);
6031  GenerateCase(masm, FAST_ELEMENTS);
6032}
6033
6034
6035#undef __
6036
6037} }  // namespace v8::internal
6038
6039#endif  // V8_TARGET_ARCH_ARM
6040