1// Copyright 2012 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if V8_TARGET_ARCH_MIPS
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "codegen.h"
35#include "regexp-macro-assembler.h"
36#include "stub-cache.h"
37
38namespace v8 {
39namespace internal {
40
41
42void FastNewClosureStub::InitializeInterfaceDescriptor(
43    Isolate* isolate,
44    CodeStubInterfaceDescriptor* descriptor) {
45  static Register registers[] = { a2 };
46  descriptor->register_param_count_ = 1;
47  descriptor->register_params_ = registers;
48  descriptor->deoptimization_handler_ =
49      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
50}
51
52
53void ToNumberStub::InitializeInterfaceDescriptor(
54    Isolate* isolate,
55    CodeStubInterfaceDescriptor* descriptor) {
56  static Register registers[] = { a0 };
57  descriptor->register_param_count_ = 1;
58  descriptor->register_params_ = registers;
59  descriptor->deoptimization_handler_ = NULL;
60}
61
62
63void NumberToStringStub::InitializeInterfaceDescriptor(
64    Isolate* isolate,
65    CodeStubInterfaceDescriptor* descriptor) {
66  static Register registers[] = { a0 };
67  descriptor->register_param_count_ = 1;
68  descriptor->register_params_ = registers;
69  descriptor->deoptimization_handler_ =
70      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
71}
72
73
74void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
75    Isolate* isolate,
76    CodeStubInterfaceDescriptor* descriptor) {
77  static Register registers[] = { a3, a2, a1 };
78  descriptor->register_param_count_ = 3;
79  descriptor->register_params_ = registers;
80  descriptor->deoptimization_handler_ =
81      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
82}
83
84
85void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
86    Isolate* isolate,
87    CodeStubInterfaceDescriptor* descriptor) {
88  static Register registers[] = { a3, a2, a1, a0 };
89  descriptor->register_param_count_ = 4;
90  descriptor->register_params_ = registers;
91  descriptor->deoptimization_handler_ =
92      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
93}
94
95
96void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
97    Isolate* isolate,
98    CodeStubInterfaceDescriptor* descriptor) {
99  static Register registers[] = { a2 };
100  descriptor->register_param_count_ = 1;
101  descriptor->register_params_ = registers;
102  descriptor->deoptimization_handler_ = NULL;
103}
104
105
106void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
107    Isolate* isolate,
108    CodeStubInterfaceDescriptor* descriptor) {
109  static Register registers[] = { a1, a0 };
110  descriptor->register_param_count_ = 2;
111  descriptor->register_params_ = registers;
112  descriptor->deoptimization_handler_ =
113      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
114}
115
116
117void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
118    Isolate* isolate,
119    CodeStubInterfaceDescriptor* descriptor) {
120  static Register registers[] = {a1, a0 };
121  descriptor->register_param_count_ = 2;
122  descriptor->register_params_ = registers;
123  descriptor->deoptimization_handler_ =
124      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
125}
126
127
128void LoadFieldStub::InitializeInterfaceDescriptor(
129    Isolate* isolate,
130    CodeStubInterfaceDescriptor* descriptor) {
131  static Register registers[] = { a0 };
132  descriptor->register_param_count_ = 1;
133  descriptor->register_params_ = registers;
134  descriptor->deoptimization_handler_ = NULL;
135}
136
137
138void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
139    Isolate* isolate,
140    CodeStubInterfaceDescriptor* descriptor) {
141  static Register registers[] = { a1 };
142  descriptor->register_param_count_ = 1;
143  descriptor->register_params_ = registers;
144  descriptor->deoptimization_handler_ = NULL;
145}
146
147
148void KeyedArrayCallStub::InitializeInterfaceDescriptor(
149    Isolate* isolate,
150    CodeStubInterfaceDescriptor* descriptor) {
151  static Register registers[] = { a2 };
152  descriptor->register_param_count_ = 1;
153  descriptor->register_params_ = registers;
154  descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
155  descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
156  descriptor->deoptimization_handler_ =
157      FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
158}
159
160
161void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
162    Isolate* isolate,
163    CodeStubInterfaceDescriptor* descriptor) {
164  static Register registers[] = { a2, a1, a0 };
165  descriptor->register_param_count_ = 3;
166  descriptor->register_params_ = registers;
167  descriptor->deoptimization_handler_ =
168      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
169}
170
171
172void TransitionElementsKindStub::InitializeInterfaceDescriptor(
173    Isolate* isolate,
174    CodeStubInterfaceDescriptor* descriptor) {
175  static Register registers[] = { a0, a1 };
176  descriptor->register_param_count_ = 2;
177  descriptor->register_params_ = registers;
178  Address entry =
179      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
180  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
181}
182
183
184void CompareNilICStub::InitializeInterfaceDescriptor(
185    Isolate* isolate,
186    CodeStubInterfaceDescriptor* descriptor) {
187  static Register registers[] = { a0 };
188  descriptor->register_param_count_ = 1;
189  descriptor->register_params_ = registers;
190  descriptor->deoptimization_handler_ =
191      FUNCTION_ADDR(CompareNilIC_Miss);
192  descriptor->SetMissHandler(
193      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
194}
195
196
197static void InitializeArrayConstructorDescriptor(
198    Isolate* isolate,
199    CodeStubInterfaceDescriptor* descriptor,
200    int constant_stack_parameter_count) {
201  // register state
202  // a0 -- number of arguments
203  // a1 -- function
204  // a2 -- type info cell with elements kind
205  static Register registers_variable_args[] = { a1, a2, a0 };
206  static Register registers_no_args[] = { a1, a2 };
207
208  if (constant_stack_parameter_count == 0) {
209    descriptor->register_param_count_ = 2;
210    descriptor->register_params_ = registers_no_args;
211  } else {
212    // stack param count needs (constructor pointer, and single argument)
213    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
214    descriptor->stack_parameter_count_ = a0;
215    descriptor->register_param_count_ = 3;
216    descriptor->register_params_ = registers_variable_args;
217  }
218
219  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
220  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
221  descriptor->deoptimization_handler_ =
222      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
223}
224
225
226static void InitializeInternalArrayConstructorDescriptor(
227    Isolate* isolate,
228    CodeStubInterfaceDescriptor* descriptor,
229    int constant_stack_parameter_count) {
230  // register state
231  // a0 -- number of arguments
232  // a1 -- constructor function
233  static Register registers_variable_args[] = { a1, a0 };
234  static Register registers_no_args[] = { a1 };
235
236  if (constant_stack_parameter_count == 0) {
237    descriptor->register_param_count_ = 1;
238    descriptor->register_params_ = registers_no_args;
239  } else {
240    // stack param count needs (constructor pointer, and single argument)
241    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
242    descriptor->stack_parameter_count_ = a0;
243    descriptor->register_param_count_ = 2;
244    descriptor->register_params_ = registers_variable_args;
245  }
246
247  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
248  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
249  descriptor->deoptimization_handler_ =
250      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
251}
252
253
254void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
255    Isolate* isolate,
256    CodeStubInterfaceDescriptor* descriptor) {
257  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
258}
259
260
261void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
262    Isolate* isolate,
263    CodeStubInterfaceDescriptor* descriptor) {
264  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
265}
266
267
268void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
269    Isolate* isolate,
270    CodeStubInterfaceDescriptor* descriptor) {
271  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
272}
273
274
275void ToBooleanStub::InitializeInterfaceDescriptor(
276    Isolate* isolate,
277    CodeStubInterfaceDescriptor* descriptor) {
278  static Register registers[] = { a0 };
279  descriptor->register_param_count_ = 1;
280  descriptor->register_params_ = registers;
281  descriptor->deoptimization_handler_ =
282      FUNCTION_ADDR(ToBooleanIC_Miss);
283  descriptor->SetMissHandler(
284      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
285}
286
287
288void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
289    Isolate* isolate,
290    CodeStubInterfaceDescriptor* descriptor) {
291  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
292}
293
294
295void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
296    Isolate* isolate,
297    CodeStubInterfaceDescriptor* descriptor) {
298  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
299}
300
301
302void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
303    Isolate* isolate,
304    CodeStubInterfaceDescriptor* descriptor) {
305  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
306}
307
308
309void StoreGlobalStub::InitializeInterfaceDescriptor(
310    Isolate* isolate,
311    CodeStubInterfaceDescriptor* descriptor) {
312  static Register registers[] = { a1, a2, a0 };
313  descriptor->register_param_count_ = 3;
314  descriptor->register_params_ = registers;
315  descriptor->deoptimization_handler_ =
316      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
317}
318
319
320void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
321    Isolate* isolate,
322    CodeStubInterfaceDescriptor* descriptor) {
323  static Register registers[] = { a0, a3, a1, a2 };
324  descriptor->register_param_count_ = 4;
325  descriptor->register_params_ = registers;
326  descriptor->deoptimization_handler_ =
327      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
328}
329
330
331void NewStringAddStub::InitializeInterfaceDescriptor(
332    Isolate* isolate,
333    CodeStubInterfaceDescriptor* descriptor) {
334  static Register registers[] = { a1, a0 };
335  descriptor->register_param_count_ = 2;
336  descriptor->register_params_ = registers;
337  descriptor->deoptimization_handler_ =
338      Runtime::FunctionForId(Runtime::kStringAdd)->entry;
339}
340
341
342#define __ ACCESS_MASM(masm)
343
344
345static void EmitIdenticalObjectComparison(MacroAssembler* masm,
346                                          Label* slow,
347                                          Condition cc);
348static void EmitSmiNonsmiComparison(MacroAssembler* masm,
349                                    Register lhs,
350                                    Register rhs,
351                                    Label* rhs_not_nan,
352                                    Label* slow,
353                                    bool strict);
354static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
355                                           Register lhs,
356                                           Register rhs);
357
358
359void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
360  // Update the static counter each time a new code stub is generated.
361  Isolate* isolate = masm->isolate();
362  isolate->counters()->code_stubs()->Increment();
363
364  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
365  int param_count = descriptor->register_param_count_;
366  {
367    // Call the runtime system in a fresh internal frame.
368    FrameScope scope(masm, StackFrame::INTERNAL);
369    ASSERT(descriptor->register_param_count_ == 0 ||
370           a0.is(descriptor->register_params_[param_count - 1]));
371    // Push arguments
372    for (int i = 0; i < param_count; ++i) {
373      __ push(descriptor->register_params_[i]);
374    }
375    ExternalReference miss = descriptor->miss_handler();
376    __ CallExternalReference(miss, descriptor->register_param_count_);
377  }
378
379  __ Ret();
380}
381
382
383void FastNewContextStub::Generate(MacroAssembler* masm) {
384  // Try to allocate the context in new space.
385  Label gc;
386  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
387
388  // Attempt to allocate the context in new space.
389  __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
390
391  // Load the function from the stack.
392  __ lw(a3, MemOperand(sp, 0));
393
394  // Set up the object header.
395  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
396  __ li(a2, Operand(Smi::FromInt(length)));
397  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
398  __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
399
400  // Set up the fixed slots, copy the global object from the previous context.
401  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
402  __ li(a1, Operand(Smi::FromInt(0)));
403  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
404  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
405  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
406  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
407
408  // Initialize the rest of the slots to undefined.
409  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
410  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
411    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
412  }
413
414  // Remove the on-stack argument and return.
415  __ mov(cp, v0);
416  __ DropAndRet(1);
417
418  // Need to collect. Call into runtime system.
419  __ bind(&gc);
420  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
421}
422
423
424void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
425  // Stack layout on entry:
426  //
427  // [sp]: function.
428  // [sp + kPointerSize]: serialized scope info
429
430  // Try to allocate the context in new space.
431  Label gc;
432  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
433  __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);
434
435  // Load the function from the stack.
436  __ lw(a3, MemOperand(sp, 0));
437
438  // Load the serialized scope info from the stack.
439  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
440
441  // Set up the object header.
442  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
443  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
444  __ li(a2, Operand(Smi::FromInt(length)));
445  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
446
447  // If this block context is nested in the native context we get a smi
448  // sentinel instead of a function. The block context should get the
449  // canonical empty function of the native context as its closure which
450  // we still have to look up.
451  Label after_sentinel;
452  __ JumpIfNotSmi(a3, &after_sentinel);
453  if (FLAG_debug_code) {
454    __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
455  }
456  __ lw(a3, GlobalObjectOperand());
457  __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
458  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
459  __ bind(&after_sentinel);
460
461  // Set up the fixed slots, copy the global object from the previous context.
462  __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
463  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
464  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
465  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
466  __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));
467
468  // Initialize the rest of the slots to the hole value.
469  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
470  for (int i = 0; i < slots_; i++) {
471    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
472  }
473
474  // Remove the on-stack argument and return.
475  __ mov(cp, v0);
476  __ DropAndRet(2);
477
478  // Need to collect. Call into runtime system.
479  __ bind(&gc);
480  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
481}
482
483
484// Takes a Smi and converts to an IEEE 64 bit floating point value in two
485// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
486// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
487// scratch register.  Destroys the source register.  No GC occurs during this
488// stub so you don't have to set up the frame.
489class ConvertToDoubleStub : public PlatformCodeStub {
490 public:
491  ConvertToDoubleStub(Register result_reg_1,
492                      Register result_reg_2,
493                      Register source_reg,
494                      Register scratch_reg)
495      : result1_(result_reg_1),
496        result2_(result_reg_2),
497        source_(source_reg),
498        zeros_(scratch_reg) { }
499
500 private:
501  Register result1_;
502  Register result2_;
503  Register source_;
504  Register zeros_;
505
506  // Minor key encoding in 16 bits.
507  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
508  class OpBits: public BitField<Token::Value, 2, 14> {};
509
510  Major MajorKey() { return ConvertToDouble; }
511  int MinorKey() {
512    // Encode the parameters in a unique 16 bit value.
513    return  result1_.code() +
514           (result2_.code() << 4) +
515           (source_.code() << 8) +
516           (zeros_.code() << 12);
517  }
518
519  void Generate(MacroAssembler* masm);
520};
521
522
523void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
524#ifndef BIG_ENDIAN_FLOATING_POINT
525  Register exponent = result1_;
526  Register mantissa = result2_;
527#else
528  Register exponent = result2_;
529  Register mantissa = result1_;
530#endif
531  Label not_special;
532  // Convert from Smi to integer.
533  __ sra(source_, source_, kSmiTagSize);
534  // Move sign bit from source to destination.  This works because the sign bit
535  // in the exponent word of the double has the same position and polarity as
536  // the 2's complement sign bit in a Smi.
537  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
538  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
539  // Subtract from 0 if source was negative.
540  __ subu(at, zero_reg, source_);
541  __ Movn(source_, at, exponent);
542
543  // We have -1, 0 or 1, which we treat specially. Register source_ contains
544  // absolute value: it is either equal to 1 (special case of -1 and 1),
545  // greater than 1 (not a special case) or less than 1 (special case of 0).
546  __ Branch(&not_special, gt, source_, Operand(1));
547
548  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
549  const uint32_t exponent_word_for_1 =
550      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
551  // Safe to use 'at' as dest reg here.
552  __ Or(at, exponent, Operand(exponent_word_for_1));
553  __ Movn(exponent, at, source_);  // Write exp when source not 0.
554  // 1, 0 and -1 all have 0 for the second word.
555  __ Ret(USE_DELAY_SLOT);
556  __ mov(mantissa, zero_reg);
557
558  __ bind(&not_special);
559  // Count leading zeros.
560  // Gets the wrong answer for 0, but we already checked for that case above.
561  __ Clz(zeros_, source_);
562  // Compute exponent and or it into the exponent register.
563  // We use mantissa as a scratch register here.
564  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
565  __ subu(mantissa, mantissa, zeros_);
566  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
567  __ Or(exponent, exponent, mantissa);
568
569  // Shift up the source chopping the top bit off.
570  __ Addu(zeros_, zeros_, Operand(1));
571  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
572  __ sllv(source_, source_, zeros_);
573  // Compute lower part of fraction (last 12 bits).
574  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
575  // And the top (top 20 bits).
576  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
577
578  __ Ret(USE_DELAY_SLOT);
579  __ or_(exponent, exponent, source_);
580}
581
582
583void DoubleToIStub::Generate(MacroAssembler* masm) {
584  Label out_of_range, only_low, negate, done;
585  Register input_reg = source();
586  Register result_reg = destination();
587
588  int double_offset = offset();
589  // Account for saved regs if input is sp.
590  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
591
592  Register scratch =
593      GetRegisterThatIsNotOneOf(input_reg, result_reg);
594  Register scratch2 =
595      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
596  Register scratch3 =
597      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
598  DoubleRegister double_scratch = kLithiumScratchDouble;
599
600  __ Push(scratch, scratch2, scratch3);
601
602  if (!skip_fastpath()) {
603    // Load double input.
604    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
605
606    // Clear cumulative exception flags and save the FCSR.
607    __ cfc1(scratch2, FCSR);
608    __ ctc1(zero_reg, FCSR);
609
610    // Try a conversion to a signed integer.
611    __ Trunc_w_d(double_scratch, double_scratch);
612    // Move the converted value into the result register.
613    __ mfc1(result_reg, double_scratch);
614
615    // Retrieve and restore the FCSR.
616    __ cfc1(scratch, FCSR);
617    __ ctc1(scratch2, FCSR);
618
619    // Check for overflow and NaNs.
620    __ And(
621        scratch, scratch,
622        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
623           | kFCSRInvalidOpFlagMask);
624    // If we had no exceptions we are done.
625    __ Branch(&done, eq, scratch, Operand(zero_reg));
626  }
627
628  // Load the double value and perform a manual truncation.
629  Register input_high = scratch2;
630  Register input_low = scratch3;
631
632  __ lw(input_low, MemOperand(input_reg, double_offset));
633  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
634
635  Label normal_exponent, restore_sign;
636  // Extract the biased exponent in result.
637  __ Ext(result_reg,
638         input_high,
639         HeapNumber::kExponentShift,
640         HeapNumber::kExponentBits);
641
642  // Check for Infinity and NaNs, which should return 0.
643  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
644  __ Movz(result_reg, zero_reg, scratch);
645  __ Branch(&done, eq, scratch, Operand(zero_reg));
646
647  // Express exponent as delta to (number of mantissa bits + 31).
648  __ Subu(result_reg,
649          result_reg,
650          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
651
652  // If the delta is strictly positive, all bits would be shifted away,
653  // which means that we can return 0.
654  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
655  __ mov(result_reg, zero_reg);
656  __ Branch(&done);
657
658  __ bind(&normal_exponent);
659  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
660  // Calculate shift.
661  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
662
663  // Save the sign.
664  Register sign = result_reg;
665  result_reg = no_reg;
666  __ And(sign, input_high, Operand(HeapNumber::kSignMask));
667
668  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
669  // to check for this specific case.
670  Label high_shift_needed, high_shift_done;
671  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
672  __ mov(input_high, zero_reg);
673  __ Branch(&high_shift_done);
674  __ bind(&high_shift_needed);
675
676  // Set the implicit 1 before the mantissa part in input_high.
677  __ Or(input_high,
678        input_high,
679        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
680  // Shift the mantissa bits to the correct position.
681  // We don't need to clear non-mantissa bits as they will be shifted away.
682  // If they weren't, it would mean that the answer is in the 32bit range.
683  __ sllv(input_high, input_high, scratch);
684
685  __ bind(&high_shift_done);
686
687  // Replace the shifted bits with bits from the lower mantissa word.
688  Label pos_shift, shift_done;
689  __ li(at, 32);
690  __ subu(scratch, at, scratch);
691  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
692
693  // Negate scratch.
694  __ Subu(scratch, zero_reg, scratch);
695  __ sllv(input_low, input_low, scratch);
696  __ Branch(&shift_done);
697
698  __ bind(&pos_shift);
699  __ srlv(input_low, input_low, scratch);
700
701  __ bind(&shift_done);
702  __ Or(input_high, input_high, Operand(input_low));
703  // Restore sign if necessary.
704  __ mov(scratch, sign);
705  result_reg = sign;
706  sign = no_reg;
707  __ Subu(result_reg, zero_reg, input_high);
708  __ Movz(result_reg, input_high, scratch);
709
710  __ bind(&done);
711
712  __ Pop(scratch, scratch2, scratch3);
713  __ Ret();
714}
715
716
717void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
718    Isolate* isolate) {
719  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
720  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
721  stub1.GetCode(isolate);
722  stub2.GetCode(isolate);
723}
724
725
726// See comment for class, this does NOT work for int32's that are in Smi range.
727void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
728  Label max_negative_int;
729  // the_int_ has the answer which is a signed int32 but not a Smi.
730  // We test for the special value that has a different exponent.
731  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
732  // Test sign, and save for later conditionals.
733  __ And(sign_, the_int_, Operand(0x80000000u));
734  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
735
736  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
737  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
738  uint32_t non_smi_exponent =
739      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
740  __ li(scratch_, Operand(non_smi_exponent));
741  // Set the sign bit in scratch_ if the value was negative.
742  __ or_(scratch_, scratch_, sign_);
743  // Subtract from 0 if the value was negative.
744  __ subu(at, zero_reg, the_int_);
745  __ Movn(the_int_, at, sign_);
746  // We should be masking the implict first digit of the mantissa away here,
747  // but it just ends up combining harmlessly with the last digit of the
748  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
749  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
750  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
751  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
752  __ srl(at, the_int_, shift_distance);
753  __ or_(scratch_, scratch_, at);
754  __ sw(scratch_, FieldMemOperand(the_heap_number_,
755                                   HeapNumber::kExponentOffset));
756  __ sll(scratch_, the_int_, 32 - shift_distance);
757  __ Ret(USE_DELAY_SLOT);
758  __ sw(scratch_, FieldMemOperand(the_heap_number_,
759                                   HeapNumber::kMantissaOffset));
760
761  __ bind(&max_negative_int);
762  // The max negative int32 is stored as a positive number in the mantissa of
763  // a double because it uses a sign bit instead of using two's complement.
764  // The actual mantissa bits stored are all 0 because the implicit most
765  // significant 1 bit is not stored.
766  non_smi_exponent += 1 << HeapNumber::kExponentShift;
767  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
768  __ sw(scratch_,
769        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
770  __ mov(scratch_, zero_reg);
771  __ Ret(USE_DELAY_SLOT);
772  __ sw(scratch_,
773        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
774}
775
776
777// Handle the case where the lhs and rhs are the same object.
778// Equality is almost reflexive (everything but NaN), so this is a test
779// for "identity and not NaN".
780static void EmitIdenticalObjectComparison(MacroAssembler* masm,
781                                          Label* slow,
782                                          Condition cc) {
783  Label not_identical;
784  Label heap_number, return_equal;
785  Register exp_mask_reg = t5;
786
787  __ Branch(&not_identical, ne, a0, Operand(a1));
788
789  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
790
791  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
792  // so we do the second best thing - test it ourselves.
793  // They are both equal and they are not both Smis so both of them are not
794  // Smis. If it's not a heap number, then return equal.
795  if (cc == less || cc == greater) {
796    __ GetObjectType(a0, t4, t4);
797    __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
798  } else {
799    __ GetObjectType(a0, t4, t4);
800    __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
801    // Comparing JS objects with <=, >= is complicated.
802    if (cc != eq) {
803    __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
804      // Normally here we fall through to return_equal, but undefined is
805      // special: (undefined == undefined) == true, but
806      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
807      if (cc == less_equal || cc == greater_equal) {
808        __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
809        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
810        __ Branch(&return_equal, ne, a0, Operand(t2));
811        ASSERT(is_int16(GREATER) && is_int16(LESS));
812        __ Ret(USE_DELAY_SLOT);
813        if (cc == le) {
814          // undefined <= undefined should fail.
815          __ li(v0, Operand(GREATER));
816        } else  {
817          // undefined >= undefined should fail.
818          __ li(v0, Operand(LESS));
819        }
820      }
821    }
822  }
823
824  __ bind(&return_equal);
825  ASSERT(is_int16(GREATER) && is_int16(LESS));
826  __ Ret(USE_DELAY_SLOT);
827  if (cc == less) {
828    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
829  } else if (cc == greater) {
830    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
831  } else {
832    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
833  }
834
835  // For less and greater we don't have to check for NaN since the result of
836  // x < x is false regardless.  For the others here is some code to check
837  // for NaN.
838  if (cc != lt && cc != gt) {
839    __ bind(&heap_number);
840    // It is a heap number, so return non-equal if it's NaN and equal if it's
841    // not NaN.
842
843    // The representation of NaN values has all exponent bits (52..62) set,
844    // and not all mantissa bits (0..51) clear.
845    // Read top bits of double representation (second word of value).
846    __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
847    // Test that exponent bits are all set.
848    __ And(t3, t2, Operand(exp_mask_reg));
849    // If all bits not set (ne cond), then not a NaN, objects are equal.
850    __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
851
852    // Shift out flag and all exponent bits, retaining only mantissa.
853    __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
854    // Or with all low-bits of mantissa.
855    __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
856    __ Or(v0, t3, Operand(t2));
857    // For equal we already have the right value in v0:  Return zero (equal)
858    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
859    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
860    // value if it's a NaN.
861    if (cc != eq) {
862      // All-zero means Infinity means equal.
863      __ Ret(eq, v0, Operand(zero_reg));
864      ASSERT(is_int16(GREATER) && is_int16(LESS));
865      __ Ret(USE_DELAY_SLOT);
866      if (cc == le) {
867        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
868      } else {
869        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
870      }
871    }
872  }
873  // No fall through here.
874
875  __ bind(&not_identical);
876}
877
878
879static void EmitSmiNonsmiComparison(MacroAssembler* masm,
880                                    Register lhs,
881                                    Register rhs,
882                                    Label* both_loaded_as_doubles,
883                                    Label* slow,
884                                    bool strict) {
885  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
886         (lhs.is(a1) && rhs.is(a0)));
887
888  Label lhs_is_smi;
889  __ JumpIfSmi(lhs, &lhs_is_smi);
890  // Rhs is a Smi.
891  // Check whether the non-smi is a heap number.
892  __ GetObjectType(lhs, t4, t4);
893  if (strict) {
894    // If lhs was not a number and rhs was a Smi then strict equality cannot
895    // succeed. Return non-equal (lhs is already not zero).
896    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
897    __ mov(v0, lhs);
898  } else {
899    // Smi compared non-strictly with a non-Smi non-heap-number. Call
900    // the runtime.
901    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
902  }
903
904  // Rhs is a smi, lhs is a number.
905  // Convert smi rhs to double.
906  __ sra(at, rhs, kSmiTagSize);
907  __ mtc1(at, f14);
908  __ cvt_d_w(f14, f14);
909  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
910
911  // We now have both loaded as doubles.
912  __ jmp(both_loaded_as_doubles);
913
914  __ bind(&lhs_is_smi);
915  // Lhs is a Smi.  Check whether the non-smi is a heap number.
916  __ GetObjectType(rhs, t4, t4);
917  if (strict) {
918    // If lhs was not a number and rhs was a Smi then strict equality cannot
919    // succeed. Return non-equal.
920    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
921    __ li(v0, Operand(1));
922  } else {
923    // Smi compared non-strictly with a non-Smi non-heap-number. Call
924    // the runtime.
925    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
926  }
927
928  // Lhs is a smi, rhs is a number.
929  // Convert smi lhs to double.
930  __ sra(at, lhs, kSmiTagSize);
931  __ mtc1(at, f12);
932  __ cvt_d_w(f12, f12);
933  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
934  // Fall through to both_loaded_as_doubles.
935}
936
937
938static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
939                                           Register lhs,
940                                           Register rhs) {
941    // If either operand is a JS object or an oddball value, then they are
942    // not equal since their pointers are different.
943    // There is no test for undetectability in strict equality.
944    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
945    Label first_non_object;
946    // Get the type of the first operand into a2 and compare it with
947    // FIRST_SPEC_OBJECT_TYPE.
948    __ GetObjectType(lhs, a2, a2);
949    __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
950
951    // Return non-zero.
952    Label return_not_equal;
953    __ bind(&return_not_equal);
954    __ Ret(USE_DELAY_SLOT);
955    __ li(v0, Operand(1));
956
957    __ bind(&first_non_object);
958    // Check for oddballs: true, false, null, undefined.
959    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
960
961    __ GetObjectType(rhs, a3, a3);
962    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
963
964    // Check for oddballs: true, false, null, undefined.
965    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
966
967    // Now that we have the types we might as well check for
968    // internalized-internalized.
969    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
970    __ Or(a2, a2, Operand(a3));
971    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
972    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
973}
974
975
976static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
977                                       Register lhs,
978                                       Register rhs,
979                                       Label* both_loaded_as_doubles,
980                                       Label* not_heap_numbers,
981                                       Label* slow) {
982  __ GetObjectType(lhs, a3, a2);
983  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
984  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
985  // If first was a heap number & second wasn't, go to slow case.
986  __ Branch(slow, ne, a3, Operand(a2));
987
988  // Both are heap numbers. Load them up then jump to the code we have
989  // for that.
990  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
991  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
992
993  __ jmp(both_loaded_as_doubles);
994}
995
996
997// Fast negative check for internalized-to-internalized equality.
998static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
999                                                     Register lhs,
1000                                                     Register rhs,
1001                                                     Label* possible_strings,
1002                                                     Label* not_both_strings) {
1003  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1004         (lhs.is(a1) && rhs.is(a0)));
1005
1006  // a2 is object type of rhs.
1007  Label object_test;
1008  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1009  __ And(at, a2, Operand(kIsNotStringMask));
1010  __ Branch(&object_test, ne, at, Operand(zero_reg));
1011  __ And(at, a2, Operand(kIsNotInternalizedMask));
1012  __ Branch(possible_strings, ne, at, Operand(zero_reg));
1013  __ GetObjectType(rhs, a3, a3);
1014  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1015  __ And(at, a3, Operand(kIsNotInternalizedMask));
1016  __ Branch(possible_strings, ne, at, Operand(zero_reg));
1017
1018  // Both are internalized strings. We already checked they weren't the same
1019  // pointer so they are not equal.
1020  __ Ret(USE_DELAY_SLOT);
1021  __ li(v0, Operand(1));   // Non-zero indicates not equal.
1022
1023  __ bind(&object_test);
1024  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1025  __ GetObjectType(rhs, a2, a3);
1026  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1027
1028  // If both objects are undetectable, they are equal.  Otherwise, they
1029  // are not equal, since they are different objects and an object is not
1030  // equal to undefined.
1031  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1032  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1033  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1034  __ and_(a0, a2, a3);
1035  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1036  __ Ret(USE_DELAY_SLOT);
1037  __ xori(v0, a0, 1 << Map::kIsUndetectable);
1038}
1039
1040
1041static void ICCompareStub_CheckInputType(MacroAssembler* masm,
1042                                         Register input,
1043                                         Register scratch,
1044                                         CompareIC::State expected,
1045                                         Label* fail) {
1046  Label ok;
1047  if (expected == CompareIC::SMI) {
1048    __ JumpIfNotSmi(input, fail);
1049  } else if (expected == CompareIC::NUMBER) {
1050    __ JumpIfSmi(input, &ok);
1051    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
1052                DONT_DO_SMI_CHECK);
1053  }
1054  // We could be strict about internalized/string here, but as long as
1055  // hydrogen doesn't care, the stub doesn't have to care either.
1056  __ bind(&ok);
1057}
1058
1059
1060// On entry a1 and a2 are the values to be compared.
1061// On exit a0 is 0, positive or negative to indicate the result of
1062// the comparison.
1063void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
1064  Register lhs = a1;
1065  Register rhs = a0;
1066  Condition cc = GetCondition();
1067
1068  Label miss;
1069  ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
1070  ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
1071
1072  Label slow;  // Call builtin.
1073  Label not_smis, both_loaded_as_doubles;
1074
1075  Label not_two_smis, smi_done;
1076  __ Or(a2, a1, a0);
1077  __ JumpIfNotSmi(a2, &not_two_smis);
1078  __ sra(a1, a1, 1);
1079  __ sra(a0, a0, 1);
1080  __ Ret(USE_DELAY_SLOT);
1081  __ subu(v0, a1, a0);
1082  __ bind(&not_two_smis);
1083
1084  // NOTICE! This code is only reached after a smi-fast-case check, so
1085  // it is certain that at least one operand isn't a smi.
1086
1087  // Handle the case where the objects are identical.  Either returns the answer
1088  // or goes to slow.  Only falls through if the objects were not identical.
1089  EmitIdenticalObjectComparison(masm, &slow, cc);
1090
1091  // If either is a Smi (we know that not both are), then they can only
1092  // be strictly equal if the other is a HeapNumber.
1093  STATIC_ASSERT(kSmiTag == 0);
1094  ASSERT_EQ(0, Smi::FromInt(0));
1095  __ And(t2, lhs, Operand(rhs));
1096  __ JumpIfNotSmi(t2, &not_smis, t0);
1097  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1098  // 1) Return the answer.
1099  // 2) Go to slow.
1100  // 3) Fall through to both_loaded_as_doubles.
1101  // 4) Jump to rhs_not_nan.
1102  // In cases 3 and 4 we have found out we were dealing with a number-number
1103  // comparison and the numbers have been loaded into f12 and f14 as doubles,
1104  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1105  EmitSmiNonsmiComparison(masm, lhs, rhs,
1106                          &both_loaded_as_doubles, &slow, strict());
1107
1108  __ bind(&both_loaded_as_doubles);
1109  // f12, f14 are the double representations of the left hand side
1110  // and the right hand side if we have FPU. Otherwise a2, a3 represent
1111  // left hand side and a0, a1 represent right hand side.
1112
1113  Isolate* isolate = masm->isolate();
1114  Label nan;
1115  __ li(t0, Operand(LESS));
1116  __ li(t1, Operand(GREATER));
1117  __ li(t2, Operand(EQUAL));
1118
1119  // Check if either rhs or lhs is NaN.
1120  __ BranchF(NULL, &nan, eq, f12, f14);
1121
1122  // Check if LESS condition is satisfied. If true, move conditionally
1123  // result to v0.
1124  __ c(OLT, D, f12, f14);
1125  __ Movt(v0, t0);
1126  // Use previous check to store conditionally to v0 oposite condition
1127  // (GREATER). If rhs is equal to lhs, this will be corrected in next
1128  // check.
1129  __ Movf(v0, t1);
1130  // Check if EQUAL condition is satisfied. If true, move conditionally
1131  // result to v0.
1132  __ c(EQ, D, f12, f14);
1133  __ Movt(v0, t2);
1134
1135  __ Ret();
1136
1137  __ bind(&nan);
1138  // NaN comparisons always fail.
1139  // Load whatever we need in v0 to make the comparison fail.
1140  ASSERT(is_int16(GREATER) && is_int16(LESS));
1141  __ Ret(USE_DELAY_SLOT);
1142  if (cc == lt || cc == le) {
1143    __ li(v0, Operand(GREATER));
1144  } else {
1145    __ li(v0, Operand(LESS));
1146  }
1147
1148
1149  __ bind(&not_smis);
1150  // At this point we know we are dealing with two different objects,
1151  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1152  if (strict()) {
1153    // This returns non-equal for some object types, or falls through if it
1154    // was not lucky.
1155    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1156  }
1157
1158  Label check_for_internalized_strings;
1159  Label flat_string_check;
1160  // Check for heap-number-heap-number comparison. Can jump to slow case,
1161  // or load both doubles and jump to the code that handles
1162  // that case. If the inputs are not doubles then jumps to
1163  // check_for_internalized_strings.
1164  // In this case a2 will contain the type of lhs_.
1165  EmitCheckForTwoHeapNumbers(masm,
1166                             lhs,
1167                             rhs,
1168                             &both_loaded_as_doubles,
1169                             &check_for_internalized_strings,
1170                             &flat_string_check);
1171
1172  __ bind(&check_for_internalized_strings);
1173  if (cc == eq && !strict()) {
1174    // Returns an answer for two internalized strings or two
1175    // detectable objects.
1176    // Otherwise jumps to string case or not both strings case.
1177    // Assumes that a2 is the type of lhs_ on entry.
1178    EmitCheckForInternalizedStringsOrObjects(
1179        masm, lhs, rhs, &flat_string_check, &slow);
1180  }
1181
1182  // Check for both being sequential ASCII strings, and inline if that is the
1183  // case.
1184  __ bind(&flat_string_check);
1185
1186  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
1187
1188  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1189  if (cc == eq) {
1190    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1191                                                     lhs,
1192                                                     rhs,
1193                                                     a2,
1194                                                     a3,
1195                                                     t0);
1196  } else {
1197    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1198                                                       lhs,
1199                                                       rhs,
1200                                                       a2,
1201                                                       a3,
1202                                                       t0,
1203                                                       t1);
1204  }
1205  // Never falls through to here.
1206
1207  __ bind(&slow);
1208  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1209  // a1 (rhs) second.
1210  __ Push(lhs, rhs);
1211  // Figure out which native to call and setup the arguments.
1212  Builtins::JavaScript native;
1213  if (cc == eq) {
1214    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1215  } else {
1216    native = Builtins::COMPARE;
1217    int ncr;  // NaN compare result.
1218    if (cc == lt || cc == le) {
1219      ncr = GREATER;
1220    } else {
1221      ASSERT(cc == gt || cc == ge);  // Remaining cases.
1222      ncr = LESS;
1223    }
1224    __ li(a0, Operand(Smi::FromInt(ncr)));
1225    __ push(a0);
1226  }
1227
1228  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1229  // tagged as a small integer.
1230  __ InvokeBuiltin(native, JUMP_FUNCTION);
1231
1232  __ bind(&miss);
1233  GenerateMiss(masm);
1234}
1235
1236
1237void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1238  // We don't allow a GC during a store buffer overflow so there is no need to
1239  // store the registers in any particular way, but we do have to store and
1240  // restore them.
1241  __ MultiPush(kJSCallerSaved | ra.bit());
1242  if (save_doubles_ == kSaveFPRegs) {
1243    __ MultiPushFPU(kCallerSavedFPU);
1244  }
1245  const int argument_count = 1;
1246  const int fp_argument_count = 0;
1247  const Register scratch = a1;
1248
1249  AllowExternalCallThatCantCauseGC scope(masm);
1250  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1251  __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
1252  __ CallCFunction(
1253      ExternalReference::store_buffer_overflow_function(masm->isolate()),
1254      argument_count);
1255  if (save_doubles_ == kSaveFPRegs) {
1256    __ MultiPopFPU(kCallerSavedFPU);
1257  }
1258
1259  __ MultiPop(kJSCallerSaved | ra.bit());
1260  __ Ret();
1261}
1262
1263
1264void BinaryOpICStub::InitializeInterfaceDescriptor(
1265    Isolate* isolate,
1266    CodeStubInterfaceDescriptor* descriptor) {
1267  static Register registers[] = { a1, a0 };
1268  descriptor->register_param_count_ = 2;
1269  descriptor->register_params_ = registers;
1270  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
1271  descriptor->SetMissHandler(
1272      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
1273}
1274
1275
1276void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1277  // Untagged case: double input in f4, double result goes
1278  //   into f4.
1279  // Tagged case: tagged input on top of stack and in a0,
1280  //   tagged result (heap number) goes into v0.
1281
1282  Label input_not_smi;
1283  Label loaded;
1284  Label calculate;
1285  Label invalid_cache;
1286  const Register scratch0 = t5;
1287  const Register scratch1 = t3;
1288  const Register cache_entry = a0;
1289  const bool tagged = (argument_type_ == TAGGED);
1290
1291  if (tagged) {
1292    // Argument is a number and is on stack and in a0.
1293    // Load argument and check if it is a smi.
1294    __ JumpIfNotSmi(a0, &input_not_smi);
1295
1296    // Input is a smi. Convert to double and load the low and high words
1297    // of the double into a2, a3.
1298    __ sra(t0, a0, kSmiTagSize);
1299    __ mtc1(t0, f4);
1300    __ cvt_d_w(f4, f4);
1301    __ Move(a2, a3, f4);
1302    __ Branch(&loaded);
1303
1304    __ bind(&input_not_smi);
1305    // Check if input is a HeapNumber.
1306    __ CheckMap(a0,
1307                a1,
1308                Heap::kHeapNumberMapRootIndex,
1309                &calculate,
1310                DONT_DO_SMI_CHECK);
1311    // Input is a HeapNumber. Store the
1312    // low and high words into a2, a3.
1313    __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
1314    __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
1315  } else {
1316    // Input is untagged double in f4. Output goes to f4.
1317    __ Move(a2, a3, f4);
1318  }
1319  __ bind(&loaded);
1320  // a2 = low 32 bits of double value.
1321  // a3 = high 32 bits of double value.
1322  // Compute hash (the shifts are arithmetic):
1323  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
1324  __ Xor(a1, a2, a3);
1325  __ sra(t0, a1, 16);
1326  __ Xor(a1, a1, t0);
1327  __ sra(t0, a1, 8);
1328  __ Xor(a1, a1, t0);
1329  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
1330  __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
1331
1332  // a2 = low 32 bits of double value.
1333  // a3 = high 32 bits of double value.
1334  // a1 = TranscendentalCache::hash(double value).
1335  __ li(cache_entry, Operand(
1336      ExternalReference::transcendental_cache_array_address(
1337          masm->isolate())));
1338  // a0 points to cache array.
1339  __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
1340      Isolate::Current()->transcendental_cache()->caches_[0])));
1341  // a0 points to the cache for the type type_.
1342  // If NULL, the cache hasn't been initialized yet, so go through runtime.
1343  __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
1344
1345#ifdef DEBUG
1346  // Check that the layout of cache elements match expectations.
1347  { TranscendentalCache::SubCache::Element test_elem[2];
1348    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
1349    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
1350    char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
1351    char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
1352    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
1353    CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
1354    CHECK_EQ(0, elem_in0 - elem_start);
1355    CHECK_EQ(kIntSize, elem_in1 - elem_start);
1356    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
1357  }
1358#endif
1359
1360  // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
1361  __ sll(t0, a1, 1);
1362  __ Addu(a1, a1, t0);
1363  __ sll(t0, a1, 2);
1364  __ Addu(cache_entry, cache_entry, t0);
1365
1366  // Check if cache matches: Double value is stored in uint32_t[2] array.
1367  __ lw(t0, MemOperand(cache_entry, 0));
1368  __ lw(t1, MemOperand(cache_entry, 4));
1369  __ lw(t2, MemOperand(cache_entry, 8));
1370  __ Branch(&calculate, ne, a2, Operand(t0));
1371  __ Branch(&calculate, ne, a3, Operand(t1));
1372  // Cache hit. Load result, cleanup and return.
1373  Counters* counters = masm->isolate()->counters();
1374  __ IncrementCounter(
1375      counters->transcendental_cache_hit(), 1, scratch0, scratch1);
1376  if (tagged) {
1377    // Pop input value from stack and load result into v0.
1378    __ Drop(1);
1379    __ mov(v0, t2);
1380  } else {
1381    // Load result into f4.
1382    __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
1383  }
1384  __ Ret();
1385
1386  __ bind(&calculate);
1387  __ IncrementCounter(
1388      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
1389  if (tagged) {
1390    __ bind(&invalid_cache);
1391    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
1392                                                   masm->isolate()),
1393                                 1,
1394                                 1);
1395  } else {
1396    Label no_update;
1397    Label skip_cache;
1398
1399    // Call C function to calculate the result and update the cache.
1400    // a0: precalculated cache entry address.
1401    // a2 and a3: parts of the double value.
1402    // Store a0, a2 and a3 on stack for later before calling C function.
1403    __ Push(a3, a2, cache_entry);
1404    GenerateCallCFunction(masm, scratch0);
1405    __ GetCFunctionDoubleResult(f4);
1406
1407    // Try to update the cache. If we cannot allocate a
1408    // heap number, we return the result without updating.
1409    __ Pop(a3, a2, cache_entry);
1410    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
1411    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
1412    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
1413
1414    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
1415    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
1416    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
1417
1418    __ Ret(USE_DELAY_SLOT);
1419    __ mov(v0, cache_entry);
1420
1421    __ bind(&invalid_cache);
1422    // The cache is invalid. Call runtime which will recreate the
1423    // cache.
1424    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
1425    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
1426    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
1427    {
1428      FrameScope scope(masm, StackFrame::INTERNAL);
1429      __ push(a0);
1430      __ CallRuntime(RuntimeFunction(), 1);
1431    }
1432    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
1433    __ Ret();
1434
1435    __ bind(&skip_cache);
1436    // Call C function to calculate the result and answer directly
1437    // without updating the cache.
1438    GenerateCallCFunction(masm, scratch0);
1439    __ GetCFunctionDoubleResult(f4);
1440    __ bind(&no_update);
1441
1442    // We return the value in f4 without adding it to the cache, but
1443    // we cause a scavenging GC so that future allocations will succeed.
1444    {
1445      FrameScope scope(masm, StackFrame::INTERNAL);
1446
1447      // Allocate an aligned object larger than a HeapNumber.
1448      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
1449      __ li(scratch0, Operand(4 * kPointerSize));
1450      __ push(scratch0);
1451      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
1452    }
1453    __ Ret();
1454  }
1455}
1456
1457
1458void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
1459                                                    Register scratch) {
1460  __ push(ra);
1461  __ PrepareCallCFunction(2, scratch);
1462  if (IsMipsSoftFloatABI) {
1463    __ Move(a0, a1, f4);
1464  } else {
1465    __ mov_d(f12, f4);
1466  }
1467  AllowExternalCallThatCantCauseGC scope(masm);
1468  Isolate* isolate = masm->isolate();
1469  switch (type_) {
1470    case TranscendentalCache::SIN:
1471      __ CallCFunction(
1472          ExternalReference::math_sin_double_function(isolate),
1473          0, 1);
1474      break;
1475    case TranscendentalCache::COS:
1476      __ CallCFunction(
1477          ExternalReference::math_cos_double_function(isolate),
1478          0, 1);
1479      break;
1480    case TranscendentalCache::TAN:
1481      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
1482          0, 1);
1483      break;
1484    case TranscendentalCache::LOG:
1485      __ CallCFunction(
1486          ExternalReference::math_log_double_function(isolate),
1487          0, 1);
1488      break;
1489    default:
1490      UNIMPLEMENTED();
1491      break;
1492  }
1493  __ pop(ra);
1494}
1495
1496
1497Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1498  switch (type_) {
1499    // Add more cases when necessary.
1500    case TranscendentalCache::SIN: return Runtime::kMath_sin;
1501    case TranscendentalCache::COS: return Runtime::kMath_cos;
1502    case TranscendentalCache::TAN: return Runtime::kMath_tan;
1503    case TranscendentalCache::LOG: return Runtime::kMath_log;
1504    default:
1505      UNIMPLEMENTED();
1506      return Runtime::kAbort;
1507  }
1508}
1509
1510
1511void MathPowStub::Generate(MacroAssembler* masm) {
1512  const Register base = a1;
1513  const Register exponent = a2;
1514  const Register heapnumbermap = t1;
1515  const Register heapnumber = v0;
1516  const DoubleRegister double_base = f2;
1517  const DoubleRegister double_exponent = f4;
1518  const DoubleRegister double_result = f0;
1519  const DoubleRegister double_scratch = f6;
1520  const FPURegister single_scratch = f8;
1521  const Register scratch = t5;
1522  const Register scratch2 = t3;
1523
1524  Label call_runtime, done, int_exponent;
1525  if (exponent_type_ == ON_STACK) {
1526    Label base_is_smi, unpack_exponent;
1527    // The exponent and base are supplied as arguments on the stack.
1528    // This can only happen if the stub is called from non-optimized code.
1529    // Load input parameters from stack to double registers.
1530    __ lw(base, MemOperand(sp, 1 * kPointerSize));
1531    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
1532
1533    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1534
1535    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1536    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1537    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1538
1539    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1540    __ jmp(&unpack_exponent);
1541
1542    __ bind(&base_is_smi);
1543    __ mtc1(scratch, single_scratch);
1544    __ cvt_d_w(double_base, single_scratch);
1545    __ bind(&unpack_exponent);
1546
1547    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1548
1549    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1550    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1551    __ ldc1(double_exponent,
1552            FieldMemOperand(exponent, HeapNumber::kValueOffset));
1553  } else if (exponent_type_ == TAGGED) {
1554    // Base is already in double_base.
1555    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1556
1557    __ ldc1(double_exponent,
1558            FieldMemOperand(exponent, HeapNumber::kValueOffset));
1559  }
1560
1561  if (exponent_type_ != INTEGER) {
1562    Label int_exponent_convert;
1563    // Detect integer exponents stored as double.
1564    __ EmitFPUTruncate(kRoundToMinusInf,
1565                       scratch,
1566                       double_exponent,
1567                       at,
1568                       double_scratch,
1569                       scratch2,
1570                       kCheckForInexactConversion);
1571    // scratch2 == 0 means there was no conversion error.
1572    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
1573
1574    if (exponent_type_ == ON_STACK) {
1575      // Detect square root case.  Crankshaft detects constant +/-0.5 at
1576      // compile time and uses DoMathPowHalf instead.  We then skip this check
1577      // for non-constant cases of +/-0.5 as these hardly occur.
1578      Label not_plus_half;
1579
1580      // Test for 0.5.
1581      __ Move(double_scratch, 0.5);
1582      __ BranchF(USE_DELAY_SLOT,
1583                 &not_plus_half,
1584                 NULL,
1585                 ne,
1586                 double_exponent,
1587                 double_scratch);
1588      // double_scratch can be overwritten in the delay slot.
1589      // Calculates square root of base.  Check for the special case of
1590      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1591      __ Move(double_scratch, -V8_INFINITY);
1592      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1593      __ neg_d(double_result, double_scratch);
1594
1595      // Add +0 to convert -0 to +0.
1596      __ add_d(double_scratch, double_base, kDoubleRegZero);
1597      __ sqrt_d(double_result, double_scratch);
1598      __ jmp(&done);
1599
1600      __ bind(&not_plus_half);
1601      __ Move(double_scratch, -0.5);
1602      __ BranchF(USE_DELAY_SLOT,
1603                 &call_runtime,
1604                 NULL,
1605                 ne,
1606                 double_exponent,
1607                 double_scratch);
1608      // double_scratch can be overwritten in the delay slot.
1609      // Calculates square root of base.  Check for the special case of
1610      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1611      __ Move(double_scratch, -V8_INFINITY);
1612      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1613      __ Move(double_result, kDoubleRegZero);
1614
1615      // Add +0 to convert -0 to +0.
1616      __ add_d(double_scratch, double_base, kDoubleRegZero);
1617      __ Move(double_result, 1);
1618      __ sqrt_d(double_scratch, double_scratch);
1619      __ div_d(double_result, double_result, double_scratch);
1620      __ jmp(&done);
1621    }
1622
1623    __ push(ra);
1624    {
1625      AllowExternalCallThatCantCauseGC scope(masm);
1626      __ PrepareCallCFunction(0, 2, scratch2);
1627      __ SetCallCDoubleArguments(double_base, double_exponent);
1628      __ CallCFunction(
1629          ExternalReference::power_double_double_function(masm->isolate()),
1630          0, 2);
1631    }
1632    __ pop(ra);
1633    __ GetCFunctionDoubleResult(double_result);
1634    __ jmp(&done);
1635
1636    __ bind(&int_exponent_convert);
1637  }
1638
1639  // Calculate power with integer exponent.
1640  __ bind(&int_exponent);
1641
1642  // Get two copies of exponent in the registers scratch and exponent.
1643  if (exponent_type_ == INTEGER) {
1644    __ mov(scratch, exponent);
1645  } else {
1646    // Exponent has previously been stored into scratch as untagged integer.
1647    __ mov(exponent, scratch);
1648  }
1649
1650  __ mov_d(double_scratch, double_base);  // Back up base.
1651  __ Move(double_result, 1.0);
1652
1653  // Get absolute value of exponent.
1654  Label positive_exponent;
1655  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
1656  __ Subu(scratch, zero_reg, scratch);
1657  __ bind(&positive_exponent);
1658
1659  Label while_true, no_carry, loop_end;
1660  __ bind(&while_true);
1661
1662  __ And(scratch2, scratch, 1);
1663
1664  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
1665  __ mul_d(double_result, double_result, double_scratch);
1666  __ bind(&no_carry);
1667
1668  __ sra(scratch, scratch, 1);
1669
1670  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
1671  __ mul_d(double_scratch, double_scratch, double_scratch);
1672
1673  __ Branch(&while_true);
1674
1675  __ bind(&loop_end);
1676
1677  __ Branch(&done, ge, exponent, Operand(zero_reg));
1678  __ Move(double_scratch, 1.0);
1679  __ div_d(double_result, double_scratch, double_result);
1680  // Test whether result is zero.  Bail out to check for subnormal result.
1681  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1682  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1683
1684  // double_exponent may not contain the exponent value if the input was a
1685  // smi.  We set it with exponent value before bailing out.
1686  __ mtc1(exponent, single_scratch);
1687  __ cvt_d_w(double_exponent, single_scratch);
1688
1689  // Returning or bailing out.
1690  Counters* counters = masm->isolate()->counters();
1691  if (exponent_type_ == ON_STACK) {
1692    // The arguments are still on the stack.
1693    __ bind(&call_runtime);
1694    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1695
1696    // The stub is called from non-optimized code, which expects the result
1697    // as heap number in exponent.
1698    __ bind(&done);
1699    __ AllocateHeapNumber(
1700        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1701    __ sdc1(double_result,
1702            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1703    ASSERT(heapnumber.is(v0));
1704    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1705    __ DropAndRet(2);
1706  } else {
1707    __ push(ra);
1708    {
1709      AllowExternalCallThatCantCauseGC scope(masm);
1710      __ PrepareCallCFunction(0, 2, scratch);
1711      __ SetCallCDoubleArguments(double_base, double_exponent);
1712      __ CallCFunction(
1713          ExternalReference::power_double_double_function(masm->isolate()),
1714          0, 2);
1715    }
1716    __ pop(ra);
1717    __ GetCFunctionDoubleResult(double_result);
1718
1719    __ bind(&done);
1720    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1721    __ Ret();
1722  }
1723}
1724
1725
1726bool CEntryStub::NeedsImmovableCode() {
1727  return true;
1728}
1729
1730
1731void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
1732  CEntryStub::GenerateAheadOfTime(isolate);
1733  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
1734  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
1735  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
1736  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
1737  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
1738  BinaryOpICStub::GenerateAheadOfTime(isolate);
1739}
1740
1741
1742void CodeStub::GenerateFPStubs(Isolate* isolate) {
1743  SaveFPRegsMode mode = kSaveFPRegs;
1744  CEntryStub save_doubles(1, mode);
1745  StoreBufferOverflowStub stub(mode);
1746  // These stubs might already be in the snapshot, detect that and don't
1747  // regenerate, which would lead to code stub initialization state being messed
1748  // up.
1749  Code* save_doubles_code;
1750  if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
1751    save_doubles_code = *save_doubles.GetCode(isolate);
1752  }
1753  Code* store_buffer_overflow_code;
1754  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
1755      store_buffer_overflow_code = *stub.GetCode(isolate);
1756  }
1757  isolate->set_fp_stubs_generated(true);
1758}
1759
1760
1761void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
1762  CEntryStub stub(1, kDontSaveFPRegs);
1763  stub.GetCode(isolate);
1764}
1765
1766
1767static void JumpIfOOM(MacroAssembler* masm,
1768                      Register value,
1769                      Register scratch,
1770                      Label* oom_label) {
1771  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
1772  STATIC_ASSERT(kFailureTag == 3);
1773  __ andi(scratch, value, 0xf);
1774  __ Branch(oom_label, eq, scratch, Operand(0xf));
1775}
1776
1777
1778void CEntryStub::GenerateCore(MacroAssembler* masm,
1779                              Label* throw_normal_exception,
1780                              Label* throw_termination_exception,
1781                              Label* throw_out_of_memory_exception,
1782                              bool do_gc,
1783                              bool always_allocate) {
1784  // v0: result parameter for PerformGC, if any
1785  // s0: number of arguments including receiver (C callee-saved)
1786  // s1: pointer to the first argument          (C callee-saved)
1787  // s2: pointer to builtin function            (C callee-saved)
1788
1789  Isolate* isolate = masm->isolate();
1790
1791  if (do_gc) {
1792    // Move result passed in v0 into a0 to call PerformGC.
1793    __ mov(a0, v0);
1794    __ PrepareCallCFunction(2, 0, a1);
1795    __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
1796    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
1797  }
1798
1799  ExternalReference scope_depth =
1800      ExternalReference::heap_always_allocate_scope_depth(isolate);
1801  if (always_allocate) {
1802    __ li(a0, Operand(scope_depth));
1803    __ lw(a1, MemOperand(a0));
1804    __ Addu(a1, a1, Operand(1));
1805    __ sw(a1, MemOperand(a0));
1806  }
1807
1808  // Prepare arguments for C routine.
1809  // a0 = argc
1810  __ mov(a0, s0);
1811  // a1 = argv (set in the delay slot after find_ra below).
1812
1813  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1814  // also need to reserve the 4 argument slots on the stack.
1815
1816  __ AssertStackIsAligned();
1817
1818  __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
1819
1820  // To let the GC traverse the return address of the exit frames, we need to
1821  // know where the return address is. The CEntryStub is unmovable, so
1822  // we can store the address on the stack to be able to find it again and
1823  // we never have to restore it, because it will not change.
1824  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1825    // This branch-and-link sequence is needed to find the current PC on mips,
1826    // saved to the ra register.
1827    // Use masm-> here instead of the double-underscore macro since extra
1828    // coverage code can interfere with the proper calculation of ra.
1829    Label find_ra;
1830    masm->bal(&find_ra);  // bal exposes branch delay slot.
1831    masm->mov(a1, s1);
1832    masm->bind(&find_ra);
1833
1834    // Adjust the value in ra to point to the correct return location, 2nd
1835    // instruction past the real call into C code (the jalr(t9)), and push it.
1836    // This is the return address of the exit frame.
1837    const int kNumInstructionsToJump = 5;
1838    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1839    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
1840    // Stack space reservation moved to the branch delay slot below.
1841    // Stack is still aligned.
1842
1843    // Call the C routine.
1844    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
1845    masm->jalr(t9);
1846    // Set up sp in the delay slot.
1847    masm->addiu(sp, sp, -kCArgsSlotsSize);
1848    // Make sure the stored 'ra' points to this position.
1849    ASSERT_EQ(kNumInstructionsToJump,
1850              masm->InstructionsGeneratedSince(&find_ra));
1851  }
1852
1853  if (always_allocate) {
1854    // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
1855    __ li(a2, Operand(scope_depth));
1856    __ lw(a3, MemOperand(a2));
1857    __ Subu(a3, a3, Operand(1));
1858    __ sw(a3, MemOperand(a2));
1859  }
1860
1861  // Check for failure result.
1862  Label failure_returned;
1863  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
1864  __ addiu(a2, v0, 1);
1865  __ andi(t0, a2, kFailureTagMask);
1866  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
1867  // Restore stack (remove arg slots) in branch delay slot.
1868  __ addiu(sp, sp, kCArgsSlotsSize);
1869
1870
1871  // Exit C frame and return.
1872  // v0:v1: result
1873  // sp: stack pointer
1874  // fp: frame pointer
1875  __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
1876
1877  // Check if we should retry or throw exception.
1878  Label retry;
1879  __ bind(&failure_returned);
1880  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
1881  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
1882  __ Branch(&retry, eq, t0, Operand(zero_reg));
1883
1884  // Special handling of out of memory exceptions.
1885  JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
1886
1887  // Retrieve the pending exception.
1888  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1889                                      isolate)));
1890  __ lw(v0, MemOperand(t0));
1891
1892  // See if we just retrieved an OOM exception.
1893  JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
1894
1895  // Clear the pending exception.
1896  __ li(a3, Operand(isolate->factory()->the_hole_value()));
1897  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1898                                      isolate)));
1899  __ sw(a3, MemOperand(t0));
1900
1901  // Special handling of termination exceptions which are uncatchable
1902  // by javascript code.
1903  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1904  __ Branch(throw_termination_exception, eq, v0, Operand(t0));
1905
1906  // Handle normal exception.
1907  __ jmp(throw_normal_exception);
1908
1909  __ bind(&retry);
1910  // Last failure (v0) will be moved to (a0) for parameter when retrying.
1911}
1912
1913
1914void CEntryStub::Generate(MacroAssembler* masm) {
1915  // Called from JavaScript; parameters are on stack as if calling JS function
1916  // s0: number of arguments including receiver
1917  // s1: size of arguments excluding receiver
1918  // s2: pointer to builtin function
1919  // fp: frame pointer    (restored after C call)
1920  // sp: stack pointer    (restored as callee's sp after C call)
1921  // cp: current context  (C callee-saved)
1922
1923  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1924
1925  // NOTE: Invocations of builtins may return failure objects
1926  // instead of a proper result. The builtin entry handles
1927  // this by performing a garbage collection and retrying the
1928  // builtin once.
1929
1930  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1931  // The reason for this is that these arguments would need to be saved anyway
1932  // so it's faster to set them up directly.
1933  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1934
1935  // Compute the argv pointer in a callee-saved register.
1936  __ Addu(s1, sp, s1);
1937
1938  // Enter the exit frame that transitions from JavaScript to C++.
1939  FrameScope scope(masm, StackFrame::MANUAL);
1940  __ EnterExitFrame(save_doubles_);
1941
1942  // s0: number of arguments (C callee-saved)
1943  // s1: pointer to first argument (C callee-saved)
1944  // s2: pointer to builtin function (C callee-saved)
1945
1946  Label throw_normal_exception;
1947  Label throw_termination_exception;
1948  Label throw_out_of_memory_exception;
1949
1950  // Call into the runtime system.
1951  GenerateCore(masm,
1952               &throw_normal_exception,
1953               &throw_termination_exception,
1954               &throw_out_of_memory_exception,
1955               false,
1956               false);
1957
1958  // Do space-specific GC and retry runtime call.
1959  GenerateCore(masm,
1960               &throw_normal_exception,
1961               &throw_termination_exception,
1962               &throw_out_of_memory_exception,
1963               true,
1964               false);
1965
1966  // Do full GC and retry runtime call one final time.
1967  Failure* failure = Failure::InternalError();
1968  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
1969  GenerateCore(masm,
1970               &throw_normal_exception,
1971               &throw_termination_exception,
1972               &throw_out_of_memory_exception,
1973               true,
1974               true);
1975
1976  __ bind(&throw_out_of_memory_exception);
1977  // Set external caught exception to false.
1978  Isolate* isolate = masm->isolate();
1979  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
1980                                    isolate);
1981  __ li(a0, Operand(false, RelocInfo::NONE32));
1982  __ li(a2, Operand(external_caught));
1983  __ sw(a0, MemOperand(a2));
1984
1985  // Set pending exception and v0 to out of memory exception.
1986  Label already_have_failure;
1987  JumpIfOOM(masm, v0, t0, &already_have_failure);
1988  Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
1989  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
1990  __ bind(&already_have_failure);
1991  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1992                                      isolate)));
1993  __ sw(v0, MemOperand(a2));
1994  // Fall through to the next label.
1995
1996  __ bind(&throw_termination_exception);
1997  __ ThrowUncatchable(v0);
1998
1999  __ bind(&throw_normal_exception);
2000  __ Throw(v0);
2001}
2002
2003
2004void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
2005  Label invoke, handler_entry, exit;
2006  Isolate* isolate = masm->isolate();
2007
2008  // Registers:
2009  // a0: entry address
2010  // a1: function
2011  // a2: receiver
2012  // a3: argc
2013  //
2014  // Stack:
2015  // 4 args slots
2016  // args
2017
2018  ProfileEntryHookStub::MaybeCallEntryHook(masm);
2019
2020  // Save callee saved registers on the stack.
2021  __ MultiPush(kCalleeSaved | ra.bit());
2022
2023  // Save callee-saved FPU registers.
2024  __ MultiPushFPU(kCalleeSavedFPU);
2025  // Set up the reserved register for 0.0.
2026  __ Move(kDoubleRegZero, 0.0);
2027
2028
2029  // Load argv in s0 register.
2030  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
2031  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
2032
2033  __ InitializeRootRegister();
2034  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
2035
2036  // We build an EntryFrame.
2037  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
2038  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
2039  __ li(t2, Operand(Smi::FromInt(marker)));
2040  __ li(t1, Operand(Smi::FromInt(marker)));
2041  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2042                                      isolate)));
2043  __ lw(t0, MemOperand(t0));
2044  __ Push(t3, t2, t1, t0);
2045  // Set up frame pointer for the frame to be pushed.
2046  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
2047
2048  // Registers:
2049  // a0: entry_address
2050  // a1: function
2051  // a2: receiver_pointer
2052  // a3: argc
2053  // s0: argv
2054  //
2055  // Stack:
2056  // caller fp          |
2057  // function slot      | entry frame
2058  // context slot       |
2059  // bad fp (0xff...f)  |
2060  // callee saved registers + ra
2061  // 4 args slots
2062  // args
2063
2064  // If this is the outermost JS call, set js_entry_sp value.
2065  Label non_outermost_js;
2066  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
2067  __ li(t1, Operand(ExternalReference(js_entry_sp)));
2068  __ lw(t2, MemOperand(t1));
2069  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
2070  __ sw(fp, MemOperand(t1));
2071  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
2072  Label cont;
2073  __ b(&cont);
2074  __ nop();   // Branch delay slot nop.
2075  __ bind(&non_outermost_js);
2076  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
2077  __ bind(&cont);
2078  __ push(t0);
2079
2080  // Jump to a faked try block that does the invoke, with a faked catch
2081  // block that sets the pending exception.
2082  __ jmp(&invoke);
2083  __ bind(&handler_entry);
2084  handler_offset_ = handler_entry.pos();
2085  // Caught exception: Store result (exception) in the pending exception
2086  // field in the JSEnv and return a failure sentinel.  Coming in here the
2087  // fp will be invalid because the PushTryHandler below sets it to 0 to
2088  // signal the existence of the JSEntry frame.
2089  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2090                                      isolate)));
2091  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
2092  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
2093  __ b(&exit);  // b exposes branch delay slot.
2094  __ nop();   // Branch delay slot nop.
2095
2096  // Invoke: Link this frame into the handler chain.  There's only one
2097  // handler block in this code object, so its index is 0.
2098  __ bind(&invoke);
2099  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
2100  // If an exception not caught by another handler occurs, this handler
2101  // returns control to the code after the bal(&invoke) above, which
2102  // restores all kCalleeSaved registers (including cp and fp) to their
2103  // saved values before returning a failure to C.
2104
2105  // Clear any pending exceptions.
2106  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
2107  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2108                                      isolate)));
2109  __ sw(t1, MemOperand(t0));
2110
2111  // Invoke the function by calling through JS entry trampoline builtin.
2112  // Notice that we cannot store a reference to the trampoline code directly in
2113  // this stub, because runtime stubs are not traversed when doing GC.
2114
2115  // Registers:
2116  // a0: entry_address
2117  // a1: function
2118  // a2: receiver_pointer
2119  // a3: argc
2120  // s0: argv
2121  //
2122  // Stack:
2123  // handler frame
2124  // entry frame
2125  // callee saved registers + ra
2126  // 4 args slots
2127  // args
2128
2129  if (is_construct) {
2130    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
2131                                      isolate);
2132    __ li(t0, Operand(construct_entry));
2133  } else {
2134    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
2135    __ li(t0, Operand(entry));
2136  }
2137  __ lw(t9, MemOperand(t0));  // Deref address.
2138
2139  // Call JSEntryTrampoline.
2140  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
2141  __ Call(t9);
2142
2143  // Unlink this frame from the handler chain.
2144  __ PopTryHandler();
2145
2146  __ bind(&exit);  // v0 holds result
2147  // Check if the current stack frame is marked as the outermost JS frame.
2148  Label non_outermost_js_2;
2149  __ pop(t1);
2150  __ Branch(&non_outermost_js_2,
2151            ne,
2152            t1,
2153            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
2154  __ li(t1, Operand(ExternalReference(js_entry_sp)));
2155  __ sw(zero_reg, MemOperand(t1));
2156  __ bind(&non_outermost_js_2);
2157
2158  // Restore the top frame descriptors from the stack.
2159  __ pop(t1);
2160  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2161                                      isolate)));
2162  __ sw(t1, MemOperand(t0));
2163
2164  // Reset the stack to the callee saved registers.
2165  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
2166
2167  // Restore callee-saved fpu registers.
2168  __ MultiPopFPU(kCalleeSavedFPU);
2169
2170  // Restore callee saved registers from the stack.
2171  __ MultiPop(kCalleeSaved | ra.bit());
2172  // Return.
2173  __ Jump(ra);
2174}
2175
2176
2177// Uses registers a0 to t0.
2178// Expected input (depending on whether args are in registers or on the stack):
2179// * object: a0 or at sp + 1 * kPointerSize.
2180// * function: a1 or at sp.
2181//
2182// An inlined call site may have been generated before calling this stub.
2183// In this case the offset to the inline site to patch is passed on the stack,
2184// in the safepoint slot for register t0.
2185void InstanceofStub::Generate(MacroAssembler* masm) {
2186  // Call site inlining and patching implies arguments in registers.
2187  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
2188  // ReturnTrueFalse is only implemented for inlined call sites.
2189  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
2190
2191  // Fixed register usage throughout the stub:
2192  const Register object = a0;  // Object (lhs).
2193  Register map = a3;  // Map of the object.
2194  const Register function = a1;  // Function (rhs).
2195  const Register prototype = t0;  // Prototype of the function.
2196  const Register inline_site = t5;
2197  const Register scratch = a2;
2198
2199  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
2200
2201  Label slow, loop, is_instance, is_not_instance, not_js_object;
2202
2203  if (!HasArgsInRegisters()) {
2204    __ lw(object, MemOperand(sp, 1 * kPointerSize));
2205    __ lw(function, MemOperand(sp, 0));
2206  }
2207
2208  // Check that the left hand is a JS object and load map.
2209  __ JumpIfSmi(object, &not_js_object);
2210  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
2211
2212  // If there is a call site cache don't look in the global cache, but do the
2213  // real lookup and update the call site cache.
2214  if (!HasCallSiteInlineCheck()) {
2215    Label miss;
2216    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
2217    __ Branch(&miss, ne, function, Operand(at));
2218    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
2219    __ Branch(&miss, ne, map, Operand(at));
2220    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2221    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2222
2223    __ bind(&miss);
2224  }
2225
2226  // Get the prototype of the function.
2227  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
2228
2229  // Check that the function prototype is a JS object.
2230  __ JumpIfSmi(prototype, &slow);
2231  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
2232
2233  // Update the global instanceof or call site inlined cache with the current
2234  // map and function. The cached answer will be set when it is known below.
2235  if (!HasCallSiteInlineCheck()) {
2236    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2237    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2238  } else {
2239    ASSERT(HasArgsInRegisters());
2240    // Patch the (relocated) inlined map check.
2241
2242    // The offset was stored in t0 safepoint slot.
2243    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
2244    __ LoadFromSafepointRegisterSlot(scratch, t0);
2245    __ Subu(inline_site, ra, scratch);
2246    // Get the map location in scratch and patch it.
2247    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
2248    __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
2249  }
2250
2251  // Register mapping: a3 is object map and t0 is function prototype.
2252  // Get prototype of object into a2.
2253  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
2254
2255  // We don't need map any more. Use it as a scratch register.
2256  Register scratch2 = map;
2257  map = no_reg;
2258
2259  // Loop through the prototype chain looking for the function prototype.
2260  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
2261  __ bind(&loop);
2262  __ Branch(&is_instance, eq, scratch, Operand(prototype));
2263  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
2264  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
2265  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
2266  __ Branch(&loop);
2267
2268  __ bind(&is_instance);
2269  ASSERT(Smi::FromInt(0) == 0);
2270  if (!HasCallSiteInlineCheck()) {
2271    __ mov(v0, zero_reg);
2272    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2273  } else {
2274    // Patch the call site to return true.
2275    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2276    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2277    // Get the boolean result location in scratch and patch it.
2278    __ PatchRelocatedValue(inline_site, scratch, v0);
2279
2280    if (!ReturnTrueFalseObject()) {
2281      ASSERT_EQ(Smi::FromInt(0), 0);
2282      __ mov(v0, zero_reg);
2283    }
2284  }
2285  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2286
2287  __ bind(&is_not_instance);
2288  if (!HasCallSiteInlineCheck()) {
2289    __ li(v0, Operand(Smi::FromInt(1)));
2290    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2291  } else {
2292    // Patch the call site to return false.
2293    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2294    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2295    // Get the boolean result location in scratch and patch it.
2296    __ PatchRelocatedValue(inline_site, scratch, v0);
2297
2298    if (!ReturnTrueFalseObject()) {
2299      __ li(v0, Operand(Smi::FromInt(1)));
2300    }
2301  }
2302
2303  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2304
2305  Label object_not_null, object_not_null_or_smi;
2306  __ bind(&not_js_object);
2307  // Before null, smi and string value checks, check that the rhs is a function
2308  // as for a non-function rhs an exception needs to be thrown.
2309  __ JumpIfSmi(function, &slow);
2310  __ GetObjectType(function, scratch2, scratch);
2311  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
2312
2313  // Null is not instance of anything.
2314  __ Branch(&object_not_null,
2315            ne,
2316            scratch,
2317            Operand(masm->isolate()->factory()->null_value()));
2318  __ li(v0, Operand(Smi::FromInt(1)));
2319  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2320
2321  __ bind(&object_not_null);
2322  // Smi values are not instances of anything.
2323  __ JumpIfNotSmi(object, &object_not_null_or_smi);
2324  __ li(v0, Operand(Smi::FromInt(1)));
2325  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2326
2327  __ bind(&object_not_null_or_smi);
2328  // String values are not instances of anything.
2329  __ IsObjectJSStringType(object, scratch, &slow);
2330  __ li(v0, Operand(Smi::FromInt(1)));
2331  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2332
2333  // Slow-case.  Tail call builtin.
2334  __ bind(&slow);
2335  if (!ReturnTrueFalseObject()) {
2336    if (HasArgsInRegisters()) {
2337      __ Push(a0, a1);
2338    }
2339  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2340  } else {
2341    {
2342      FrameScope scope(masm, StackFrame::INTERNAL);
2343      __ Push(a0, a1);
2344      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2345    }
2346    __ mov(a0, v0);
2347    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2348    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
2349    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2350    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2351  }
2352}
2353
2354
2355void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2356  Label miss;
2357  Register receiver;
2358  if (kind() == Code::KEYED_LOAD_IC) {
2359    // ----------- S t a t e -------------
2360    //  -- ra    : return address
2361    //  -- a0    : key
2362    //  -- a1    : receiver
2363    // -----------------------------------
2364    __ Branch(&miss, ne, a0,
2365        Operand(masm->isolate()->factory()->prototype_string()));
2366    receiver = a1;
2367  } else {
2368    ASSERT(kind() == Code::LOAD_IC);
2369    // ----------- S t a t e -------------
2370    //  -- a2    : name
2371    //  -- ra    : return address
2372    //  -- a0    : receiver
2373    //  -- sp[0] : receiver
2374    // -----------------------------------
2375    receiver = a0;
2376  }
2377
2378  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
2379  __ bind(&miss);
2380  StubCompiler::TailCallBuiltin(
2381      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2382}
2383
2384
2385void StringLengthStub::Generate(MacroAssembler* masm) {
2386  Label miss;
2387  Register receiver;
2388  if (kind() == Code::KEYED_LOAD_IC) {
2389    // ----------- S t a t e -------------
2390    //  -- ra    : return address
2391    //  -- a0    : key
2392    //  -- a1    : receiver
2393    // -----------------------------------
2394    __ Branch(&miss, ne, a0,
2395        Operand(masm->isolate()->factory()->length_string()));
2396    receiver = a1;
2397  } else {
2398    ASSERT(kind() == Code::LOAD_IC);
2399    // ----------- S t a t e -------------
2400    //  -- a2    : name
2401    //  -- ra    : return address
2402    //  -- a0    : receiver
2403    //  -- sp[0] : receiver
2404    // -----------------------------------
2405    receiver = a0;
2406  }
2407
2408  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
2409
2410  __ bind(&miss);
2411  StubCompiler::TailCallBuiltin(
2412      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2413}
2414
2415
2416void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
2417  // This accepts as a receiver anything JSArray::SetElementsLength accepts
2418  // (currently anything except for external arrays which means anything with
2419  // elements of FixedArray type).  Value must be a number, but only smis are
2420  // accepted as the most common case.
2421  Label miss;
2422
2423  Register receiver;
2424  Register value;
2425  if (kind() == Code::KEYED_STORE_IC) {
2426    // ----------- S t a t e -------------
2427    //  -- ra    : return address
2428    //  -- a0    : value
2429    //  -- a1    : key
2430    //  -- a2    : receiver
2431    // -----------------------------------
2432    __ Branch(&miss, ne, a1,
2433        Operand(masm->isolate()->factory()->length_string()));
2434    receiver = a2;
2435    value = a0;
2436  } else {
2437    ASSERT(kind() == Code::STORE_IC);
2438    // ----------- S t a t e -------------
2439    //  -- ra    : return address
2440    //  -- a0    : value
2441    //  -- a1    : receiver
2442    //  -- a2    : key
2443    // -----------------------------------
2444    receiver = a1;
2445    value = a0;
2446  }
2447  Register scratch = a3;
2448
2449  // Check that the receiver isn't a smi.
2450  __ JumpIfSmi(receiver, &miss);
2451
2452  // Check that the object is a JS array.
2453  __ GetObjectType(receiver, scratch, scratch);
2454  __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
2455
2456  // Check that elements are FixedArray.
2457  // We rely on StoreIC_ArrayLength below to deal with all types of
2458  // fast elements (including COW).
2459  __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
2460  __ GetObjectType(scratch, scratch, scratch);
2461  __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
2462
2463  // Check that the array has fast properties, otherwise the length
2464  // property might have been redefined.
2465  __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
2466  __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
2467  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
2468  __ Branch(&miss, eq, scratch, Operand(at));
2469
2470  // Check that value is a smi.
2471  __ JumpIfNotSmi(value, &miss);
2472
2473  // Prepare tail call to StoreIC_ArrayLength.
2474  __ Push(receiver, value);
2475
2476  ExternalReference ref =
2477      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
2478  __ TailCallExternalReference(ref, 2, 1);
2479
2480  __ bind(&miss);
2481
2482  StubCompiler::TailCallBuiltin(
2483      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2484}
2485
2486
2487Register InstanceofStub::left() { return a0; }
2488
2489
2490Register InstanceofStub::right() { return a1; }
2491
2492
2493void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2494  // The displacement is the offset of the last parameter (if any)
2495  // relative to the frame pointer.
2496  const int kDisplacement =
2497      StandardFrameConstants::kCallerSPOffset - kPointerSize;
2498
2499  // Check that the key is a smiGenerateReadElement.
2500  Label slow;
2501  __ JumpIfNotSmi(a1, &slow);
2502
2503  // Check if the calling frame is an arguments adaptor frame.
2504  Label adaptor;
2505  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2506  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2507  __ Branch(&adaptor,
2508            eq,
2509            a3,
2510            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2511
2512  // Check index (a1) against formal parameters count limit passed in
2513  // through register a0. Use unsigned comparison to get negative
2514  // check for free.
2515  __ Branch(&slow, hs, a1, Operand(a0));
2516
2517  // Read the argument from the stack and return it.
2518  __ subu(a3, a0, a1);
2519  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2520  __ Addu(a3, fp, Operand(t3));
2521  __ Ret(USE_DELAY_SLOT);
2522  __ lw(v0, MemOperand(a3, kDisplacement));
2523
2524  // Arguments adaptor case: Check index (a1) against actual arguments
2525  // limit found in the arguments adaptor frame. Use unsigned
2526  // comparison to get negative check for free.
2527  __ bind(&adaptor);
2528  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2529  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
2530
2531  // Read the argument from the adaptor frame and return it.
2532  __ subu(a3, a0, a1);
2533  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
2534  __ Addu(a3, a2, Operand(t3));
2535  __ Ret(USE_DELAY_SLOT);
2536  __ lw(v0, MemOperand(a3, kDisplacement));
2537
2538  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2539  // by calling the runtime system.
2540  __ bind(&slow);
2541  __ push(a1);
2542  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2543}
2544
2545
2546void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2547  // sp[0] : number of parameters
2548  // sp[4] : receiver displacement
2549  // sp[8] : function
2550  // Check if the calling frame is an arguments adaptor frame.
2551  Label runtime;
2552  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2553  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2554  __ Branch(&runtime,
2555            ne,
2556            a2,
2557            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2558
2559  // Patch the arguments.length and the parameters pointer in the current frame.
2560  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2561  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
2562  __ sll(t3, a2, 1);
2563  __ Addu(a3, a3, Operand(t3));
2564  __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
2565  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2566
2567  __ bind(&runtime);
2568  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2569}
2570
2571
2572void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2573  // Stack layout:
2574  //  sp[0] : number of parameters (tagged)
2575  //  sp[4] : address of receiver argument
2576  //  sp[8] : function
2577  // Registers used over whole function:
2578  //  t2 : allocated object (tagged)
2579  //  t5 : mapped parameter count (tagged)
2580
2581  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2582  // a1 = parameter count (tagged)
2583
2584  // Check if the calling frame is an arguments adaptor frame.
2585  Label runtime;
2586  Label adaptor_frame, try_allocate;
2587  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2588  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2589  __ Branch(&adaptor_frame,
2590            eq,
2591            a2,
2592            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2593
2594  // No adaptor, parameter count = argument count.
2595  __ mov(a2, a1);
2596  __ b(&try_allocate);
2597  __ nop();   // Branch delay slot nop.
2598
2599  // We have an adaptor frame. Patch the parameters pointer.
2600  __ bind(&adaptor_frame);
2601  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2602  __ sll(t6, a2, 1);
2603  __ Addu(a3, a3, Operand(t6));
2604  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2605  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2606
2607  // a1 = parameter count (tagged)
2608  // a2 = argument count (tagged)
2609  // Compute the mapped parameter count = min(a1, a2) in a1.
2610  Label skip_min;
2611  __ Branch(&skip_min, lt, a1, Operand(a2));
2612  __ mov(a1, a2);
2613  __ bind(&skip_min);
2614
2615  __ bind(&try_allocate);
2616
2617  // Compute the sizes of backing store, parameter map, and arguments object.
2618  // 1. Parameter map, has 2 extra words containing context and backing store.
2619  const int kParameterMapHeaderSize =
2620      FixedArray::kHeaderSize + 2 * kPointerSize;
2621  // If there are no mapped parameters, we do not need the parameter_map.
2622  Label param_map_size;
2623  ASSERT_EQ(0, Smi::FromInt(0));
2624  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
2625  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
2626  __ sll(t5, a1, 1);
2627  __ addiu(t5, t5, kParameterMapHeaderSize);
2628  __ bind(&param_map_size);
2629
2630  // 2. Backing store.
2631  __ sll(t6, a2, 1);
2632  __ Addu(t5, t5, Operand(t6));
2633  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
2634
2635  // 3. Arguments object.
2636  __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
2637
2638  // Do the allocation of all three objects in one go.
2639  __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
2640
2641  // v0 = address of new object(s) (tagged)
2642  // a2 = argument count (tagged)
2643  // Get the arguments boilerplate from the current native context into t0.
2644  const int kNormalOffset =
2645      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
2646  const int kAliasedOffset =
2647      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
2648
2649  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2650  __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2651  Label skip2_ne, skip2_eq;
2652  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
2653  __ lw(t0, MemOperand(t0, kNormalOffset));
2654  __ bind(&skip2_ne);
2655
2656  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
2657  __ lw(t0, MemOperand(t0, kAliasedOffset));
2658  __ bind(&skip2_eq);
2659
2660  // v0 = address of new object (tagged)
2661  // a1 = mapped parameter count (tagged)
2662  // a2 = argument count (tagged)
2663  // t0 = address of boilerplate object (tagged)
2664  // Copy the JS object part.
2665  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2666    __ lw(a3, FieldMemOperand(t0, i));
2667    __ sw(a3, FieldMemOperand(v0, i));
2668  }
2669
2670  // Set up the callee in-object property.
2671  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2672  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
2673  const int kCalleeOffset = JSObject::kHeaderSize +
2674      Heap::kArgumentsCalleeIndex * kPointerSize;
2675  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
2676
2677  // Use the length (smi tagged) and set that as an in-object property too.
2678  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2679  const int kLengthOffset = JSObject::kHeaderSize +
2680      Heap::kArgumentsLengthIndex * kPointerSize;
2681  __ sw(a2, FieldMemOperand(v0, kLengthOffset));
2682
2683  // Set up the elements pointer in the allocated arguments object.
2684  // If we allocated a parameter map, t0 will point there, otherwise
2685  // it will point to the backing store.
2686  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
2687  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2688
2689  // v0 = address of new object (tagged)
2690  // a1 = mapped parameter count (tagged)
2691  // a2 = argument count (tagged)
2692  // t0 = address of parameter map or backing store (tagged)
2693  // Initialize parameter map. If there are no mapped arguments, we're done.
2694  Label skip_parameter_map;
2695  Label skip3;
2696  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
2697  // Move backing store address to a3, because it is
2698  // expected there when filling in the unmapped arguments.
2699  __ mov(a3, t0);
2700  __ bind(&skip3);
2701
2702  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
2703
2704  __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
2705  __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
2706  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
2707  __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
2708  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
2709  __ sll(t6, a1, 1);
2710  __ Addu(t2, t0, Operand(t6));
2711  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
2712  __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
2713
2714  // Copy the parameter slots and the holes in the arguments.
2715  // We need to fill in mapped_parameter_count slots. They index the context,
2716  // where parameters are stored in reverse order, at
2717  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2718  // The mapped parameter thus need to get indices
2719  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
2720  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2721  // We loop from right to left.
2722  Label parameters_loop, parameters_test;
2723  __ mov(t2, a1);
2724  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
2725  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
2726  __ Subu(t5, t5, Operand(a1));
2727  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
2728  __ sll(t6, t2, 1);
2729  __ Addu(a3, t0, Operand(t6));
2730  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
2731
2732  // t2 = loop variable (tagged)
2733  // a1 = mapping index (tagged)
2734  // a3 = address of backing store (tagged)
2735  // t0 = address of parameter map (tagged)
2736  // t1 = temporary scratch (a.o., for address calculation)
2737  // t3 = the hole value
2738  __ jmp(&parameters_test);
2739
2740  __ bind(&parameters_loop);
2741  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
2742  __ sll(t1, t2, 1);
2743  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
2744  __ Addu(t6, t0, t1);
2745  __ sw(t5, MemOperand(t6));
2746  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
2747  __ Addu(t6, a3, t1);
2748  __ sw(t3, MemOperand(t6));
2749  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2750  __ bind(&parameters_test);
2751  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
2752
2753  __ bind(&skip_parameter_map);
2754  // a2 = argument count (tagged)
2755  // a3 = address of backing store (tagged)
2756  // t1 = scratch
2757  // Copy arguments header and remaining slots (if there are any).
2758  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
2759  __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
2760  __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
2761
2762  Label arguments_loop, arguments_test;
2763  __ mov(t5, a1);
2764  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
2765  __ sll(t6, t5, 1);
2766  __ Subu(t0, t0, Operand(t6));
2767  __ jmp(&arguments_test);
2768
2769  __ bind(&arguments_loop);
2770  __ Subu(t0, t0, Operand(kPointerSize));
2771  __ lw(t2, MemOperand(t0, 0));
2772  __ sll(t6, t5, 1);
2773  __ Addu(t1, a3, Operand(t6));
2774  __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
2775  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
2776
2777  __ bind(&arguments_test);
2778  __ Branch(&arguments_loop, lt, t5, Operand(a2));
2779
2780  // Return and remove the on-stack parameters.
2781  __ DropAndRet(3);
2782
2783  // Do the runtime call to allocate the arguments object.
2784  // a2 = argument count (tagged)
2785  __ bind(&runtime);
2786  __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
2787  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2788}
2789
2790
2791void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2792  // sp[0] : number of parameters
2793  // sp[4] : receiver displacement
2794  // sp[8] : function
2795  // Check if the calling frame is an arguments adaptor frame.
2796  Label adaptor_frame, try_allocate, runtime;
2797  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2798  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
2799  __ Branch(&adaptor_frame,
2800            eq,
2801            a3,
2802            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2803
2804  // Get the length from the frame.
2805  __ lw(a1, MemOperand(sp, 0));
2806  __ Branch(&try_allocate);
2807
2808  // Patch the arguments.length and the parameters pointer.
2809  __ bind(&adaptor_frame);
2810  __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
2811  __ sw(a1, MemOperand(sp, 0));
2812  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
2813  __ Addu(a3, a2, Operand(at));
2814
2815  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2816  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2817
2818  // Try the new space allocation. Start out with computing the size
2819  // of the arguments object and the elements array in words.
2820  Label add_arguments_object;
2821  __ bind(&try_allocate);
2822  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
2823  __ srl(a1, a1, kSmiTagSize);
2824
2825  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
2826  __ bind(&add_arguments_object);
2827  __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
2828
2829  // Do the allocation of both objects in one go.
2830  __ Allocate(a1, v0, a2, a3, &runtime,
2831              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
2832
2833  // Get the arguments boilerplate from the current native context.
2834  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2835  __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2836  __ lw(t0, MemOperand(t0, Context::SlotOffset(
2837      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
2838
2839  // Copy the JS object part.
2840  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
2841
2842  // Get the length (smi tagged) and set that as an in-object property too.
2843  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2844  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2845  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
2846      Heap::kArgumentsLengthIndex * kPointerSize));
2847
2848  Label done;
2849  __ Branch(&done, eq, a1, Operand(zero_reg));
2850
2851  // Get the parameters pointer from the stack.
2852  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
2853
2854  // Set up the elements pointer in the allocated arguments object and
2855  // initialize the header in the elements fixed array.
2856  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
2857  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
2858  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
2859  __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
2860  __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
2861  // Untag the length for the loop.
2862  __ srl(a1, a1, kSmiTagSize);
2863
2864  // Copy the fixed array slots.
2865  Label loop;
2866  // Set up t0 to point to the first array slot.
2867  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2868  __ bind(&loop);
2869  // Pre-decrement a2 with kPointerSize on each iteration.
2870  // Pre-decrement in order to skip receiver.
2871  __ Addu(a2, a2, Operand(-kPointerSize));
2872  __ lw(a3, MemOperand(a2));
2873  // Post-increment t0 with kPointerSize on each iteration.
2874  __ sw(a3, MemOperand(t0));
2875  __ Addu(t0, t0, Operand(kPointerSize));
2876  __ Subu(a1, a1, Operand(1));
2877  __ Branch(&loop, ne, a1, Operand(zero_reg));
2878
2879  // Return and remove the on-stack parameters.
2880  __ bind(&done);
2881  __ DropAndRet(3);
2882
2883  // Do the runtime call to allocate the arguments object.
2884  __ bind(&runtime);
2885  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2886}
2887
2888
2889void RegExpExecStub::Generate(MacroAssembler* masm) {
2890  // Just jump directly to runtime if native RegExp is not selected at compile
2891  // time or if regexp entry in generated code is turned off runtime switch or
2892  // at compilation.
2893#ifdef V8_INTERPRETED_REGEXP
2894  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2895#else  // V8_INTERPRETED_REGEXP
2896
2897  // Stack frame on entry.
2898  //  sp[0]: last_match_info (expected JSArray)
2899  //  sp[4]: previous index
2900  //  sp[8]: subject string
2901  //  sp[12]: JSRegExp object
2902
2903  const int kLastMatchInfoOffset = 0 * kPointerSize;
2904  const int kPreviousIndexOffset = 1 * kPointerSize;
2905  const int kSubjectOffset = 2 * kPointerSize;
2906  const int kJSRegExpOffset = 3 * kPointerSize;
2907
2908  Isolate* isolate = masm->isolate();
2909
2910  Label runtime;
2911  // Allocation of registers for this function. These are in callee save
2912  // registers and will be preserved by the call to the native RegExp code, as
2913  // this code is called using the normal C calling convention. When calling
2914  // directly from generated code the native RegExp code will not do a GC and
2915  // therefore the content of these registers are safe to use after the call.
2916  // MIPS - using s0..s2, since we are not using CEntry Stub.
2917  Register subject = s0;
2918  Register regexp_data = s1;
2919  Register last_match_info_elements = s2;
2920
2921  // Ensure that a RegExp stack is allocated.
2922  ExternalReference address_of_regexp_stack_memory_address =
2923      ExternalReference::address_of_regexp_stack_memory_address(
2924          isolate);
2925  ExternalReference address_of_regexp_stack_memory_size =
2926      ExternalReference::address_of_regexp_stack_memory_size(isolate);
2927  __ li(a0, Operand(address_of_regexp_stack_memory_size));
2928  __ lw(a0, MemOperand(a0, 0));
2929  __ Branch(&runtime, eq, a0, Operand(zero_reg));
2930
2931  // Check that the first argument is a JSRegExp object.
2932  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
2933  STATIC_ASSERT(kSmiTag == 0);
2934  __ JumpIfSmi(a0, &runtime);
2935  __ GetObjectType(a0, a1, a1);
2936  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
2937
2938  // Check that the RegExp has been compiled (data contains a fixed array).
2939  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
2940  if (FLAG_debug_code) {
2941    __ SmiTst(regexp_data, t0);
2942    __ Check(nz,
2943             kUnexpectedTypeForRegExpDataFixedArrayExpected,
2944             t0,
2945             Operand(zero_reg));
2946    __ GetObjectType(regexp_data, a0, a0);
2947    __ Check(eq,
2948             kUnexpectedTypeForRegExpDataFixedArrayExpected,
2949             a0,
2950             Operand(FIXED_ARRAY_TYPE));
2951  }
2952
2953  // regexp_data: RegExp data (FixedArray)
2954  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2955  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2956  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
2957
2958  // regexp_data: RegExp data (FixedArray)
2959  // Check that the number of captures fit in the static offsets vector buffer.
2960  __ lw(a2,
2961         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2962  // Check (number_of_captures + 1) * 2 <= offsets vector size
2963  // Or          number_of_captures * 2 <= offsets vector size - 2
2964  // Multiplying by 2 comes for free since a2 is smi-tagged.
2965  STATIC_ASSERT(kSmiTag == 0);
2966  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
2967  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2968  __ Branch(
2969      &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2970
2971  // Reset offset for possibly sliced string.
2972  __ mov(t0, zero_reg);
2973  __ lw(subject, MemOperand(sp, kSubjectOffset));
2974  __ JumpIfSmi(subject, &runtime);
2975  __ mov(a3, subject);  // Make a copy of the original subject string.
2976  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
2977  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
2978  // subject: subject string
2979  // a3: subject string
2980  // a0: subject string instance type
2981  // regexp_data: RegExp data (FixedArray)
2982  // Handle subject string according to its encoding and representation:
2983  // (1) Sequential string?  If yes, go to (5).
2984  // (2) Anything but sequential or cons?  If yes, go to (6).
2985  // (3) Cons string.  If the string is flat, replace subject with first string.
2986  //     Otherwise bailout.
2987  // (4) Is subject external?  If yes, go to (7).
2988  // (5) Sequential string.  Load regexp code according to encoding.
2989  // (E) Carry on.
2990  /// [...]
2991
2992  // Deferred code at the end of the stub:
2993  // (6) Not a long external string?  If yes, go to (8).
2994  // (7) External string.  Make it, offset-wise, look like a sequential string.
2995  //     Go to (5).
2996  // (8) Short external string or not a string?  If yes, bail out to runtime.
2997  // (9) Sliced string.  Replace subject with parent.  Go to (4).
2998
2999  Label seq_string /* 5 */, external_string /* 7 */,
3000        check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
3001        not_long_external /* 8 */;
3002
3003  // (1) Sequential string?  If yes, go to (5).
3004  __ And(a1,
3005         a0,
3006         Operand(kIsNotStringMask |
3007                 kStringRepresentationMask |
3008                 kShortExternalStringMask));
3009  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
3010  __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (5).
3011
3012  // (2) Anything but sequential or cons?  If yes, go to (6).
3013  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
3014  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
3015  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
3016  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
3017  // Go to (6).
3018  __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
3019
3020  // (3) Cons string.  Check that it's flat.
3021  // Replace subject with first string and reload instance type.
3022  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
3023  __ LoadRoot(a1, Heap::kempty_stringRootIndex);
3024  __ Branch(&runtime, ne, a0, Operand(a1));
3025  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
3026
3027  // (4) Is subject external?  If yes, go to (7).
3028  __ bind(&check_underlying);
3029  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
3030  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
3031  STATIC_ASSERT(kSeqStringTag == 0);
3032  __ And(at, a0, Operand(kStringRepresentationMask));
3033  // The underlying external string is never a short external string.
3034  STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
3035  STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
3036  __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).
3037
3038  // (5) Sequential string.  Load regexp code according to encoding.
3039  __ bind(&seq_string);
3040  // subject: sequential subject string (or look-alike, external string)
3041  // a3: original subject string
3042  // Load previous index and check range before a3 is overwritten.  We have to
3043  // use a3 instead of subject here because subject might have been only made
3044  // to look like a sequential string when it actually is an external string.
3045  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
3046  __ JumpIfNotSmi(a1, &runtime);
3047  __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
3048  __ Branch(&runtime, ls, a3, Operand(a1));
3049  __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
3050
3051  STATIC_ASSERT(kStringEncodingMask == 4);
3052  STATIC_ASSERT(kOneByteStringTag == 4);
3053  STATIC_ASSERT(kTwoByteStringTag == 0);
3054  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ASCII.
3055  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
3056  __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
3057  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
3058  __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
3059
3060  // (E) Carry on.  String handling is done.
3061  // t9: irregexp code
3062  // Check that the irregexp code has been generated for the actual string
3063  // encoding. If it has, the field contains a code object otherwise it contains
3064  // a smi (code flushing support).
3065  __ JumpIfSmi(t9, &runtime);
3066
3067  // a1: previous index
3068  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
3069  // t9: code
3070  // subject: Subject string
3071  // regexp_data: RegExp data (FixedArray)
3072  // All checks done. Now push arguments for native regexp code.
3073  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
3074                      1, a0, a2);
3075
3076  // Isolates: note we add an additional parameter here (isolate pointer).
3077  const int kRegExpExecuteArguments = 9;
3078  const int kParameterRegisters = 4;
3079  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
3080
3081  // Stack pointer now points to cell where return address is to be written.
3082  // Arguments are before that on the stack or in registers, meaning we
3083  // treat the return address as argument 5. Thus every argument after that
3084  // needs to be shifted back by 1. Since DirectCEntryStub will handle
3085  // allocating space for the c argument slots, we don't need to calculate
3086  // that into the argument positions on the stack. This is how the stack will
3087  // look (sp meaning the value of sp at this moment):
3088  // [sp + 5] - Argument 9
3089  // [sp + 4] - Argument 8
3090  // [sp + 3] - Argument 7
3091  // [sp + 2] - Argument 6
3092  // [sp + 1] - Argument 5
3093  // [sp + 0] - saved ra
3094
3095  // Argument 9: Pass current isolate address.
3096  // CFunctionArgumentOperand handles MIPS stack argument slots.
3097  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
3098  __ sw(a0, MemOperand(sp, 5 * kPointerSize));
3099
3100  // Argument 8: Indicate that this is a direct call from JavaScript.
3101  __ li(a0, Operand(1));
3102  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
3103
3104  // Argument 7: Start (high end) of backtracking stack memory area.
3105  __ li(a0, Operand(address_of_regexp_stack_memory_address));
3106  __ lw(a0, MemOperand(a0, 0));
3107  __ li(a2, Operand(address_of_regexp_stack_memory_size));
3108  __ lw(a2, MemOperand(a2, 0));
3109  __ addu(a0, a0, a2);
3110  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
3111
3112  // Argument 6: Set the number of capture registers to zero to force global
3113  // regexps to behave as non-global.  This does not affect non-global regexps.
3114  __ mov(a0, zero_reg);
3115  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
3116
3117  // Argument 5: static offsets vector buffer.
3118  __ li(a0, Operand(
3119        ExternalReference::address_of_static_offsets_vector(isolate)));
3120  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
3121
3122  // For arguments 4 and 3 get string length, calculate start of string data
3123  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
3124  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
3125  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
3126  // Load the length from the original subject string from the previous stack
3127  // frame. Therefore we have to use fp, which points exactly to two pointer
3128  // sizes below the previous sp. (Because creating a new stack frame pushes
3129  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
3130  __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
3131  // If slice offset is not 0, load the length from the original sliced string.
3132  // Argument 4, a3: End of string data
3133  // Argument 3, a2: Start of string data
3134  // Prepare start and end index of the input.
3135  __ sllv(t1, t0, a3);
3136  __ addu(t0, t2, t1);
3137  __ sllv(t1, a1, a3);
3138  __ addu(a2, t0, t1);
3139
3140  __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
3141  __ sra(t2, t2, kSmiTagSize);
3142  __ sllv(t1, t2, a3);
3143  __ addu(a3, t0, t1);
3144  // Argument 2 (a1): Previous index.
3145  // Already there
3146
3147  // Argument 1 (a0): Subject string.
3148  __ mov(a0, subject);
3149
3150  // Locate the code entry and call it.
3151  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
3152  DirectCEntryStub stub;
3153  stub.GenerateCall(masm, t9);
3154
3155  __ LeaveExitFrame(false, no_reg, true);
3156
3157  // v0: result
3158  // subject: subject string (callee saved)
3159  // regexp_data: RegExp data (callee saved)
3160  // last_match_info_elements: Last match info elements (callee saved)
3161  // Check the result.
3162  Label success;
3163  __ Branch(&success, eq, v0, Operand(1));
3164  // We expect exactly one result since we force the called regexp to behave
3165  // as non-global.
3166  Label failure;
3167  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
3168  // If not exception it can only be retry. Handle that in the runtime system.
3169  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
3170  // Result must now be exception. If there is no pending exception already a
3171  // stack overflow (on the backtrack stack) was detected in RegExp code but
3172  // haven't created the exception yet. Handle that in the runtime system.
3173  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3174  __ li(a1, Operand(isolate->factory()->the_hole_value()));
3175  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3176                                      isolate)));
3177  __ lw(v0, MemOperand(a2, 0));
3178  __ Branch(&runtime, eq, v0, Operand(a1));
3179
3180  __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.
3181
3182  // Check if the exception is a termination. If so, throw as uncatchable.
3183  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
3184  Label termination_exception;
3185  __ Branch(&termination_exception, eq, v0, Operand(a0));
3186
3187  __ Throw(v0);
3188
3189  __ bind(&termination_exception);
3190  __ ThrowUncatchable(v0);
3191
3192  __ bind(&failure);
3193  // For failure and exception return null.
3194  __ li(v0, Operand(isolate->factory()->null_value()));
3195  __ DropAndRet(4);
3196
3197  // Process the result from the native regexp code.
3198  __ bind(&success);
3199  __ lw(a1,
3200         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
3201  // Calculate number of capture registers (number_of_captures + 1) * 2.
3202  // Multiplying by 2 comes for free since r1 is smi-tagged.
3203  STATIC_ASSERT(kSmiTag == 0);
3204  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3205  __ Addu(a1, a1, Operand(2));  // a1 was a smi.
3206
3207  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
3208  __ JumpIfSmi(a0, &runtime);
3209  __ GetObjectType(a0, a2, a2);
3210  __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
3211  // Check that the JSArray is in fast case.
3212  __ lw(last_match_info_elements,
3213        FieldMemOperand(a0, JSArray::kElementsOffset));
3214  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
3215  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
3216  __ Branch(&runtime, ne, a0, Operand(at));
3217  // Check that the last match info has space for the capture registers and the
3218  // additional information.
3219  __ lw(a0,
3220        FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
3221  __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
3222  __ sra(at, a0, kSmiTagSize);
3223  __ Branch(&runtime, gt, a2, Operand(at));
3224
3225  // a1: number of capture registers
3226  // subject: subject string
3227  // Store the capture count.
3228  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
3229  __ sw(a2, FieldMemOperand(last_match_info_elements,
3230                             RegExpImpl::kLastCaptureCountOffset));
3231  // Store last subject and last input.
3232  __ sw(subject,
3233         FieldMemOperand(last_match_info_elements,
3234                         RegExpImpl::kLastSubjectOffset));
3235  __ mov(a2, subject);
3236  __ RecordWriteField(last_match_info_elements,
3237                      RegExpImpl::kLastSubjectOffset,
3238                      subject,
3239                      t3,
3240                      kRAHasNotBeenSaved,
3241                      kDontSaveFPRegs);
3242  __ mov(subject, a2);
3243  __ sw(subject,
3244         FieldMemOperand(last_match_info_elements,
3245                         RegExpImpl::kLastInputOffset));
3246  __ RecordWriteField(last_match_info_elements,
3247                      RegExpImpl::kLastInputOffset,
3248                      subject,
3249                      t3,
3250                      kRAHasNotBeenSaved,
3251                      kDontSaveFPRegs);
3252
3253  // Get the static offsets vector filled by the native regexp code.
3254  ExternalReference address_of_static_offsets_vector =
3255      ExternalReference::address_of_static_offsets_vector(isolate);
3256  __ li(a2, Operand(address_of_static_offsets_vector));
3257
3258  // a1: number of capture registers
3259  // a2: offsets vector
3260  Label next_capture, done;
3261  // Capture register counter starts from number of capture registers and
3262  // counts down until wrapping after zero.
3263  __ Addu(a0,
3264         last_match_info_elements,
3265         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
3266  __ bind(&next_capture);
3267  __ Subu(a1, a1, Operand(1));
3268  __ Branch(&done, lt, a1, Operand(zero_reg));
3269  // Read the value from the static offsets vector buffer.
3270  __ lw(a3, MemOperand(a2, 0));
3271  __ addiu(a2, a2, kPointerSize);
3272  // Store the smi value in the last match info.
3273  __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
3274  __ sw(a3, MemOperand(a0, 0));
3275  __ Branch(&next_capture, USE_DELAY_SLOT);
3276  __ addiu(a0, a0, kPointerSize);  // In branch delay slot.
3277
3278  __ bind(&done);
3279
3280  // Return last match info.
3281  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
3282  __ DropAndRet(4);
3283
3284  // Do the runtime call to execute the regexp.
3285  __ bind(&runtime);
3286  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3287
3288  // Deferred code for string handling.
3289  // (6) Not a long external string?  If yes, go to (8).
3290  __ bind(&not_seq_nor_cons);
3291  // Go to (8).
3292  __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
3293
3294  // (7) External string.  Make it, offset-wise, look like a sequential string.
3295  __ bind(&external_string);
3296  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
3297  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
3298  if (FLAG_debug_code) {
3299    // Assert that we do not have a cons or slice (indirect strings) here.
3300    // Sequential strings have already been ruled out.
3301    __ And(at, a0, Operand(kIsIndirectStringMask));
3302    __ Assert(eq,
3303              kExternalStringExpectedButNotFound,
3304              at,
3305              Operand(zero_reg));
3306  }
3307  __ lw(subject,
3308        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
3309  // Move the pointer so that offset-wise, it looks like a sequential string.
3310  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3311  __ Subu(subject,
3312          subject,
3313          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
3314  __ jmp(&seq_string);    // Go to (5).
3315
3316  // (8) Short external string or not a string?  If yes, bail out to runtime.
3317  __ bind(&not_long_external);
3318  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
3319  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
3320  __ Branch(&runtime, ne, at, Operand(zero_reg));
3321
3322  // (9) Sliced string.  Replace subject with parent.  Go to (4).
3323  // Load offset into t0 and replace subject string with parent.
3324  __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
3325  __ sra(t0, t0, kSmiTagSize);
3326  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
3327  __ jmp(&check_underlying);  // Go to (4).
3328#endif  // V8_INTERPRETED_REGEXP
3329}
3330
3331
3332void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3333  const int kMaxInlineLength = 100;
3334  Label slowcase;
3335  Label done;
3336  __ lw(a1, MemOperand(sp, kPointerSize * 2));
3337  STATIC_ASSERT(kSmiTag == 0);
3338  STATIC_ASSERT(kSmiTagSize == 1);
3339  __ JumpIfNotSmi(a1, &slowcase);
3340  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
3341  // Smi-tagging is equivalent to multiplying by 2.
3342  // Allocate RegExpResult followed by FixedArray with size in ebx.
3343  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
3344  // Elements:  [Map][Length][..elements..]
3345  // Size of JSArray with two in-object properties and the header of a
3346  // FixedArray.
3347  int objects_size =
3348      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
3349  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
3350  __ Addu(a2, t1, Operand(objects_size));
3351  __ Allocate(
3352      a2,  // In: Size, in words.
3353      v0,  // Out: Start of allocation (tagged).
3354      a3,  // Scratch register.
3355      t0,  // Scratch register.
3356      &slowcase,
3357      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
3358  // v0: Start of allocated area, object-tagged.
3359  // a1: Number of elements in array, as smi.
3360  // t1: Number of elements, untagged.
3361
3362  // Set JSArray map to global.regexp_result_map().
3363  // Set empty properties FixedArray.
3364  // Set elements to point to FixedArray allocated right after the JSArray.
3365  // Interleave operations for better latency.
3366  __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
3367  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
3368  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
3369  __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
3370  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
3371  __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
3372  __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
3373  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
3374
3375  // Set input, index and length fields from arguments.
3376  __ lw(a1, MemOperand(sp, kPointerSize * 0));
3377  __ lw(a2, MemOperand(sp, kPointerSize * 1));
3378  __ lw(t2, MemOperand(sp, kPointerSize * 2));
3379  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
3380  __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
3381  __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
3382
3383  // Fill out the elements FixedArray.
3384  // v0: JSArray, tagged.
3385  // a3: FixedArray, tagged.
3386  // t1: Number of elements in array, untagged.
3387
3388  // Set map.
3389  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
3390  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
3391  // Set FixedArray length.
3392  __ sll(t2, t1, kSmiTagSize);
3393  __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
3394  // Fill contents of fixed-array with undefined.
3395  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
3396  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3397  // Fill fixed array elements with undefined.
3398  // v0: JSArray, tagged.
3399  // a2: undefined.
3400  // a3: Start of elements in FixedArray.
3401  // t1: Number of elements to fill.
3402  Label loop;
3403  __ sll(t1, t1, kPointerSizeLog2);  // Convert num elements to num bytes.
3404  __ addu(t1, t1, a3);  // Point past last element to store.
3405  __ bind(&loop);
3406  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 past end of elem.
3407  __ sw(a2, MemOperand(a3));
3408  __ Branch(&loop, USE_DELAY_SLOT);
3409  __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
3410
3411  __ bind(&done);
3412  __ DropAndRet(3);
3413
3414  __ bind(&slowcase);
3415  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
3416}
3417
3418
3419static void GenerateRecordCallTarget(MacroAssembler* masm) {
3420  // Cache the called function in a global property cell.  Cache states
3421  // are uninitialized, monomorphic (indicated by a JSFunction), and
3422  // megamorphic.
3423  // a0 : number of arguments to the construct function
3424  // a1 : the function to call
3425  // a2 : cache cell for call target
3426  Label initialize, done, miss, megamorphic, not_array_function;
3427
3428  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3429            masm->isolate()->heap()->undefined_value());
3430  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
3431            masm->isolate()->heap()->the_hole_value());
3432
3433  // Load the cache state into a3.
3434  __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
3435
3436  // A monomorphic cache hit or an already megamorphic state: invoke the
3437  // function without changing the state.
3438  __ Branch(&done, eq, a3, Operand(a1));
3439
3440  // If we came here, we need to see if we are the array function.
3441  // If we didn't have a matching function, and we didn't find the megamorph
3442  // sentinel, then we have in the cell either some other function or an
3443  // AllocationSite. Do a map check on the object in a3.
3444  __ lw(t1, FieldMemOperand(a3, 0));
3445  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
3446  __ Branch(&miss, ne, t1, Operand(at));
3447
3448  // Make sure the function is the Array() function
3449  __ LoadArrayFunction(a3);
3450  __ Branch(&megamorphic, ne, a1, Operand(a3));
3451  __ jmp(&done);
3452
3453  __ bind(&miss);
3454
3455  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
3456  // megamorphic.
3457  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3458  __ Branch(&initialize, eq, a3, Operand(at));
3459  // MegamorphicSentinel is an immortal immovable object (undefined) so no
3460  // write-barrier is needed.
3461  __ bind(&megamorphic);
3462  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3463  __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
3464  __ jmp(&done);
3465
3466  // An uninitialized cache is patched with the function or sentinel to
3467  // indicate the ElementsKind if function is the Array constructor.
3468  __ bind(&initialize);
3469  // Make sure the function is the Array() function
3470  __ LoadArrayFunction(a3);
3471  __ Branch(&not_array_function, ne, a1, Operand(a3));
3472
3473  // The target function is the Array constructor.
3474  // Create an AllocationSite if we don't already have it, store it in the cell.
3475  {
3476    FrameScope scope(masm, StackFrame::INTERNAL);
3477    const RegList kSavedRegs =
3478        1 << 4  |  // a0
3479        1 << 5  |  // a1
3480        1 << 6;    // a2
3481
3482    // Arguments register must be smi-tagged to call out.
3483    __ SmiTag(a0);
3484    __ MultiPush(kSavedRegs);
3485
3486    CreateAllocationSiteStub create_stub;
3487    __ CallStub(&create_stub);
3488
3489    __ MultiPop(kSavedRegs);
3490    __ SmiUntag(a0);
3491  }
3492  __ Branch(&done);
3493
3494  __ bind(&not_array_function);
3495  __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
3496  // No need for a write barrier here - cells are rescanned.
3497
3498  __ bind(&done);
3499}
3500
3501
3502void CallFunctionStub::Generate(MacroAssembler* masm) {
3503  // a1 : the function to call
3504  // a2 : cache cell for call target
3505  Label slow, non_function;
3506
3507  // The receiver might implicitly be the global object. This is
3508  // indicated by passing the hole as the receiver to the call
3509  // function stub.
3510  if (ReceiverMightBeImplicit()) {
3511    Label call;
3512    // Get the receiver from the stack.
3513    // function, receiver [, arguments]
3514    __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
3515    // Call as function is indicated with the hole.
3516    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3517    __ Branch(&call, ne, t0, Operand(at));
3518    // Patch the receiver on the stack with the global receiver object.
3519    __ lw(a3,
3520          MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
3521    __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
3522    __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
3523    __ bind(&call);
3524  }
3525
3526  // Check that the function is really a JavaScript function.
3527  // a1: pushed function (to be verified)
3528  __ JumpIfSmi(a1, &non_function);
3529  // Get the map of the function object.
3530  __ GetObjectType(a1, a3, a3);
3531  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
3532
3533  if (RecordCallTarget()) {
3534    GenerateRecordCallTarget(masm);
3535  }
3536
3537  // Fast-case: Invoke the function now.
3538  // a1: pushed function
3539  ParameterCount actual(argc_);
3540
3541  if (ReceiverMightBeImplicit()) {
3542    Label call_as_function;
3543    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3544    __ Branch(&call_as_function, eq, t0, Operand(at));
3545    __ InvokeFunction(a1,
3546                      actual,
3547                      JUMP_FUNCTION,
3548                      NullCallWrapper(),
3549                      CALL_AS_METHOD);
3550    __ bind(&call_as_function);
3551  }
3552  __ InvokeFunction(a1,
3553                    actual,
3554                    JUMP_FUNCTION,
3555                    NullCallWrapper(),
3556                    CALL_AS_FUNCTION);
3557
3558  // Slow-case: Non-function called.
3559  __ bind(&slow);
3560  if (RecordCallTarget()) {
3561    // If there is a call target cache, mark it megamorphic in the
3562    // non-function case.  MegamorphicSentinel is an immortal immovable
3563    // object (undefined) so no write barrier is needed.
3564    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
3565              masm->isolate()->heap()->undefined_value());
3566    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3567    __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
3568  }
3569  // Check for function proxy.
3570  __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
3571  __ push(a1);  // Put proxy as additional argument.
3572  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
3573  __ li(a2, Operand(0, RelocInfo::NONE32));
3574  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
3575  __ SetCallKind(t1, CALL_AS_METHOD);
3576  {
3577    Handle<Code> adaptor =
3578      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3579    __ Jump(adaptor, RelocInfo::CODE_TARGET);
3580  }
3581
3582  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3583  // of the original receiver from the call site).
3584  __ bind(&non_function);
3585  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
3586  __ li(a0, Operand(argc_));  // Set up the number of arguments.
3587  __ mov(a2, zero_reg);
3588  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
3589  __ SetCallKind(t1, CALL_AS_METHOD);
3590  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3591          RelocInfo::CODE_TARGET);
3592}
3593
3594
3595void CallConstructStub::Generate(MacroAssembler* masm) {
3596  // a0 : number of arguments
3597  // a1 : the function to call
3598  // a2 : cache cell for call target
3599  Label slow, non_function_call;
3600
3601  // Check that the function is not a smi.
3602  __ JumpIfSmi(a1, &non_function_call);
3603  // Check that the function is a JSFunction.
3604  __ GetObjectType(a1, a3, a3);
3605  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
3606
3607  if (RecordCallTarget()) {
3608    GenerateRecordCallTarget(masm);
3609  }
3610
3611  // Jump to the function-specific construct stub.
3612  Register jmp_reg = a3;
3613  __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3614  __ lw(jmp_reg, FieldMemOperand(jmp_reg,
3615                                 SharedFunctionInfo::kConstructStubOffset));
3616  __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
3617  __ Jump(at);
3618
3619  // a0: number of arguments
3620  // a1: called object
3621  // a3: object type
3622  Label do_call;
3623  __ bind(&slow);
3624  __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
3625  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3626  __ jmp(&do_call);
3627
3628  __ bind(&non_function_call);
3629  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3630  __ bind(&do_call);
3631  // Set expected number of arguments to zero (not changing r0).
3632  __ li(a2, Operand(0, RelocInfo::NONE32));
3633  __ SetCallKind(t1, CALL_AS_METHOD);
3634  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3635          RelocInfo::CODE_TARGET);
3636}
3637
3638
3639// StringCharCodeAtGenerator.
3640void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3641  Label flat_string;
3642  Label ascii_string;
3643  Label got_char_code;
3644  Label sliced_string;
3645
3646  ASSERT(!t0.is(index_));
3647  ASSERT(!t0.is(result_));
3648  ASSERT(!t0.is(object_));
3649
3650  // If the receiver is a smi trigger the non-string case.
3651  __ JumpIfSmi(object_, receiver_not_string_);
3652
3653  // Fetch the instance type of the receiver into result register.
3654  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3655  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3656  // If the receiver is not a string trigger the non-string case.
3657  __ And(t0, result_, Operand(kIsNotStringMask));
3658  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
3659
3660  // If the index is non-smi trigger the non-smi case.
3661  __ JumpIfNotSmi(index_, &index_not_smi_);
3662
3663  __ bind(&got_smi_index_);
3664
3665  // Check for index out of range.
3666  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
3667  __ Branch(index_out_of_range_, ls, t0, Operand(index_));
3668
3669  __ sra(index_, index_, kSmiTagSize);
3670
3671  StringCharLoadGenerator::Generate(masm,
3672                                    object_,
3673                                    index_,
3674                                    result_,
3675                                    &call_runtime_);
3676
3677  __ sll(result_, result_, kSmiTagSize);
3678  __ bind(&exit_);
3679}
3680
3681
3682void StringCharCodeAtGenerator::GenerateSlow(
3683    MacroAssembler* masm,
3684    const RuntimeCallHelper& call_helper) {
3685  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3686
3687  // Index is not a smi.
3688  __ bind(&index_not_smi_);
3689  // If index is a heap number, try converting it to an integer.
3690  __ CheckMap(index_,
3691              result_,
3692              Heap::kHeapNumberMapRootIndex,
3693              index_not_number_,
3694              DONT_DO_SMI_CHECK);
3695  call_helper.BeforeCall(masm);
3696  // Consumed by runtime conversion function:
3697  __ Push(object_, index_);
3698  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3699    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3700  } else {
3701    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3702    // NumberToSmi discards numbers that are not exact integers.
3703    __ CallRuntime(Runtime::kNumberToSmi, 1);
3704  }
3705
3706  // Save the conversion result before the pop instructions below
3707  // have a chance to overwrite it.
3708
3709  __ Move(index_, v0);
3710  __ pop(object_);
3711  // Reload the instance type.
3712  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3713  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3714  call_helper.AfterCall(masm);
3715  // If index is still not a smi, it must be out of range.
3716  __ JumpIfNotSmi(index_, index_out_of_range_);
3717  // Otherwise, return to the fast path.
3718  __ Branch(&got_smi_index_);
3719
3720  // Call runtime. We get here when the receiver is a string and the
3721  // index is a number, but the code of getting the actual character
3722  // is too complex (e.g., when the string needs to be flattened).
3723  __ bind(&call_runtime_);
3724  call_helper.BeforeCall(masm);
3725  __ sll(index_, index_, kSmiTagSize);
3726  __ Push(object_, index_);
3727  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3728
3729  __ Move(result_, v0);
3730
3731  call_helper.AfterCall(masm);
3732  __ jmp(&exit_);
3733
3734  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3735}
3736
3737
3738// -------------------------------------------------------------------------
3739// StringCharFromCodeGenerator
3740
3741void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3742  // Fast case of Heap::LookupSingleCharacterStringFromCode.
3743
3744  ASSERT(!t0.is(result_));
3745  ASSERT(!t0.is(code_));
3746
3747  STATIC_ASSERT(kSmiTag == 0);
3748  STATIC_ASSERT(kSmiShiftSize == 0);
3749  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
3750  __ And(t0,
3751         code_,
3752         Operand(kSmiTagMask |
3753                 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
3754  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
3755
3756  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3757  // At this point code register contains smi tagged ASCII char code.
3758  STATIC_ASSERT(kSmiTag == 0);
3759  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
3760  __ Addu(result_, result_, t0);
3761  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3762  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3763  __ Branch(&slow_case_, eq, result_, Operand(t0));
3764  __ bind(&exit_);
3765}
3766
3767
3768void StringCharFromCodeGenerator::GenerateSlow(
3769    MacroAssembler* masm,
3770    const RuntimeCallHelper& call_helper) {
3771  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3772
3773  __ bind(&slow_case_);
3774  call_helper.BeforeCall(masm);
3775  __ push(code_);
3776  __ CallRuntime(Runtime::kCharFromCode, 1);
3777  __ Move(result_, v0);
3778
3779  call_helper.AfterCall(masm);
3780  __ Branch(&exit_);
3781
3782  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3783}
3784
3785
3786void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3787                                          Register dest,
3788                                          Register src,
3789                                          Register count,
3790                                          Register scratch,
3791                                          bool ascii) {
3792  Label loop;
3793  Label done;
3794  // This loop just copies one character at a time, as it is only used for
3795  // very short strings.
3796  if (!ascii) {
3797    __ addu(count, count, count);
3798  }
3799  __ Branch(&done, eq, count, Operand(zero_reg));
3800  __ addu(count, dest, count);  // Count now points to the last dest byte.
3801
3802  __ bind(&loop);
3803  __ lbu(scratch, MemOperand(src));
3804  __ addiu(src, src, 1);
3805  __ sb(scratch, MemOperand(dest));
3806  __ addiu(dest, dest, 1);
3807  __ Branch(&loop, lt, dest, Operand(count));
3808
3809  __ bind(&done);
3810}
3811
3812
3813enum CopyCharactersFlags {
3814  COPY_ASCII = 1,
3815  DEST_ALWAYS_ALIGNED = 2
3816};
3817
3818
3819void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
3820                                              Register dest,
3821                                              Register src,
3822                                              Register count,
3823                                              Register scratch1,
3824                                              Register scratch2,
3825                                              Register scratch3,
3826                                              Register scratch4,
3827                                              Register scratch5,
3828                                              int flags) {
3829  bool ascii = (flags & COPY_ASCII) != 0;
3830  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
3831
3832  if (dest_always_aligned && FLAG_debug_code) {
3833    // Check that destination is actually word aligned if the flag says
3834    // that it is.
3835    __ And(scratch4, dest, Operand(kPointerAlignmentMask));
3836    __ Check(eq,
3837             kDestinationOfCopyNotAligned,
3838             scratch4,
3839             Operand(zero_reg));
3840  }
3841
3842  const int kReadAlignment = 4;
3843  const int kReadAlignmentMask = kReadAlignment - 1;
3844  // Ensure that reading an entire aligned word containing the last character
3845  // of a string will not read outside the allocated area (because we pad up
3846  // to kObjectAlignment).
3847  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
3848  // Assumes word reads and writes are little endian.
3849  // Nothing to do for zero characters.
3850  Label done;
3851
3852  if (!ascii) {
3853    __ addu(count, count, count);
3854  }
3855  __ Branch(&done, eq, count, Operand(zero_reg));
3856
3857  Label byte_loop;
3858  // Must copy at least eight bytes, otherwise just do it one byte at a time.
3859  __ Subu(scratch1, count, Operand(8));
3860  __ Addu(count, dest, Operand(count));
3861  Register limit = count;  // Read until src equals this.
3862  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
3863
3864  if (!dest_always_aligned) {
3865    // Align dest by byte copying. Copies between zero and three bytes.
3866    __ And(scratch4, dest, Operand(kReadAlignmentMask));
3867    Label dest_aligned;
3868    __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
3869    Label aligned_loop;
3870    __ bind(&aligned_loop);
3871    __ lbu(scratch1, MemOperand(src));
3872    __ addiu(src, src, 1);
3873    __ sb(scratch1, MemOperand(dest));
3874    __ addiu(dest, dest, 1);
3875    __ addiu(scratch4, scratch4, 1);
3876    __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
3877    __ bind(&dest_aligned);
3878  }
3879
3880  Label simple_loop;
3881
3882  __ And(scratch4, src, Operand(kReadAlignmentMask));
3883  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
3884
3885  // Loop for src/dst that are not aligned the same way.
3886  // This loop uses lwl and lwr instructions. These instructions
3887  // depend on the endianness, and the implementation assumes little-endian.
3888  {
3889    Label loop;
3890    __ bind(&loop);
3891    __ lwr(scratch1, MemOperand(src));
3892    __ Addu(src, src, Operand(kReadAlignment));
3893    __ lwl(scratch1, MemOperand(src, -1));
3894    __ sw(scratch1, MemOperand(dest));
3895    __ Addu(dest, dest, Operand(kReadAlignment));
3896    __ Subu(scratch2, limit, dest);
3897    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3898  }
3899
3900  __ Branch(&byte_loop);
3901
3902  // Simple loop.
3903  // Copy words from src to dest, until less than four bytes left.
3904  // Both src and dest are word aligned.
3905  __ bind(&simple_loop);
3906  {
3907    Label loop;
3908    __ bind(&loop);
3909    __ lw(scratch1, MemOperand(src));
3910    __ Addu(src, src, Operand(kReadAlignment));
3911    __ sw(scratch1, MemOperand(dest));
3912    __ Addu(dest, dest, Operand(kReadAlignment));
3913    __ Subu(scratch2, limit, dest);
3914    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
3915  }
3916
3917  // Copy bytes from src to dest until dest hits limit.
3918  __ bind(&byte_loop);
3919  // Test if dest has already reached the limit.
3920  __ Branch(&done, ge, dest, Operand(limit));
3921  __ lbu(scratch1, MemOperand(src));
3922  __ addiu(src, src, 1);
3923  __ sb(scratch1, MemOperand(dest));
3924  __ addiu(dest, dest, 1);
3925  __ Branch(&byte_loop);
3926
3927  __ bind(&done);
3928}
3929
3930
3931void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
3932                                                        Register c1,
3933                                                        Register c2,
3934                                                        Register scratch1,
3935                                                        Register scratch2,
3936                                                        Register scratch3,
3937                                                        Register scratch4,
3938                                                        Register scratch5,
3939                                                        Label* not_found) {
3940  // Register scratch3 is the general scratch register in this function.
3941  Register scratch = scratch3;
3942
3943  // Make sure that both characters are not digits as such strings has a
3944  // different hash algorithm. Don't try to look for these in the string table.
3945  Label not_array_index;
3946  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
3947  __ Branch(&not_array_index,
3948            Ugreater,
3949            scratch,
3950            Operand(static_cast<int>('9' - '0')));
3951  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
3952
3953  // If check failed combine both characters into single halfword.
3954  // This is required by the contract of the method: code at the
3955  // not_found branch expects this combination in c1 register.
3956  Label tmp;
3957  __ sll(scratch1, c2, kBitsPerByte);
3958  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
3959  __ Or(c1, c1, scratch1);
3960  __ bind(&tmp);
3961  __ Branch(
3962      not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
3963
3964  __ bind(&not_array_index);
3965  // Calculate the two character string hash.
3966  Register hash = scratch1;
3967  StringHelper::GenerateHashInit(masm, hash, c1);
3968  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
3969  StringHelper::GenerateHashGetHash(masm, hash);
3970
3971  // Collect the two characters in a register.
3972  Register chars = c1;
3973  __ sll(scratch, c2, kBitsPerByte);
3974  __ Or(chars, chars, scratch);
3975
3976  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3977  // hash:  hash of two character string.
3978
3979  // Load string table.
3980  // Load address of first element of the string table.
3981  Register string_table = c2;
3982  __ LoadRoot(string_table, Heap::kStringTableRootIndex);
3983
3984  Register undefined = scratch4;
3985  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3986
3987  // Calculate capacity mask from the string table capacity.
3988  Register mask = scratch2;
3989  __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
3990  __ sra(mask, mask, 1);
3991  __ Addu(mask, mask, -1);
3992
3993  // Calculate untagged address of the first element of the string table.
3994  Register first_string_table_element = string_table;
3995  __ Addu(first_string_table_element, string_table,
3996         Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
3997
3998  // Registers.
3999  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
4000  // hash:  hash of two character string
4001  // mask:  capacity mask
4002  // first_string_table_element: address of the first element of
4003  //                             the string table
4004  // undefined: the undefined object
4005  // scratch: -
4006
4007  // Perform a number of probes in the string table.
4008  const int kProbes = 4;
4009  Label found_in_string_table;
4010  Label next_probe[kProbes];
4011  Register candidate = scratch5;  // Scratch register contains candidate.
4012  for (int i = 0; i < kProbes; i++) {
4013    // Calculate entry in string table.
4014    if (i > 0) {
4015      __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
4016    } else {
4017      __ mov(candidate, hash);
4018    }
4019
4020    __ And(candidate, candidate, Operand(mask));
4021
4022    // Load the entry from the symble table.
4023    STATIC_ASSERT(StringTable::kEntrySize == 1);
4024    __ sll(scratch, candidate, kPointerSizeLog2);
4025    __ Addu(scratch, scratch, first_string_table_element);
4026    __ lw(candidate, MemOperand(scratch));
4027
4028    // If entry is undefined no string with this hash can be found.
4029    Label is_string;
4030    __ GetObjectType(candidate, scratch, scratch);
4031    __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
4032
4033    __ Branch(not_found, eq, undefined, Operand(candidate));
4034    // Must be the hole (deleted entry).
4035    if (FLAG_debug_code) {
4036      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
4037      __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
4038          scratch, Operand(candidate));
4039    }
4040    __ jmp(&next_probe[i]);
4041
4042    __ bind(&is_string);
4043
4044    // Check that the candidate is a non-external ASCII string.  The instance
4045    // type is still in the scratch register from the CompareObjectType
4046    // operation.
4047    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
4048
4049    // If length is not 2 the string is not a candidate.
4050    __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
4051    __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
4052
4053    // Check if the two characters match.
4054    // Assumes that word load is little endian.
4055    __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
4056    __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
4057    __ bind(&next_probe[i]);
4058  }
4059
4060  // No matching 2 character string found by probing.
4061  __ jmp(not_found);
4062
4063  // Scratch register contains result when we fall through to here.
4064  Register result = candidate;
4065  __ bind(&found_in_string_table);
4066  __ mov(v0, result);
4067}
4068
4069
4070void StringHelper::GenerateHashInit(MacroAssembler* masm,
4071                                    Register hash,
4072                                    Register character) {
4073  // hash = seed + character + ((seed + character) << 10);
4074  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
4075  // Untag smi seed and add the character.
4076  __ SmiUntag(hash);
4077  __ addu(hash, hash, character);
4078  __ sll(at, hash, 10);
4079  __ addu(hash, hash, at);
4080  // hash ^= hash >> 6;
4081  __ srl(at, hash, 6);
4082  __ xor_(hash, hash, at);
4083}
4084
4085
4086void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
4087                                            Register hash,
4088                                            Register character) {
4089  // hash += character;
4090  __ addu(hash, hash, character);
4091  // hash += hash << 10;
4092  __ sll(at, hash, 10);
4093  __ addu(hash, hash, at);
4094  // hash ^= hash >> 6;
4095  __ srl(at, hash, 6);
4096  __ xor_(hash, hash, at);
4097}
4098
4099
4100void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
4101                                       Register hash) {
4102  // hash += hash << 3;
4103  __ sll(at, hash, 3);
4104  __ addu(hash, hash, at);
4105  // hash ^= hash >> 11;
4106  __ srl(at, hash, 11);
4107  __ xor_(hash, hash, at);
4108  // hash += hash << 15;
4109  __ sll(at, hash, 15);
4110  __ addu(hash, hash, at);
4111
4112  __ li(at, Operand(String::kHashBitMask));
4113  __ and_(hash, hash, at);
4114
4115  // if (hash == 0) hash = 27;
4116  __ ori(at, zero_reg, StringHasher::kZeroHash);
4117  __ Movz(hash, at, hash);
4118}
4119
4120
4121void SubStringStub::Generate(MacroAssembler* masm) {
4122  Label runtime;
4123  // Stack frame on entry.
4124  //  ra: return address
4125  //  sp[0]: to
4126  //  sp[4]: from
4127  //  sp[8]: string
4128
4129  // This stub is called from the native-call %_SubString(...), so
4130  // nothing can be assumed about the arguments. It is tested that:
4131  //  "string" is a sequential string,
4132  //  both "from" and "to" are smis, and
4133  //  0 <= from <= to <= string.length.
4134  // If any of these assumptions fail, we call the runtime system.
4135
4136  const int kToOffset = 0 * kPointerSize;
4137  const int kFromOffset = 1 * kPointerSize;
4138  const int kStringOffset = 2 * kPointerSize;
4139
4140  __ lw(a2, MemOperand(sp, kToOffset));
4141  __ lw(a3, MemOperand(sp, kFromOffset));
4142  STATIC_ASSERT(kFromOffset == kToOffset + 4);
4143  STATIC_ASSERT(kSmiTag == 0);
4144  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4145
4146  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
4147  // safe in this case.
4148  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
4149  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
4150  // Both a2 and a3 are untagged integers.
4151
4152  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
4153
4154  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
4155  __ Subu(a2, a2, a3);
4156
4157  // Make sure first argument is a string.
4158  __ lw(v0, MemOperand(sp, kStringOffset));
4159  __ JumpIfSmi(v0, &runtime);
4160  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
4161  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
4162  __ And(t0, a1, Operand(kIsNotStringMask));
4163
4164  __ Branch(&runtime, ne, t0, Operand(zero_reg));
4165
4166  Label single_char;
4167  __ Branch(&single_char, eq, a2, Operand(1));
4168
4169  // Short-cut for the case of trivial substring.
4170  Label return_v0;
4171  // v0: original string
4172  // a2: result string length
4173  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
4174  __ sra(t0, t0, 1);
4175  // Return original string.
4176  __ Branch(&return_v0, eq, a2, Operand(t0));
4177  // Longer than original string's length or negative: unsafe arguments.
4178  __ Branch(&runtime, hi, a2, Operand(t0));
4179  // Shorter than original string's length: an actual substring.
4180
4181  // Deal with different string types: update the index if necessary
4182  // and put the underlying string into t1.
4183  // v0: original string
4184  // a1: instance type
4185  // a2: length
4186  // a3: from index (untagged)
4187  Label underlying_unpacked, sliced_string, seq_or_external_string;
4188  // If the string is not indirect, it can only be sequential or external.
4189  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
4190  STATIC_ASSERT(kIsIndirectStringMask != 0);
4191  __ And(t0, a1, Operand(kIsIndirectStringMask));
4192  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
4193  // t0 is used as a scratch register and can be overwritten in either case.
4194  __ And(t0, a1, Operand(kSlicedNotConsMask));
4195  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
4196  // Cons string.  Check whether it is flat, then fetch first part.
4197  __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
4198  __ LoadRoot(t0, Heap::kempty_stringRootIndex);
4199  __ Branch(&runtime, ne, t1, Operand(t0));
4200  __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
4201  // Update instance type.
4202  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
4203  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
4204  __ jmp(&underlying_unpacked);
4205
4206  __ bind(&sliced_string);
4207  // Sliced string.  Fetch parent and correct start index by offset.
4208  __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
4209  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
4210  __ sra(t0, t0, 1);  // Add offset to index.
4211  __ Addu(a3, a3, t0);
4212  // Update instance type.
4213  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
4214  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
4215  __ jmp(&underlying_unpacked);
4216
4217  __ bind(&seq_or_external_string);
4218  // Sequential or external string.  Just move string to the expected register.
4219  __ mov(t1, v0);
4220
4221  __ bind(&underlying_unpacked);
4222
4223  if (FLAG_string_slices) {
4224    Label copy_routine;
4225    // t1: underlying subject string
4226    // a1: instance type of underlying subject string
4227    // a2: length
4228    // a3: adjusted start index (untagged)
4229    // Short slice.  Copy instead of slicing.
4230    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
4231    // Allocate new sliced string.  At this point we do not reload the instance
4232    // type including the string encoding because we simply rely on the info
4233    // provided by the original string.  It does not matter if the original
4234    // string's encoding is wrong because we always have to recheck encoding of
4235    // the newly created string's parent anyways due to externalized strings.
4236    Label two_byte_slice, set_slice_header;
4237    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
4238    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4239    __ And(t0, a1, Operand(kStringEncodingMask));
4240    __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
4241    __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
4242    __ jmp(&set_slice_header);
4243    __ bind(&two_byte_slice);
4244    __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
4245    __ bind(&set_slice_header);
4246    __ sll(a3, a3, 1);
4247    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
4248    __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
4249    __ jmp(&return_v0);
4250
4251    __ bind(&copy_routine);
4252  }
4253
4254  // t1: underlying subject string
4255  // a1: instance type of underlying subject string
4256  // a2: length
4257  // a3: adjusted start index (untagged)
4258  Label two_byte_sequential, sequential_string, allocate_result;
4259  STATIC_ASSERT(kExternalStringTag != 0);
4260  STATIC_ASSERT(kSeqStringTag == 0);
4261  __ And(t0, a1, Operand(kExternalStringTag));
4262  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
4263
4264  // Handle external string.
4265  // Rule out short external strings.
4266  STATIC_CHECK(kShortExternalStringTag != 0);
4267  __ And(t0, a1, Operand(kShortExternalStringTag));
4268  __ Branch(&runtime, ne, t0, Operand(zero_reg));
4269  __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
4270  // t1 already points to the first character of underlying string.
4271  __ jmp(&allocate_result);
4272
4273  __ bind(&sequential_string);
4274  // Locate first character of underlying subject string.
4275  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
4276  __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4277
4278  __ bind(&allocate_result);
4279  // Sequential acii string.  Allocate the result.
4280  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
4281  __ And(t0, a1, Operand(kStringEncodingMask));
4282  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
4283
4284  // Allocate and copy the resulting ASCII string.
4285  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
4286
4287  // Locate first character of substring to copy.
4288  __ Addu(t1, t1, a3);
4289
4290  // Locate first character of result.
4291  __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4292
4293  // v0: result string
4294  // a1: first character of result string
4295  // a2: result string length
4296  // t1: first character of substring to copy
4297  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4298  StringHelper::GenerateCopyCharactersLong(
4299      masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
4300  __ jmp(&return_v0);
4301
4302  // Allocate and copy the resulting two-byte string.
4303  __ bind(&two_byte_sequential);
4304  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
4305
4306  // Locate first character of substring to copy.
4307  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
4308  __ sll(t0, a3, 1);
4309  __ Addu(t1, t1, t0);
4310  // Locate first character of result.
4311  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4312
4313  // v0: result string.
4314  // a1: first character of result.
4315  // a2: result length.
4316  // t1: first character of substring to copy.
4317  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4318  StringHelper::GenerateCopyCharactersLong(
4319      masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
4320
4321  __ bind(&return_v0);
4322  Counters* counters = masm->isolate()->counters();
4323  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
4324  __ DropAndRet(3);
4325
4326  // Just jump to runtime to create the sub string.
4327  __ bind(&runtime);
4328  __ TailCallRuntime(Runtime::kSubString, 3, 1);
4329
4330  __ bind(&single_char);
4331  // v0: original string
4332  // a1: instance type
4333  // a2: length
4334  // a3: from index (untagged)
4335  __ SmiTag(a3, a3);
4336  StringCharAtGenerator generator(
4337      v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
4338  generator.GenerateFast(masm);
4339  __ DropAndRet(3);
4340  generator.SkipSlow(masm, &runtime);
4341}
4342
4343
4344void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
4345                                                      Register left,
4346                                                      Register right,
4347                                                      Register scratch1,
4348                                                      Register scratch2,
4349                                                      Register scratch3) {
4350  Register length = scratch1;
4351
4352  // Compare lengths.
4353  Label strings_not_equal, check_zero_length;
4354  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
4355  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
4356  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
4357  __ bind(&strings_not_equal);
4358  ASSERT(is_int16(NOT_EQUAL));
4359  __ Ret(USE_DELAY_SLOT);
4360  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
4361
4362  // Check if the length is zero.
4363  Label compare_chars;
4364  __ bind(&check_zero_length);
4365  STATIC_ASSERT(kSmiTag == 0);
4366  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
4367  ASSERT(is_int16(EQUAL));
4368  __ Ret(USE_DELAY_SLOT);
4369  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4370
4371  // Compare characters.
4372  __ bind(&compare_chars);
4373
4374  GenerateAsciiCharsCompareLoop(masm,
4375                                left, right, length, scratch2, scratch3, v0,
4376                                &strings_not_equal);
4377
4378  // Characters are equal.
4379  __ Ret(USE_DELAY_SLOT);
4380  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4381}
4382
4383
4384void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
4385                                                        Register left,
4386                                                        Register right,
4387                                                        Register scratch1,
4388                                                        Register scratch2,
4389                                                        Register scratch3,
4390                                                        Register scratch4) {
4391  Label result_not_equal, compare_lengths;
4392  // Find minimum length and length difference.
4393  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
4394  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
4395  __ Subu(scratch3, scratch1, Operand(scratch2));
4396  Register length_delta = scratch3;
4397  __ slt(scratch4, scratch2, scratch1);
4398  __ Movn(scratch1, scratch2, scratch4);
4399  Register min_length = scratch1;
4400  STATIC_ASSERT(kSmiTag == 0);
4401  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
4402
4403  // Compare loop.
4404  GenerateAsciiCharsCompareLoop(masm,
4405                                left, right, min_length, scratch2, scratch4, v0,
4406                                &result_not_equal);
4407
4408  // Compare lengths - strings up to min-length are equal.
4409  __ bind(&compare_lengths);
4410  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4411  // Use length_delta as result if it's zero.
4412  __ mov(scratch2, length_delta);
4413  __ mov(scratch4, zero_reg);
4414  __ mov(v0, zero_reg);
4415
4416  __ bind(&result_not_equal);
4417  // Conditionally update the result based either on length_delta or
4418  // the last comparion performed in the loop above.
4419  Label ret;
4420  __ Branch(&ret, eq, scratch2, Operand(scratch4));
4421  __ li(v0, Operand(Smi::FromInt(GREATER)));
4422  __ Branch(&ret, gt, scratch2, Operand(scratch4));
4423  __ li(v0, Operand(Smi::FromInt(LESS)));
4424  __ bind(&ret);
4425  __ Ret();
4426}
4427
4428
4429void StringCompareStub::GenerateAsciiCharsCompareLoop(
4430    MacroAssembler* masm,
4431    Register left,
4432    Register right,
4433    Register length,
4434    Register scratch1,
4435    Register scratch2,
4436    Register scratch3,
4437    Label* chars_not_equal) {
4438  // Change index to run from -length to -1 by adding length to string
4439  // start. This means that loop ends when index reaches zero, which
4440  // doesn't need an additional compare.
4441  __ SmiUntag(length);
4442  __ Addu(scratch1, length,
4443          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4444  __ Addu(left, left, Operand(scratch1));
4445  __ Addu(right, right, Operand(scratch1));
4446  __ Subu(length, zero_reg, length);
4447  Register index = length;  // index = -length;
4448
4449
4450  // Compare loop.
4451  Label loop;
4452  __ bind(&loop);
4453  __ Addu(scratch3, left, index);
4454  __ lbu(scratch1, MemOperand(scratch3));
4455  __ Addu(scratch3, right, index);
4456  __ lbu(scratch2, MemOperand(scratch3));
4457  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
4458  __ Addu(index, index, 1);
4459  __ Branch(&loop, ne, index, Operand(zero_reg));
4460}
4461
4462
4463void StringCompareStub::Generate(MacroAssembler* masm) {
4464  Label runtime;
4465
4466  Counters* counters = masm->isolate()->counters();
4467
4468  // Stack frame on entry.
4469  //  sp[0]: right string
4470  //  sp[4]: left string
4471  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
4472  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
4473
4474  Label not_same;
4475  __ Branch(&not_same, ne, a0, Operand(a1));
4476  STATIC_ASSERT(EQUAL == 0);
4477  STATIC_ASSERT(kSmiTag == 0);
4478  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4479  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
4480  __ DropAndRet(2);
4481
4482  __ bind(&not_same);
4483
4484  // Check that both objects are sequential ASCII strings.
4485  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
4486
4487  // Compare flat ASCII strings natively. Remove arguments from stack first.
4488  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
4489  __ Addu(sp, sp, Operand(2 * kPointerSize));
4490  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
4491
4492  __ bind(&runtime);
4493  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4494}
4495
4496
4497void StringAddStub::Generate(MacroAssembler* masm) {
4498  Label call_runtime, call_builtin;
4499  Builtins::JavaScript builtin_id = Builtins::ADD;
4500
4501  Counters* counters = masm->isolate()->counters();
4502
4503  // Stack on entry:
4504  // sp[0]: second argument (right).
4505  // sp[4]: first argument (left).
4506
4507  // Load the two arguments.
4508  __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
4509  __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
4510
4511  // Make sure that both arguments are strings if not known in advance.
4512  // Otherwise, at least one of the arguments is definitely a string,
4513  // and we convert the one that is not known to be a string.
4514  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
4515    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
4516    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
4517    __ JumpIfEitherSmi(a0, a1, &call_runtime);
4518    // Load instance types.
4519    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4520    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4521    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4522    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4523    STATIC_ASSERT(kStringTag == 0);
4524    // If either is not a string, go to runtime.
4525    __ Or(t4, t0, Operand(t1));
4526    __ And(t4, t4, Operand(kIsNotStringMask));
4527    __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
4528  } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
4529    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
4530    GenerateConvertArgument(
4531        masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
4532    builtin_id = Builtins::STRING_ADD_RIGHT;
4533  } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
4534    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
4535    GenerateConvertArgument(
4536        masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
4537    builtin_id = Builtins::STRING_ADD_LEFT;
4538  }
4539
4540  // Both arguments are strings.
4541  // a0: first string
4542  // a1: second string
4543  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4544  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4545  {
4546    Label strings_not_empty;
4547    // Check if either of the strings are empty. In that case return the other.
4548    // These tests use zero-length check on string-length whch is an Smi.
4549    // Assert that Smi::FromInt(0) is really 0.
4550    STATIC_ASSERT(kSmiTag == 0);
4551    ASSERT(Smi::FromInt(0) == 0);
4552    __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
4553    __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
4554    __ mov(v0, a0);       // Assume we'll return first string (from a0).
4555    __ Movz(v0, a1, a2);  // If first is empty, return second (from a1).
4556    __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
4557    __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
4558    __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
4559    __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
4560
4561    __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4562    __ DropAndRet(2);
4563
4564    __ bind(&strings_not_empty);
4565  }
4566
4567  // Untag both string-lengths.
4568  __ sra(a2, a2, kSmiTagSize);
4569  __ sra(a3, a3, kSmiTagSize);
4570
4571  // Both strings are non-empty.
4572  // a0: first string
4573  // a1: second string
4574  // a2: length of first string
4575  // a3: length of second string
4576  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4577  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4578  // Look at the length of the result of adding the two strings.
4579  Label string_add_flat_result, longer_than_two;
4580  // Adding two lengths can't overflow.
4581  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
4582  __ Addu(t2, a2, Operand(a3));
4583  // Use the string table when adding two one character strings, as it
4584  // helps later optimizations to return a string here.
4585  __ Branch(&longer_than_two, ne, t2, Operand(2));
4586
4587  // Check that both strings are non-external ASCII strings.
4588  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4589    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4590    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4591    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4592    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4593  }
4594  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
4595                                                 &call_runtime);
4596
4597  // Get the two characters forming the sub string.
4598  __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
4599  __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
4600
4601  // Try to lookup two character string in string table. If it is not found
4602  // just allocate a new one.
4603  Label make_two_character_string;
4604  StringHelper::GenerateTwoCharacterStringTableProbe(
4605      masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
4606  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4607  __ DropAndRet(2);
4608
4609  __ bind(&make_two_character_string);
4610  // Resulting string has length 2 and first chars of two strings
4611  // are combined into single halfword in a2 register.
4612  // So we can fill resulting string without two loops by a single
4613  // halfword store instruction (which assumes that processor is
4614  // in a little endian mode).
4615  __ li(t2, Operand(2));
4616  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
4617  __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
4618  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4619  __ DropAndRet(2);
4620
4621  __ bind(&longer_than_two);
4622  // Check if resulting string will be flat.
4623  __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
4624  // Handle exceptionally long strings in the runtime system.
4625  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4626  ASSERT(IsPowerOf2(String::kMaxLength + 1));
4627  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
4628  __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
4629
4630  // If result is not supposed to be flat, allocate a cons string object.
4631  // If both strings are ASCII the result is an ASCII cons string.
4632  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4633    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4634    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4635    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4636    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4637  }
4638  Label non_ascii, allocated, ascii_data;
4639  STATIC_ASSERT(kTwoByteStringTag == 0);
4640  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
4641  __ And(t4, t0, Operand(t1));
4642  __ And(t4, t4, Operand(kStringEncodingMask));
4643  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
4644
4645  // Allocate an ASCII cons string.
4646  __ bind(&ascii_data);
4647  __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
4648  __ bind(&allocated);
4649  // Fill the fields of the cons string.
4650  Label skip_write_barrier, after_writing;
4651  ExternalReference high_promotion_mode = ExternalReference::
4652      new_space_high_promotion_mode_active_address(masm->isolate());
4653  __ li(t0, Operand(high_promotion_mode));
4654  __ lw(t0, MemOperand(t0, 0));
4655  __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
4656
4657  __ mov(t3, v0);
4658  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
4659  __ RecordWriteField(t3,
4660                      ConsString::kFirstOffset,
4661                      a0,
4662                      t0,
4663                      kRAHasNotBeenSaved,
4664                      kDontSaveFPRegs);
4665  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
4666  __ RecordWriteField(t3,
4667                      ConsString::kSecondOffset,
4668                      a1,
4669                      t0,
4670                      kRAHasNotBeenSaved,
4671                      kDontSaveFPRegs);
4672  __ jmp(&after_writing);
4673
4674  __ bind(&skip_write_barrier);
4675  __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
4676  __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
4677
4678  __ bind(&after_writing);
4679
4680  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4681  __ DropAndRet(2);
4682
4683  __ bind(&non_ascii);
4684  // At least one of the strings is two-byte. Check whether it happens
4685  // to contain only one byte characters.
4686  // t0: first instance type.
4687  // t1: second instance type.
4688  // Branch to if _both_ instances have kOneByteDataHintMask set.
4689  __ And(at, t0, Operand(kOneByteDataHintMask));
4690  __ and_(at, at, t1);
4691  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
4692  __ Xor(t0, t0, Operand(t1));
4693  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
4694  __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
4695  __ Branch(&ascii_data, eq, t0,
4696      Operand(kOneByteStringTag | kOneByteDataHintTag));
4697
4698  // Allocate a two byte cons string.
4699  __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
4700  __ Branch(&allocated);
4701
4702  // We cannot encounter sliced strings or cons strings here since:
4703  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
4704  // Handle creating a flat result from either external or sequential strings.
4705  // Locate the first characters' locations.
4706  // a0: first string
4707  // a1: second string
4708  // a2: length of first string
4709  // a3: length of second string
4710  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4711  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4712  // t2: sum of lengths.
4713  Label first_prepared, second_prepared;
4714  __ bind(&string_add_flat_result);
4715  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4716    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4717    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4718    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4719    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4720  }
4721  // Check whether both strings have same encoding
4722  __ Xor(t3, t0, Operand(t1));
4723  __ And(t3, t3, Operand(kStringEncodingMask));
4724  __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
4725
4726  STATIC_ASSERT(kSeqStringTag == 0);
4727  __ And(t4, t0, Operand(kStringRepresentationMask));
4728
4729  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4730  Label skip_first_add;
4731  __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
4732  __ Branch(USE_DELAY_SLOT, &first_prepared);
4733  __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4734  __ bind(&skip_first_add);
4735  // External string: rule out short external string and load string resource.
4736  STATIC_ASSERT(kShortExternalStringTag != 0);
4737  __ And(t4, t0, Operand(kShortExternalStringMask));
4738  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
4739  __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
4740  __ bind(&first_prepared);
4741
4742  STATIC_ASSERT(kSeqStringTag == 0);
4743  __ And(t4, t1, Operand(kStringRepresentationMask));
4744  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4745  Label skip_second_add;
4746  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
4747  __ Branch(USE_DELAY_SLOT, &second_prepared);
4748  __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4749  __ bind(&skip_second_add);
4750  // External string: rule out short external string and load string resource.
4751  STATIC_ASSERT(kShortExternalStringTag != 0);
4752  __ And(t4, t1, Operand(kShortExternalStringMask));
4753  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
4754  __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
4755  __ bind(&second_prepared);
4756
4757  Label non_ascii_string_add_flat_result;
4758  // t3: first character of first string
4759  // a1: first character of second string
4760  // a2: length of first string
4761  // a3: length of second string
4762  // t2: sum of lengths.
4763  // Both strings have the same encoding.
4764  STATIC_ASSERT(kTwoByteStringTag == 0);
4765  __ And(t4, t1, Operand(kStringEncodingMask));
4766  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
4767
4768  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
4769  __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4770  // v0: result string.
4771  // t3: first character of first string.
4772  // a1: first character of second string
4773  // a2: length of first string.
4774  // a3: length of second string.
4775  // t2: first character of result.
4776
4777  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
4778  // t2: next character of result.
4779  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
4780  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4781  __ DropAndRet(2);
4782
4783  __ bind(&non_ascii_string_add_flat_result);
4784  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
4785  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4786  // v0: result string.
4787  // t3: first character of first string.
4788  // a1: first character of second string.
4789  // a2: length of first string.
4790  // a3: length of second string.
4791  // t2: first character of result.
4792  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
4793  // t2: next character of result.
4794  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
4795
4796  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4797  __ DropAndRet(2);
4798
4799  // Just jump to runtime to add the two strings.
4800  __ bind(&call_runtime);
4801  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4802
4803  if (call_builtin.is_linked()) {
4804    __ bind(&call_builtin);
4805    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4806  }
4807}
4808
4809
4810void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
4811  __ push(a0);
4812  __ push(a1);
4813}
4814
4815
4816void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
4817  __ pop(a1);
4818  __ pop(a0);
4819}
4820
4821
4822void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4823                                            int stack_offset,
4824                                            Register arg,
4825                                            Register scratch1,
4826                                            Register scratch2,
4827                                            Register scratch3,
4828                                            Register scratch4,
4829                                            Label* slow) {
4830  // First check if the argument is already a string.
4831  Label not_string, done;
4832  __ JumpIfSmi(arg, &not_string);
4833  __ GetObjectType(arg, scratch1, scratch1);
4834  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
4835
4836  // Check the number to string cache.
4837  __ bind(&not_string);
4838  // Puts the cached result into scratch1.
4839  __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
4840  __ mov(arg, scratch1);
4841  __ sw(arg, MemOperand(sp, stack_offset));
4842  __ bind(&done);
4843}
4844
4845
4846void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4847  ASSERT(state_ == CompareIC::SMI);
4848  Label miss;
4849  __ Or(a2, a1, a0);
4850  __ JumpIfNotSmi(a2, &miss);
4851
4852  if (GetCondition() == eq) {
4853    // For equality we do not care about the sign of the result.
4854    __ Ret(USE_DELAY_SLOT);
4855    __ Subu(v0, a0, a1);
4856  } else {
4857    // Untag before subtracting to avoid handling overflow.
4858    __ SmiUntag(a1);
4859    __ SmiUntag(a0);
4860    __ Ret(USE_DELAY_SLOT);
4861    __ Subu(v0, a1, a0);
4862  }
4863
4864  __ bind(&miss);
4865  GenerateMiss(masm);
4866}
4867
4868
4869void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4870  ASSERT(state_ == CompareIC::NUMBER);
4871
4872  Label generic_stub;
4873  Label unordered, maybe_undefined1, maybe_undefined2;
4874  Label miss;
4875
4876  if (left_ == CompareIC::SMI) {
4877    __ JumpIfNotSmi(a1, &miss);
4878  }
4879  if (right_ == CompareIC::SMI) {
4880    __ JumpIfNotSmi(a0, &miss);
4881  }
4882
4883  // Inlining the double comparison and falling back to the general compare
4884  // stub if NaN is involved.
4885  // Load left and right operand.
4886  Label done, left, left_smi, right_smi;
4887  __ JumpIfSmi(a0, &right_smi);
4888  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4889              DONT_DO_SMI_CHECK);
4890  __ Subu(a2, a0, Operand(kHeapObjectTag));
4891  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
4892  __ Branch(&left);
4893  __ bind(&right_smi);
4894  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
4895  FPURegister single_scratch = f6;
4896  __ mtc1(a2, single_scratch);
4897  __ cvt_d_w(f2, single_scratch);
4898
4899  __ bind(&left);
4900  __ JumpIfSmi(a1, &left_smi);
4901  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4902              DONT_DO_SMI_CHECK);
4903  __ Subu(a2, a1, Operand(kHeapObjectTag));
4904  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
4905  __ Branch(&done);
4906  __ bind(&left_smi);
4907  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
4908  single_scratch = f8;
4909  __ mtc1(a2, single_scratch);
4910  __ cvt_d_w(f0, single_scratch);
4911
4912  __ bind(&done);
4913
4914  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
4915  Label fpu_eq, fpu_lt;
4916  // Test if equal, and also handle the unordered/NaN case.
4917  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
4918
4919  // Test if less (unordered case is already handled).
4920  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
4921
4922  // Otherwise it's greater, so just fall thru, and return.
4923  ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
4924  __ Ret(USE_DELAY_SLOT);
4925  __ li(v0, Operand(GREATER));
4926
4927  __ bind(&fpu_eq);
4928  __ Ret(USE_DELAY_SLOT);
4929  __ li(v0, Operand(EQUAL));
4930
4931  __ bind(&fpu_lt);
4932  __ Ret(USE_DELAY_SLOT);
4933  __ li(v0, Operand(LESS));
4934
4935  __ bind(&unordered);
4936  __ bind(&generic_stub);
4937  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
4938                     CompareIC::GENERIC);
4939  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4940
4941  __ bind(&maybe_undefined1);
4942  if (Token::IsOrderedRelationalCompareOp(op_)) {
4943    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4944    __ Branch(&miss, ne, a0, Operand(at));
4945    __ JumpIfSmi(a1, &unordered);
4946    __ GetObjectType(a1, a2, a2);
4947    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
4948    __ jmp(&unordered);
4949  }
4950
4951  __ bind(&maybe_undefined2);
4952  if (Token::IsOrderedRelationalCompareOp(op_)) {
4953    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4954    __ Branch(&unordered, eq, a1, Operand(at));
4955  }
4956
4957  __ bind(&miss);
4958  GenerateMiss(masm);
4959}
4960
4961
4962void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4963  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4964  Label miss;
4965
4966  // Registers containing left and right operands respectively.
4967  Register left = a1;
4968  Register right = a0;
4969  Register tmp1 = a2;
4970  Register tmp2 = a3;
4971
4972  // Check that both operands are heap objects.
4973  __ JumpIfEitherSmi(left, right, &miss);
4974
4975  // Check that both operands are internalized strings.
4976  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4977  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4978  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4979  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4980  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4981  __ Or(tmp1, tmp1, Operand(tmp2));
4982  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4983  __ Branch(&miss, ne, at, Operand(zero_reg));
4984
4985  // Make sure a0 is non-zero. At this point input operands are
4986  // guaranteed to be non-zero.
4987  ASSERT(right.is(a0));
4988  STATIC_ASSERT(EQUAL == 0);
4989  STATIC_ASSERT(kSmiTag == 0);
4990  __ mov(v0, right);
4991  // Internalized strings are compared by identity.
4992  __ Ret(ne, left, Operand(right));
4993  ASSERT(is_int16(EQUAL));
4994  __ Ret(USE_DELAY_SLOT);
4995  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4996
4997  __ bind(&miss);
4998  GenerateMiss(masm);
4999}
5000
5001
5002void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
5003  ASSERT(state_ == CompareIC::UNIQUE_NAME);
5004  ASSERT(GetCondition() == eq);
5005  Label miss;
5006
5007  // Registers containing left and right operands respectively.
5008  Register left = a1;
5009  Register right = a0;
5010  Register tmp1 = a2;
5011  Register tmp2 = a3;
5012
5013  // Check that both operands are heap objects.
5014  __ JumpIfEitherSmi(left, right, &miss);
5015
5016  // Check that both operands are unique names. This leaves the instance
5017  // types loaded in tmp1 and tmp2.
5018  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
5019  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
5020  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
5021  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
5022
5023  __ JumpIfNotUniqueName(tmp1, &miss);
5024  __ JumpIfNotUniqueName(tmp2, &miss);
5025
5026  // Use a0 as result
5027  __ mov(v0, a0);
5028
5029  // Unique names are compared by identity.
5030  Label done;
5031  __ Branch(&done, ne, left, Operand(right));
5032  // Make sure a0 is non-zero. At this point input operands are
5033  // guaranteed to be non-zero.
5034  ASSERT(right.is(a0));
5035  STATIC_ASSERT(EQUAL == 0);
5036  STATIC_ASSERT(kSmiTag == 0);
5037  __ li(v0, Operand(Smi::FromInt(EQUAL)));
5038  __ bind(&done);
5039  __ Ret();
5040
5041  __ bind(&miss);
5042  GenerateMiss(masm);
5043}
5044
5045
5046void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
5047  ASSERT(state_ == CompareIC::STRING);
5048  Label miss;
5049
5050  bool equality = Token::IsEqualityOp(op_);
5051
5052  // Registers containing left and right operands respectively.
5053  Register left = a1;
5054  Register right = a0;
5055  Register tmp1 = a2;
5056  Register tmp2 = a3;
5057  Register tmp3 = t0;
5058  Register tmp4 = t1;
5059  Register tmp5 = t2;
5060
5061  // Check that both operands are heap objects.
5062  __ JumpIfEitherSmi(left, right, &miss);
5063
5064  // Check that both operands are strings. This leaves the instance
5065  // types loaded in tmp1 and tmp2.
5066  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
5067  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
5068  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
5069  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
5070  STATIC_ASSERT(kNotStringTag != 0);
5071  __ Or(tmp3, tmp1, tmp2);
5072  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
5073  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
5074
5075  // Fast check for identical strings.
5076  Label left_ne_right;
5077  STATIC_ASSERT(EQUAL == 0);
5078  STATIC_ASSERT(kSmiTag == 0);
5079  __ Branch(&left_ne_right, ne, left, Operand(right));
5080  __ Ret(USE_DELAY_SLOT);
5081  __ mov(v0, zero_reg);  // In the delay slot.
5082  __ bind(&left_ne_right);
5083
5084  // Handle not identical strings.
5085
5086  // Check that both strings are internalized strings. If they are, we're done
5087  // because we already know they are not identical. We know they are both
5088  // strings.
5089  if (equality) {
5090    ASSERT(GetCondition() == eq);
5091    STATIC_ASSERT(kInternalizedTag == 0);
5092    __ Or(tmp3, tmp1, Operand(tmp2));
5093    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
5094    Label is_symbol;
5095    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
5096    // Make sure a0 is non-zero. At this point input operands are
5097    // guaranteed to be non-zero.
5098    ASSERT(right.is(a0));
5099    __ Ret(USE_DELAY_SLOT);
5100    __ mov(v0, a0);  // In the delay slot.
5101    __ bind(&is_symbol);
5102  }
5103
5104  // Check that both strings are sequential ASCII.
5105  Label runtime;
5106  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
5107      tmp1, tmp2, tmp3, tmp4, &runtime);
5108
5109  // Compare flat ASCII strings. Returns when done.
5110  if (equality) {
5111    StringCompareStub::GenerateFlatAsciiStringEquals(
5112        masm, left, right, tmp1, tmp2, tmp3);
5113  } else {
5114    StringCompareStub::GenerateCompareFlatAsciiStrings(
5115        masm, left, right, tmp1, tmp2, tmp3, tmp4);
5116  }
5117
5118  // Handle more complex cases in runtime.
5119  __ bind(&runtime);
5120  __ Push(left, right);
5121  if (equality) {
5122    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
5123  } else {
5124    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5125  }
5126
5127  __ bind(&miss);
5128  GenerateMiss(masm);
5129}
5130
5131
5132void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
5133  ASSERT(state_ == CompareIC::OBJECT);
5134  Label miss;
5135  __ And(a2, a1, Operand(a0));
5136  __ JumpIfSmi(a2, &miss);
5137
5138  __ GetObjectType(a0, a2, a2);
5139  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
5140  __ GetObjectType(a1, a2, a2);
5141  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
5142
5143  ASSERT(GetCondition() == eq);
5144  __ Ret(USE_DELAY_SLOT);
5145  __ subu(v0, a0, a1);
5146
5147  __ bind(&miss);
5148  GenerateMiss(masm);
5149}
5150
5151
5152void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
5153  Label miss;
5154  __ And(a2, a1, a0);
5155  __ JumpIfSmi(a2, &miss);
5156  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
5157  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
5158  __ Branch(&miss, ne, a2, Operand(known_map_));
5159  __ Branch(&miss, ne, a3, Operand(known_map_));
5160
5161  __ Ret(USE_DELAY_SLOT);
5162  __ subu(v0, a0, a1);
5163
5164  __ bind(&miss);
5165  GenerateMiss(masm);
5166}
5167
5168
5169void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
5170  {
5171    // Call the runtime system in a fresh internal frame.
5172    ExternalReference miss =
5173        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
5174    FrameScope scope(masm, StackFrame::INTERNAL);
5175    __ Push(a1, a0);
5176    __ Push(ra, a1, a0);
5177    __ li(t0, Operand(Smi::FromInt(op_)));
5178    __ addiu(sp, sp, -kPointerSize);
5179    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
5180    __ sw(t0, MemOperand(sp));  // In the delay slot.
5181    // Compute the entry point of the rewritten stub.
5182    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
5183    // Restore registers.
5184    __ Pop(a1, a0, ra);
5185  }
5186  __ Jump(a2);
5187}
5188
5189
5190void DirectCEntryStub::Generate(MacroAssembler* masm) {
5191  // Make place for arguments to fit C calling convention. Most of the callers
5192  // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
5193  // so they handle stack restoring and we don't have to do that here.
5194  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
5195  // kCArgsSlotsSize stack space after the call.
5196  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
5197  // Place the return address on the stack, making the call
5198  // GC safe. The RegExp backend also relies on this.
5199  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
5200  __ Call(t9);  // Call the C++ function.
5201  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
5202
5203  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
5204    // In case of an error the return address may point to a memory area
5205    // filled with kZapValue by the GC.
5206    // Dereference the address and check for this.
5207    __ lw(t0, MemOperand(t9));
5208    __ Assert(ne, kReceivedInvalidReturnAddress, t0,
5209        Operand(reinterpret_cast<uint32_t>(kZapValue)));
5210  }
5211  __ Jump(t9);
5212}
5213
5214
5215void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
5216                                    Register target) {
5217  intptr_t loc =
5218      reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
5219  __ Move(t9, target);
5220  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
5221  __ Call(ra);
5222}
5223
5224
5225void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5226                                                      Label* miss,
5227                                                      Label* done,
5228                                                      Register receiver,
5229                                                      Register properties,
5230                                                      Handle<Name> name,
5231                                                      Register scratch0) {
5232  ASSERT(name->IsUniqueName());
5233  // If names of slots in range from 1 to kProbes - 1 for the hash value are
5234  // not equal to the name and kProbes-th slot is not used (its name is the
5235  // undefined value), it guarantees the hash table doesn't contain the
5236  // property. It's true even if some slots represent deleted properties
5237  // (their names are the hole value).
5238  for (int i = 0; i < kInlinedProbes; i++) {
5239    // scratch0 points to properties hash.
5240    // Compute the masked index: (hash + i + i * i) & mask.
5241    Register index = scratch0;
5242    // Capacity is smi 2^n.
5243    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
5244    __ Subu(index, index, Operand(1));
5245    __ And(index, index, Operand(
5246        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
5247
5248    // Scale the index by multiplying by the entry size.
5249    ASSERT(NameDictionary::kEntrySize == 3);
5250    __ sll(at, index, 1);
5251    __ Addu(index, index, at);
5252
5253    Register entity_name = scratch0;
5254    // Having undefined at this place means the name is not contained.
5255    ASSERT_EQ(kSmiTagSize, 1);
5256    Register tmp = properties;
5257    __ sll(scratch0, index, 1);
5258    __ Addu(tmp, properties, scratch0);
5259    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
5260
5261    ASSERT(!tmp.is(entity_name));
5262    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
5263    __ Branch(done, eq, entity_name, Operand(tmp));
5264
5265    // Load the hole ready for use below:
5266    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
5267
5268    // Stop if found the property.
5269    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
5270
5271    Label good;
5272    __ Branch(&good, eq, entity_name, Operand(tmp));
5273
5274    // Check if the entry name is not a unique name.
5275    __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
5276    __ lbu(entity_name,
5277           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
5278    __ JumpIfNotUniqueName(entity_name, miss);
5279    __ bind(&good);
5280
5281    // Restore the properties.
5282    __ lw(properties,
5283          FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5284  }
5285
5286  const int spill_mask =
5287      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
5288       a2.bit() | a1.bit() | a0.bit() | v0.bit());
5289
5290  __ MultiPush(spill_mask);
5291  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5292  __ li(a1, Operand(Handle<Name>(name)));
5293  NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
5294  __ CallStub(&stub);
5295  __ mov(at, v0);
5296  __ MultiPop(spill_mask);
5297
5298  __ Branch(done, eq, at, Operand(zero_reg));
5299  __ Branch(miss, ne, at, Operand(zero_reg));
5300}
5301
5302
5303// Probe the name dictionary in the |elements| register. Jump to the
5304// |done| label if a property with the given name is found. Jump to
5305// the |miss| label otherwise.
5306// If lookup was successful |scratch2| will be equal to elements + 4 * index.
5307void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
5308                                                      Label* miss,
5309                                                      Label* done,
5310                                                      Register elements,
5311                                                      Register name,
5312                                                      Register scratch1,
5313                                                      Register scratch2) {
5314  ASSERT(!elements.is(scratch1));
5315  ASSERT(!elements.is(scratch2));
5316  ASSERT(!name.is(scratch1));
5317  ASSERT(!name.is(scratch2));
5318
5319  __ AssertName(name);
5320
5321  // Compute the capacity mask.
5322  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
5323  __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
5324  __ Subu(scratch1, scratch1, Operand(1));
5325
5326  // Generate an unrolled loop that performs a few probes before
5327  // giving up. Measurements done on Gmail indicate that 2 probes
5328  // cover ~93% of loads from dictionaries.
5329  for (int i = 0; i < kInlinedProbes; i++) {
5330    // Compute the masked index: (hash + i + i * i) & mask.
5331    __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
5332    if (i > 0) {
5333      // Add the probe offset (i + i * i) left shifted to avoid right shifting
5334      // the hash in a separate instruction. The value hash + i + i * i is right
5335      // shifted in the following and instruction.
5336      ASSERT(NameDictionary::GetProbeOffset(i) <
5337             1 << (32 - Name::kHashFieldOffset));
5338      __ Addu(scratch2, scratch2, Operand(
5339          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5340    }
5341    __ srl(scratch2, scratch2, Name::kHashShift);
5342    __ And(scratch2, scratch1, scratch2);
5343
5344    // Scale the index by multiplying by the element size.
5345    ASSERT(NameDictionary::kEntrySize == 3);
5346    // scratch2 = scratch2 * 3.
5347
5348    __ sll(at, scratch2, 1);
5349    __ Addu(scratch2, scratch2, at);
5350
5351    // Check if the key is identical to the name.
5352    __ sll(at, scratch2, 2);
5353    __ Addu(scratch2, elements, at);
5354    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
5355    __ Branch(done, eq, name, Operand(at));
5356  }
5357
5358  const int spill_mask =
5359      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
5360       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
5361      ~(scratch1.bit() | scratch2.bit());
5362
5363  __ MultiPush(spill_mask);
5364  if (name.is(a0)) {
5365    ASSERT(!elements.is(a1));
5366    __ Move(a1, name);
5367    __ Move(a0, elements);
5368  } else {
5369    __ Move(a0, elements);
5370    __ Move(a1, name);
5371  }
5372  NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
5373  __ CallStub(&stub);
5374  __ mov(scratch2, a2);
5375  __ mov(at, v0);
5376  __ MultiPop(spill_mask);
5377
5378  __ Branch(done, ne, at, Operand(zero_reg));
5379  __ Branch(miss, eq, at, Operand(zero_reg));
5380}
5381
5382
5383void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
5384  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
5385  // we cannot call anything that could cause a GC from this stub.
5386  // Registers:
5387  //  result: NameDictionary to probe
5388  //  a1: key
5389  //  dictionary: NameDictionary to probe.
5390  //  index: will hold an index of entry if lookup is successful.
5391  //         might alias with result_.
5392  // Returns:
5393  //  result_ is zero if lookup failed, non zero otherwise.
5394
5395  Register result = v0;
5396  Register dictionary = a0;
5397  Register key = a1;
5398  Register index = a2;
5399  Register mask = a3;
5400  Register hash = t0;
5401  Register undefined = t1;
5402  Register entry_key = t2;
5403
5404  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
5405
5406  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
5407  __ sra(mask, mask, kSmiTagSize);
5408  __ Subu(mask, mask, Operand(1));
5409
5410  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
5411
5412  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5413
5414  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
5415    // Compute the masked index: (hash + i + i * i) & mask.
5416    // Capacity is smi 2^n.
5417    if (i > 0) {
5418      // Add the probe offset (i + i * i) left shifted to avoid right shifting
5419      // the hash in a separate instruction. The value hash + i + i * i is right
5420      // shifted in the following and instruction.
5421      ASSERT(NameDictionary::GetProbeOffset(i) <
5422             1 << (32 - Name::kHashFieldOffset));
5423      __ Addu(index, hash, Operand(
5424          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5425    } else {
5426      __ mov(index, hash);
5427    }
5428    __ srl(index, index, Name::kHashShift);
5429    __ And(index, mask, index);
5430
5431    // Scale the index by multiplying by the entry size.
5432    ASSERT(NameDictionary::kEntrySize == 3);
5433    // index *= 3.
5434    __ mov(at, index);
5435    __ sll(index, index, 1);
5436    __ Addu(index, index, at);
5437
5438
5439    ASSERT_EQ(kSmiTagSize, 1);
5440    __ sll(index, index, 2);
5441    __ Addu(index, index, dictionary);
5442    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
5443
5444    // Having undefined at this place means the name is not contained.
5445    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
5446
5447    // Stop if found the property.
5448    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
5449
5450    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
5451      // Check if the entry name is not a unique name.
5452      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
5453      __ lbu(entry_key,
5454             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
5455      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
5456    }
5457  }
5458
5459  __ bind(&maybe_in_dictionary);
5460  // If we are doing negative lookup then probing failure should be
5461  // treated as a lookup success. For positive lookup probing failure
5462  // should be treated as lookup failure.
5463  if (mode_ == POSITIVE_LOOKUP) {
5464    __ Ret(USE_DELAY_SLOT);
5465    __ mov(result, zero_reg);
5466  }
5467
5468  __ bind(&in_dictionary);
5469  __ Ret(USE_DELAY_SLOT);
5470  __ li(result, 1);
5471
5472  __ bind(&not_in_dictionary);
5473  __ Ret(USE_DELAY_SLOT);
5474  __ mov(result, zero_reg);
5475}
5476
5477
5478void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
5479    Isolate* isolate) {
5480  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
5481  stub1.GetCode(isolate);
5482  // Hydrogen code stubs need stub2 at snapshot time.
5483  StoreBufferOverflowStub stub2(kSaveFPRegs);
5484  stub2.GetCode(isolate);
5485}
5486
5487
5488bool CodeStub::CanUseFPRegisters() {
5489  return true;  // FPU is a base requirement for V8.
5490}
5491
5492
5493// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
5494// the value has just been written into the object, now this stub makes sure
5495// we keep the GC informed.  The word in the object where the value has been
5496// written is in the address register.
5497void RecordWriteStub::Generate(MacroAssembler* masm) {
5498  Label skip_to_incremental_noncompacting;
5499  Label skip_to_incremental_compacting;
5500
5501  // The first two branch+nop instructions are generated with labels so as to
5502  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
5503  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
5504  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
5505  // incremental heap marking.
5506  // See RecordWriteStub::Patch for details.
5507  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
5508  __ nop();
5509  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
5510  __ nop();
5511
5512  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
5513    __ RememberedSetHelper(object_,
5514                           address_,
5515                           value_,
5516                           save_fp_regs_mode_,
5517                           MacroAssembler::kReturnAtEnd);
5518  }
5519  __ Ret();
5520
5521  __ bind(&skip_to_incremental_noncompacting);
5522  GenerateIncremental(masm, INCREMENTAL);
5523
5524  __ bind(&skip_to_incremental_compacting);
5525  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
5526
5527  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
5528  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
5529
5530  PatchBranchIntoNop(masm, 0);
5531  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
5532}
5533
5534
5535void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
5536  regs_.Save(masm);
5537
5538  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
5539    Label dont_need_remembered_set;
5540
5541    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
5542    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
5543                           regs_.scratch0(),
5544                           &dont_need_remembered_set);
5545
5546    __ CheckPageFlag(regs_.object(),
5547                     regs_.scratch0(),
5548                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
5549                     ne,
5550                     &dont_need_remembered_set);
5551
5552    // First notify the incremental marker if necessary, then update the
5553    // remembered set.
5554    CheckNeedsToInformIncrementalMarker(
5555        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
5556    InformIncrementalMarker(masm, mode);
5557    regs_.Restore(masm);
5558    __ RememberedSetHelper(object_,
5559                           address_,
5560                           value_,
5561                           save_fp_regs_mode_,
5562                           MacroAssembler::kReturnAtEnd);
5563
5564    __ bind(&dont_need_remembered_set);
5565  }
5566
5567  CheckNeedsToInformIncrementalMarker(
5568      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
5569  InformIncrementalMarker(masm, mode);
5570  regs_.Restore(masm);
5571  __ Ret();
5572}
5573
5574
5575void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
5576  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
5577  int argument_count = 3;
5578  __ PrepareCallCFunction(argument_count, regs_.scratch0());
5579  Register address =
5580      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
5581  ASSERT(!address.is(regs_.object()));
5582  ASSERT(!address.is(a0));
5583  __ Move(address, regs_.address());
5584  __ Move(a0, regs_.object());
5585  __ Move(a1, address);
5586  __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
5587
5588  AllowExternalCallThatCantCauseGC scope(masm);
5589  if (mode == INCREMENTAL_COMPACTION) {
5590    __ CallCFunction(
5591        ExternalReference::incremental_evacuation_record_write_function(
5592            masm->isolate()),
5593        argument_count);
5594  } else {
5595    ASSERT(mode == INCREMENTAL);
5596    __ CallCFunction(
5597        ExternalReference::incremental_marking_record_write_function(
5598            masm->isolate()),
5599        argument_count);
5600  }
5601  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
5602}
5603
5604
5605void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
5606    MacroAssembler* masm,
5607    OnNoNeedToInformIncrementalMarker on_no_need,
5608    Mode mode) {
5609  Label on_black;
5610  Label need_incremental;
5611  Label need_incremental_pop_scratch;
5612
5613  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
5614  __ lw(regs_.scratch1(),
5615        MemOperand(regs_.scratch0(),
5616                   MemoryChunk::kWriteBarrierCounterOffset));
5617  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
5618  __ sw(regs_.scratch1(),
5619         MemOperand(regs_.scratch0(),
5620                    MemoryChunk::kWriteBarrierCounterOffset));
5621  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
5622
5623  // Let's look at the color of the object:  If it is not black we don't have
5624  // to inform the incremental marker.
5625  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
5626
5627  regs_.Restore(masm);
5628  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5629    __ RememberedSetHelper(object_,
5630                           address_,
5631                           value_,
5632                           save_fp_regs_mode_,
5633                           MacroAssembler::kReturnAtEnd);
5634  } else {
5635    __ Ret();
5636  }
5637
5638  __ bind(&on_black);
5639
5640  // Get the value from the slot.
5641  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
5642
5643  if (mode == INCREMENTAL_COMPACTION) {
5644    Label ensure_not_white;
5645
5646    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
5647                     regs_.scratch1(),  // Scratch.
5648                     MemoryChunk::kEvacuationCandidateMask,
5649                     eq,
5650                     &ensure_not_white);
5651
5652    __ CheckPageFlag(regs_.object(),
5653                     regs_.scratch1(),  // Scratch.
5654                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
5655                     eq,
5656                     &need_incremental);
5657
5658    __ bind(&ensure_not_white);
5659  }
5660
5661  // We need extra registers for this, so we push the object and the address
5662  // register temporarily.
5663  __ Push(regs_.object(), regs_.address());
5664  __ EnsureNotWhite(regs_.scratch0(),  // The value.
5665                    regs_.scratch1(),  // Scratch.
5666                    regs_.object(),  // Scratch.
5667                    regs_.address(),  // Scratch.
5668                    &need_incremental_pop_scratch);
5669  __ Pop(regs_.object(), regs_.address());
5670
5671  regs_.Restore(masm);
5672  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
5673    __ RememberedSetHelper(object_,
5674                           address_,
5675                           value_,
5676                           save_fp_regs_mode_,
5677                           MacroAssembler::kReturnAtEnd);
5678  } else {
5679    __ Ret();
5680  }
5681
5682  __ bind(&need_incremental_pop_scratch);
5683  __ Pop(regs_.object(), regs_.address());
5684
5685  __ bind(&need_incremental);
5686
5687  // Fall through when we need to inform the incremental marker.
5688}
5689
5690
5691void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
5692  // ----------- S t a t e -------------
5693  //  -- a0    : element value to store
5694  //  -- a3    : element index as smi
5695  //  -- sp[0] : array literal index in function as smi
5696  //  -- sp[4] : array literal
5697  // clobbers a1, a2, t0
5698  // -----------------------------------
5699
5700  Label element_done;
5701  Label double_elements;
5702  Label smi_element;
5703  Label slow_elements;
5704  Label fast_elements;
5705
5706  // Get array literal index, array literal and its map.
5707  __ lw(t0, MemOperand(sp, 0 * kPointerSize));
5708  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
5709  __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
5710
5711  __ CheckFastElements(a2, t1, &double_elements);
5712  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
5713  __ JumpIfSmi(a0, &smi_element);
5714  __ CheckFastSmiElements(a2, t1, &fast_elements);
5715
5716  // Store into the array literal requires a elements transition. Call into
5717  // the runtime.
5718  __ bind(&slow_elements);
5719  // call.
5720  __ Push(a1, a3, a0);
5721  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
5722  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
5723  __ Push(t1, t0);
5724  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
5725
5726  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
5727  __ bind(&fast_elements);
5728  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
5729  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
5730  __ Addu(t2, t1, t2);
5731  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5732  __ sw(a0, MemOperand(t2, 0));
5733  // Update the write barrier for the array store.
5734  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
5735                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
5736  __ Ret(USE_DELAY_SLOT);
5737  __ mov(v0, a0);
5738
5739  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
5740  // and value is Smi.
5741  __ bind(&smi_element);
5742  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
5743  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
5744  __ Addu(t2, t1, t2);
5745  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
5746  __ Ret(USE_DELAY_SLOT);
5747  __ mov(v0, a0);
5748
5749  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
5750  __ bind(&double_elements);
5751  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
5752  __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
5753  __ Ret(USE_DELAY_SLOT);
5754  __ mov(v0, a0);
5755}
5756
5757
5758void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
5759  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5760  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5761  int parameter_count_offset =
5762      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5763  __ lw(a1, MemOperand(fp, parameter_count_offset));
5764  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
5765    __ Addu(a1, a1, Operand(1));
5766  }
5767  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5768  __ sll(a1, a1, kPointerSizeLog2);
5769  __ Ret(USE_DELAY_SLOT);
5770  __ Addu(sp, sp, a1);
5771}
5772
5773
5774void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
5775  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
5776  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5777  __ mov(a1, v0);
5778  int parameter_count_offset =
5779      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
5780  __ lw(a0, MemOperand(fp, parameter_count_offset));
5781  // The parameter count above includes the receiver for the arguments passed to
5782  // the deoptimization handler. Subtract the receiver for the parameter count
5783  // for the call.
5784  __ Subu(a0, a0, 1);
5785  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
5786  ParameterCount argument_count(a0);
5787  __ InvokeFunction(
5788      a1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
5789}
5790
5791
5792void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
5793  if (masm->isolate()->function_entry_hook() != NULL) {
5794    ProfileEntryHookStub stub;
5795    __ push(ra);
5796    __ CallStub(&stub);
5797    __ pop(ra);
5798  }
5799}
5800
5801
5802void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
5803  // The entry hook is a "push ra" instruction, followed by a call.
5804  // Note: on MIPS "push" is 2 instruction
5805  const int32_t kReturnAddressDistanceFromFunctionStart =
5806      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
5807
5808  // This should contain all kJSCallerSaved registers.
5809  const RegList kSavedRegs =
5810     kJSCallerSaved |  // Caller saved registers.
5811     s5.bit();         // Saved stack pointer.
5812
5813  // We also save ra, so the count here is one higher than the mask indicates.
5814  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
5815
5816  // Save all caller-save registers as this may be called from anywhere.
5817  __ MultiPush(kSavedRegs | ra.bit());
5818
5819  // Compute the function's address for the first argument.
5820  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
5821
5822  // The caller's return address is above the saved temporaries.
5823  // Grab that for the second argument to the hook.
5824  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
5825
5826  // Align the stack if necessary.
5827  int frame_alignment = masm->ActivationFrameAlignment();
5828  if (frame_alignment > kPointerSize) {
5829    __ mov(s5, sp);
5830    ASSERT(IsPowerOf2(frame_alignment));
5831    __ And(sp, sp, Operand(-frame_alignment));
5832  }
5833
5834#if defined(V8_HOST_ARCH_MIPS)
5835  int32_t entry_hook =
5836      reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
5837  __ li(at, Operand(entry_hook));
5838#else
5839  // Under the simulator we need to indirect the entry hook through a
5840  // trampoline function at a known address.
5841  // It additionally takes an isolate as a third parameter.
5842  __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));
5843
5844  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
5845  __ li(at, Operand(ExternalReference(&dispatcher,
5846                                      ExternalReference::BUILTIN_CALL,
5847                                      masm->isolate())));
5848#endif
5849  __ Call(at);
5850
5851  // Restore the stack pointer if needed.
5852  if (frame_alignment > kPointerSize) {
5853    __ mov(sp, s5);
5854  }
5855
5856  // Also pop ra to get Ret(0).
5857  __ MultiPop(kSavedRegs | ra.bit());
5858  __ Ret();
5859}
5860
5861
5862template<class T>
5863static void CreateArrayDispatch(MacroAssembler* masm,
5864                                AllocationSiteOverrideMode mode) {
5865  if (mode == DISABLE_ALLOCATION_SITES) {
5866    T stub(GetInitialFastElementsKind(),
5867           CONTEXT_CHECK_REQUIRED,
5868           mode);
5869    __ TailCallStub(&stub);
5870  } else if (mode == DONT_OVERRIDE) {
5871    int last_index = GetSequenceIndexFromFastElementsKind(
5872        TERMINAL_FAST_ELEMENTS_KIND);
5873    for (int i = 0; i <= last_index; ++i) {
5874      Label next;
5875      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5876      __ Branch(&next, ne, a3, Operand(kind));
5877      T stub(kind);
5878      __ TailCallStub(&stub);
5879      __ bind(&next);
5880    }
5881
5882    // If we reached this point there is a problem.
5883    __ Abort(kUnexpectedElementsKindInArrayConstructor);
5884  } else {
5885    UNREACHABLE();
5886  }
5887}
5888
5889
5890static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
5891                                           AllocationSiteOverrideMode mode) {
5892  // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
5893  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
5894  // a0 - number of arguments
5895  // a1 - constructor?
5896  // sp[0] - last argument
5897  Label normal_sequence;
5898  if (mode == DONT_OVERRIDE) {
5899    ASSERT(FAST_SMI_ELEMENTS == 0);
5900    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
5901    ASSERT(FAST_ELEMENTS == 2);
5902    ASSERT(FAST_HOLEY_ELEMENTS == 3);
5903    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
5904    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
5905
5906    // is the low bit set? If so, we are holey and that is good.
5907    __ And(at, a3, Operand(1));
5908    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
5909  }
5910
5911  // look at the first argument
5912  __ lw(t1, MemOperand(sp, 0));
5913  __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));
5914
5915  if (mode == DISABLE_ALLOCATION_SITES) {
5916    ElementsKind initial = GetInitialFastElementsKind();
5917    ElementsKind holey_initial = GetHoleyElementsKind(initial);
5918
5919    ArraySingleArgumentConstructorStub stub_holey(holey_initial,
5920                                                  CONTEXT_CHECK_REQUIRED,
5921                                                  DISABLE_ALLOCATION_SITES);
5922    __ TailCallStub(&stub_holey);
5923
5924    __ bind(&normal_sequence);
5925    ArraySingleArgumentConstructorStub stub(initial,
5926                                            CONTEXT_CHECK_REQUIRED,
5927                                            DISABLE_ALLOCATION_SITES);
5928    __ TailCallStub(&stub);
5929  } else if (mode == DONT_OVERRIDE) {
5930    // We are going to create a holey array, but our kind is non-holey.
5931    // Fix kind and retry (only if we have an allocation site in the cell).
5932    __ Addu(a3, a3, Operand(1));
5933    __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
5934
5935    if (FLAG_debug_code) {
5936      __ lw(t1, FieldMemOperand(t1, 0));
5937      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
5938      __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
5939      __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
5940    }
5941
5942    // Save the resulting elements kind in type info. We can't just store a3
5943    // in the AllocationSite::transition_info field because elements kind is
5944    // restricted to a portion of the field...upper bits need to be left alone.
5945    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
5946    __ lw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
5947    __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
5948    __ sw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
5949
5950
5951    __ bind(&normal_sequence);
5952    int last_index = GetSequenceIndexFromFastElementsKind(
5953        TERMINAL_FAST_ELEMENTS_KIND);
5954    for (int i = 0; i <= last_index; ++i) {
5955      Label next;
5956      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5957      __ Branch(&next, ne, a3, Operand(kind));
5958      ArraySingleArgumentConstructorStub stub(kind);
5959      __ TailCallStub(&stub);
5960      __ bind(&next);
5961    }
5962
5963    // If we reached this point there is a problem.
5964    __ Abort(kUnexpectedElementsKindInArrayConstructor);
5965  } else {
5966    UNREACHABLE();
5967  }
5968}
5969
5970
5971template<class T>
5972static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
5973  ElementsKind initial_kind = GetInitialFastElementsKind();
5974  ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);
5975
5976  int to_index = GetSequenceIndexFromFastElementsKind(
5977      TERMINAL_FAST_ELEMENTS_KIND);
5978  for (int i = 0; i <= to_index; ++i) {
5979    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
5980    T stub(kind);
5981    stub.GetCode(isolate);
5982    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
5983        (!FLAG_track_allocation_sites &&
5984         (kind == initial_kind || kind == initial_holey_kind))) {
5985      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
5986      stub1.GetCode(isolate);
5987    }
5988  }
5989}
5990
5991
5992void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
5993  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
5994      isolate);
5995  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
5996      isolate);
5997  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
5998      isolate);
5999}
6000
6001
6002void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
6003    Isolate* isolate) {
6004  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
6005  for (int i = 0; i < 2; i++) {
6006    // For internal arrays we only need a few things.
6007    InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
6008    stubh1.GetCode(isolate);
6009    InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
6010    stubh2.GetCode(isolate);
6011    InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
6012    stubh3.GetCode(isolate);
6013  }
6014}
6015
6016
6017void ArrayConstructorStub::GenerateDispatchToArrayStub(
6018    MacroAssembler* masm,
6019    AllocationSiteOverrideMode mode) {
6020  if (argument_count_ == ANY) {
6021    Label not_zero_case, not_one_case;
6022    __ And(at, a0, a0);
6023    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
6024    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
6025
6026    __ bind(&not_zero_case);
6027    __ Branch(&not_one_case, gt, a0, Operand(1));
6028    CreateArrayDispatchOneArgument(masm, mode);
6029
6030    __ bind(&not_one_case);
6031    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
6032  } else if (argument_count_ == NONE) {
6033    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
6034  } else if (argument_count_ == ONE) {
6035    CreateArrayDispatchOneArgument(masm, mode);
6036  } else if (argument_count_ == MORE_THAN_ONE) {
6037    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
6038  } else {
6039    UNREACHABLE();
6040  }
6041}
6042
6043
6044void ArrayConstructorStub::Generate(MacroAssembler* masm) {
6045  // ----------- S t a t e -------------
6046  //  -- a0 : argc (only if argument_count_ == ANY)
6047  //  -- a1 : constructor
6048  //  -- a2 : type info cell
6049  //  -- sp[0] : return address
6050  //  -- sp[4] : last argument
6051  // -----------------------------------
6052  if (FLAG_debug_code) {
6053    // The array construct code is only set for the global and natives
6054    // builtin Array functions which always have maps.
6055
6056    // Initial map for the builtin Array function should be a map.
6057    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
6058    // Will both indicate a NULL and a Smi.
6059    __ SmiTst(a3, at);
6060    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
6061        at, Operand(zero_reg));
6062    __ GetObjectType(a3, a3, t0);
6063    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
6064        t0, Operand(MAP_TYPE));
6065
6066    // We should either have undefined in a2 or a valid cell.
6067    Label okay_here;
6068    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
6069    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6070    __ Branch(&okay_here, eq, a2, Operand(at));
6071    __ lw(a3, FieldMemOperand(a2, 0));
6072    __ Assert(eq, kExpectedPropertyCellInRegisterA2,
6073        a3, Operand(cell_map));
6074    __ bind(&okay_here);
6075  }
6076
6077  Label no_info;
6078  // Get the elements kind and case on that.
6079  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
6080  __ Branch(&no_info, eq, a2, Operand(at));
6081  __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));
6082
6083  // If the type cell is undefined, or contains anything other than an
6084  // AllocationSite, call an array constructor that doesn't use AllocationSites.
6085  __ lw(t0, FieldMemOperand(a3, 0));
6086  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
6087  __ Branch(&no_info, ne, t0, Operand(at));
6088
6089  __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
6090  __ SmiUntag(a3);
6091  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
6092  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
6093  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
6094
6095  __ bind(&no_info);
6096  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
6097}
6098
6099
6100void InternalArrayConstructorStub::GenerateCase(
6101    MacroAssembler* masm, ElementsKind kind) {
6102  Label not_zero_case, not_one_case;
6103  Label normal_sequence;
6104
6105  __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
6106  InternalArrayNoArgumentConstructorStub stub0(kind);
6107  __ TailCallStub(&stub0);
6108
6109  __ bind(&not_zero_case);
6110  __ Branch(&not_one_case, gt, a0, Operand(1));
6111
6112  if (IsFastPackedElementsKind(kind)) {
6113    // We might need to create a holey array
6114    // look at the first argument.
6115    __ lw(at, MemOperand(sp, 0));
6116    __ Branch(&normal_sequence, eq, at, Operand(zero_reg));
6117
6118    InternalArraySingleArgumentConstructorStub
6119        stub1_holey(GetHoleyElementsKind(kind));
6120    __ TailCallStub(&stub1_holey);
6121  }
6122
6123  __ bind(&normal_sequence);
6124  InternalArraySingleArgumentConstructorStub stub1(kind);
6125  __ TailCallStub(&stub1);
6126
6127  __ bind(&not_one_case);
6128  InternalArrayNArgumentsConstructorStub stubN(kind);
6129  __ TailCallStub(&stubN);
6130}
6131
6132
6133void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
6134  // ----------- S t a t e -------------
6135  //  -- a0 : argc
6136  //  -- a1 : constructor
6137  //  -- sp[0] : return address
6138  //  -- sp[4] : last argument
6139  // -----------------------------------
6140
6141  if (FLAG_debug_code) {
6142    // The array construct code is only set for the global and natives
6143    // builtin Array functions which always have maps.
6144
6145    // Initial map for the builtin Array function should be a map.
6146    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
6147    // Will both indicate a NULL and a Smi.
6148    __ SmiTst(a3, at);
6149    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
6150        at, Operand(zero_reg));
6151    __ GetObjectType(a3, a3, t0);
6152    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
6153        t0, Operand(MAP_TYPE));
6154  }
6155
6156  // Figure out the right elements kind.
6157  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
6158
6159  // Load the map's "bit field 2" into a3. We only need the first byte,
6160  // but the following bit field extraction takes care of that anyway.
6161  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
6162  // Retrieve elements_kind from bit field 2.
6163  __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);
6164
6165  if (FLAG_debug_code) {
6166    Label done;
6167    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
6168    __ Assert(
6169        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
6170        a3, Operand(FAST_HOLEY_ELEMENTS));
6171    __ bind(&done);
6172  }
6173
6174  Label fast_elements_case;
6175  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
6176  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
6177
6178  __ bind(&fast_elements_case);
6179  GenerateCase(masm, FAST_ELEMENTS);
6180}
6181
6182
6183#undef __
6184
6185} }  // namespace v8::internal
6186
6187#endif  // V8_TARGET_ARCH_MIPS
6188