code-stubs-mips.cc revision 69a99ed0b2b2ef69d393c371b03db3a98aaf880e
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_MIPS)
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "codegen.h"
35#include "regexp-macro-assembler.h"
36
37namespace v8 {
38namespace internal {
39
40
41#define __ ACCESS_MASM(masm)
42
43static void EmitIdenticalObjectComparison(MacroAssembler* masm,
44                                          Label* slow,
45                                          Condition cc,
46                                          bool never_nan_nan);
47static void EmitSmiNonsmiComparison(MacroAssembler* masm,
48                                    Register lhs,
49                                    Register rhs,
50                                    Label* rhs_not_nan,
51                                    Label* slow,
52                                    bool strict);
53static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
54static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
55                                           Register lhs,
56                                           Register rhs);
57
58
59// Check if the operand is a heap number.
60static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
61                                   Register scratch1, Register scratch2,
62                                   Label* not_a_heap_number) {
63  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
64  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
65  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
66}
67
68
69void ToNumberStub::Generate(MacroAssembler* masm) {
70  // The ToNumber stub takes one argument in a0.
71  Label check_heap_number, call_builtin;
72  __ JumpIfNotSmi(a0, &check_heap_number);
73  __ mov(v0, a0);
74  __ Ret();
75
76  __ bind(&check_heap_number);
77  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
78  __ mov(v0, a0);
79  __ Ret();
80
81  __ bind(&call_builtin);
82  __ push(a0);
83  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
84}
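// Roughly, the stub above computes (illustrative summary only):
//   ToNumber(Smi)            -> the Smi itself, e.g. ToNumber(42) == 42.
//   ToNumber(HeapNumber)     -> the same HeapNumber, unchanged.
//   ToNumber(anything else)  -> falls back to Builtins::TO_NUMBER.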
85
86
87void FastNewClosureStub::Generate(MacroAssembler* masm) {
88  // Create a new closure from the given function info in new
89  // space. Set the context to the current context in cp.
90  Label gc;
91
92  // Pop the function info from the stack.
93  __ pop(a3);
94
95  // Attempt to allocate new JSFunction in new space.
96  __ AllocateInNewSpace(JSFunction::kSize,
97                        v0,
98                        a1,
99                        a2,
100                        &gc,
101                        TAG_OBJECT);
102
103  int map_index = strict_mode_ == kStrictMode
104      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
105      : Context::FUNCTION_MAP_INDEX;
106
107  // Compute the function map in the current global context and set that
108  // as the map of the allocated object.
109  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
110  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
111  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
112  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
113
114  // Initialize the rest of the function. We don't have to update the
115  // write barrier because the allocated object is in new space.
116  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
117  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
118  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
119  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
120  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
121  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
122  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
123  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
124  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
125  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));
126
127  // Initialize the code pointer in the function to be the one
128  // found in the shared function info object.
129  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
130  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
131  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
132
133  // Return result. The argument function info has been popped already.
134  __ Ret();
135
136  // Create a new closure through the slower runtime call.
137  __ bind(&gc);
138  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
139  __ Push(cp, a3, t0);
140  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
141}
142
143
144void FastNewContextStub::Generate(MacroAssembler* masm) {
145  // Try to allocate the context in new space.
146  Label gc;
147  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
148
149  // Attempt to allocate the context in new space.
150  __ AllocateInNewSpace(FixedArray::SizeFor(length),
151                        v0,
152                        a1,
153                        a2,
154                        &gc,
155                        TAG_OBJECT);
156
157  // Load the function from the stack.
158  __ lw(a3, MemOperand(sp, 0));
159
160  // Set up the object header.
161  __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
162  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
163  __ li(a2, Operand(Smi::FromInt(length)));
164  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
165
166  // Set up the fixed slots.
167  __ li(a1, Operand(Smi::FromInt(0)));
168  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
169  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
170  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
171
172  // Copy the global object from the previous context.
173  __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
174  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
175
176  // Initialize the rest of the slots to undefined.
177  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
178  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
179    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
180  }
181
182  // Remove the on-stack argument and return.
183  __ mov(cp, v0);
184  __ Pop();
185  __ Ret();
186
187  // Need to collect. Call into runtime system.
188  __ bind(&gc);
189  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
190}
191
192
193void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
194  // Stack layout on entry:
195  // [sp]: constant elements.
196  // [sp + kPointerSize]: literal index.
197  // [sp + (2 * kPointerSize)]: literals array.
198
199  // All sizes here are multiples of kPointerSize.
200  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
201  int size = JSArray::kSize + elements_size;
202
203  // Load boilerplate object into a3 and check if we need to create a
204  // boilerplate.
205  Label slow_case;
206  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
207  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
208  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
209  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
210  __ Addu(t0, a3, t0);
211  __ lw(a3, MemOperand(t0));
212  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
213  __ Branch(&slow_case, eq, a3, Operand(t1));
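  // Address arithmetic above, spelled out: the literal index in a0 is a Smi
  // (value << kSmiTagSize), so shifting it left by
  // (kPointerSizeLog2 - kSmiTagSize) yields index * kPointerSize. Added to the
  // start of the literals array this gives &literals[literal_index]; an
  // undefined entry means no boilerplate has been created yet.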
214
215  if (FLAG_debug_code) {
216    const char* message;
217    Heap::RootListIndex expected_map_index;
218    if (mode_ == CLONE_ELEMENTS) {
219      message = "Expected (writable) fixed array";
220      expected_map_index = Heap::kFixedArrayMapRootIndex;
221    } else {
222      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
223      message = "Expected copy-on-write fixed array";
224      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
225    }
226    __ push(a3);
227    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
228    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
229    __ LoadRoot(at, expected_map_index);
230    __ Assert(eq, message, a3, Operand(at));
231    __ pop(a3);
232  }
233
234  // Allocate both the JS array and the elements array in one big
235  // allocation. This avoids multiple limit checks.
236  // Return new object in v0.
237  __ AllocateInNewSpace(size,
238                        v0,
239                        a1,
240                        a2,
241                        &slow_case,
242                        TAG_OBJECT);
243
244  // Copy the JS array part.
245  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
246    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
247      __ lw(a1, FieldMemOperand(a3, i));
248      __ sw(a1, FieldMemOperand(v0, i));
249    }
250  }
251
252  if (length_ > 0) {
253    // Get hold of the elements array of the boilerplate and setup the
254    // elements pointer in the resulting object.
255    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
256    __ Addu(a2, v0, Operand(JSArray::kSize));
257    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
258
259    // Copy the elements array.
260    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
261  }
262
263  // Return and remove the on-stack parameters.
264  __ Addu(sp, sp, Operand(3 * kPointerSize));
265  __ Ret();
266
267  __ bind(&slow_case);
268  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
269}
270
271
272// Takes a Smi and converts it to an IEEE 64-bit floating point value in two
273// registers.  The format is 1 sign bit, 11 exponent bits (biased by 1023) and
274// 52 fraction bits (20 in the first word, 32 in the second).  'zeros' is a
275// scratch register.  Destroys the source register.  No GC occurs during this
276// stub, so you don't have to set up the frame.
277class ConvertToDoubleStub : public CodeStub {
278 public:
279  ConvertToDoubleStub(Register result_reg_1,
280                      Register result_reg_2,
281                      Register source_reg,
282                      Register scratch_reg)
283      : result1_(result_reg_1),
284        result2_(result_reg_2),
285        source_(source_reg),
286        zeros_(scratch_reg) { }
287
288 private:
289  Register result1_;
290  Register result2_;
291  Register source_;
292  Register zeros_;
293
294  // Minor key encoding in 16 bits.
295  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
296  class OpBits: public BitField<Token::Value, 2, 14> {};
297
298  Major MajorKey() { return ConvertToDouble; }
299  int MinorKey() {
300    // Encode the parameters in a unique 16 bit value.
301    return  result1_.code() +
302           (result2_.code() << 4) +
303           (source_.code() << 8) +
304           (zeros_.code() << 12);
305  }
306
307  void Generate(MacroAssembler* masm);
308};
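// Worked example for the encoding described above (illustrative): the Smi 5
// becomes 1.25 * 2^2, i.e. sign 0, biased exponent 1023 + 2 = 0x401 and
// mantissa fraction 0.25, which is stored as the word pair
// 0x40140000 (sign, exponent and top 20 mantissa bits) and 0x00000000 (low 32
// mantissa bits).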
309
310
311void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
312#ifndef BIG_ENDIAN_FLOATING_POINT
313  Register exponent = result1_;
314  Register mantissa = result2_;
315#else
316  Register exponent = result2_;
317  Register mantissa = result1_;
318#endif
319  Label not_special;
320  // Convert from Smi to integer.
321  __ sra(source_, source_, kSmiTagSize);
322  // Move sign bit from source to destination.  This works because the sign bit
323  // in the exponent word of the double has the same position and polarity as
324  // the 2's complement sign bit in a Smi.
325  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
326  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
327  // Subtract from 0 if source was negative.
328  __ subu(at, zero_reg, source_);
329  __ movn(source_, at, exponent);
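  // Note: movn copies 'at' (0 - source_) into source_ only when the third
  // operand is non-zero, i.e. only when the sign bit was set, so the negation
  // happens without a branch.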
330
331  // We have -1, 0 or 1, which we treat specially. Register source_ contains
332  // absolute value: it is either equal to 1 (special case of -1 and 1),
333  // greater than 1 (not a special case) or less than 1 (special case of 0).
334  __ Branch(&not_special, gt, source_, Operand(1));
335
336  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
337  static const uint32_t exponent_word_for_1 =
338      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
339  // Safe to use 'at' as dest reg here.
340  __ Or(at, exponent, Operand(exponent_word_for_1));
341  __ movn(exponent, at, source_);  // Write exp when source not 0.
342  // 1, 0 and -1 all have 0 for the second word.
343  __ mov(mantissa, zero_reg);
344  __ Ret();
345
346  __ bind(&not_special);
347  // Count leading zeros.
348  // Gets the wrong answer for 0, but we already checked for that case above.
349  __ clz(zeros_, source_);
350  // Compute exponent and or it into the exponent register.
351  // We use mantissa as a scratch register here.
352  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
353  __ subu(mantissa, mantissa, zeros_);
354  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
355  __ Or(exponent, exponent, mantissa);
356
357  // Shift up the source chopping the top bit off.
358  __ Addu(zeros_, zeros_, Operand(1));
359  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
360  __ sllv(source_, source_, zeros_);
361  // Compute lower part of fraction (last 12 bits).
362  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
363  // And the top (top 20 bits).
364  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
365  __ or_(exponent, exponent, source_);
366
367  __ Ret();
368}
369
370
371void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
372                                   FloatingPointHelper::Destination destination,
373                                   Register scratch1,
374                                   Register scratch2) {
375  if (CpuFeatures::IsSupported(FPU)) {
376    CpuFeatures::Scope scope(FPU);
377    __ sra(scratch1, a0, kSmiTagSize);
378    __ mtc1(scratch1, f14);
379    __ cvt_d_w(f14, f14);
380    __ sra(scratch1, a1, kSmiTagSize);
381    __ mtc1(scratch1, f12);
382    __ cvt_d_w(f12, f12);
383    if (destination == kCoreRegisters) {
384      __ Move(a2, a3, f14);
385      __ Move(a0, a1, f12);
386    }
387  } else {
388    ASSERT(destination == kCoreRegisters);
389    // Write Smi from a0 to a3 and a2 in double format.
390    __ mov(scratch1, a0);
391    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
392    __ push(ra);
393    __ Call(stub1.GetCode());
394    // Write Smi from a1 to a1 and a0 in double format.
395    __ mov(scratch1, a1);
396    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
397    __ Call(stub2.GetCode());
398    __ pop(ra);
399  }
400}
401
402
403void FloatingPointHelper::LoadOperands(
404    MacroAssembler* masm,
405    FloatingPointHelper::Destination destination,
406    Register heap_number_map,
407    Register scratch1,
408    Register scratch2,
409    Label* slow) {
410
411  // Load right operand (a0) into f14 or a2/a3.
412  LoadNumber(masm, destination,
413             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
414
415  // Load left operand (a1) into f12 or a0/a1.
416  LoadNumber(masm, destination,
417             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
418}
419
420
421void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
422                                     Destination destination,
423                                     Register object,
424                                     FPURegister dst,
425                                     Register dst1,
426                                     Register dst2,
427                                     Register heap_number_map,
428                                     Register scratch1,
429                                     Register scratch2,
430                                     Label* not_number) {
431  if (FLAG_debug_code) {
432    __ AbortIfNotRootValue(heap_number_map,
433                           Heap::kHeapNumberMapRootIndex,
434                           "HeapNumberMap register clobbered.");
435  }
436
437  Label is_smi, done;
438
439  __ JumpIfSmi(object, &is_smi);
440  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
441
442  // Handle loading a double from a heap number.
443  if (CpuFeatures::IsSupported(FPU) &&
444      destination == kFPURegisters) {
445    CpuFeatures::Scope scope(FPU);
446    // Load the double from tagged HeapNumber to double register.
447
448    // ARM uses a workaround here because of the unaligned HeapNumber
449    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
450    // point in generating even more instructions.
451    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
452  } else {
453    ASSERT(destination == kCoreRegisters);
454    // Load the double from heap number to dst1 and dst2 in double format.
455    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
456    __ lw(dst2, FieldMemOperand(object,
457        HeapNumber::kValueOffset + kPointerSize));
458  }
459  __ Branch(&done);
460
461  // Handle loading a double from a smi.
462  __ bind(&is_smi);
463  if (CpuFeatures::IsSupported(FPU)) {
464    CpuFeatures::Scope scope(FPU);
465    // Convert smi to double using FPU instructions.
466    __ SmiUntag(scratch1, object);
467    __ mtc1(scratch1, dst);
468    __ cvt_d_w(dst, dst);
469    if (destination == kCoreRegisters) {
470      // Load the converted smi to dst1 and dst2 in double format.
471      __ Move(dst1, dst2, dst);
472    }
473  } else {
474    ASSERT(destination == kCoreRegisters);
475    // Write smi to dst1 and dst2 double format.
476    __ mov(scratch1, object);
477    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
478    __ push(ra);
479    __ Call(stub.GetCode());
480    __ pop(ra);
481  }
482
483  __ bind(&done);
484}
485
486
487void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
488                                               Register object,
489                                               Register dst,
490                                               Register heap_number_map,
491                                               Register scratch1,
492                                               Register scratch2,
493                                               Register scratch3,
494                                               FPURegister double_scratch,
495                                               Label* not_number) {
496  if (FLAG_debug_code) {
497    __ AbortIfNotRootValue(heap_number_map,
498                           Heap::kHeapNumberMapRootIndex,
499                           "HeapNumberMap register clobbered.");
500  }
501  Label is_smi;
502  Label done;
503  Label not_in_int32_range;
504
505  __ JumpIfSmi(object, &is_smi);
506  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
507  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
508  __ ConvertToInt32(object,
509                    dst,
510                    scratch1,
511                    scratch2,
512                    double_scratch,
513                    &not_in_int32_range);
514  __ jmp(&done);
515
516  __ bind(&not_in_int32_range);
517  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
518  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
519
520  __ EmitOutOfInt32RangeTruncate(dst,
521                                 scratch1,
522                                 scratch2,
523                                 scratch3);
524
525  __ jmp(&done);
526
527  __ bind(&is_smi);
528  __ SmiUntag(dst, object);
529  __ bind(&done);
530}
531
532
533void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
534                                             Register int_scratch,
535                                             Destination destination,
536                                             FPURegister double_dst,
537                                             Register dst1,
538                                             Register dst2,
539                                             Register scratch2,
540                                             FPURegister single_scratch) {
541  ASSERT(!int_scratch.is(scratch2));
542  ASSERT(!int_scratch.is(dst1));
543  ASSERT(!int_scratch.is(dst2));
544
545  Label done;
546
547  if (CpuFeatures::IsSupported(FPU)) {
548    CpuFeatures::Scope scope(FPU);
549    __ mtc1(int_scratch, single_scratch);
550    __ cvt_d_w(double_dst, single_scratch);
551    if (destination == kCoreRegisters) {
552      __ Move(dst1, dst2, double_dst);
553    }
554  } else {
555    Label fewer_than_20_useful_bits;
556    // Expected output:
557    // |         dst2            |         dst1            |
558    // | s |   exp   |              mantissa               |
559
560    // Check for zero.
561    __ mov(dst2, int_scratch);
562    __ mov(dst1, int_scratch);
563    __ Branch(&done, eq, int_scratch, Operand(zero_reg));
564
565    // Preload the sign of the value.
566    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
567    // Get the absolute value of the object (as an unsigned integer).
568    Label skip_sub;
569    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
570    __ Subu(int_scratch, zero_reg, int_scratch);
571    __ bind(&skip_sub);
572
573    // Get mantissa[51:20].
574
575    // Get the position of the first set bit.
576    __ clz(dst1, int_scratch);
577    __ li(scratch2, 31);
578    __ Subu(dst1, scratch2, dst1);
579
580    // Set the exponent.
581    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
582    __ Ins(dst2, scratch2,
583        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
584
585    // Clear the first non-null bit (it becomes the implicit mantissa bit).
586    __ li(scratch2, Operand(1));
587    __ sllv(scratch2, scratch2, dst1);
588    __ li(at, -1);
589    __ Xor(scratch2, scratch2, at);
590    __ And(int_scratch, int_scratch, scratch2);
591
592    // Get the number of bits to set in the lower part of the mantissa.
593    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
594    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
595    // Set the higher 20 bits of the mantissa.
596    __ srlv(at, int_scratch, scratch2);
597    __ or_(dst2, dst2, at);
598    __ li(at, 32);
599    __ subu(scratch2, at, scratch2);
600    __ sllv(dst1, int_scratch, scratch2);
601    __ Branch(&done);
602
603    __ bind(&fewer_than_20_useful_bits);
604    __ li(at, HeapNumber::kMantissaBitsInTopWord);
605    __ subu(scratch2, at, dst1);
606    __ sllv(scratch2, int_scratch, scratch2);
607    __ Or(dst2, dst2, scratch2);
608    // Set dst1 to 0.
609    __ mov(dst1, zero_reg);
610  }
611  __ bind(&done);
612}
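// Worked example for the non-FPU path above (illustrative): for int_scratch
// == 6 (binary 110) clz gives 29, so the top set bit is at position 2 and the
// biased exponent is 1023 + 2 = 1025. Clearing that leading bit leaves 2,
// which is shifted into the top mantissa bits, giving the word pair
// 0x40180000 / 0x00000000, i.e. the double 6.0.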
613
614
615void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
616                                                  Register object,
617                                                  Destination destination,
618                                                  FPURegister double_dst,
619                                                  Register dst1,
620                                                  Register dst2,
621                                                  Register heap_number_map,
622                                                  Register scratch1,
623                                                  Register scratch2,
624                                                  FPURegister single_scratch,
625                                                  Label* not_int32) {
626  ASSERT(!scratch1.is(object) && !scratch2.is(object));
627  ASSERT(!scratch1.is(scratch2));
628  ASSERT(!heap_number_map.is(object) &&
629         !heap_number_map.is(scratch1) &&
630         !heap_number_map.is(scratch2));
631
632  Label done, obj_is_not_smi;
633
634  __ JumpIfNotSmi(object, &obj_is_not_smi);
635  __ SmiUntag(scratch1, object);
636  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
637                     scratch2, single_scratch);
638  __ Branch(&done);
639
640  __ bind(&obj_is_not_smi);
641  if (FLAG_debug_code) {
642    __ AbortIfNotRootValue(heap_number_map,
643                           Heap::kHeapNumberMapRootIndex,
644                           "HeapNumberMap register clobbered.");
645  }
646  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
647
648  // Load the number.
649  if (CpuFeatures::IsSupported(FPU)) {
650    CpuFeatures::Scope scope(FPU);
651    // Load the double value.
652    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
653
654    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
655    // On MIPS a lot of things cannot be implemented the same way so right
656    // now it makes a lot more sense to just do things manually.
657
658    // Save FCSR.
659    __ cfc1(scratch1, FCSR);
660    // Disable FPU exceptions.
661    __ ctc1(zero_reg, FCSR);
662    __ trunc_w_d(single_scratch, double_dst);
663    // Retrieve FCSR.
664    __ cfc1(scratch2, FCSR);
665    // Restore FCSR.
666    __ ctc1(scratch1, FCSR);
667
668    // Check for inexact conversion or exception.
669    __ And(scratch2, scratch2, kFCSRFlagMask);
670
671    // Jump to not_int32 if the operation did not succeed.
672    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
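    // How this works: FCSR was cleared above, and trunc_w_d raises the
    // inexact/invalid cause bits whenever the double is not exactly
    // representable as a 32-bit integer (e.g. 1.5, NaN, or values >= 2^31),
    // so any bit remaining under kFCSRFlagMask means "not an int32".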
673
674    if (destination == kCoreRegisters) {
675      __ Move(dst1, dst2, double_dst);
676    }
677
678  } else {
679    ASSERT(!scratch1.is(object) && !scratch2.is(object));
680    // Load the double value in the destination registers.
681    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
682    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
683
684    // Check for 0 and -0.
685    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
686    __ Or(scratch1, scratch1, Operand(dst2));
687    __ Branch(&done, eq, scratch1, Operand(zero_reg));
688
689    // Check that the value can be exactly represented by a 32-bit integer.
690    // Jump to not_int32 if that's not the case.
691    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
692
693    // dst1 and dst2 were trashed. Reload the double value.
694    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
695    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
696  }
697
698  __ bind(&done);
699}
700
701
702void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
703                                            Register object,
704                                            Register dst,
705                                            Register heap_number_map,
706                                            Register scratch1,
707                                            Register scratch2,
708                                            Register scratch3,
709                                            FPURegister double_scratch,
710                                            Label* not_int32) {
711  ASSERT(!dst.is(object));
712  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
713  ASSERT(!scratch1.is(scratch2) &&
714         !scratch1.is(scratch3) &&
715         !scratch2.is(scratch3));
716
717  Label done;
718
719  // Untag the object into the destination register.
720  __ SmiUntag(dst, object);
721  // Just return if the object is a smi.
722  __ JumpIfSmi(object, &done);
723
724  if (FLAG_debug_code) {
725    __ AbortIfNotRootValue(heap_number_map,
726                           Heap::kHeapNumberMapRootIndex,
727                           "HeapNumberMap register clobbered.");
728  }
729  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
730
731  // Object is a heap number.
732  // Convert the floating point value to a 32-bit integer.
733  if (CpuFeatures::IsSupported(FPU)) {
734    CpuFeatures::Scope scope(FPU);
735    // Load the double value.
736    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
737
738    // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
739    // On MIPS a lot of things cannot be implemented the same way so right
740    // now it makes a lot more sense to just do things manually.
741
742    // Save FCSR.
743    __ cfc1(scratch1, FCSR);
744    // Disable FPU exceptions.
745    __ ctc1(zero_reg, FCSR);
746    __ trunc_w_d(double_scratch, double_scratch);
747    // Retrieve FCSR.
748    __ cfc1(scratch2, FCSR);
749    // Restore FCSR.
750    __ ctc1(scratch1, FCSR);
751
752    // Check for inexact conversion or exception.
753    __ And(scratch2, scratch2, kFCSRFlagMask);
754
755    // Jump to not_int32 if the operation did not succeed.
756    __ Branch(not_int32, ne, scratch2, Operand(zero_reg));
757    // Get the result in the destination register.
758    __ mfc1(dst, double_scratch);
759
760  } else {
761    // Load the double value in the destination registers.
762    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
763    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
764
765    // Check for 0 and -0.
766    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
767    __ Or(dst, scratch2, Operand(dst));
768    __ Branch(&done, eq, dst, Operand(zero_reg));
769
770    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
771
772    // Register state after DoubleIs32BitInteger:
773    // dst: mantissa[51:20].
774    // scratch2: 1
775
776    // Shift back the higher bits of the mantissa.
777    __ srlv(dst, dst, scratch3);
778    // Set the implicit first bit.
779    __ li(at, 32);
780    __ subu(scratch3, at, scratch3);
781    __ sllv(scratch2, scratch2, scratch3);
782    __ Or(dst, dst, scratch2);
783    // Set the sign.
784    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
785    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
786    Label skip_sub;
787    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
788    __ Subu(dst, zero_reg, dst);
789    __ bind(&skip_sub);
790  }
791
792  __ bind(&done);
793}
794
795
796void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
797                                               Register src1,
798                                               Register src2,
799                                               Register dst,
800                                               Register scratch,
801                                               Label* not_int32) {
802  // Get exponent alone in scratch.
803  __ Ext(scratch,
804         src1,
805         HeapNumber::kExponentShift,
806         HeapNumber::kExponentBits);
807
808  // Subtract the bias from the exponent.
809  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
810
811  // src1: higher (exponent) part of the double value.
812  // src2: lower (mantissa) part of the double value.
813  // scratch: unbiased exponent.
814
815  // Fast cases. Check for obvious non 32-bit integer values.
816  // Negative exponent cannot yield 32-bit integers.
817  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
818  // Exponent greater than 31 cannot yield 32-bit integers.
819  // Also, a positive value with an exponent equal to 31 is outside of the
820  // signed 32-bit integer range.
821  // Another way to put it is that if (exponent - signbit) > 30 then the
822  // number cannot be represented as an int32.
823  Register tmp = dst;
824  __ srl(at, src1, 31);
825  __ subu(tmp, scratch, at);
826  __ Branch(not_int32, gt, tmp, Operand(30));
827  // Not a 32-bit integer if bits [21:0] in the mantissa are not null.
828  __ And(tmp, src2, 0x3fffff);
829  __ Branch(not_int32, ne, tmp, Operand(zero_reg));
830
831  // Otherwise the exponent needs to be big enough to shift left all the
832  // non-zero bits. So we need the (30 - exponent) last bits of the
833  // 31 higher bits of the mantissa to be null.
834  // Because bits [21:0] are null, we can check instead that the
835  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
836
837  // Get the 32 higher bits of the mantissa in dst.
838  __ Ext(dst,
839         src2,
840         HeapNumber::kMantissaBitsInTopWord,
841         32 - HeapNumber::kMantissaBitsInTopWord);
842  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
843  __ or_(dst, dst, at);
844
845  // Create the mask and test the lower bits (of the higher bits).
846  __ li(at, 32);
847  __ subu(scratch, at, scratch);
848  __ li(src2, 1);
849  __ sllv(src1, src2, scratch);
850  __ Subu(src1, src1, Operand(1));
851  __ And(src1, dst, src1);
852  __ Branch(not_int32, ne, src1, Operand(zero_reg));
853}
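// Illustrative example for DoubleIs32BitInteger: 2.5 is stored as
// 0x40040000 / 0x00000000 (unbiased exponent 1). The final mask here is
// (1 << (32 - 1)) - 1 = 0x7fffffff, the reassembled top mantissa bits are
// 0x40000000, so the And is non-zero and the value is correctly rejected as
// not being a 32-bit integer.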
854
855
856void FloatingPointHelper::CallCCodeForDoubleOperation(
857    MacroAssembler* masm,
858    Token::Value op,
859    Register heap_number_result,
860    Register scratch) {
861  // Using core registers:
862  // a0: Left value (least significant part of mantissa).
863  // a1: Left value (sign, exponent, top of mantissa).
864  // a2: Right value (least significant part of mantissa).
865  // a3: Right value (sign, exponent, top of mantissa).
866
867  // Assert that heap_number_result is saved.
868  // We currently always use s0 to pass it.
869  ASSERT(heap_number_result.is(s0));
870
871  // Push the current return address before the C call.
872  __ push(ra);
873  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
874  if (!IsMipsSoftFloatABI) {
875    CpuFeatures::Scope scope(FPU);
876    // We are not using MIPS FPU instructions, and the parameters for the
877    // runtime function call are prepared in a0-a3 registers, but the function
878    // we are calling is compiled with the hard-float flag and expects the
879    // hard-float ABI (parameters in f12/f14 registers). We need to copy the
880    // parameters from the a0-a3 registers to the f12/f14 register pairs.
881    __ Move(f12, a0, a1);
882    __ Move(f14, a2, a3);
883  }
884  // Call C routine that may not cause GC or other trouble.
885  __ CallCFunction(ExternalReference::double_fp_operation(op, masm->isolate()),
886                   4);
887  // Store answer in the overwritable heap number.
888  if (!IsMipsSoftFloatABI) {
889    CpuFeatures::Scope scope(FPU);
890    // Double returned in register f0.
891    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
892  } else {
893    // Double returned in registers v0 and v1.
894    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
895    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
896  }
897  // Place heap_number_result in v0 and return to the pushed return address.
898  __ mov(v0, heap_number_result);
899  __ pop(ra);
900  __ Ret();
901}
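// Note on the soft-float return path above (assuming the little-endian O32
// ABI used elsewhere in this file): a double returned in core registers
// arrives with the low word in v0 and the high word in v1, which is why v0 is
// stored at kMantissaOffset and v1 at kExponentOffset.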
902
903
904// See the class comment; this does NOT work for int32s that are in Smi range.
905void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
906  Label max_negative_int;
907  // the_int_ has the answer which is a signed int32 but not a Smi.
908  // We test for the special value that has a different exponent.
909  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
910  // Test sign, and save for later conditionals.
911  __ And(sign_, the_int_, Operand(0x80000000u));
912  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
913
914  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same
915  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
916  uint32_t non_smi_exponent =
917      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
918  __ li(scratch_, Operand(non_smi_exponent));
919  // Set the sign bit in scratch_ if the value was negative.
920  __ or_(scratch_, scratch_, sign_);
921  // Subtract from 0 if the value was negative.
922  __ subu(at, zero_reg, the_int_);
923  __ movn(the_int_, at, sign_);
924  // We should be masking the implicit first digit of the mantissa away here,
925  // but it just ends up combining harmlessly with the last digit of the
926  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
927  // the most significant 1 to hit the last bit of the 12-bit sign and exponent.
928  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
929  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
930  __ srl(at, the_int_, shift_distance);
931  __ or_(scratch_, scratch_, at);
932  __ sw(scratch_, FieldMemOperand(the_heap_number_,
933                                   HeapNumber::kExponentOffset));
934  __ sll(scratch_, the_int_, 32 - shift_distance);
935  __ sw(scratch_, FieldMemOperand(the_heap_number_,
936                                   HeapNumber::kMantissaOffset));
937  __ Ret();
938
939  __ bind(&max_negative_int);
940  // The max negative int32 is stored as a positive number in the mantissa of
941  // a double because it uses a sign bit instead of using two's complement.
942  // The actual mantissa bits stored are all 0 because the implicit most
943  // significant 1 bit is not stored.
944  non_smi_exponent += 1 << HeapNumber::kExponentShift;
945  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
946  __ sw(scratch_,
947        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
948  __ mov(scratch_, zero_reg);
949  __ sw(scratch_,
950        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
951  __ Ret();
952}
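// Illustrative example: for the_int_ == 0x40000000 (2^30, not a Smi) the stub
// writes the exponent word (1023 + 30) << 20 == 0x41d00000. The implicit
// leading mantissa bit shifted in by srl lands on the lowest exponent bit,
// which is already 1, so the stored pair 0x41d00000 / 0x00000000 is exactly
// 2^30, as the comment about the "harmless" combination explains.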
953
954
955// Handle the case where the lhs and rhs are the same object.
956// Equality is almost reflexive (everything but NaN), so this is a test
957// for "identity and not NaN".
958static void EmitIdenticalObjectComparison(MacroAssembler* masm,
959                                          Label* slow,
960                                          Condition cc,
961                                          bool never_nan_nan) {
962  Label not_identical;
963  Label heap_number, return_equal;
964  Register exp_mask_reg = t5;
965
966  __ Branch(&not_identical, ne, a0, Operand(a1));
967
968  // The two objects are identical. If we know that one of them isn't NaN then
969  // we now know they test equal.
970  if (cc != eq || !never_nan_nan) {
971    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
972
973    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
974    // so we do the second best thing - test it ourselves.
975    // The operands are identical and they are not both Smis, so neither of
976    // them is a Smi. If it's not a heap number, then return equal.
977    if (cc == less || cc == greater) {
978      __ GetObjectType(a0, t4, t4);
979      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
980    } else {
981      __ GetObjectType(a0, t4, t4);
982      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
983      // Comparing JS objects with <=, >= is complicated.
984      if (cc != eq) {
985        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
986        // Normally here we fall through to return_equal, but undefined is
987        // special: (undefined == undefined) == true, but
988        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
989        if (cc == less_equal || cc == greater_equal) {
990          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
991          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
992          __ Branch(&return_equal, ne, a0, Operand(t2));
993          if (cc == le) {
994            // undefined <= undefined should fail.
995            __ li(v0, Operand(GREATER));
996          } else {
997            // undefined >= undefined should fail.
998            __ li(v0, Operand(LESS));
999          }
1000          __ Ret();
1001        }
1002      }
1003    }
1004  }
1005
1006  __ bind(&return_equal);
1007  if (cc == less) {
1008    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
1009  } else if (cc == greater) {
1010    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
1011  } else {
1012    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
1013  }
1014  __ Ret();
1015
1016  if (cc != eq || !never_nan_nan) {
1017    // For less and greater we don't have to check for NaN since the result of
1018    // x < x is false regardless.  For the others here is some code to check
1019    // for NaN.
1020    if (cc != lt && cc != gt) {
1021      __ bind(&heap_number);
1022      // It is a heap number, so return non-equal if it's NaN and equal if it's
1023      // not NaN.
1024
1025      // The representation of NaN values has all exponent bits (52..62) set,
1026      // and not all mantissa bits (0..51) clear.
1027      // Read top bits of double representation (second word of value).
1028      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1029      // Test that exponent bits are all set.
1030      __ And(t3, t2, Operand(exp_mask_reg));
1031      // If all bits not set (ne cond), then not a NaN, objects are equal.
1032      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
1033
1034      // Shift out flag and all exponent bits, retaining only mantissa.
1035      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
1036      // Or with all low-bits of mantissa.
1037      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1038      __ Or(v0, t3, Operand(t2));
1039      // For equal we already have the right value in v0:  Return zero (equal)
1040      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
1041      // not (it's a NaN).  For <= and >= we need to load v0 with the failing
1042      // value if it's a NaN.
1043      if (cc != eq) {
1044        // All-zero means Infinity means equal.
1045        __ Ret(eq, v0, Operand(zero_reg));
1046        if (cc == le) {
1047          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
1048        } else {
1049          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
1050        }
1051      }
1052      __ Ret();
1053    }
1054    // No fall through here.
1055  }
1056
1057  __ bind(&not_identical);
1058}
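// Note: the comparison helpers in this file report their result as an integer
// in v0, using 0 for "equal" and the LESS/GREATER constants to force the
// ordering comparisons to fail in the required direction (e.g. for NaN and
// for undefined <= undefined).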
1059
1060
1061static void EmitSmiNonsmiComparison(MacroAssembler* masm,
1062                                    Register lhs,
1063                                    Register rhs,
1064                                    Label* both_loaded_as_doubles,
1065                                    Label* slow,
1066                                    bool strict) {
1067  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1068         (lhs.is(a1) && rhs.is(a0)));
1069
1070  Label lhs_is_smi;
1071  __ And(t0, lhs, Operand(kSmiTagMask));
1072  __ Branch(&lhs_is_smi, eq, t0, Operand(zero_reg));
1073  // Rhs is a Smi.
1074  // Check whether the non-smi is a heap number.
1075  __ GetObjectType(lhs, t4, t4);
1076  if (strict) {
1077    // If lhs was not a number and rhs was a Smi then strict equality cannot
1078    // succeed. Return non-equal (lhs is already not zero).
1079    __ mov(v0, lhs);
1080    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1081  } else {
1082    // Smi compared non-strictly with a non-Smi non-heap-number. Call
1083    // the runtime.
1084    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1085  }
1086
1087  // Rhs is a smi, lhs is a number.
1088  // Convert smi rhs to double.
1089  if (CpuFeatures::IsSupported(FPU)) {
1090    CpuFeatures::Scope scope(FPU);
1091    __ sra(at, rhs, kSmiTagSize);
1092    __ mtc1(at, f14);
1093    __ cvt_d_w(f14, f14);
1094    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1095  } else {
1096    // Load lhs to a double in a2, a3.
1097    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1098    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1099
1100    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
1101    __ mov(t6, rhs);
1102    ConvertToDoubleStub stub1(a1, a0, t6, t5);
1103    __ push(ra);
1104    __ Call(stub1.GetCode());
1105
1106    __ pop(ra);
1107  }
1108
1109  // We now have both loaded as doubles.
1110  __ jmp(both_loaded_as_doubles);
1111
1112  __ bind(&lhs_is_smi);
1113  // Lhs is a Smi.  Check whether the non-smi is a heap number.
1114  __ GetObjectType(rhs, t4, t4);
1115  if (strict) {
1116    // If lhs was not a number and rhs was a Smi then strict equality cannot
1117    // succeed. Return non-equal.
1118    __ li(v0, Operand(1));
1119    __ Ret(ne, t4, Operand(HEAP_NUMBER_TYPE));
1120  } else {
1121    // Smi compared non-strictly with a non-Smi non-heap-number. Call
1122    // the runtime.
1123    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
1124  }
1125
1126  // Lhs is a smi, rhs is a number.
1127  // Convert smi lhs to double.
1128  if (CpuFeatures::IsSupported(FPU)) {
1129    CpuFeatures::Scope scope(FPU);
1130    __ sra(at, lhs, kSmiTagSize);
1131    __ mtc1(at, f12);
1132    __ cvt_d_w(f12, f12);
1133    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1134  } else {
1135    // Convert lhs to a double format. t5 is scratch.
1136    __ mov(t6, lhs);
1137    ConvertToDoubleStub stub2(a3, a2, t6, t5);
1138    __ push(ra);
1139    __ Call(stub2.GetCode());
1140    __ pop(ra);
1141    // Load rhs to a double in a1, a0.
1142    if (rhs.is(a0)) {
1143      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1144      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1145    } else {
1146      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1147      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1148    }
1149  }
1150  // Fall through to both_loaded_as_doubles.
1151}
1152
1153
1154void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1155  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1156  if (CpuFeatures::IsSupported(FPU)) {
1157    CpuFeatures::Scope scope(FPU);
1158    // Lhs and rhs are already loaded to f12 and f14 register pairs.
1159    __ Move(t0, t1, f14);
1160    __ Move(t2, t3, f12);
1161  } else {
1162    // Lhs and rhs are already loaded to GP registers.
1163    __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
1164    __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
1165    __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
1166    __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
1167  }
1168  Register rhs_exponent = exp_first ? t0 : t1;
1169  Register lhs_exponent = exp_first ? t2 : t3;
1170  Register rhs_mantissa = exp_first ? t1 : t0;
1171  Register lhs_mantissa = exp_first ? t3 : t2;
1172  Label one_is_nan, neither_is_nan;
1173  Label lhs_not_nan_exp_mask_is_loaded;
1174
1175  Register exp_mask_reg = t4;
1176  __ li(exp_mask_reg, HeapNumber::kExponentMask);
1177  __ and_(t5, lhs_exponent, exp_mask_reg);
1178  __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1179
1180  __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1181  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1182
1183  __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1184
1185  __ li(exp_mask_reg, HeapNumber::kExponentMask);
1186  __ bind(&lhs_not_nan_exp_mask_is_loaded);
1187  __ and_(t5, rhs_exponent, exp_mask_reg);
1188
1189  __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1190
1191  __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1192  __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1193
1194  __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1195
1196  __ bind(&one_is_nan);
1197  // NaN comparisons always fail.
1198  // Load whatever we need in v0 to make the comparison fail.
1199  if (cc == lt || cc == le) {
1200    __ li(v0, Operand(GREATER));
1201  } else {
1202    __ li(v0, Operand(LESS));
1203  }
1204  __ Ret();  // Return.
1205
1206  __ bind(&neither_is_nan);
1207}
1208
1209
1210static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1211  // f12 and f14 have the two doubles.  Neither is a NaN.
1212  // Call a native function to do a comparison between two non-NaNs.
1213  // Call C routine that may not cause GC or other trouble.
1214  // We make the call and return manually because we need the argument slots
1215  // to be freed.
1216
1217  Label return_result_not_equal, return_result_equal;
1218  if (cc == eq) {
1219    // Doubles are not equal unless they have the same bit pattern.
1220    // Exception: 0 and -0.
1221    bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1222    if (CpuFeatures::IsSupported(FPU)) {
1223      CpuFeatures::Scope scope(FPU);
1224      // Lhs and rhs are already loaded to f12 and f14 register pairs.
1225      __ Move(t0, t1, f14);
1226      __ Move(t2, t3, f12);
1227    } else {
1228      // Lhs and rhs are already loaded to GP registers.
1229      __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
1230      __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
1231      __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
1232      __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
1233    }
1234    Register rhs_exponent = exp_first ? t0 : t1;
1235    Register lhs_exponent = exp_first ? t2 : t3;
1236    Register rhs_mantissa = exp_first ? t1 : t0;
1237    Register lhs_mantissa = exp_first ? t3 : t2;
1238
1239    __ xor_(v0, rhs_mantissa, lhs_mantissa);
1240    __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1241
1242    __ subu(v0, rhs_exponent, lhs_exponent);
1243    __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1244    // 0, -0 case.
1245    __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1246    __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1247    __ or_(t4, rhs_exponent, lhs_exponent);
1248    __ or_(t4, t4, rhs_mantissa);
1249
1250    __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1251
1252    __ bind(&return_result_equal);
1253    __ li(v0, Operand(EQUAL));
1254    __ Ret();
1255  }
1256
1257  __ bind(&return_result_not_equal);
1258
1259  if (!CpuFeatures::IsSupported(FPU)) {
1260    __ push(ra);
1261    __ PrepareCallCFunction(4, t4);  // Two doubles count as 4 arguments.
1262    if (!IsMipsSoftFloatABI) {
1263      // We are not using MIPS FPU instructions, and the parameters for the
1264      // runtime function call are prepared in a0-a3 registers, but the
1265      // function we are calling is compiled with the hard-float flag and
1266      // expects the hard-float ABI (parameters in f12/f14 registers). We need
1267      // to copy the parameters from the a0-a3 registers to f12/f14 pairs.
1268      __ Move(f12, a0, a1);
1269      __ Move(f14, a2, a3);
1270    }
1271    __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), 4);
1272    __ pop(ra);  // Because this function returns int, result is in v0.
1273    __ Ret();
1274  } else {
1275    CpuFeatures::Scope scope(FPU);
1276    Label equal, less_than;
1277    __ c(EQ, D, f12, f14);
1278    __ bc1t(&equal);
1279    __ nop();
1280
1281    __ c(OLT, D, f12, f14);
1282    __ bc1t(&less_than);
1283    __ nop();
1284
1285    // Not equal, not less, not NaN, must be greater.
1286    __ li(v0, Operand(GREATER));
1287    __ Ret();
1288
1289    __ bind(&equal);
1290    __ li(v0, Operand(EQUAL));
1291    __ Ret();
1292
1293    __ bind(&less_than);
1294    __ li(v0, Operand(LESS));
1295    __ Ret();
1296  }
1297}
1298
1299
1300static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1301                                           Register lhs,
1302                                           Register rhs) {
1303    // If either operand is a JS object or an oddball value, then they are
1304    // not equal since their pointers are different.
1305    // There is no test for undetectability in strict equality.
1306    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
1307    Label first_non_object;
1308    // Get the type of the first operand into a2 and compare it with
1309    // FIRST_SPEC_OBJECT_TYPE.
1310    __ GetObjectType(lhs, a2, a2);
1311    __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1312
1313    // Return non-zero.
1314    Label return_not_equal;
1315    __ bind(&return_not_equal);
1316    __ li(v0, Operand(1));
1317    __ Ret();
1318
1319    __ bind(&first_non_object);
1320    // Check for oddballs: true, false, null, undefined.
1321    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1322
1323    __ GetObjectType(rhs, a3, a3);
1324    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1325
1326    // Check for oddballs: true, false, null, undefined.
1327    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1328
1329    // Now that we have the types we might as well check for symbol-symbol.
1330    // Ensure that no non-strings have the symbol bit set.
1331    STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1332    STATIC_ASSERT(kSymbolTag != 0);
1333    __ And(t2, a2, Operand(a3));
1334    __ And(t0, t2, Operand(kIsSymbolMask));
1335    __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1336}
1337
1338
1339static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1340                                       Register lhs,
1341                                       Register rhs,
1342                                       Label* both_loaded_as_doubles,
1343                                       Label* not_heap_numbers,
1344                                       Label* slow) {
1345  __ GetObjectType(lhs, a3, a2);
1346  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1347  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1348  // If first was a heap number & second wasn't, go to slow case.
1349  __ Branch(slow, ne, a3, Operand(a2));
1350
1351  // Both are heap numbers. Load them up then jump to the code we have
1352  // for that.
1353  if (CpuFeatures::IsSupported(FPU)) {
1354    CpuFeatures::Scope scope(FPU);
1355    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1356    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1357  } else {
1358    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1359    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1360    if (rhs.is(a0)) {
1361      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1362      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1363    } else {
1364      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1365      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1366    }
1367  }
1368  __ jmp(both_loaded_as_doubles);
1369}
1370
1371
1372// Fast negative check for symbol-to-symbol equality.
1373static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1374                                         Register lhs,
1375                                         Register rhs,
1376                                         Label* possible_strings,
1377                                         Label* not_both_strings) {
1378  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1379         (lhs.is(a1) && rhs.is(a0)));
1380
1381  // a2 is object type of lhs.
1382  // Ensure that no non-strings have the symbol bit set.
1383  Label object_test;
1384  STATIC_ASSERT(kSymbolTag != 0);
1385  __ And(at, a2, Operand(kIsNotStringMask));
1386  __ Branch(&object_test, ne, at, Operand(zero_reg));
1387  __ And(at, a2, Operand(kIsSymbolMask));
1388  __ Branch(possible_strings, eq, at, Operand(zero_reg));
1389  __ GetObjectType(rhs, a3, a3);
1390  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1391  __ And(at, a3, Operand(kIsSymbolMask));
1392  __ Branch(possible_strings, eq, at, Operand(zero_reg));
1393
1394  // Both are symbols. We already checked they weren't the same pointer
1395  // so they are not equal.
1396  __ li(v0, Operand(1));   // Non-zero indicates not equal.
1397  __ Ret();
1398
1399  __ bind(&object_test);
1400  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1401  __ GetObjectType(rhs, a2, a3);
1402  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1403
1404  // If both objects are undetectable, they are equal.  Otherwise, they
1405  // are not equal, since they are different objects and an object is not
1406  // equal to undefined.
1407  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1408  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1409  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1410  __ and_(a0, a2, a3);
1411  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
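      // v0 is 0 (equal) only when both maps have the undetectable bit set,
      // and non-zero (not equal) otherwise.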
1412  __ Xor(v0, a0, Operand(1 << Map::kIsUndetectable));
1413  __ Ret();
1414}
1415
1416
1417void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1418                                                         Register object,
1419                                                         Register result,
1420                                                         Register scratch1,
1421                                                         Register scratch2,
1422                                                         Register scratch3,
1423                                                         bool object_is_smi,
1424                                                         Label* not_found) {
1425  // Use of registers. Register result is used as a temporary.
1426  Register number_string_cache = result;
1427  Register mask = scratch3;
1428
1429  // Load the number string cache.
1430  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1431
1432  // Make the hash mask from the length of the number string cache. It
1433  // contains two elements (number and string) for each cache entry.
1434  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1435  // Divide length by two (length is a smi).
1436  __ sra(mask, mask, kSmiTagSize + 1);
1437  __ Addu(mask, mask, -1);  // Make mask.
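      // The cache holds a power-of-two number of entries, so entries - 1 is a
      // valid hash mask.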
1438
1439  // Calculate the entry in the number string cache. The hash value in the
1440  // number string cache for smis is just the smi value, and the hash for
1441  // doubles is the xor of the upper and lower words. See
1442  // Heap::GetNumberStringCache.
1443  Isolate* isolate = masm->isolate();
1444  Label is_smi;
1445  Label load_result_from_cache;
1446  if (!object_is_smi) {
1447    __ JumpIfSmi(object, &is_smi);
1448    if (CpuFeatures::IsSupported(FPU)) {
1449      CpuFeatures::Scope scope(FPU);
1450      __ CheckMap(object,
1451                  scratch1,
1452                  Heap::kHeapNumberMapRootIndex,
1453                  not_found,
1454                  DONT_DO_SMI_CHECK);
1455
1456      STATIC_ASSERT(8 == kDoubleSize);
1457      __ Addu(scratch1,
1458              object,
1459              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1460      __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1461      __ lw(scratch1, MemOperand(scratch1, 0));
1462      __ Xor(scratch1, scratch1, Operand(scratch2));
1463      __ And(scratch1, scratch1, Operand(mask));
1464
1465      // Calculate address of entry in string cache: each entry consists
1466      // of two pointer sized fields.
1467      __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1468      __ Addu(scratch1, number_string_cache, scratch1);
1469
1470      Register probe = mask;
1471      __ lw(probe,
1472             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1473      __ JumpIfSmi(probe, not_found);
1474      __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1475      __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
1476      __ c(EQ, D, f12, f14);
1477      __ bc1t(&load_result_from_cache);
1478      __ nop();   // bc1t() requires explicit fill of branch delay slot.
1479      __ Branch(not_found);
1480    } else {
1481      // Note that there is no cache check for the non-FPU case, even though
1482      // it seems there could be. May be a tiny optimization for non-FPU
1483      // cores.
1484      __ Branch(not_found);
1485    }
1486  }
1487
1488  __ bind(&is_smi);
1489  Register scratch = scratch1;
1490  __ sra(scratch, object, 1);   // Shift away the tag.
1491  __ And(scratch, mask, Operand(scratch));
1492
1493  // Calculate address of entry in string cache: each entry consists
1494  // of two pointer sized fields.
1495  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1496  __ Addu(scratch, number_string_cache, scratch);
1497
1498  // Check if the entry is the smi we are looking for.
1499  Register probe = mask;
1500  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1501  __ Branch(not_found, ne, object, Operand(probe));
1502
1503  // Get the result from the cache.
1504  __ bind(&load_result_from_cache);
1505  __ lw(result,
1506         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1507
1508  __ IncrementCounter(isolate->counters()->number_to_string_native(),
1509                      1,
1510                      scratch1,
1511                      scratch2);
1512}
1513
1514
1515void NumberToStringStub::Generate(MacroAssembler* masm) {
1516  Label runtime;
1517
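      // The stub's single argument, the number to convert, is on top of the
      // stack.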
1518  __ lw(a1, MemOperand(sp, 0));
1519
1520  // Generate code to lookup number in the number string cache.
1521  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1522  __ Addu(sp, sp, Operand(1 * kPointerSize));
1523  __ Ret();
1524
1525  __ bind(&runtime);
1526  // Handle number to string in the runtime system if not found in the cache.
1527  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1528}
1529
1530
1531// On entry lhs_ and rhs_ are the values to be compared.
1532// On exit, v0 is 0, positive, or negative (smi) to indicate the result
1533// of the comparison.
1534void CompareStub::Generate(MacroAssembler* masm) {
1535  Label slow;  // Call builtin.
1536  Label not_smis, both_loaded_as_doubles;
1537
1538
1539  if (include_smi_compare_) {
1540    Label not_two_smis, smi_done;
1541    __ Or(a2, a1, a0);
1542    __ JumpIfNotSmi(a2, &not_two_smis);
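        // Both operands are smis: untag them and subtract; the sign of v0
        // encodes the comparison result.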
1543    __ sra(a1, a1, 1);
1544    __ sra(a0, a0, 1);
1545    __ Subu(v0, a1, a0);
1546    __ Ret();
1547    __ bind(&not_two_smis);
1548  } else if (FLAG_debug_code) {
1549    __ Or(a2, a1, a0);
1550    __ And(a2, a2, kSmiTagMask);
1551    __ Assert(ne, "CompareStub: unexpected smi operands.",
1552        a2, Operand(zero_reg));
1553  }
1554
1555
1556  // NOTICE! This code is only reached after a smi-fast-case check, so
1557  // it is certain that at least one operand isn't a smi.
1558
1559  // Handle the case where the objects are identical.  Either returns the answer
1560  // or goes to slow.  Only falls through if the objects were not identical.
1561  EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1562
1563  // If either is a Smi (we know that not both are), then they can only
1564  // be strictly equal if the other is a HeapNumber.
1565  STATIC_ASSERT(kSmiTag == 0);
1566  ASSERT_EQ(0, Smi::FromInt(0));
1567  __ And(t2, lhs_, Operand(rhs_));
1568  __ JumpIfNotSmi(t2, &not_smis, t0);
1569  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1570  // 1) Return the answer.
1571  // 2) Go to slow.
1572  // 3) Fall through to both_loaded_as_doubles.
1573  // 4) Jump to rhs_not_nan.
1574  // In cases 3 and 4 we have found out we were dealing with a number-number
1575  // comparison and the numbers have been loaded into f12 and f14 as doubles,
1576  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1577  EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1578                          &both_loaded_as_doubles, &slow, strict_);
1579
1580  __ bind(&both_loaded_as_doubles);
1581  // f12, f14 are the double representations of the left hand side
1582  // and the right hand side if we have FPU. Otherwise a2, a3 represent
1583  // left hand side and a0, a1 represent right hand side.
1584
1585  Isolate* isolate = masm->isolate();
1586  if (CpuFeatures::IsSupported(FPU)) {
1587    CpuFeatures::Scope scope(FPU);
1588    Label nan;
1589    __ li(t0, Operand(LESS));
1590    __ li(t1, Operand(GREATER));
1591    __ li(t2, Operand(EQUAL));
1592
1593    // Check if either rhs or lhs is NaN.
1594    __ c(UN, D, f12, f14);
1595    __ bc1t(&nan);
1596    __ nop();   // bc1t() requires explicit fill of branch delay slot.
1597
1598    // Check if LESS condition is satisfied. If true, move conditionally
1599    // result to v0.
1600    __ c(OLT, D, f12, f14);
1601    __ movt(v0, t0);
1602    // Use the previous check to conditionally store the opposite condition
1603    // (GREATER) in v0. If rhs is equal to lhs, this will be corrected by
1604    // the next check.
1605    __ movf(v0, t1);
1606    // Check if EQUAL condition is satisfied. If true, move conditionally
1607    // result to v0.
1608    __ c(EQ, D, f12, f14);
1609    __ movt(v0, t2);
1610
1611    __ Ret();
1612
1613    __ bind(&nan);
1614    // NaN comparisons always fail.
1615    // Load whatever we need in v0 to make the comparison fail.
1616    if (cc_ == lt || cc_ == le) {
1617      __ li(v0, Operand(GREATER));
1618    } else {
1619      __ li(v0, Operand(LESS));
1620    }
1621    __ Ret();
1622  } else {
1623    // Checks for NaN in the doubles we have loaded.  Can return the answer or
1624    // fall through if neither is a NaN.  Also binds rhs_not_nan.
1625    EmitNanCheck(masm, cc_);
1626
1627    // Compares two doubles that are not NaNs. Returns the answer.
1628    // Never falls through.
1629    EmitTwoNonNanDoubleComparison(masm, cc_);
1630  }
1631
1632  __ bind(&not_smis);
1633  // At this point we know we are dealing with two different objects,
1634  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1635  if (strict_) {
1636    // This returns non-equal for some object types, or falls through if it
1637    // was not lucky.
1638    EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1639  }
1640
1641  Label check_for_symbols;
1642  Label flat_string_check;
1643  // Check for heap-number-heap-number comparison. Can jump to slow case,
1644  // or load both doubles and jump to the code that handles
1645  // that case. If the inputs are not doubles then jumps to check_for_symbols.
1646  // In this case a2 will contain the type of lhs_.
1647  EmitCheckForTwoHeapNumbers(masm,
1648                             lhs_,
1649                             rhs_,
1650                             &both_loaded_as_doubles,
1651                             &check_for_symbols,
1652                             &flat_string_check);
1653
1654  __ bind(&check_for_symbols);
1655  if (cc_ == eq && !strict_) {
1656    // Returns an answer for two symbols or two detectable objects.
1657    // Otherwise jumps to string case or not both strings case.
1658    // Assumes that a2 is the type of lhs_ on entry.
1659    EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1660  }
1661
1662  // Check for both being sequential ASCII strings, and inline if that is the
1663  // case.
1664  __ bind(&flat_string_check);
1665
1666  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1667
1668  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1669  if (cc_ == eq) {
1670    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1671                                                     lhs_,
1672                                                     rhs_,
1673                                                     a2,
1674                                                     a3,
1675                                                     t0);
1676  } else {
1677    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1678                                                       lhs_,
1679                                                       rhs_,
1680                                                       a2,
1681                                                       a3,
1682                                                       t0,
1683                                                       t1);
1684  }
1685  // Never falls through to here.
1686
1687  __ bind(&slow);
1688  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1689  // a1 (rhs) second.
1690  __ Push(lhs_, rhs_);
1691  // Figure out which native to call and setup the arguments.
1692  Builtins::JavaScript native;
1693  if (cc_ == eq) {
1694    native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1695  } else {
1696    native = Builtins::COMPARE;
1697    int ncr;  // NaN compare result.
1698    if (cc_ == lt || cc_ == le) {
1699      ncr = GREATER;
1700    } else {
1701      ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
1702      ncr = LESS;
1703    }
1704    __ li(a0, Operand(Smi::FromInt(ncr)));
1705    __ push(a0);
1706  }
1707
1708  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1709  // tagged as a small integer.
1710  __ InvokeBuiltin(native, JUMP_FUNCTION);
1711}
1712
1713
1714// The stub returns zero for false, and a non-zero value for true.
1715void ToBooleanStub::Generate(MacroAssembler* masm) {
1716  // This stub uses FPU instructions.
1717  CpuFeatures::Scope scope(FPU);
1718
1719  Label false_result;
1720  Label not_heap_number;
1721  Register scratch0 = t5.is(tos_) ? t3 : t5;
1722
1723  // undefined -> false
1724  __ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
1725  __ Branch(&false_result, eq, tos_, Operand(scratch0));
1726
1727  // Boolean -> its value
1728  __ LoadRoot(scratch0, Heap::kFalseValueRootIndex);
1729  __ Branch(&false_result, eq, tos_, Operand(scratch0));
1730  __ LoadRoot(scratch0, Heap::kTrueValueRootIndex);
1731  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
1732  // return true if the equal condition is satisfied.
1733  __ Ret(eq, tos_, Operand(scratch0));
1734
1735  // Smis: 0 -> false, all other -> true
1736  __ And(scratch0, tos_, tos_);
1737  __ Branch(&false_result, eq, scratch0, Operand(zero_reg));
1738  __ And(scratch0, tos_, Operand(kSmiTagMask));
1739  // "tos_" is a register and contains a non-zero value.  Hence we implicitly
1740  // return true if the not equal condition is satisfied.
1741  __ Ret(eq, scratch0, Operand(zero_reg));
1742
1743  // 'null' -> false
1744  __ LoadRoot(scratch0, Heap::kNullValueRootIndex);
1745  __ Branch(&false_result, eq, tos_, Operand(scratch0));
1746
1747  // HeapNumber => false if +0, -0, or NaN.
1748  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1749  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1750  __ Branch(&not_heap_number, ne, scratch0, Operand(at));
1751
1752  __ ldc1(f12, FieldMemOperand(tos_, HeapNumber::kValueOffset));
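      // The UEQ condition sets the FPU flag if the value is NaN (unordered)
      // or equal to 0.0, i.e. for +0, -0 and NaN.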
1753  __ fcmp(f12, 0.0, UEQ);
1754
1755  // "tos_" is a register, and contains a non-zero value by default.
1756  // Hence we only need to overwrite "tos_" with zero to return false for
1757  // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1758  __ movt(tos_, zero_reg);
1759  __ Ret();
1760
1761  __ bind(&not_heap_number);
1762
1763  // It can be an undetectable object.
1764  // Undetectable => false.
1765  __ lw(at, FieldMemOperand(tos_, HeapObject::kMapOffset));
1766  __ lbu(scratch0, FieldMemOperand(at, Map::kBitFieldOffset));
1767  __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable));
1768  __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable));
1769
1770  // JavaScript object => true.
1771  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1772  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
1773
1774  // "tos_" is a register and contains a non-zero value.
1775  // Hence we implicitly return true if the greater than
1776  // condition is satisfied.
1777  __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
1778
1779  // Check for string.
1780  __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset));
1781  __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset));
1782  // "tos_" is a register and contains a non-zero value.
1783  // Hence we implicitly return true if the greater than
1784  // condition is satisfied.
1785  __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE));
1786
1787  // String value => false iff empty, i.e., length is zero.
1788  __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1789  // If length is zero, "tos_" contains zero ==> false.
1790  // If length is not zero, "tos_" contains a non-zero value ==> true.
1791  __ Ret();
1792
1793  // Return 0 in "tos_" for false.
1794  __ bind(&false_result);
1795  __ mov(tos_, zero_reg);
1796  __ Ret();
1797}
1798
1799
1800void UnaryOpStub::PrintName(StringStream* stream) {
1801  const char* op_name = Token::Name(op_);
1802  const char* overwrite_name = NULL;  // Make g++ happy.
1803  switch (mode_) {
1804    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
1805    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
1806  }
1807  stream->Add("UnaryOpStub_%s_%s_%s",
1808              op_name,
1809              overwrite_name,
1810              UnaryOpIC::GetName(operand_type_));
1811}
1812
1813
1814// TODO(svenpanne): Use virtual functions instead of switch.
1815void UnaryOpStub::Generate(MacroAssembler* masm) {
1816  switch (operand_type_) {
1817    case UnaryOpIC::UNINITIALIZED:
1818      GenerateTypeTransition(masm);
1819      break;
1820    case UnaryOpIC::SMI:
1821      GenerateSmiStub(masm);
1822      break;
1823    case UnaryOpIC::HEAP_NUMBER:
1824      GenerateHeapNumberStub(masm);
1825      break;
1826    case UnaryOpIC::GENERIC:
1827      GenerateGenericStub(masm);
1828      break;
1829  }
1830}
1831
1832
1833void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1834  // Argument is in a0 and v0 at this point, so we can overwrite a0.
1835  __ li(a2, Operand(Smi::FromInt(op_)));
1836  __ li(a1, Operand(Smi::FromInt(mode_)));
1837  __ li(a0, Operand(Smi::FromInt(operand_type_)));
1838  __ Push(v0, a2, a1, a0);
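      // The stack now holds the four arguments for kUnaryOp_Patch below:
      // the value, op, overwrite mode and operand type.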
1839
1840  __ TailCallExternalReference(
1841      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
1842}
1843
1844
1845// TODO(svenpanne): Use virtual functions instead of switch.
1846void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1847  switch (op_) {
1848    case Token::SUB:
1849      GenerateSmiStubSub(masm);
1850      break;
1851    case Token::BIT_NOT:
1852      GenerateSmiStubBitNot(masm);
1853      break;
1854    default:
1855      UNREACHABLE();
1856  }
1857}
1858
1859
1860void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
1861  Label non_smi, slow;
1862  GenerateSmiCodeSub(masm, &non_smi, &slow);
1863  __ bind(&non_smi);
1864  __ bind(&slow);
1865  GenerateTypeTransition(masm);
1866}
1867
1868
1869void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
1870  Label non_smi;
1871  GenerateSmiCodeBitNot(masm, &non_smi);
1872  __ bind(&non_smi);
1873  GenerateTypeTransition(masm);
1874}
1875
1876
1877void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
1878                                     Label* non_smi,
1879                                     Label* slow) {
1880  __ JumpIfNotSmi(a0, non_smi);
1881
1882  // The result of negating zero or the smallest negative smi is not a smi.
1883  __ And(t0, a0, ~0x80000000);
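      // t0 is zero only when a0 is 0 (smi zero) or 0x80000000 (the most
      // negative smi).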
1884  __ Branch(slow, eq, t0, Operand(zero_reg));
1885
1886  // Return '0 - value'.
1887  __ Subu(v0, zero_reg, a0);
1888  __ Ret();
1889}
1890
1891
1892void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
1893                                        Label* non_smi) {
1894  __ JumpIfNotSmi(a0, non_smi);
1895
1896  // Flip bits and revert inverted smi-tag.
1897  __ Neg(v0, a0);
1898  __ And(v0, v0, ~kSmiTagMask);
1899  __ Ret();
1900}
1901
1902
1903// TODO(svenpanne): Use virtual functions instead of switch.
1904void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1905  switch (op_) {
1906    case Token::SUB:
1907      GenerateHeapNumberStubSub(masm);
1908      break;
1909    case Token::BIT_NOT:
1910      GenerateHeapNumberStubBitNot(masm);
1911      break;
1912    default:
1913      UNREACHABLE();
1914  }
1915}
1916
1917
1918void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
1919  Label non_smi, slow, call_builtin;
1920  GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
1921  __ bind(&non_smi);
1922  GenerateHeapNumberCodeSub(masm, &slow);
1923  __ bind(&slow);
1924  GenerateTypeTransition(masm);
1925  __ bind(&call_builtin);
1926  GenerateGenericCodeFallback(masm);
1927}
1928
1929
1930void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
1931  Label non_smi, slow;
1932  GenerateSmiCodeBitNot(masm, &non_smi);
1933  __ bind(&non_smi);
1934  GenerateHeapNumberCodeBitNot(masm, &slow);
1935  __ bind(&slow);
1936  GenerateTypeTransition(masm);
1937}
1938
1939
1940void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
1941                                            Label* slow) {
1942  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
1943  // a0 is a heap number.  Get a new heap number in a1.
1944  if (mode_ == UNARY_OVERWRITE) {
1945    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1946    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
1947    __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1948  } else {
1949    Label slow_allocate_heapnumber, heapnumber_allocated;
1950    __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
1951    __ jmp(&heapnumber_allocated);
1952
1953    __ bind(&slow_allocate_heapnumber);
1954    __ EnterInternalFrame();
1955    __ push(a0);
1956    __ CallRuntime(Runtime::kNumberAlloc, 0);
1957    __ mov(a1, v0);
1958    __ pop(a0);
1959    __ LeaveInternalFrame();
1960
1961    __ bind(&heapnumber_allocated);
1962    __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
1963    __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
1964    __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
1965    __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
1966    __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
1967    __ mov(v0, a1);
1968  }
1969  __ Ret();
1970}
1971
1972
1973void UnaryOpStub::GenerateHeapNumberCodeBitNot(
1974    MacroAssembler* masm,
1975    Label* slow) {
1976  Label impossible;
1977
1978  EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
1979  // Convert the heap number in a0 to an untagged integer in a1.
1980  __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
1981
1982  // Do the bitwise operation and check if the result fits in a smi.
1983  Label try_float;
1984  __ Neg(a1, a1);
1985  __ Addu(a2, a1, Operand(0x40000000));
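      // a2 is negative exactly when a1 lies outside the smi range
      // [-2^30, 2^30 - 1].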
1986  __ Branch(&try_float, lt, a2, Operand(zero_reg));
1987
1988  // Tag the result as a smi and we're done.
1989  __ SmiTag(v0, a1);
1990  __ Ret();
1991
1992  // Try to store the result in a heap number.
1993  __ bind(&try_float);
1994  if (mode_ == UNARY_NO_OVERWRITE) {
1995    Label slow_allocate_heapnumber, heapnumber_allocated;
1996    // Allocate a new heap number without zapping v0, which we need if it fails.
1997    __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
1998    __ jmp(&heapnumber_allocated);
1999
2000    __ bind(&slow_allocate_heapnumber);
2001    __ EnterInternalFrame();
2002    __ push(v0);  // Push the heap number, not the untagged int32.
2003    __ CallRuntime(Runtime::kNumberAlloc, 0);
2004    __ mov(a2, v0);  // Move the new heap number into a2.
2005    // Get the heap number into v0, now that the new heap number is in a2.
2006    __ pop(v0);
2007    __ LeaveInternalFrame();
2008
2009    // Convert the heap number in v0 to an untagged integer in a1.
2010    // This can't go slow-case because it's the same number we already
2011    // converted once before.
2012    __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2013    // Flip the bits again to redo the bitwise NOT.
2014    __ Xor(a1, a1, -1);
2015
2016    __ bind(&heapnumber_allocated);
2017    __ mov(v0, a2);  // Move newly allocated heap number to v0.
2018  }
2019
2020  if (CpuFeatures::IsSupported(FPU)) {
2021    // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2022    CpuFeatures::Scope scope(FPU);
2023    __ mtc1(a1, f0);
2024    __ cvt_d_w(f0, f0);
2025    __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2026    __ Ret();
2027  } else {
2028    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2029    // have to set up a frame.
2030    WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2031    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2032  }
2033
2034  __ bind(&impossible);
2035  if (FLAG_debug_code) {
2036    __ stop("Incorrect assumption in bit-not stub");
2037  }
2038}
2039
2040
2041// TODO(svenpanne): Use virtual functions instead of switch.
2042void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2043  switch (op_) {
2044    case Token::SUB:
2045      GenerateGenericStubSub(masm);
2046      break;
2047    case Token::BIT_NOT:
2048      GenerateGenericStubBitNot(masm);
2049      break;
2050    default:
2051      UNREACHABLE();
2052  }
2053}
2054
2055
2056void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2057  Label non_smi, slow;
2058  GenerateSmiCodeSub(masm, &non_smi, &slow);
2059  __ bind(&non_smi);
2060  GenerateHeapNumberCodeSub(masm, &slow);
2061  __ bind(&slow);
2062  GenerateGenericCodeFallback(masm);
2063}
2064
2065
2066void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2067  Label non_smi, slow;
2068  GenerateSmiCodeBitNot(masm, &non_smi);
2069  __ bind(&non_smi);
2070  GenerateHeapNumberCodeBitNot(masm, &slow);
2071  __ bind(&slow);
2072  GenerateGenericCodeFallback(masm);
2073}
2074
2075
2076void UnaryOpStub::GenerateGenericCodeFallback(
2077    MacroAssembler* masm) {
2078  // Handle the slow case by jumping to the JavaScript builtin.
2079  __ push(a0);
2080  switch (op_) {
2081    case Token::SUB:
2082      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2083      break;
2084    case Token::BIT_NOT:
2085      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2086      break;
2087    default:
2088      UNREACHABLE();
2089  }
2090}
2091
2092
2093void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2094  Label get_result;
2095
2096  __ Push(a1, a0);
2097
2098  __ li(a2, Operand(Smi::FromInt(MinorKey())));
2099  __ li(a1, Operand(Smi::FromInt(op_)));
2100  __ li(a0, Operand(Smi::FromInt(operands_type_)));
2101  __ Push(a2, a1, a0);
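      // Five arguments are now on the stack: left, right, MinorKey, op and
      // operand type.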
2102
2103  __ TailCallExternalReference(
2104      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2105                        masm->isolate()),
2106      5,
2107      1);
2108}
2109
2110
2111void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2112    MacroAssembler* masm) {
2113  UNIMPLEMENTED();
2114}
2115
2116
2117void BinaryOpStub::Generate(MacroAssembler* masm) {
2118  switch (operands_type_) {
2119    case BinaryOpIC::UNINITIALIZED:
2120      GenerateTypeTransition(masm);
2121      break;
2122    case BinaryOpIC::SMI:
2123      GenerateSmiStub(masm);
2124      break;
2125    case BinaryOpIC::INT32:
2126      GenerateInt32Stub(masm);
2127      break;
2128    case BinaryOpIC::HEAP_NUMBER:
2129      GenerateHeapNumberStub(masm);
2130      break;
2131    case BinaryOpIC::ODDBALL:
2132      GenerateOddballStub(masm);
2133      break;
2134    case BinaryOpIC::BOTH_STRING:
2135      GenerateBothStringStub(masm);
2136      break;
2137    case BinaryOpIC::STRING:
2138      GenerateStringStub(masm);
2139      break;
2140    case BinaryOpIC::GENERIC:
2141      GenerateGeneric(masm);
2142      break;
2143    default:
2144      UNREACHABLE();
2145  }
2146}
2147
2148
2149void BinaryOpStub::PrintName(StringStream* stream) {
2150  const char* op_name = Token::Name(op_);
2151  const char* overwrite_name;
2152  switch (mode_) {
2153    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2154    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2155    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2156    default: overwrite_name = "UnknownOverwrite"; break;
2157  }
2158  stream->Add("BinaryOpStub_%s_%s_%s",
2159              op_name,
2160              overwrite_name,
2161              BinaryOpIC::GetName(operands_type_));
2162}
2163
2164
2165
2166void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2167  Register left = a1;
2168  Register right = a0;
2169
2170  Register scratch1 = t0;
2171  Register scratch2 = t1;
2172
2173  ASSERT(right.is(a0));
2174  STATIC_ASSERT(kSmiTag == 0);
2175
2176  Label not_smi_result;
2177  switch (op_) {
2178    case Token::ADD:
2179      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2180      __ RetOnNoOverflow(scratch1);
2181      // No need to revert anything - right and left are intact.
2182      break;
2183    case Token::SUB:
2184      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2185      __ RetOnNoOverflow(scratch1);
2186      // No need to revert anything - right and left are intact.
2187      break;
2188    case Token::MUL: {
2189      // Remove tag from one of the operands. This way the multiplication result
2190      // will be a smi if it fits the smi range.
2191      __ SmiUntag(scratch1, right);
2192      // Do multiplication.
2193      // lo = lower 32 bits of scratch1 * left.
2194      // hi = higher 32 bits of scratch1 * left.
2195      __ Mult(left, scratch1);
2196      // Check for overflowing the smi range - no overflow if higher 33 bits of
2197      // the result are identical.
2198      __ mflo(scratch1);
2199      __ mfhi(scratch2);
2200      __ sra(scratch1, scratch1, 31);
2201      __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
2202      // Go slow on zero result to handle -0.
2203      __ mflo(v0);
2204      __ Ret(ne, v0, Operand(zero_reg));
2205      // We need -0 if we were multiplying a negative number with 0 to get 0.
2206      // We know one of them was zero.
2207      __ Addu(scratch2, right, left);
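          // One operand is zero, so the sum equals the other operand; its sign
          // tells us whether the result must be -0.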
2208      Label skip;
2209      // ARM uses the 'pl' condition, which is 'ge'.
2210      // Negating it results in 'lt'.
2211      __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2212      ASSERT(Smi::FromInt(0) == 0);
2213      __ mov(v0, zero_reg);
2214      __ Ret();  // Return smi 0 if the non-zero one was positive.
2215      __ bind(&skip);
2216      // We fall through here if we multiplied a negative number with 0, because
2217      // that would mean we should produce -0.
2218      }
2219      break;
2220    case Token::DIV: {
2221      Label done;
2222      __ SmiUntag(scratch2, right);
2223      __ SmiUntag(scratch1, left);
2224      __ Div(scratch1, scratch2);
2225      // A minor optimization: div may be calculated asynchronously, so we check
2226      // for division by zero before getting the result.
2227      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2228      // If the result is 0, we need to make sure the divisor (right) is
2229      // positive, otherwise it is a -0 case.
2230      // Quotient is in 'lo', remainder is in 'hi'.
2231      // Check for no remainder first.
2232      __ mfhi(scratch1);
2233      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2234      __ mflo(scratch1);
2235      __ Branch(&done, ne, scratch1, Operand(zero_reg));
2236      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2237      __ bind(&done);
2238      // Check that the signed result fits in a Smi.
2239      __ Addu(scratch2, scratch1, Operand(0x40000000));
2240      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2241      __ SmiTag(v0, scratch1);
2242      __ Ret();
2243      }
2244      break;
2245    case Token::MOD: {
2246      Label done;
2247      __ SmiUntag(scratch2, right);
2248      __ SmiUntag(scratch1, left);
2249      __ Div(scratch1, scratch2);
2250      // A minor optimization: div may be calculated asynchronously, so we check
2251      // for division by 0 before calling mfhi.
2252      // Check for zero on the right hand side.
2253      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2254      // If the result is 0, we need to make sure the dividend (left) is
2255      // positive (or 0), otherwise it is a -0 case.
2256      // Remainder is in 'hi'.
2257      __ mfhi(scratch2);
2258      __ Branch(&done, ne, scratch2, Operand(zero_reg));
2259      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2260      __ bind(&done);
2261      // Check that the signed result fits in a Smi.
2262      __ Addu(scratch1, scratch2, Operand(0x40000000));
2263      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2264      __ SmiTag(v0, scratch2);
2265      __ Ret();
2266      }
2267      break;
2268    case Token::BIT_OR:
2269      __ Or(v0, left, Operand(right));
2270      __ Ret();
2271      break;
2272    case Token::BIT_AND:
2273      __ And(v0, left, Operand(right));
2274      __ Ret();
2275      break;
2276    case Token::BIT_XOR:
2277      __ Xor(v0, left, Operand(right));
2278      __ Ret();
2279      break;
2280    case Token::SAR:
2281      // Remove tags from right operand.
2282      __ GetLeastBitsFromSmi(scratch1, right, 5);
2283      __ srav(scratch1, left, scratch1);
2284      // Smi tag result.
2285      __ And(v0, scratch1, Operand(~kSmiTagMask));
2286      __ Ret();
2287      break;
2288    case Token::SHR:
2289      // Remove tags from operands. We can't do this on a 31 bit number
2290      // because then the 0s get shifted into bit 30 instead of bit 31.
2291      __ SmiUntag(scratch1, left);
2292      __ GetLeastBitsFromSmi(scratch2, right, 5);
2293      __ srlv(v0, scratch1, scratch2);
2294      // Unsigned shift is not allowed to produce a negative number, so
2295      // check the sign bit and the sign bit after Smi tagging.
2296      __ And(scratch1, v0, Operand(0xc0000000));
2297      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2298      // Smi tag result.
2299      __ SmiTag(v0);
2300      __ Ret();
2301      break;
2302    case Token::SHL:
2303      // Remove tags from operands.
2304      __ SmiUntag(scratch1, left);
2305      __ GetLeastBitsFromSmi(scratch2, right, 5);
2306      __ sllv(scratch1, scratch1, scratch2);
2307      // Check that the signed result fits in a Smi.
2308      __ Addu(scratch2, scratch1, Operand(0x40000000));
2309      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2310      __ SmiTag(v0, scratch1);
2311      __ Ret();
2312      break;
2313    default:
2314      UNREACHABLE();
2315  }
2316  __ bind(&not_smi_result);
2317}
2318
2319
2320void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2321                                       bool smi_operands,
2322                                       Label* not_numbers,
2323                                       Label* gc_required) {
2324  Register left = a1;
2325  Register right = a0;
2326  Register scratch1 = t3;
2327  Register scratch2 = t5;
2328  Register scratch3 = t0;
2329
2330  ASSERT(smi_operands || (not_numbers != NULL));
2331  if (smi_operands && FLAG_debug_code) {
2332    __ AbortIfNotSmi(left);
2333    __ AbortIfNotSmi(right);
2334  }
2335
2336  Register heap_number_map = t2;
2337  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2338
2339  switch (op_) {
2340    case Token::ADD:
2341    case Token::SUB:
2342    case Token::MUL:
2343    case Token::DIV:
2344    case Token::MOD: {
2345      // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2346      // depending on whether FPU is available or not.
2347      FloatingPointHelper::Destination destination =
2348          CpuFeatures::IsSupported(FPU) &&
2349          op_ != Token::MOD ?
2350              FloatingPointHelper::kFPURegisters :
2351              FloatingPointHelper::kCoreRegisters;
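          // MOD is always computed by calling out to C, so it takes the
          // core-register path even when an FPU is available.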
2352
2353      // Allocate new heap number for result.
2354      Register result = s0;
2355      GenerateHeapResultAllocation(
2356          masm, result, heap_number_map, scratch1, scratch2, gc_required);
2357
2358      // Load the operands.
2359      if (smi_operands) {
2360        FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2361      } else {
2362        FloatingPointHelper::LoadOperands(masm,
2363                                          destination,
2364                                          heap_number_map,
2365                                          scratch1,
2366                                          scratch2,
2367                                          not_numbers);
2368      }
2369
2370      // Calculate the result.
2371      if (destination == FloatingPointHelper::kFPURegisters) {
2372        // Using FPU registers:
2373        // f12: Left value.
2374        // f14: Right value.
2375        CpuFeatures::Scope scope(FPU);
2376        switch (op_) {
2377        case Token::ADD:
2378          __ add_d(f10, f12, f14);
2379          break;
2380        case Token::SUB:
2381          __ sub_d(f10, f12, f14);
2382          break;
2383        case Token::MUL:
2384          __ mul_d(f10, f12, f14);
2385          break;
2386        case Token::DIV:
2387          __ div_d(f10, f12, f14);
2388          break;
2389        default:
2390          UNREACHABLE();
2391        }
2392
2393        // ARM uses a workaround here because of the unaligned HeapNumber
2394        // kValueOffset. On MIPS this workaround is built into sdc1 so
2395        // there's no point in generating even more instructions.
2396        __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2397        __ mov(v0, result);
2398        __ Ret();
2399      } else {
2400        // Call the C function to handle the double operation.
2401        FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2402                                                         op_,
2403                                                         result,
2404                                                         scratch1);
2405        if (FLAG_debug_code) {
2406          __ stop("Unreachable code.");
2407        }
2408      }
2409      break;
2410    }
2411    case Token::BIT_OR:
2412    case Token::BIT_XOR:
2413    case Token::BIT_AND:
2414    case Token::SAR:
2415    case Token::SHR:
2416    case Token::SHL: {
2417      if (smi_operands) {
2418        __ SmiUntag(a3, left);
2419        __ SmiUntag(a2, right);
2420      } else {
2421        // Convert operands to 32-bit integers. Right in a2 and left in a3.
2422        FloatingPointHelper::ConvertNumberToInt32(masm,
2423                                                  left,
2424                                                  a3,
2425                                                  heap_number_map,
2426                                                  scratch1,
2427                                                  scratch2,
2428                                                  scratch3,
2429                                                  f0,
2430                                                  not_numbers);
2431        FloatingPointHelper::ConvertNumberToInt32(masm,
2432                                                  right,
2433                                                  a2,
2434                                                  heap_number_map,
2435                                                  scratch1,
2436                                                  scratch2,
2437                                                  scratch3,
2438                                                  f0,
2439                                                  not_numbers);
2440      }
2441      Label result_not_a_smi;
2442      switch (op_) {
2443        case Token::BIT_OR:
2444          __ Or(a2, a3, Operand(a2));
2445          break;
2446        case Token::BIT_XOR:
2447          __ Xor(a2, a3, Operand(a2));
2448          break;
2449        case Token::BIT_AND:
2450          __ And(a2, a3, Operand(a2));
2451          break;
2452        case Token::SAR:
2453          // Use only the 5 least significant bits of the shift count.
2454          __ GetLeastBitsFromInt32(a2, a2, 5);
2455          __ srav(a2, a3, a2);
2456          break;
2457        case Token::SHR:
2458          // Use only the 5 least significant bits of the shift count.
2459          __ GetLeastBitsFromInt32(a2, a2, 5);
2460          __ srlv(a2, a3, a2);
2461          // SHR is special because it is required to produce a positive answer.
2462          // The code below for writing into heap numbers isn't capable of
2463          // writing the register as an unsigned int so we go to slow case if we
2464          // hit this case.
2465          if (CpuFeatures::IsSupported(FPU)) {
2466            __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2467          } else {
2468            __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2469          }
2470          break;
2471        case Token::SHL:
2472          // Use only the 5 least significant bits of the shift count.
2473          __ GetLeastBitsFromInt32(a2, a2, 5);
2474          __ sllv(a2, a3, a2);
2475          break;
2476        default:
2477          UNREACHABLE();
2478      }
2479      // Check that the *signed* result fits in a smi.
2480      __ Addu(a3, a2, Operand(0x40000000));
2481      __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2482      __ SmiTag(v0, a2);
2483      __ Ret();
2484
2485      // Allocate new heap number for result.
2486      __ bind(&result_not_a_smi);
2487      Register result = t1;
2488      if (smi_operands) {
2489        __ AllocateHeapNumber(
2490            result, scratch1, scratch2, heap_number_map, gc_required);
2491      } else {
2492        GenerateHeapResultAllocation(
2493            masm, result, heap_number_map, scratch1, scratch2, gc_required);
2494      }
2495
2496      // a2: Answer as signed int32.
2497      // t1: Heap number to write answer into.
2498
2499      // Nothing can go wrong now, so move the heap number to v0, which is the
2500      // result.
2501      __ mov(v0, t1);
2502
2503      if (CpuFeatures::IsSupported(FPU)) {
2504        // Convert the int32 in a2 to the heap number in a0. As
2505        // mentioned above SHR needs to always produce a positive result.
2506        CpuFeatures::Scope scope(FPU);
2507        __ mtc1(a2, f0);
2508        if (op_ == Token::SHR) {
2509          __ Cvt_d_uw(f0, f0, f22);
2510        } else {
2511          __ cvt_d_w(f0, f0);
2512        }
2513        // ARM uses a workaround here because of the unaligned HeapNumber
2514        // kValueOffset. On MIPS this workaround is built into sdc1 so
2515        // there's no point in generating even more instructions.
2516        __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2517        __ Ret();
2518      } else {
2519        // Tail call that writes the int32 in a2 to the heap number in v0, using
2520        // a3 and a0 as scratch. v0 is preserved and returned.
2521        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2522        __ TailCallStub(&stub);
2523      }
2524      break;
2525    }
2526    default:
2527      UNREACHABLE();
2528  }
2529}
2530
2531
2532// Generate the smi code. If the operation on smis is successful the return is
2533// generated. If the result is not a smi and heap number allocation is not
2534// requested, the code falls through. If number allocation is requested but a
2535// heap number cannot be allocated, the code jumps to the label gc_required.
2536void BinaryOpStub::GenerateSmiCode(
2537    MacroAssembler* masm,
2538    Label* use_runtime,
2539    Label* gc_required,
2540    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2541  Label not_smis;
2542
2543  Register left = a1;
2544  Register right = a0;
2545  Register scratch1 = t3;
2546  Register scratch2 = t5;
2547
2548  // Perform combined smi check on both operands.
2549  __ Or(scratch1, left, Operand(right));
2550  STATIC_ASSERT(kSmiTag == 0);
2551  __ JumpIfNotSmi(scratch1, &not_smis);
2552
2553  // If the smi-smi operation results in a smi, the return is generated.
2554  GenerateSmiSmiOperation(masm);
2555
2556  // If heap number results are possible generate the result in an allocated
2557  // heap number.
2558  if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2559    GenerateFPOperation(masm, true, use_runtime, gc_required);
2560  }
2561  __ bind(&not_smis);
2562}
2563
2564
2565void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2566  Label not_smis, call_runtime;
2567
2568  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2569      result_type_ == BinaryOpIC::SMI) {
2570    // Only allow smi results.
2571    GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2572  } else {
2573    // Allow heap number result and don't make a transition if a heap number
2574    // cannot be allocated.
2575    GenerateSmiCode(masm,
2576                    &call_runtime,
2577                    &call_runtime,
2578                    ALLOW_HEAPNUMBER_RESULTS);
2579  }
2580
2581  // Code falls through if the result is not returned as either a smi or heap
2582  // number.
2583  GenerateTypeTransition(masm);
2584
2585  __ bind(&call_runtime);
2586  GenerateCallRuntime(masm);
2587}
2588
2589
2590void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2591  ASSERT(operands_type_ == BinaryOpIC::STRING);
2592  // Try to add arguments as strings, otherwise, transition to the generic
2593  // BinaryOpIC type.
2594  GenerateAddStrings(masm);
2595  GenerateTypeTransition(masm);
2596}
2597
2598
2599void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2600  Label call_runtime;
2601  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2602  ASSERT(op_ == Token::ADD);
2603  // If both arguments are strings, call the string add stub.
2604  // Otherwise, do a transition.
2605
2606  // Registers containing left and right operands respectively.
2607  Register left = a1;
2608  Register right = a0;
2609
2610  // Test if left operand is a string.
2611  __ JumpIfSmi(left, &call_runtime);
2612  __ GetObjectType(left, a2, a2);
2613  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2614
2615  // Test if right operand is a string.
2616  __ JumpIfSmi(right, &call_runtime);
2617  __ GetObjectType(right, a2, a2);
2618  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2619
2620  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2621  GenerateRegisterArgsPush(masm);
2622  __ TailCallStub(&string_add_stub);
2623
2624  __ bind(&call_runtime);
2625  GenerateTypeTransition(masm);
2626}
2627
2628
2629void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2630  ASSERT(operands_type_ == BinaryOpIC::INT32);
2631
2632  Register left = a1;
2633  Register right = a0;
2634  Register scratch1 = t3;
2635  Register scratch2 = t5;
2636  FPURegister double_scratch = f0;
2637  FPURegister single_scratch = f6;
2638
2639  Register heap_number_result = no_reg;
2640  Register heap_number_map = t2;
2641  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2642
2643  Label call_runtime;
2644  // Labels for type transition, used for wrong input or output types.
2645  // Both labels are currently bound to the same position. We use two
2646  // different labels to differentiate the causes leading to type transition.
2647  Label transition;
2648
2649  // Smi-smi fast case.
2650  Label skip;
2651  __ Or(scratch1, left, right);
2652  __ JumpIfNotSmi(scratch1, &skip);
2653  GenerateSmiSmiOperation(masm);
2654  // Fall through if the result is not a smi.
2655  __ bind(&skip);
2656
2657  switch (op_) {
2658    case Token::ADD:
2659    case Token::SUB:
2660    case Token::MUL:
2661    case Token::DIV:
2662    case Token::MOD: {
2663      // Load both operands and check that they are 32-bit integers.
2664      // Jump to type transition if they are not. The registers a0 and a1 (right
2665      // and left) are preserved for the runtime call.
2666      FloatingPointHelper::Destination destination =
2667          (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2668              ? FloatingPointHelper::kFPURegisters
2669              : FloatingPointHelper::kCoreRegisters;
2670
2671      FloatingPointHelper::LoadNumberAsInt32Double(masm,
2672                                                   right,
2673                                                   destination,
2674                                                   f14,
2675                                                   a2,
2676                                                   a3,
2677                                                   heap_number_map,
2678                                                   scratch1,
2679                                                   scratch2,
2680                                                   f2,
2681                                                   &transition);
2682      FloatingPointHelper::LoadNumberAsInt32Double(masm,
2683                                                   left,
2684                                                   destination,
2685                                                   f12,
2686                                                   t0,
2687                                                   t1,
2688                                                   heap_number_map,
2689                                                   scratch1,
2690                                                   scratch2,
2691                                                   f2,
2692                                                   &transition);
2693
2694      if (destination == FloatingPointHelper::kFPURegisters) {
2695        CpuFeatures::Scope scope(FPU);
2696        Label return_heap_number;
2697        switch (op_) {
2698          case Token::ADD:
2699            __ add_d(f10, f12, f14);
2700            break;
2701          case Token::SUB:
2702            __ sub_d(f10, f12, f14);
2703            break;
2704          case Token::MUL:
2705            __ mul_d(f10, f12, f14);
2706            break;
2707          case Token::DIV:
2708            __ div_d(f10, f12, f14);
2709            break;
2710          default:
2711            UNREACHABLE();
2712        }
2713
2714        if (op_ != Token::DIV) {
2715          // These operations produce an integer result.
2716          // Try to return a smi if we can.
2717          // Otherwise return a heap number if allowed, or jump to type
2718          // transition.
2719
2720          // NOTE: ARM uses a MacroAssembler function here (EmitVFPTruncate).
2721          // On MIPS a lot of things cannot be implemented the same way so right
2722          // now it makes a lot more sense to just do things manually.
2723
2724          // Save FCSR.
2725          __ cfc1(scratch1, FCSR);
2726          // Disable FPU exceptions.
2727          __ ctc1(zero_reg, FCSR);
2728          __ trunc_w_d(single_scratch, f10);
2729          // Retrieve FCSR.
2730          __ cfc1(scratch2, FCSR);
2731          // Restore FCSR.
2732          __ ctc1(scratch1, FCSR);
2733
2734          // Check for inexact conversion or exception.
2735          __ And(scratch2, scratch2, kFCSRFlagMask);
2736
2737          if (result_type_ <= BinaryOpIC::INT32) {
2738            // If scratch2 != 0, result does not fit in a 32-bit integer.
2739            __ Branch(&transition, ne, scratch2, Operand(zero_reg));
2740          }
2741
2742          // Check if the result fits in a smi.
2743          __ mfc1(scratch1, single_scratch);
2744          __ Addu(scratch2, scratch1, Operand(0x40000000));
2745          // If not, try to return a heap number.
2746          __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2747          // Check for minus zero. Return heap number for minus zero.
2748          Label not_zero;
2749          __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
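              // f11 is the high half of the double result in the f10/f11 pair;
              // its sign bit distinguishes -0 from +0.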
2750          __ mfc1(scratch2, f11);
2751          __ And(scratch2, scratch2, HeapNumber::kSignMask);
2752          __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2753          __ bind(&not_zero);
2754
2755          // Tag the result and return.
2756          __ SmiTag(v0, scratch1);
2757          __ Ret();
2758        } else {
2759          // DIV just falls through to allocating a heap number.
2760        }
2761
2762        __ bind(&return_heap_number);
2763        // Return a heap number, or fall through to type transition or runtime
2764        // call if we can't.
2765        if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2766                                                 : BinaryOpIC::INT32)) {
2767          // We are using FPU registers so s0 is available.
2768          heap_number_result = s0;
2769          GenerateHeapResultAllocation(masm,
2770                                       heap_number_result,
2771                                       heap_number_map,
2772                                       scratch1,
2773                                       scratch2,
2774                                       &call_runtime);
2775          __ mov(v0, heap_number_result);
2776          __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2777          __ Ret();
2778        }
2779
2780        // A DIV operation expecting an integer result falls through
2781        // to type transition.
2782
2783      } else {
2784        // We preserved a0 and a1 to be able to call runtime.
2785        // Save the left value on the stack.
2786        __ Push(t1, t0);
2787
2788        Label pop_and_call_runtime;
2789
2790        // Allocate a heap number to store the result.
2791        heap_number_result = s0;
2792        GenerateHeapResultAllocation(masm,
2793                                     heap_number_result,
2794                                     heap_number_map,
2795                                     scratch1,
2796                                     scratch2,
2797                                     &pop_and_call_runtime);
2798
2799        // Load the left value from the value saved on the stack.
2800        __ Pop(a1, a0);
2801
2802        // Call the C function to handle the double operation.
2803        FloatingPointHelper::CallCCodeForDoubleOperation(
2804            masm, op_, heap_number_result, scratch1);
2805        if (FLAG_debug_code) {
2806          __ stop("Unreachable code.");
2807        }
2808
2809        __ bind(&pop_and_call_runtime);
2810        __ Drop(2);
2811        __ Branch(&call_runtime);
2812      }
2813
2814      break;
2815    }
2816
2817    case Token::BIT_OR:
2818    case Token::BIT_XOR:
2819    case Token::BIT_AND:
2820    case Token::SAR:
2821    case Token::SHR:
2822    case Token::SHL: {
2823      Label return_heap_number;
2824      Register scratch3 = t1;
2825      // Convert operands to 32-bit integers. Right in a2 and left in a3. The
2826      // registers a0 and a1 (right and left) are preserved for the runtime
2827      // call.
2828      FloatingPointHelper::LoadNumberAsInt32(masm,
2829                                             left,
2830                                             a3,
2831                                             heap_number_map,
2832                                             scratch1,
2833                                             scratch2,
2834                                             scratch3,
2835                                             f0,
2836                                             &transition);
2837      FloatingPointHelper::LoadNumberAsInt32(masm,
2838                                             right,
2839                                             a2,
2840                                             heap_number_map,
2841                                             scratch1,
2842                                             scratch2,
2843                                             scratch3,
2844                                             f0,
2845                                             &transition);
2846
2847      // The ECMA-262 standard specifies that, for shift operations, only the
2848      // 5 least significant bits of the shift value should be used.
2849      switch (op_) {
2850        case Token::BIT_OR:
2851          __ Or(a2, a3, Operand(a2));
2852          break;
2853        case Token::BIT_XOR:
2854          __ Xor(a2, a3, Operand(a2));
2855          break;
2856        case Token::BIT_AND:
2857          __ And(a2, a3, Operand(a2));
2858          break;
2859        case Token::SAR:
2860          __ And(a2, a2, Operand(0x1f));
2861          __ srav(a2, a3, a2);
2862          break;
2863        case Token::SHR:
2864          __ And(a2, a2, Operand(0x1f));
2865          __ srlv(a2, a3, a2);
2866          // SHR is special because it is required to produce a positive answer.
2867          // We only get a negative result if the shift value (a2) is 0; such a
2868          // result cannot be represented as a signed 32-bit integer, so try
2869          // to return a heap number if we can.
2870          // The non-FPU code does not support this special case, so jump
2871          // straight to the runtime when FPU is not available.
2872          if (CpuFeatures::IsSupported(FPU)) {
2873            __ Branch((result_type_ <= BinaryOpIC::INT32)
2874                        ? &transition
2875                        : &return_heap_number,
2876                       lt,
2877                       a2,
2878                       Operand(zero_reg));
2879          } else {
2880            __ Branch((result_type_ <= BinaryOpIC::INT32)
2881                        ? &transition
2882                        : &call_runtime,
2883                       lt,
2884                       a2,
2885                       Operand(zero_reg));
2886          }
2887          break;
2888        case Token::SHL:
2889          __ And(a2, a2, Operand(0x1f));
2890          __ sllv(a2, a3, a2);
2891          break;
2892        default:
2893          UNREACHABLE();
2894      }
2895
2896      // Check if the result fits in a smi.
2897      __ Addu(scratch1, a2, Operand(0x40000000));
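      // Adding 0x40000000 makes the sum negative exactly when a2 lies outside
      // the smi range [-2^30, 2^30 - 1], i.e. when the value does not fit in
      // a smi.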
2898      // If not, try to return a heap number. (We know the result is an int32.)
2899      __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
2900      // Tag the result and return.
2901      __ SmiTag(v0, a2);
2902      __ Ret();
2903
2904      __ bind(&return_heap_number);
2905      heap_number_result = t1;
2906      GenerateHeapResultAllocation(masm,
2907                                   heap_number_result,
2908                                   heap_number_map,
2909                                   scratch1,
2910                                   scratch2,
2911                                   &call_runtime);
2912
2913      if (CpuFeatures::IsSupported(FPU)) {
2914        CpuFeatures::Scope scope(FPU);
2915
2916        if (op_ != Token::SHR) {
2917          // Convert the result to a floating point value.
2918          __ mtc1(a2, double_scratch);
2919          __ cvt_d_w(double_scratch, double_scratch);
2920        } else {
2921          // The result must be interpreted as an unsigned 32-bit integer.
2922          __ mtc1(a2, double_scratch);
2923          __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
2924        }
2925
2926        // Store the result.
2927        __ mov(v0, heap_number_result);
2928        __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
2929        __ Ret();
2930      } else {
2931        // Tail call that writes the int32 in a2 to the heap number in v0, using
2932        // a3 and a1 as scratch. v0 is preserved and returned.
2933        __ mov(a0, t1);
2934        WriteInt32ToHeapNumberStub stub(a2, v0, a3, a1);
2935        __ TailCallStub(&stub);
2936      }
2937
2938      break;
2939    }
2940
2941    default:
2942      UNREACHABLE();
2943  }
2944
2945  // We never expect DIV to yield an integer result, so we always generate
2946  // type transition code for DIV operations expecting an integer result: the
2947  // code will fall through to this type transition.
2948  if (transition.is_linked() ||
2949      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
2950    __ bind(&transition);
2951    GenerateTypeTransition(masm);
2952  }
2953
2954  __ bind(&call_runtime);
2955  GenerateCallRuntime(masm);
2956}
2957
2958
2959void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
2960  Label call_runtime;
2961
2962  if (op_ == Token::ADD) {
2963    // Handle string addition here, because it is the only operation
2964    // that does not do a ToNumber conversion on the operands.
2965    GenerateAddStrings(masm);
2966  }
2967
2968  // Convert oddball arguments to numbers.
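  // ToNumber(undefined) is NaN and ToInt32(NaN) is 0, so undefined becomes
  // Smi 0 for the bitwise and shift operators and the NaN heap number for
  // the arithmetic ones.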
2969  Label check, done;
2970  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2971  __ Branch(&check, ne, a1, Operand(t0));
2972  if (Token::IsBitOp(op_)) {
2973    __ li(a1, Operand(Smi::FromInt(0)));
2974  } else {
2975    __ LoadRoot(a1, Heap::kNanValueRootIndex);
2976  }
2977  __ jmp(&done);
2978  __ bind(&check);
2979  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
2980  __ Branch(&done, ne, a0, Operand(t0));
2981  if (Token::IsBitOp(op_)) {
2982    __ li(a0, Operand(Smi::FromInt(0)));
2983  } else {
2984    __ LoadRoot(a0, Heap::kNanValueRootIndex);
2985  }
2986  __ bind(&done);
2987
2988  GenerateHeapNumberStub(masm);
2989}
2990
2991
2992void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2993  Label call_runtime;
2994  GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
2995
2996  __ bind(&call_runtime);
2997  GenerateCallRuntime(masm);
2998}
2999
3000
3001void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3002  Label call_runtime, call_string_add_or_runtime;
3003
3004  GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3005
3006  GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3007
3008  __ bind(&call_string_add_or_runtime);
3009  if (op_ == Token::ADD) {
3010    GenerateAddStrings(masm);
3011  }
3012
3013  __ bind(&call_runtime);
3014  GenerateCallRuntime(masm);
3015}
3016
3017
3018void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3019  ASSERT(op_ == Token::ADD);
3020  Label left_not_string, call_runtime;
3021
3022  Register left = a1;
3023  Register right = a0;
3024
3025  // Check if left argument is a string.
3026  __ JumpIfSmi(left, &left_not_string);
3027  __ GetObjectType(left, a2, a2);
3028  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3029
3030  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3031  GenerateRegisterArgsPush(masm);
3032  __ TailCallStub(&string_add_left_stub);
3033
3034  // Left operand is not a string, test right.
3035  __ bind(&left_not_string);
3036  __ JumpIfSmi(right, &call_runtime);
3037  __ GetObjectType(right, a2, a2);
3038  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3039
3040  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3041  GenerateRegisterArgsPush(masm);
3042  __ TailCallStub(&string_add_right_stub);
3043
3044  // At least one argument is not a string.
3045  __ bind(&call_runtime);
3046}
3047
3048
3049void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3050  GenerateRegisterArgsPush(masm);
3051  switch (op_) {
3052    case Token::ADD:
3053      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3054      break;
3055    case Token::SUB:
3056      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3057      break;
3058    case Token::MUL:
3059      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3060      break;
3061    case Token::DIV:
3062      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3063      break;
3064    case Token::MOD:
3065      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3066      break;
3067    case Token::BIT_OR:
3068      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3069      break;
3070    case Token::BIT_AND:
3071      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3072      break;
3073    case Token::BIT_XOR:
3074      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3075      break;
3076    case Token::SAR:
3077      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3078      break;
3079    case Token::SHR:
3080      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3081      break;
3082    case Token::SHL:
3083      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3084      break;
3085    default:
3086      UNREACHABLE();
3087  }
3088}
3089
3090
3091void BinaryOpStub::GenerateHeapResultAllocation(
3092    MacroAssembler* masm,
3093    Register result,
3094    Register heap_number_map,
3095    Register scratch1,
3096    Register scratch2,
3097    Label* gc_required) {
3098
3099  // The code below clobbers result if allocation fails. To keep both
3100  // arguments intact for the runtime call, result cannot be a0 or a1.
3101  ASSERT(!result.is(a0) && !result.is(a1));
3102
3103  if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3104    Label skip_allocation, allocated;
3105    Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3106    // If the overwritable operand is already an object, we skip the
3107    // allocation of a heap number.
3108    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3109    // Allocate a heap number for the result.
3110    __ AllocateHeapNumber(
3111        result, scratch1, scratch2, heap_number_map, gc_required);
3112    __ Branch(&allocated);
3113    __ bind(&skip_allocation);
3114    // Use object holding the overwritable operand for result.
3115    __ mov(result, overwritable_operand);
3116    __ bind(&allocated);
3117  } else {
3118    ASSERT(mode_ == NO_OVERWRITE);
3119    __ AllocateHeapNumber(
3120        result, scratch1, scratch2, heap_number_map, gc_required);
3121  }
3122}
3123
3124
3125void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3126  __ Push(a1, a0);
3127}
3128
3129
3130
3131void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3132  // Untagged case: double input in f4, double result goes
3133  //   into f4.
3134  // Tagged case: tagged input on top of stack and in a0,
3135  //   tagged result (heap number) goes into v0.
3136
3137  Label input_not_smi;
3138  Label loaded;
3139  Label calculate;
3140  Label invalid_cache;
3141  const Register scratch0 = t5;
3142  const Register scratch1 = t3;
3143  const Register cache_entry = a0;
3144  const bool tagged = (argument_type_ == TAGGED);
3145
3146  if (CpuFeatures::IsSupported(FPU)) {
3147    CpuFeatures::Scope scope(FPU);
3148
3149    if (tagged) {
3150      // Argument is a number and is on the stack and in a0.
3151      // Load argument and check if it is a smi.
3152      __ JumpIfNotSmi(a0, &input_not_smi);
3153
3154      // Input is a smi. Convert to double and load the low and high words
3155      // of the double into a2, a3.
3156      __ sra(t0, a0, kSmiTagSize);
3157      __ mtc1(t0, f4);
3158      __ cvt_d_w(f4, f4);
3159      __ Move(a2, a3, f4);
3160      __ Branch(&loaded);
3161
3162      __ bind(&input_not_smi);
3163      // Check if input is a HeapNumber.
3164      __ CheckMap(a0,
3165                  a1,
3166                  Heap::kHeapNumberMapRootIndex,
3167                  &calculate,
3168                  DONT_DO_SMI_CHECK);
3169      // Input is a HeapNumber. Load the
3170      // low and high words into a2, a3.
3171      __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3172      __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3173    } else {
3174      // Input is untagged double in f4. Output goes to f4.
3175      __ Move(a2, a3, f4);
3176    }
3177    __ bind(&loaded);
3178    // a2 = low 32 bits of double value.
3179    // a3 = high 32 bits of double value.
3180    // Compute hash (the shifts are arithmetic):
3181    //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3182    __ Xor(a1, a2, a3);
3183    __ sra(t0, a1, 16);
3184    __ Xor(a1, a1, t0);
3185    __ sra(t0, a1, 8);
3186    __ Xor(a1, a1, t0);
3187    ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3188    __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
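    // For reference, a minimal C++ sketch of the hash computed above (the
    // names are illustrative, not the actual cache implementation):
    //   uint32_t Hash(uint32_t lo, uint32_t hi) {
    //     int32_t h = static_cast<int32_t>(lo ^ hi);
    //     h ^= h >> 16;  // Arithmetic shifts, matching the sra above.
    //     h ^= h >> 8;
    //     return static_cast<uint32_t>(h) & (kCacheSize - 1);
    //   }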
3189
3190    // a2 = low 32 bits of double value.
3191    // a3 = high 32 bits of double value.
3192    // a1 = TranscendentalCache::hash(double value).
3193    __ li(cache_entry, Operand(
3194        ExternalReference::transcendental_cache_array_address(
3195            masm->isolate())));
3196    // a0 points to cache array.
3197    __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3198        Isolate::Current()->transcendental_cache()->caches_[0])));
3199    // a0 points to the cache for the type type_.
3200    // If NULL, the cache hasn't been initialized yet, so go through runtime.
3201    __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3202
3203#ifdef DEBUG
3204    // Check that the layout of cache elements matches expectations.
3205    { TranscendentalCache::SubCache::Element test_elem[2];
3206      char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3207      char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3208      char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3209      char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3210      char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3211      CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
3212      CHECK_EQ(0, elem_in0 - elem_start);
3213      CHECK_EQ(kIntSize, elem_in1 - elem_start);
3214      CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3215    }
3216#endif
3217
3218    // Find the address of entry a1 in the cache, i.e., &a0[a1 * 12].
3219    __ sll(t0, a1, 1);
3220    __ Addu(a1, a1, t0);
3221    __ sll(t0, a1, 2);
3222    __ Addu(cache_entry, cache_entry, t0);
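    // The sequence above computes a1 * 12 as (a1 + a1 * 2) * 4; each cache
    // element is 12 bytes (two uint32_t inputs plus a tagged result pointer,
    // see the layout check above).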
3223
3224    // Check if cache matches: Double value is stored in uint32_t[2] array.
3225    __ lw(t0, MemOperand(cache_entry, 0));
3226    __ lw(t1, MemOperand(cache_entry, 4));
3227    __ lw(t2, MemOperand(cache_entry, 8));
3228    __ Addu(cache_entry, cache_entry, 12);
3229    __ Branch(&calculate, ne, a2, Operand(t0));
3230    __ Branch(&calculate, ne, a3, Operand(t1));
3231    // Cache hit. Load result, cleanup and return.
3232    if (tagged) {
3233      // Pop input value from stack and load result into v0.
3234      __ Drop(1);
3235      __ mov(v0, t2);
3236    } else {
3237      // Load result into f4.
3238      __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3239    }
3240    __ Ret();
3241  }  // if (CpuFeatures::IsSupported(FPU))
3242
3243  __ bind(&calculate);
3244  if (tagged) {
3245    __ bind(&invalid_cache);
3246    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3247                                                   masm->isolate()),
3248                                 1,
3249                                 1);
3250  } else {
3251    if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3252    CpuFeatures::Scope scope(FPU);
3253
3254    Label no_update;
3255    Label skip_cache;
3256    const Register heap_number_map = t2;
3257
3258    // Call C function to calculate the result and update the cache.
3259    // Register a0 holds precalculated cache entry address; preserve
3260    // it on the stack and pop it into register cache_entry after the
3261    // call.
3262    __ push(cache_entry);
3263    GenerateCallCFunction(masm, scratch0);
3264    __ GetCFunctionDoubleResult(f4);
3265
3266    // Try to update the cache. If we cannot allocate a
3267    // heap number, we return the result without updating.
3268    __ pop(cache_entry);
3269    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3270    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3271    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3272
3273    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3274    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3275    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3276
3277    __ mov(v0, cache_entry);
3278    __ Ret();
3279
3280    __ bind(&invalid_cache);
3281    // The cache is invalid. Call runtime which will recreate the
3282    // cache.
3283    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3284    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3285    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3286    __ EnterInternalFrame();
3287    __ push(a0);
3288    __ CallRuntime(RuntimeFunction(), 1);
3289    __ LeaveInternalFrame();
3290    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3291    __ Ret();
3292
3293    __ bind(&skip_cache);
3294    // Call the C function to calculate the result and return it directly,
3295    // without updating the cache.
3296    GenerateCallCFunction(masm, scratch0);
3297    __ GetCFunctionDoubleResult(f4);
3298    __ bind(&no_update);
3299
3300    // We return the value in f4 without adding it to the cache, but
3301    // we cause a scavenging GC so that future allocations will succeed.
3302    __ EnterInternalFrame();
3303
3304    // Allocate an aligned object larger than a HeapNumber.
3305    ASSERT(4 * kPointerSize >= HeapNumber::kSize);
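    // (On 32-bit targets a HeapNumber is 12 bytes, a map pointer plus an
    // 8-byte double, so the 16 bytes requested here are sufficient.)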
3306    __ li(scratch0, Operand(4 * kPointerSize));
3307    __ push(scratch0);
3308    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3309    __ LeaveInternalFrame();
3310    __ Ret();
3311  }
3312}
3313
3314
3315void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3316                                                    Register scratch) {
3317  __ push(ra);
3318  __ PrepareCallCFunction(2, scratch);
3319  if (IsMipsSoftFloatABI) {
3320    __ Move(v0, v1, f4);
3321  } else {
3322    __ mov_d(f12, f4);
3323  }
3324  switch (type_) {
3325    case TranscendentalCache::SIN:
3326      __ CallCFunction(
3327          ExternalReference::math_sin_double_function(masm->isolate()), 2);
3328      break;
3329    case TranscendentalCache::COS:
3330      __ CallCFunction(
3331          ExternalReference::math_cos_double_function(masm->isolate()), 2);
3332      break;
3333    case TranscendentalCache::LOG:
3334      __ CallCFunction(
3335          ExternalReference::math_log_double_function(masm->isolate()), 2);
3336      break;
3337    default:
3338      UNIMPLEMENTED();
3339      break;
3340  }
3341  __ pop(ra);
3342}
3343
3344
3345Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3346  switch (type_) {
3347    // Add more cases when necessary.
3348    case TranscendentalCache::SIN: return Runtime::kMath_sin;
3349    case TranscendentalCache::COS: return Runtime::kMath_cos;
3350    case TranscendentalCache::LOG: return Runtime::kMath_log;
3351    default:
3352      UNIMPLEMENTED();
3353      return Runtime::kAbort;
3354  }
3355}
3356
3357
3358void StackCheckStub::Generate(MacroAssembler* masm) {
3359  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3360}
3361
3362
3363void MathPowStub::Generate(MacroAssembler* masm) {
3364  Label call_runtime;
3365
3366  if (CpuFeatures::IsSupported(FPU)) {
3367    CpuFeatures::Scope scope(FPU);
3368
3369    Label base_not_smi;
3370    Label exponent_not_smi;
3371    Label convert_exponent;
3372
3373    const Register base = a0;
3374    const Register exponent = a2;
3375    const Register heapnumbermap = t1;
3376    const Register heapnumber = s0;  // Callee-saved register.
3377    const Register scratch = t2;
3378    const Register scratch2 = t3;
3379
3380    // Allocate FP values in the ABI-parameter-passing registers.
3381    const DoubleRegister double_base = f12;
3382    const DoubleRegister double_exponent = f14;
3383    const DoubleRegister double_result = f0;
3384    const DoubleRegister double_scratch = f2;
3385
3386    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3387    __ lw(base, MemOperand(sp, 1 * kPointerSize));
3388    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3389
3390    // Convert base to double value and store it in f0.
3391    __ JumpIfNotSmi(base, &base_not_smi);
3392    // Base is a Smi. Untag and convert it.
3393    __ SmiUntag(base);
3394    __ mtc1(base, double_scratch);
3395    __ cvt_d_w(double_base, double_scratch);
3396    __ Branch(&convert_exponent);
3397
3398    __ bind(&base_not_smi);
3399    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3400    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3401    // Base is a heapnumber. Load it into double register.
3402    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3403
3404    __ bind(&convert_exponent);
3405    __ JumpIfNotSmi(exponent, &exponent_not_smi);
3406    __ SmiUntag(exponent);
3407
3408    // The base is in a double register and the exponent is
3409    // an untagged smi. Allocate a heap number and call a
3410    // C function for integer exponents. The register containing
3411    // the heap number is callee-saved.
3412    __ AllocateHeapNumber(heapnumber,
3413                          scratch,
3414                          scratch2,
3415                          heapnumbermap,
3416                          &call_runtime);
3417    __ push(ra);
3418    __ PrepareCallCFunction(3, scratch);
3419    __ SetCallCDoubleArguments(double_base, exponent);
3420    __ CallCFunction(
3421        ExternalReference::power_double_int_function(masm->isolate()), 3);
3422    __ pop(ra);
3423    __ GetCFunctionDoubleResult(double_result);
3424    __ sdc1(double_result,
3425            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3426    __ mov(v0, heapnumber);
3427    __ DropAndRet(2 * kPointerSize);
3428
3429    __ bind(&exponent_not_smi);
3430    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3431    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3432    // Exponent is a heapnumber. Load it into double register.
3433    __ ldc1(double_exponent,
3434            FieldMemOperand(exponent, HeapNumber::kValueOffset));
3435
3436    // The base and the exponent are in double registers.
3437    // Allocate a heap number and call a C function for
3438    // double exponents. The register containing
3439    // the heap number is callee-saved.
3440    __ AllocateHeapNumber(heapnumber,
3441                          scratch,
3442                          scratch2,
3443                          heapnumbermap,
3444                          &call_runtime);
3445    __ push(ra);
3446    __ PrepareCallCFunction(4, scratch);
3447    // ABI (o32) for func(double a, double b): a in f12, b in f14.
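    // (In the o32 hard-float ABI, f12 and f14 are the first two
    // floating-point argument registers.)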
3448    ASSERT(double_base.is(f12));
3449    ASSERT(double_exponent.is(f14));
3450    __ SetCallCDoubleArguments(double_base, double_exponent);
3451    __ CallCFunction(
3452        ExternalReference::power_double_double_function(masm->isolate()), 4);
3453    __ pop(ra);
3454    __ GetCFunctionDoubleResult(double_result);
3455    __ sdc1(double_result,
3456            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3457    __ mov(v0, heapnumber);
3458    __ DropAndRet(2 * kPointerSize);
3459  }
3460
3461  __ bind(&call_runtime);
3462  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3463}
3464
3465
3466bool CEntryStub::NeedsImmovableCode() {
3467  return true;
3468}
3469
3470
3471void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
3472  __ Throw(v0);
3473}
3474
3475
3476void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
3477                                          UncatchableExceptionType type) {
3478  __ ThrowUncatchable(type, v0);
3479}
3480
3481
3482void CEntryStub::GenerateCore(MacroAssembler* masm,
3483                              Label* throw_normal_exception,
3484                              Label* throw_termination_exception,
3485                              Label* throw_out_of_memory_exception,
3486                              bool do_gc,
3487                              bool always_allocate) {
3488  // v0: result parameter for PerformGC, if any
3489  // s0: number of arguments including receiver (C callee-saved)
3490  // s1: pointer to the first argument          (C callee-saved)
3491  // s2: pointer to builtin function            (C callee-saved)
3492
3493  if (do_gc) {
3494    // Move result passed in v0 into a0 to call PerformGC.
3495    __ mov(a0, v0);
3496    __ PrepareCallCFunction(1, a1);
3497    __ CallCFunction(
3498        ExternalReference::perform_gc_function(masm->isolate()), 1);
3499  }
3500
3501  ExternalReference scope_depth =
3502      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3503  if (always_allocate) {
3504    __ li(a0, Operand(scope_depth));
3505    __ lw(a1, MemOperand(a0));
3506    __ Addu(a1, a1, Operand(1));
3507    __ sw(a1, MemOperand(a0));
3508  }
3509
3510  // Prepare arguments for C routine: a0 = argc, a1 = argv
3511  __ mov(a0, s0);
3512  __ mov(a1, s1);
3513
3514  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3515  // also need to reserve the 4 argument slots on the stack.
3516
3517  __ AssertStackIsAligned();
3518
3519  __ li(a2, Operand(ExternalReference::isolate_address()));
3520
3521  // To let the GC traverse the return address of the exit frames, we need to
3522  // know where the return address is. The CEntryStub is unmovable, so
3523  // we can store the address on the stack to be able to find it again and
3524  // we never have to restore it, because it will not change.
3525  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3526    // This branch-and-link sequence is needed to find the current PC on mips,
3527    // saved to the ra register.
3528    // Use masm-> here instead of the double-underscore macro since extra
3529    // coverage code can interfere with the proper calculation of ra.
3530    Label find_ra;
3531    masm->bal(&find_ra);  // bal exposes branch delay slot.
3532    masm->nop();  // Branch delay slot nop.
3533    masm->bind(&find_ra);
3534
3535    // Adjust the value in ra to point to the correct return location, 2nd
3536    // instruction past the real call into C code (the jalr(t9)), and push it.
3537    // This is the return address of the exit frame.
3538    const int kNumInstructionsToJump = 6;
3539    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3540    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
3541    masm->Subu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
3542    // Stack is still aligned.
3543
3544    // Call the C routine.
3545    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
3546    masm->jalr(t9);
3547    masm->nop();    // Branch delay slot nop.
3548    // Make sure the stored 'ra' points to this position.
3549    ASSERT_EQ(kNumInstructionsToJump,
3550              masm->InstructionsGeneratedSince(&find_ra));
3551  }
3552
3553  // Restore stack (remove arg slots).
3554  __ Addu(sp, sp, StandardFrameConstants::kCArgsSlotsSize);
3555
3556  if (always_allocate) {
3557    // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3558    __ li(a2, Operand(scope_depth));
3559    __ lw(a3, MemOperand(a2));
3560    __ Subu(a3, a3, Operand(1));
3561    __ sw(a3, MemOperand(a2));
3562  }
3563
3564  // Check for failure result.
3565  Label failure_returned;
3566  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
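  // A failure object carries kFailureTag in its low bits; given the assert
  // above, adding 1 clears those bits under the mask, so t0 below is zero
  // exactly when v0 is a failure.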
3567  __ addiu(a2, v0, 1);
3568  __ andi(t0, a2, kFailureTagMask);
3569  __ Branch(&failure_returned, eq, t0, Operand(zero_reg));
3570
3571  // Exit C frame and return.
3572  // v0:v1: result
3573  // sp: stack pointer
3574  // fp: frame pointer
3575  __ LeaveExitFrame(save_doubles_, s0);
3576  __ Ret();
3577
3578  // Check if we should retry or throw exception.
3579  Label retry;
3580  __ bind(&failure_returned);
3581  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3582  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3583  __ Branch(&retry, eq, t0, Operand(zero_reg));
3584
3585  // Special handling of out of memory exceptions.
3586  Failure* out_of_memory = Failure::OutOfMemoryException();
3587  __ Branch(throw_out_of_memory_exception, eq,
3588            v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
3589
3590  // Retrieve the pending exception and clear the variable.
3591  __ li(t0,
3592        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3593  __ lw(a3, MemOperand(t0));
3594  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3595                                      masm->isolate())));
3596  __ lw(v0, MemOperand(t0));
3597  __ sw(a3, MemOperand(t0));
3598
3599  // Special handling of termination exceptions which are uncatchable
3600  // by javascript code.
3601  __ Branch(throw_termination_exception, eq,
3602            v0, Operand(masm->isolate()->factory()->termination_exception()));
3603
3604  // Handle normal exception.
3605  __ jmp(throw_normal_exception);
3606
3607  __ bind(&retry);
3608  // The last failure (v0) will be moved to a0 as the parameter when retrying.
3609}
3610
3611
3612void CEntryStub::Generate(MacroAssembler* masm) {
3613  // Called from JavaScript; parameters are on stack as if calling JS function
3614  // a0: number of arguments including receiver
3615  // a1: pointer to builtin function
3616  // fp: frame pointer    (restored after C call)
3617  // sp: stack pointer    (restored as callee's sp after C call)
3618  // cp: current context  (C callee-saved)
3619
3620  // NOTE: Invocations of builtins may return failure objects
3621  // instead of a proper result. The builtin entry handles
3622  // this by performing a garbage collection and retrying the
3623  // builtin once.
3624
3625  // Compute the argv pointer in a callee-saved register.
3626  __ sll(s1, a0, kPointerSizeLog2);
3627  __ Addu(s1, sp, s1);
3628  __ Subu(s1, s1, Operand(kPointerSize));
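  // That is, s1 = sp + argc * kPointerSize - kPointerSize, used below as the
  // pointer to the first argument (C callee-saved).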
3629
3630  // Enter the exit frame that transitions from JavaScript to C++.
3631  __ EnterExitFrame(save_doubles_);
3632
3633  // Setup argc and the builtin function in callee-saved registers.
3634  __ mov(s0, a0);
3635  __ mov(s2, a1);
3636
3637  // s0: number of arguments (C callee-saved)
3638  // s1: pointer to first argument (C callee-saved)
3639  // s2: pointer to builtin function (C callee-saved)
3640
3641  Label throw_normal_exception;
3642  Label throw_termination_exception;
3643  Label throw_out_of_memory_exception;
3644
3645  // Call into the runtime system.
3646  GenerateCore(masm,
3647               &throw_normal_exception,
3648               &throw_termination_exception,
3649               &throw_out_of_memory_exception,
3650               false,
3651               false);
3652
3653  // Do space-specific GC and retry runtime call.
3654  GenerateCore(masm,
3655               &throw_normal_exception,
3656               &throw_termination_exception,
3657               &throw_out_of_memory_exception,
3658               true,
3659               false);
3660
3661  // Do full GC and retry runtime call one final time.
3662  Failure* failure = Failure::InternalError();
3663  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
3664  GenerateCore(masm,
3665               &throw_normal_exception,
3666               &throw_termination_exception,
3667               &throw_out_of_memory_exception,
3668               true,
3669               true);
3670
3671  __ bind(&throw_out_of_memory_exception);
3672  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
3673
3674  __ bind(&throw_termination_exception);
3675  GenerateThrowUncatchable(masm, TERMINATION);
3676
3677  __ bind(&throw_normal_exception);
3678  GenerateThrowTOS(masm);
3679}
3680
3681
3682void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
3683  Label invoke, exit;
3684
3685  // Registers:
3686  // a0: entry address
3687  // a1: function
3688  // a2: receiver
3689  // a3: argc
3690  //
3691  // Stack:
3692  // 4 args slots
3693  // args
3694
3695  // Save callee saved registers on the stack.
3696  __ MultiPush(kCalleeSaved | ra.bit());
3697
3698  // Load argv in s0 register.
3699  __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
3700                           StandardFrameConstants::kCArgsSlotsSize));
3701
3702  // We build an EntryFrame.
3703  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
3704  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3705  __ li(t2, Operand(Smi::FromInt(marker)));
3706  __ li(t1, Operand(Smi::FromInt(marker)));
3707  __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
3708                                      masm->isolate())));
3709  __ lw(t0, MemOperand(t0));
3710  __ Push(t3, t2, t1, t0);
3711  // Setup frame pointer for the frame to be pushed.
3712  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
3713
3714  // Registers:
3715  // a0: entry_address
3716  // a1: function
3717  // a2: receiver_pointer
3718  // a3: argc
3719  // s0: argv
3720  //
3721  // Stack:
3722  // caller fp          |
3723  // function slot      | entry frame
3724  // context slot       |
3725  // bad fp (0xff...f)  |
3726  // callee saved registers + ra
3727  // 4 args slots
3728  // args
3729
3730  // If this is the outermost JS call, set js_entry_sp value.
3731  Label non_outermost_js;
3732  ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
3733                                masm->isolate());
3734  __ li(t1, Operand(ExternalReference(js_entry_sp)));
3735  __ lw(t2, MemOperand(t1));
3736  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
3737  __ sw(fp, MemOperand(t1));
3738  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3739  Label cont;
3740  __ b(&cont);
3741  __ nop();   // Branch delay slot nop.
3742  __ bind(&non_outermost_js);
3743  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
3744  __ bind(&cont);
3745  __ push(t0);
3746
3747  // Call a faked try-block that does the invoke.
3748  __ bal(&invoke);  // bal exposes branch delay slot.
3749  __ nop();   // Branch delay slot nop.
3750
3751  // Caught exception: Store result (exception) in the pending
3752  // exception field in the JSEnv and return a failure sentinel.
3753  // Coming in here the fp will be invalid because the PushTryHandler below
3754  // sets it to 0 to signal the existence of the JSEntry frame.
3755  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3756                                      masm->isolate())));
3757  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
3758  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
3759  __ b(&exit);  // b exposes branch delay slot.
3760  __ nop();   // Branch delay slot nop.
3761
3762  // Invoke: Link this frame into the handler chain.
3763  __ bind(&invoke);
3764  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
3765  // If an exception not caught by another handler occurs, this handler
3766  // returns control to the code after the bal(&invoke) above, which
3767  // restores all kCalleeSaved registers (including cp and fp) to their
3768  // saved values before returning a failure to C.
3769
3770  // Clear any pending exceptions.
3771  __ li(t0,
3772        Operand(ExternalReference::the_hole_value_location(masm->isolate())));
3773  __ lw(t1, MemOperand(t0));
3774  __ li(t0, Operand(ExternalReference(Isolate::k_pending_exception_address,
3775                                      masm->isolate())));
3776  __ sw(t1, MemOperand(t0));
3777
3778  // Invoke the function by calling through JS entry trampoline builtin.
3779  // Notice that we cannot store a reference to the trampoline code directly in
3780  // this stub, because runtime stubs are not traversed when doing GC.
3781
3782  // Registers:
3783  // a0: entry_address
3784  // a1: function
3785  // a2: receiver_pointer
3786  // a3: argc
3787  // s0: argv
3788  //
3789  // Stack:
3790  // handler frame
3791  // entry frame
3792  // callee saved registers + ra
3793  // 4 args slots
3794  // args
3795
3796  if (is_construct) {
3797    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
3798                                      masm->isolate());
3799    __ li(t0, Operand(construct_entry));
3800  } else {
3801    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
3802    __ li(t0, Operand(entry));
3803  }
3804  __ lw(t9, MemOperand(t0));  // Deref address.
3805
3806  // Call JSEntryTrampoline.
3807  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
3808  __ Call(t9);
3809
3810  // Unlink this frame from the handler chain.
3811  __ PopTryHandler();
3812
3813  __ bind(&exit);  // v0 holds result
3814  // Check if the current stack frame is marked as the outermost JS frame.
3815  Label non_outermost_js_2;
3816  __ pop(t1);
3817  __ Branch(&non_outermost_js_2, ne, t1,
3818            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
3819  __ li(t1, Operand(ExternalReference(js_entry_sp)));
3820  __ sw(zero_reg, MemOperand(t1));
3821  __ bind(&non_outermost_js_2);
3822
3823  // Restore the top frame descriptors from the stack.
3824  __ pop(t1);
3825  __ li(t0, Operand(ExternalReference(Isolate::k_c_entry_fp_address,
3826                                      masm->isolate())));
3827  __ sw(t1, MemOperand(t0));
3828
3829  // Reset the stack to the callee saved registers.
3830  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
3831
3832  // Restore callee saved registers from the stack.
3833  __ MultiPop(kCalleeSaved | ra.bit());
3834  // Return.
3835  __ Jump(ra);
3836}
3837
3838
3839// Uses registers a0 to t0.
3840// Expected input (depending on whether args are in registers or on the stack):
3841// * object: a0 or at sp + 1 * kPointerSize.
3842// * function: a1 or at sp.
3843//
3844// Inlined call site patching is a crankshaft-specific feature that is not
3845// implemented on MIPS.
3846void InstanceofStub::Generate(MacroAssembler* masm) {
3847  // This is a crankshaft-specific feature that has not been implemented yet.
3848  ASSERT(!HasCallSiteInlineCheck());
3849  // Call site inlining and patching implies arguments in registers.
3850  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
3851  // ReturnTrueFalse is only implemented for inlined call sites.
3852  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
3853
3854  // Fixed register usage throughout the stub:
3855  const Register object = a0;  // Object (lhs).
3856  Register map = a3;  // Map of the object.
3857  const Register function = a1;  // Function (rhs).
3858  const Register prototype = t0;  // Prototype of the function.
3859  const Register inline_site = t5;
3860  const Register scratch = a2;
3861
3862  Label slow, loop, is_instance, is_not_instance, not_js_object;
3863
3864  if (!HasArgsInRegisters()) {
3865    __ lw(object, MemOperand(sp, 1 * kPointerSize));
3866    __ lw(function, MemOperand(sp, 0));
3867  }
3868
3869  // Check that the left-hand side is a JS object and load its map.
3870  __ JumpIfSmi(object, &not_js_object);
3871  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
3872
3873  // If there is a call site cache don't look in the global cache, but do the
3874  // real lookup and update the call site cache.
3875  if (!HasCallSiteInlineCheck()) {
3876    Label miss;
3877    __ LoadRoot(t1, Heap::kInstanceofCacheFunctionRootIndex);
3878    __ Branch(&miss, ne, function, Operand(t1));
3879    __ LoadRoot(t1, Heap::kInstanceofCacheMapRootIndex);
3880    __ Branch(&miss, ne, map, Operand(t1));
3881    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3882    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3883
3884    __ bind(&miss);
3885  }
3886
3887  // Get the prototype of the function.
3888  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
3889
3890  // Check that the function prototype is a JS object.
3891  __ JumpIfSmi(prototype, &slow);
3892  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
3893
3894  // Update the global instanceof or call site inlined cache with the current
3895  // map and function. The cached answer will be set when it is known below.
3896  if (!HasCallSiteInlineCheck()) {
3897    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
3898    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
3899  } else {
3900    UNIMPLEMENTED_MIPS();
3901  }
3902
3903  // Register mapping: a3 is object map and t0 is function prototype.
3904  // Get prototype of object into a2.
3905  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
3906
3907  // We don't need map any more. Use it as a scratch register.
3908  Register scratch2 = map;
3909  map = no_reg;
3910
3911  // Loop through the prototype chain looking for the function prototype.
3912  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
3913  __ bind(&loop);
3914  __ Branch(&is_instance, eq, scratch, Operand(prototype));
3915  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
3916  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
3917  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
3918  __ Branch(&loop);
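  // Roughly, in C++-like pseudocode (a sketch, not the actual runtime code):
  //   for (Object* p = object_map_prototype; ; p = p->map()->prototype()) {
  //     if (p == function_prototype) goto is_instance;
  //     if (p == null_value) goto is_not_instance;
  //   }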
3919
3920  __ bind(&is_instance);
3921  ASSERT(Smi::FromInt(0) == 0);
3922  if (!HasCallSiteInlineCheck()) {
3923    __ mov(v0, zero_reg);
3924    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3925  } else {
3926    UNIMPLEMENTED_MIPS();
3927  }
3928  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3929
3930  __ bind(&is_not_instance);
3931  if (!HasCallSiteInlineCheck()) {
3932    __ li(v0, Operand(Smi::FromInt(1)));
3933    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
3934  } else {
3935    UNIMPLEMENTED_MIPS();
3936  }
3937  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3938
3939  Label object_not_null, object_not_null_or_smi;
3940  __ bind(&not_js_object);
3941  // Before null, smi and string value checks, check that the rhs is a function
3942  // as for a non-function rhs an exception needs to be thrown.
3943  __ JumpIfSmi(function, &slow);
3944  __ GetObjectType(function, scratch2, scratch);
3945  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
3946
3947  // Null is not instance of anything.
3948  __ Branch(&object_not_null, ne, scratch,
3949      Operand(masm->isolate()->factory()->null_value()));
3950  __ li(v0, Operand(Smi::FromInt(1)));
3951  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3952
3953  __ bind(&object_not_null);
3954  // Smi values are not instances of anything.
3955  __ JumpIfNotSmi(object, &object_not_null_or_smi);
3956  __ li(v0, Operand(Smi::FromInt(1)));
3957  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3958
3959  __ bind(&object_not_null_or_smi);
3960  // String values are not instances of anything.
3961  __ IsObjectJSStringType(object, scratch, &slow);
3962  __ li(v0, Operand(Smi::FromInt(1)));
3963  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3964
3965  // Slow-case.  Tail call builtin.
3966  __ bind(&slow);
3967  if (!ReturnTrueFalseObject()) {
3968    if (HasArgsInRegisters()) {
3969      __ Push(a0, a1);
3970    }
3971    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
3972  } else {
3973    __ EnterInternalFrame();
3974    __ Push(a0, a1);
3975    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
3976    __ LeaveInternalFrame();
3977    __ mov(a0, v0);
3978    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
3979    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
3980    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
3981    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
3982  }
3983}
3984
3985
3986Register InstanceofStub::left() { return a0; }
3987
3988
3989Register InstanceofStub::right() { return a1; }
3990
3991
3992void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
3993  // The displacement is the offset of the last parameter (if any)
3994  // relative to the frame pointer.
3995  static const int kDisplacement =
3996      StandardFrameConstants::kCallerSPOffset - kPointerSize;
3997
3998  // Check that the key is a smi.
3999  Label slow;
4000  __ JumpIfNotSmi(a1, &slow);
4001
4002  // Check if the calling frame is an arguments adaptor frame.
4003  Label adaptor;
4004  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4005  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4006  __ Branch(&adaptor,
4007            eq,
4008            a3,
4009            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4010
4011  // Check index (a1) against the formal parameter count limit passed in
4012  // through register a0. Use unsigned comparison to get negative
4013  // check for free.
4014  __ Branch(&slow, hs, a1, Operand(a0));
4015
4016  // Read the argument from the stack and return it.
4017  __ subu(a3, a0, a1);
4018  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
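  // a3 = (parameter count - index) as a smi; shifting it left by
  // kPointerSizeLog2 - kSmiTagSize turns the smi-tagged value directly into
  // a byte offset.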
4019  __ Addu(a3, fp, Operand(t3));
4020  __ lw(v0, MemOperand(a3, kDisplacement));
4021  __ Ret();
4022
4023  // Arguments adaptor case: Check index (a1) against actual arguments
4024  // limit found in the arguments adaptor frame. Use unsigned
4025  // comparison to get negative check for free.
4026  __ bind(&adaptor);
4027  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4028  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4029
4030  // Read the argument from the adaptor frame and return it.
4031  __ subu(a3, a0, a1);
4032  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4033  __ Addu(a3, a2, Operand(t3));
4034  __ lw(v0, MemOperand(a3, kDisplacement));
4035  __ Ret();
4036
4037  // Slow-case: Handle non-smi or out-of-bounds access to arguments
4038  // by calling the runtime system.
4039  __ bind(&slow);
4040  __ push(a1);
4041  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4042}
4043
4044
4045void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4046  // sp[0] : number of parameters
4047  // sp[4] : receiver displacement
4048  // sp[8] : function
4049  // Check if the calling frame is an arguments adaptor frame.
4050  Label runtime;
4051  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4052  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4053  __ Branch(&runtime, ne,
4054            a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4055
4056  // Patch the arguments.length and the parameters pointer in the current frame.
4057  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4058  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4059  __ sll(t3, a2, 1);
4060  __ Addu(a3, a3, Operand(t3));
4061  __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4062  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4063
4064  __ bind(&runtime);
4065  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4066}
4067
4068
4069void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4070  // Stack layout:
4071  //  sp[0] : number of parameters (tagged)
4072  //  sp[4] : address of receiver argument
4073  //  sp[8] : function
4074  // Registers used over whole function:
4075  //  t2 : allocated object (tagged)
4076  //  t5 : mapped parameter count (tagged)
4077
4078  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4079  // a1 = parameter count (tagged)
4080
4081  // Check if the calling frame is an arguments adaptor frame.
4082  Label runtime;
4083  Label adaptor_frame, try_allocate;
4084  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4085  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4086  __ Branch(&adaptor_frame, eq, a2,
4087            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4088
4089  // No adaptor, parameter count = argument count.
4090  __ mov(a2, a1);
4091  __ b(&try_allocate);
4092  __ nop();   // Branch delay slot nop.
4093
4094  // We have an adaptor frame. Patch the parameters pointer.
4095  __ bind(&adaptor_frame);
4096  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4097  __ sll(t6, a2, 1);
4098  __ Addu(a3, a3, Operand(t6));
4099  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4100  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4101
4102  // a1 = parameter count (tagged)
4103  // a2 = argument count (tagged)
4104  // Compute the mapped parameter count = min(a1, a2) in a1.
4105  Label skip_min;
4106  __ Branch(&skip_min, lt, a1, Operand(a2));
4107  __ mov(a1, a2);
4108  __ bind(&skip_min);
4109
4110  __ bind(&try_allocate);
4111
4112  // Compute the sizes of backing store, parameter map, and arguments object.
4113  // 1. Parameter map, has 2 extra words containing context and backing store.
4114  const int kParameterMapHeaderSize =
4115      FixedArray::kHeaderSize + 2 * kPointerSize;
4116  // If there are no mapped parameters, we do not need the parameter_map.
4117  Label param_map_size;
4118  ASSERT_EQ(0, Smi::FromInt(0));
4119  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4120  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
4121  __ sll(t5, a1, 1);
4122  __ addiu(t5, t5, kParameterMapHeaderSize);
4123  __ bind(&param_map_size);
4124
4125  // 2. Backing store.
4126  __ sll(t6, a2, 1);
4127  __ Addu(t5, t5, Operand(t6));
4128  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4129
4130  // 3. Arguments object.
4131  __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
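  // t5 now holds the total allocation size in bytes. Note that shifting a
  // smi-tagged count left by one yields exactly count * kPointerSize.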
4132
4133  // Do the allocation of all three objects in one go.
4134  __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4135
4136  // v0 = address of new object(s) (tagged)
4137  // a2 = argument count (tagged)
4138  // Get the arguments boilerplate from the current (global) context into t0.
4139  const int kNormalOffset =
4140      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4141  const int kAliasedOffset =
4142      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4143
4144  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4145  __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4146  Label skip2_ne, skip2_eq;
4147  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4148  __ lw(t0, MemOperand(t0, kNormalOffset));
4149  __ bind(&skip2_ne);
4150
4151  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4152  __ lw(t0, MemOperand(t0, kAliasedOffset));
4153  __ bind(&skip2_eq);
4154
4155  // v0 = address of new object (tagged)
4156  // a1 = mapped parameter count (tagged)
4157  // a2 = argument count (tagged)
4158  // t0 = address of boilerplate object (tagged)
4159  // Copy the JS object part.
4160  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4161    __ lw(a3, FieldMemOperand(t0, i));
4162    __ sw(a3, FieldMemOperand(v0, i));
4163  }
4164
4165  // Setup the callee in-object property.
4166  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4167  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4168  const int kCalleeOffset = JSObject::kHeaderSize +
4169      Heap::kArgumentsCalleeIndex * kPointerSize;
4170  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4171
4172  // Use the length (smi tagged) and set that as an in-object property too.
4173  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4174  const int kLengthOffset = JSObject::kHeaderSize +
4175      Heap::kArgumentsLengthIndex * kPointerSize;
4176  __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4177
4178  // Setup the elements pointer in the allocated arguments object.
4179  // If we allocated a parameter map, t0 will point there, otherwise
4180  // it will point to the backing store.
4181  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4182  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4183
4184  // v0 = address of new object (tagged)
4185  // a1 = mapped parameter count (tagged)
4186  // a2 = argument count (tagged)
4187  // t0 = address of parameter map or backing store (tagged)
4188  // Initialize parameter map. If there are no mapped arguments, we're done.
4189  Label skip_parameter_map;
4190  Label skip3;
4191  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4192  // Move backing store address to a3, because it is
4193  // expected there when filling in the unmapped arguments.
4194  __ mov(a3, t0);
4195  __ bind(&skip3);
4196
4197  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4198
4199  __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4200  __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4201  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4202  __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4203  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4204  __ sll(t6, a1, 1);
4205  __ Addu(t2, t0, Operand(t6));
4206  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4207  __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4208
4209  // Copy the parameter slots and the holes in the arguments.
4210  // We need to fill in mapped_parameter_count slots. They index the context,
4211  // where parameters are stored in reverse order, at
4212  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4213  // The mapped parameters thus need to get indices
4214  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
4215  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4216  // We loop from right to left.
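  // For example (purely illustrative): with parameter_count == 3 and
  // mapped_parameter_count == 2, the two map slots receive the context
  // indices MIN_CONTEXT_SLOTS + 2 and MIN_CONTEXT_SLOTS + 1.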
4217  Label parameters_loop, parameters_test;
4218  __ mov(t2, a1);
4219  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4220  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4221  __ Subu(t5, t5, Operand(a1));
4222  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4223  __ sll(t6, t2, 1);
4224  __ Addu(a3, t0, Operand(t6));
4225  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4226
4227  // t2 = loop variable (tagged)
4228  // a1 = mapping index (tagged)
4229  // a3 = address of backing store (tagged)
4230  // t0 = address of parameter map (tagged)
4231  // t1 = temporary scratch (a.o., for address calculation)
4232  // t3 = the hole value
4233  __ jmp(&parameters_test);
4234
4235  __ bind(&parameters_loop);
4236  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4237  __ sll(t1, t2, 1);
4238  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4239  __ Addu(t6, t0, t1);
4240  __ sw(t5, MemOperand(t6));
4241  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4242  __ Addu(t6, a3, t1);
4243  __ sw(t3, MemOperand(t6));
4244  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4245  __ bind(&parameters_test);
4246  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4247
4248  __ bind(&skip_parameter_map);
4249  // a2 = argument count (tagged)
4250  // a3 = address of backing store (tagged)
4251  // t1 = scratch
4252  // Copy arguments header and remaining slots (if there are any).
4253  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4254  __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4255  __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4256
4257  Label arguments_loop, arguments_test;
4258  __ mov(t5, a1);
4259  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4260  __ sll(t6, t5, 1);
4261  __ Subu(t0, t0, Operand(t6));
4262  __ jmp(&arguments_test);
4263
4264  __ bind(&arguments_loop);
4265  __ Subu(t0, t0, Operand(kPointerSize));
4266  __ lw(t2, MemOperand(t0, 0));
4267  __ sll(t6, t5, 1);
4268  __ Addu(t1, a3, Operand(t6));
4269  __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4270  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4271
4272  __ bind(&arguments_test);
4273  __ Branch(&arguments_loop, lt, t5, Operand(a2));
4274
4275  // Return and remove the on-stack parameters.
4276  __ Addu(sp, sp, Operand(3 * kPointerSize));
4277  __ Ret();
4278
4279  // Do the runtime call to allocate the arguments object.
4280  // a2 = argument count (tagged)
4281  __ bind(&runtime);
4282  __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
4283  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4284}
4285
4286
4287void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4288  // sp[0] : number of parameters
4289  // sp[4] : receiver displacement
4290  // sp[8] : function
4291  // Check if the calling frame is an arguments adaptor frame.
4292  Label adaptor_frame, try_allocate, runtime;
4293  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4294  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4295  __ Branch(&adaptor_frame,
4296            eq,
4297            a3,
4298            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4299
4300  // Get the length from the frame.
4301  __ lw(a1, MemOperand(sp, 0));
4302  __ Branch(&try_allocate);
4303
4304  // Patch the arguments.length and the parameters pointer.
4305  __ bind(&adaptor_frame);
4306  __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4307  __ sw(a1, MemOperand(sp, 0));
4308  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4309  __ Addu(a3, a2, Operand(at));
4310
4311  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4312  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4313
4314  // Try the new space allocation. Start out with computing the size
4315  // of the arguments object and the elements array in words.
4316  Label add_arguments_object;
4317  __ bind(&try_allocate);
4318  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4319  __ srl(a1, a1, kSmiTagSize);
4320
4321  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4322  __ bind(&add_arguments_object);
4323  __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4324
4325  // Do the allocation of both objects in one go.
4326  __ AllocateInNewSpace(a1,
4327                        v0,
4328                        a2,
4329                        a3,
4330                        &runtime,
4331                        static_cast<AllocationFlags>(TAG_OBJECT |
4332                                                     SIZE_IN_WORDS));
4333
4334  // Get the arguments boilerplate from the current (global) context.
4335  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4336  __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4337  __ lw(t0, MemOperand(t0, Context::SlotOffset(
4338      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
4339
4340  // Copy the JS object part.
4341  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4342
4343  // Get the length (smi tagged) and set that as an in-object property too.
4344  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4345  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4346  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4347      Heap::kArgumentsLengthIndex * kPointerSize));
4348
4349  Label done;
4350  __ Branch(&done, eq, a1, Operand(zero_reg));
4351
4352  // Get the parameters pointer from the stack.
4353  __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4354
4355  // Setup the elements pointer in the allocated arguments object and
4356  // initialize the header in the elements fixed array.
4357  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
4358  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4359  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4360  __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4361  __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4362  // Untag the length for the loop.
4363  __ srl(a1, a1, kSmiTagSize);
4364
4365  // Copy the fixed array slots.
4366  Label loop;
4367  // Setup t0 to point to the first array slot.
4368  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4369  __ bind(&loop);
4370  // Pre-decrement a2 with kPointerSize on each iteration.
4371  // Pre-decrement in order to skip receiver.
4372  __ Addu(a2, a2, Operand(-kPointerSize));
4373  __ lw(a3, MemOperand(a2));
4374  // Post-increment t0 with kPointerSize on each iteration.
4375  __ sw(a3, MemOperand(t0));
4376  __ Addu(t0, t0, Operand(kPointerSize));
4377  __ Subu(a1, a1, Operand(1));
4378  __ Branch(&loop, ne, a1, Operand(zero_reg));
4379
4380  // Return and remove the on-stack parameters.
4381  __ bind(&done);
4382  __ Addu(sp, sp, Operand(3 * kPointerSize));
4383  __ Ret();
4384
4385  // Do the runtime call to allocate the arguments object.
4386  __ bind(&runtime);
4387  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4388}
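
// Illustrative sketch (not part of the stub): the fast path above allocates the
// strict-mode arguments object and its elements array in one new-space chunk.
// The size handed to AllocateInNewSpace is roughly (arg_count is an invented
// name for the untagged argument count):
//
//   size_in_words = Heap::kArgumentsObjectSizeStrict / kPointerSize
//                 + (arg_count == 0
//                        ? 0
//                        : FixedArray::kHeaderSize / kPointerSize + arg_count);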
4389
4390
4391void RegExpExecStub::Generate(MacroAssembler* masm) {
4392  // Jump straight to the runtime system if native RegExp support is not
4393  // selected at compile time, or if the regexp entry in generated code has
4394  // been turned off by the runtime flag.
4395#ifdef V8_INTERPRETED_REGEXP
4396  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4397#else  // V8_INTERPRETED_REGEXP
4398  if (!FLAG_regexp_entry_native) {
4399    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4400    return;
4401  }
4402
4403  // Stack frame on entry.
4404  //  sp[0]: last_match_info (expected JSArray)
4405  //  sp[4]: previous index
4406  //  sp[8]: subject string
4407  //  sp[12]: JSRegExp object
4408
4409  static const int kLastMatchInfoOffset = 0 * kPointerSize;
4410  static const int kPreviousIndexOffset = 1 * kPointerSize;
4411  static const int kSubjectOffset = 2 * kPointerSize;
4412  static const int kJSRegExpOffset = 3 * kPointerSize;
4413
4414  Label runtime, invoke_regexp;
4415
4416  // Allocation of registers for this function. These are in callee save
4417  // registers and will be preserved by the call to the native RegExp code, as
4418  // this code is called using the normal C calling convention. When calling
4419  // directly from generated code the native RegExp code will not do a GC and
4420  // therefore the content of these registers are safe to use after the call.
4421  // MIPS - using s0..s2, since we are not using CEntry Stub.
4422  Register subject = s0;
4423  Register regexp_data = s1;
4424  Register last_match_info_elements = s2;
4425
4426  // Ensure that a RegExp stack is allocated.
4427  ExternalReference address_of_regexp_stack_memory_address =
4428      ExternalReference::address_of_regexp_stack_memory_address(
4429          masm->isolate());
4430  ExternalReference address_of_regexp_stack_memory_size =
4431      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
4432  __ li(a0, Operand(address_of_regexp_stack_memory_size));
4433  __ lw(a0, MemOperand(a0, 0));
4434  __ Branch(&runtime, eq, a0, Operand(zero_reg));
4435
4436  // Check that the first argument is a JSRegExp object.
4437  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4438  STATIC_ASSERT(kSmiTag == 0);
4439  __ JumpIfSmi(a0, &runtime);
4440  __ GetObjectType(a0, a1, a1);
4441  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4442
4443  // Check that the RegExp has been compiled (data contains a fixed array).
4444  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4445  if (FLAG_debug_code) {
4446    __ And(t0, regexp_data, Operand(kSmiTagMask));
4447    __ Check(nz,
4448             "Unexpected type for RegExp data, FixedArray expected",
4449             t0,
4450             Operand(zero_reg));
4451    __ GetObjectType(regexp_data, a0, a0);
4452    __ Check(eq,
4453             "Unexpected type for RegExp data, FixedArray expected",
4454             a0,
4455             Operand(FIXED_ARRAY_TYPE));
4456  }
4457
4458  // regexp_data: RegExp data (FixedArray)
4459  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4460  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4461  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4462
4463  // regexp_data: RegExp data (FixedArray)
4464  // Check that the number of captures fits in the static offsets vector buffer.
4465  __ lw(a2,
4466         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4467  // Calculate number of capture registers (number_of_captures + 1) * 2. This
4468  // uses the asumption that smis are 2 * their untagged value.
4469  STATIC_ASSERT(kSmiTag == 0);
4470  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4471  __ Addu(a2, a2, Operand(2));  // a2 was a smi.
4472  // Check that the static offsets vector buffer is large enough.
4473  __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4474
4475  // a2: Number of capture registers
4476  // regexp_data: RegExp data (FixedArray)
4477  // Check that the second argument is a string.
4478  __ lw(subject, MemOperand(sp, kSubjectOffset));
4479  __ JumpIfSmi(subject, &runtime);
4480  __ GetObjectType(subject, a0, a0);
4481  __ And(a0, a0, Operand(kIsNotStringMask));
4482  STATIC_ASSERT(kStringTag == 0);
4483  __ Branch(&runtime, ne, a0, Operand(zero_reg));
4484
4485  // Get the length of the string into a3.
4486  __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4487
4488  // a2: Number of capture registers
4489  // a3: Length of subject string as a smi
4490  // subject: Subject string
4491  // regexp_data: RegExp data (FixedArray)
4492  // Check that the third argument is a positive smi less than the subject
4493  // string length. A negative value will be greater (unsigned comparison).
4494  __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4495  __ And(at, a0, Operand(kSmiTagMask));
4496  __ Branch(&runtime, ne, at, Operand(zero_reg));
4497  __ Branch(&runtime, ls, a3, Operand(a0));
4498
4499  // a2: Number of capture registers
4500  // subject: Subject string
4501  // regexp_data: RegExp data (FixedArray)
4502  // Check that the fourth object is a JSArray object.
4503  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4504  __ JumpIfSmi(a0, &runtime);
4505  __ GetObjectType(a0, a1, a1);
4506  __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4507  // Check that the JSArray is in fast case.
4508  __ lw(last_match_info_elements,
4509         FieldMemOperand(a0, JSArray::kElementsOffset));
4510  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4511  __ Branch(&runtime, ne, a0, Operand(
4512      masm->isolate()->factory()->fixed_array_map()));
4513  // Check that the last match info has space for the capture registers and the
4514  // additional information.
4515  __ lw(a0,
4516         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4517  __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4518  __ sra(at, a0, kSmiTagSize);  // Untag length for comparison.
4519  __ Branch(&runtime, gt, a2, Operand(at));
4520
4521  // Reset offset for possibly sliced string.
4522  __ mov(t0, zero_reg);
4523  // subject: Subject string
4524  // regexp_data: RegExp data (FixedArray)
4525  // Check the representation and encoding of the subject string.
4526  Label seq_string;
4527  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4528  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4529  // First check for flat string.
4530  __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
4531  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4532  __ Branch(&seq_string, eq, a1, Operand(zero_reg));
4533
4534  // subject: Subject string
4535  // a0: instance type of subject string
4536  // regexp_data: RegExp data (FixedArray)
4537  // Check for flat cons string or sliced string.
4538  // A flat cons string is a cons string where the second part is the empty
4539  // string. In that case the subject string is just the first part of the cons
4540  // string. Also in this case the first part of the cons string is known to be
4541  // a sequential string or an external string.
4542  // In the case of a sliced string its offset has to be taken into account.
4543  Label cons_string, check_encoding;
4544  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4545  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
4546  __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
4547  __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
4548
4549  // String is sliced.
4550  __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4551  __ sra(t0, t0, kSmiTagSize);
4552  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4553  // t0: offset of sliced string, untagged.
4554  __ jmp(&check_encoding);
4555  // String is a cons string, check whether it is flat.
4556  __ bind(&cons_string);
4557  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4558  __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4559  __ Branch(&runtime, ne, a0, Operand(a1));
4560  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
4561  // Is first part of cons or parent of slice a flat string?
4562  __ bind(&check_encoding);
4563  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4564  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4565  STATIC_ASSERT(kSeqStringTag == 0);
4566  __ And(at, a0, Operand(kStringRepresentationMask));
4567  __ Branch(&runtime, ne, at, Operand(zero_reg));
4568
4569  __ bind(&seq_string);
4570  // subject: Subject string
4571  // regexp_data: RegExp data (FixedArray)
4572  // a0: Instance type of subject string
4573  STATIC_ASSERT(kStringEncodingMask == 4);
4574  STATIC_ASSERT(kAsciiStringTag == 4);
4575  STATIC_ASSERT(kTwoByteStringTag == 0);
4576  // Find the code object based on the assumptions above.
4577  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ascii.
4578  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
4579  __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
4580  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
4581  __ movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code in t1.
4582
4583  // Check that the irregexp code has been generated for the actual string
4584  // encoding. If it has, the field contains a code object; otherwise it contains
4585  // a smi (code flushing support).
4586  __ JumpIfSmi(t9, &runtime);
4587
4588  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4589  // t9: code
4590  // subject: Subject string
4591  // regexp_data: RegExp data (FixedArray)
4592  // Load used arguments before starting to push arguments for call to native
4593  // RegExp code to avoid handling changing stack height.
4594  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
4595  __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
4596
4597  // a1: previous index
4598  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
4599  // t9: code
4600  // subject: Subject string
4601  // regexp_data: RegExp data (FixedArray)
4602  // All checks done. Now push arguments for native regexp code.
4603  __ IncrementCounter(masm->isolate()->counters()->regexp_entry_native(),
4604                      1, a0, a2);
4605
4606  // Isolates: note we add an additional parameter here (isolate pointer).
4607  static const int kRegExpExecuteArguments = 8;
4608  static const int kParameterRegisters = 4;
4609  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
4610
4611  // Stack pointer now points to cell where return address is to be written.
4612  // Arguments are before that on the stack or in registers, meaning we
4613  // treat the return address as argument 5. Thus every argument after that
4614  // needs to be shifted back by 1. Since DirectCEntryStub will handle
4615  // allocating space for the c argument slots, we don't need to calculate
4616  // that into the argument positions on the stack. This is how the stack will
4617  // look (sp meaning the value of sp at this moment):
4618  // [sp + 4 * kPointerSize] - Argument 8
4619  // [sp + 3 * kPointerSize] - Argument 7
4620  // [sp + 2 * kPointerSize] - Argument 6
4621  // [sp + 1 * kPointerSize] - Argument 5
4622  // [sp + 0]                - saved ra
4623
4624  // Argument 8: Pass current isolate address.
4625  // CFunctionArgumentOperand handles MIPS stack argument slots.
4626  __ li(a0, Operand(ExternalReference::isolate_address()));
4627  __ sw(a0, MemOperand(sp, 4 * kPointerSize));
4628
4629  // Argument 7: Indicate that this is a direct call from JavaScript.
4630  __ li(a0, Operand(1));
4631  __ sw(a0, MemOperand(sp, 3 * kPointerSize));
4632
4633  // Argument 6: Start (high end) of backtracking stack memory area.
4634  __ li(a0, Operand(address_of_regexp_stack_memory_address));
4635  __ lw(a0, MemOperand(a0, 0));
4636  __ li(a2, Operand(address_of_regexp_stack_memory_size));
4637  __ lw(a2, MemOperand(a2, 0));
4638  __ addu(a0, a0, a2);
4639  __ sw(a0, MemOperand(sp, 2 * kPointerSize));
4640
4641  // Argument 5: static offsets vector buffer.
4642  __ li(a0, Operand(
4643        ExternalReference::address_of_static_offsets_vector(masm->isolate())));
4644  __ sw(a0, MemOperand(sp, 1 * kPointerSize));
4645
4646  // For arguments 4 and 3 get string length, calculate start of string data
4647  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
4648  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4649  __ Addu(t2, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
4650  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
4651  // Load the length from the original subject string from the previous stack
4652  // frame. Therefore we have to use fp, which points exactly to two pointer
4653  // sizes below the previous sp. (Because creating a new stack frame pushes
4654  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
4655  __ lw(a0, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
4656  // If slice offset is not 0, load the length from the original sliced string.
4657  // Argument 4, a3: End of string data
4658  // Argument 3, a2: Start of string data
4659  // Prepare start and end index of the input.
4660  __ sllv(t1, t0, a3);
4661  __ addu(t0, t2, t1);
4662  __ sllv(t1, a1, a3);
4663  __ addu(a2, t0, t1);
4664
4665  __ lw(t2, FieldMemOperand(a0, String::kLengthOffset));
4666  __ sra(t2, t2, kSmiTagSize);
4667  __ sllv(t1, t2, a3);
4668  __ addu(a3, t0, t1);
4669  // Argument 2 (a1): Previous index.
4670  // Already there
4671
4672  // Argument 1 (a0): Subject string.
4673  // Already there
4674
4675  // Locate the code entry and call it.
4676  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
4677  DirectCEntryStub stub;
4678  stub.GenerateCall(masm, t9);
4679
4680  __ LeaveExitFrame(false, no_reg);
4681
4682  // v0: result
4683  // subject: subject string (callee saved)
4684  // regexp_data: RegExp data (callee saved)
4685  // last_match_info_elements: Last match info elements (callee saved)
4686
4687  // Check the result.
4688
4689  Label success;
4690  __ Branch(&success, eq,
4691            v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
4692  Label failure;
4693  __ Branch(&failure, eq,
4694            v0, Operand(NativeRegExpMacroAssembler::FAILURE));
4695  // If not an exception, it can only be RETRY. Handle that in the runtime system.
4696  __ Branch(&runtime, ne,
4697            v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
4698  // The result must now be an exception. If there is no pending exception, a
4699  // stack overflow (on the backtrack stack) was detected in RegExp code, but
4700  // the exception has not been created yet. Handle that in the runtime system.
4701  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4702  __ li(a1, Operand(
4703      ExternalReference::the_hole_value_location(masm->isolate())));
4704  __ lw(a1, MemOperand(a1, 0));
4705  __ li(a2, Operand(ExternalReference(Isolate::k_pending_exception_address,
4706                                      masm->isolate())));
4707  __ lw(v0, MemOperand(a2, 0));
4708  __ Branch(&runtime, eq, v0, Operand(a1));
4709
4710  __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.
4711
4712  // Check if the exception is a termination. If so, throw as uncatchable.
4713  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
4714  Label termination_exception;
4715  __ Branch(&termination_exception, eq, v0, Operand(a0));
4716
4717  __ Throw(v0);  // Expects thrown value in v0.
4718
4719  __ bind(&termination_exception);
4720  __ ThrowUncatchable(TERMINATION, v0);  // Expects thrown value in v0.
4721
4722  __ bind(&failure);
4723  // For failure and exception return null.
4724  __ li(v0, Operand(masm->isolate()->factory()->null_value()));
4725  __ Addu(sp, sp, Operand(4 * kPointerSize));
4726  __ Ret();
4727
4728  // Process the result from the native regexp code.
4729  __ bind(&success);
4730  __ lw(a1,
4731         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4732  // Calculate number of capture registers (number_of_captures + 1) * 2.
4733  STATIC_ASSERT(kSmiTag == 0);
4734  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4735  __ Addu(a1, a1, Operand(2));  // a1 was a smi.
4736
4737  // a1: number of capture registers
4738  // subject: subject string
4739  // Store the capture count.
4740  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
4741  __ sw(a2, FieldMemOperand(last_match_info_elements,
4742                             RegExpImpl::kLastCaptureCountOffset));
4743  // Store last subject and last input.
4744  __ mov(a3, last_match_info_elements);  // Moved up to reduce latency.
4745  __ sw(subject,
4746         FieldMemOperand(last_match_info_elements,
4747                         RegExpImpl::kLastSubjectOffset));
4748  __ RecordWrite(a3, Operand(RegExpImpl::kLastSubjectOffset), a2, t0);
4749  __ sw(subject,
4750         FieldMemOperand(last_match_info_elements,
4751                         RegExpImpl::kLastInputOffset));
4752  __ mov(a3, last_match_info_elements);
4753  __ RecordWrite(a3, Operand(RegExpImpl::kLastInputOffset), a2, t0);
4754
4755  // Get the static offsets vector filled by the native regexp code.
4756  ExternalReference address_of_static_offsets_vector =
4757      ExternalReference::address_of_static_offsets_vector(masm->isolate());
4758  __ li(a2, Operand(address_of_static_offsets_vector));
4759
4760  // a1: number of capture registers
4761  // a2: offsets vector
4762  Label next_capture, done;
4763  // Capture register counter starts from number of capture registers and
4764  // counts down until wrapping after zero.
4765  __ Addu(a0,
4766         last_match_info_elements,
4767         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
4768  __ bind(&next_capture);
4769  __ Subu(a1, a1, Operand(1));
4770  __ Branch(&done, lt, a1, Operand(zero_reg));
4771  // Read the value from the static offsets vector buffer.
4772  __ lw(a3, MemOperand(a2, 0));
4773  __ addiu(a2, a2, kPointerSize);
4774  // Store the smi value in the last match info.
4775  __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
4776  __ sw(a3, MemOperand(a0, 0));
4777  __ Branch(&next_capture, USE_DELAY_SLOT);
4778  __ addiu(a0, a0, kPointerSize);   // In branch delay slot.
4779
4780  __ bind(&done);
4781
4782  // Return last match info.
4783  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
4784  __ Addu(sp, sp, Operand(4 * kPointerSize));
4785  __ Ret();
4786
4787  // Do the runtime call to execute the regexp.
4788  __ bind(&runtime);
4789  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4790#endif  // V8_INTERPRETED_REGEXP
4791}
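
// Illustrative sketch (not part of the stub): the DirectCEntry call above hands
// the generated Irregexp code eight arguments. Written as a rough C-style
// prototype (the function and parameter names are invented for this sketch; the
// real entry point is the code object held in t9):
//
//   int Match(String* subject,               // a0
//             int previous_index,            // a1
//             const byte* input_start,       // a2
//             const byte* input_end,         // a3
//             int* static_offsets_vector,    // sp + 1 * kPointerSize
//             byte* stack_area_top,          // sp + 2 * kPointerSize
//             int direct_call,               // sp + 3 * kPointerSize
//             Isolate* isolate);             // sp + 4 * kPointerSize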
4792
4793
4794void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4795  const int kMaxInlineLength = 100;
4796  Label slowcase;
4797  Label done;
4798  __ lw(a1, MemOperand(sp, kPointerSize * 2));
4799  STATIC_ASSERT(kSmiTag == 0);
4800  STATIC_ASSERT(kSmiTagSize == 1);
4801  __ JumpIfNotSmi(a1, &slowcase);
4802  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
4803  // Smi-tagging is equivalent to multiplying by 2.
4804  // Allocate RegExpResult followed by FixedArray with size in a2.
4805  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
4806  // Elements:  [Map][Length][..elements..]
4807  // Size of JSArray with two in-object properties and the header of a
4808  // FixedArray.
4809  int objects_size =
4810      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
4811  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
4812  __ Addu(a2, t1, Operand(objects_size));
4813  __ AllocateInNewSpace(
4814      a2,  // In: Size, in words.
4815      v0,  // Out: Start of allocation (tagged).
4816      a3,  // Scratch register.
4817      t0,  // Scratch register.
4818      &slowcase,
4819      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
4820  // v0: Start of allocated area, object-tagged.
4821  // a1: Number of elements in array, as smi.
4822  // t1: Number of elements, untagged.
4823
4824  // Set JSArray map to global.regexp_result_map().
4825  // Set empty properties FixedArray.
4826  // Set elements to point to FixedArray allocated right after the JSArray.
4827  // Interleave operations for better latency.
4828  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
4829  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
4830  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
4831  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
4832  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
4833  __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
4834  __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
4835  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
4836
4837  // Set input, index and length fields from arguments.
4838  __ lw(a1, MemOperand(sp, kPointerSize * 0));
4839  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
4840  __ lw(a1, MemOperand(sp, kPointerSize * 1));
4841  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
4842  __ lw(a1, MemOperand(sp, kPointerSize * 2));
4843  __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
4844
4845  // Fill out the elements FixedArray.
4846  // v0: JSArray, tagged.
4847  // a3: FixedArray, tagged.
4848  // t1: Number of elements in array, untagged.
4849
4850  // Set map.
4851  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
4852  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
4853  // Set FixedArray length.
4854  __ sll(t2, t1, kSmiTagSize);
4855  __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4856  // Fill contents of fixed-array with the-hole.
4857  __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
4858  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4859  // Fill fixed array elements with hole.
4860  // v0: JSArray, tagged.
4861  // a2: the hole.
4862  // a3: Start of elements in FixedArray.
4863  // t1: Number of elements to fill.
4864  Label loop;
4865  __ sll(t1, t1, kPointerSizeLog2);  // Convert num elements to num bytes.
4866  __ addu(t1, t1, a3);  // Point past last element to store.
4867  __ bind(&loop);
4868  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 past end of elem.
4869  __ sw(a2, MemOperand(a3));
4870  __ Branch(&loop, USE_DELAY_SLOT);
4871  __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
4872
4873  __ bind(&done);
4874  __ Addu(sp, sp, Operand(3 * kPointerSize));
4875  __ Ret();
4876
4877  __ bind(&slowcase);
4878  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4879}
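
// Illustrative sketch (not part of the stub): the fast path above carves the
// result and its elements array out of a single new-space allocation and then
// pre-fills the elements with the hole:
//
//   | JSRegExpResult: map | properties | elements | length | index | input |
//   | FixedArray:     map | length     | slot 0 .. slot n-1 (= the hole)   |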
4880
4881
4882void CallFunctionStub::Generate(MacroAssembler* masm) {
4883  Label slow;
4884
4885  // The receiver might implicitly be the global object. This is
4886  // indicated by passing the hole as the receiver to the call
4887  // function stub.
4888  if (ReceiverMightBeImplicit()) {
4889    Label call;
4890    // Get the receiver from the stack.
4891    // function, receiver [, arguments]
4892    __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
4893    // Call as function is indicated with the hole.
4894    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4895    __ Branch(&call, ne, t0, Operand(at));
4896    // Patch the receiver on the stack with the global receiver object.
4897    __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4898    __ lw(a1, FieldMemOperand(a1, GlobalObject::kGlobalReceiverOffset));
4899    __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4900    __ bind(&call);
4901  }
4902
4903  // Get the function to call from the stack.
4904  // function, receiver [, arguments]
4905  __ lw(a1, MemOperand(sp, (argc_ + 1) * kPointerSize));
4906
4907  // Check that the function is really a JavaScript function.
4908  // a1: pushed function (to be verified)
4909  __ JumpIfSmi(a1, &slow);
4910  // Get the map of the function object.
4911  __ GetObjectType(a1, a2, a2);
4912  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
4913
4914  // Fast-case: Invoke the function now.
4915  // a1: pushed function
4916  ParameterCount actual(argc_);
4917
4918  if (ReceiverMightBeImplicit()) {
4919    Label call_as_function;
4920    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
4921    __ Branch(&call_as_function, eq, t0, Operand(at));
4922    __ InvokeFunction(a1,
4923                      actual,
4924                      JUMP_FUNCTION,
4925                      NullCallWrapper(),
4926                      CALL_AS_METHOD);
4927    __ bind(&call_as_function);
4928  }
4929  __ InvokeFunction(a1,
4930                    actual,
4931                    JUMP_FUNCTION,
4932                    NullCallWrapper(),
4933                    CALL_AS_FUNCTION);
4934
4935  // Slow-case: Non-function called.
4936  __ bind(&slow);
4937  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4938  // of the original receiver from the call site).
4939  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
4940  __ li(a0, Operand(argc_));  // Setup the number of arguments.
4941  __ mov(a2, zero_reg);
4942  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
4943  __ SetCallKind(t1, CALL_AS_METHOD);
4944  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
4945          RelocInfo::CODE_TARGET);
4946}
4947
4948
4949// Unfortunately you have to run without snapshots to see most of these
4950// names in the profile since most compare stubs end up in the snapshot.
4951void CompareStub::PrintName(StringStream* stream) {
4952  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4953         (lhs_.is(a1) && rhs_.is(a0)));
4954  const char* cc_name;
4955  switch (cc_) {
4956    case lt: cc_name = "LT"; break;
4957    case gt: cc_name = "GT"; break;
4958    case le: cc_name = "LE"; break;
4959    case ge: cc_name = "GE"; break;
4960    case eq: cc_name = "EQ"; break;
4961    case ne: cc_name = "NE"; break;
4962    default: cc_name = "UnknownCondition"; break;
4963  }
4964  bool is_equality = cc_ == eq || cc_ == ne;
4965  stream->Add("CompareStub_%s", cc_name);
4966  stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
4967  stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
4968  if (strict_ && is_equality) stream->Add("_STRICT");
4969  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
4970  if (!include_number_compare_) stream->Add("_NO_NUMBER");
4971  if (!include_smi_compare_) stream->Add("_NO_SMI");
4972}
4973
4974
4975int CompareStub::MinorKey() {
4976  // Encode the two parameters in a unique 16 bit value.
4977  ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
4978  ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
4979         (lhs_.is(a1) && rhs_.is(a0)));
4980  return ConditionField::encode(static_cast<unsigned>(cc_))
4981         | RegisterField::encode(lhs_.is(a0))
4982         | StrictField::encode(strict_)
4983         | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
4984         | IncludeSmiCompareField::encode(include_smi_compare_);
4985}
4986
4987
4988// StringCharCodeAtGenerator.
4989void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
4990  Label flat_string;
4991  Label ascii_string;
4992  Label got_char_code;
4993  Label sliced_string;
4994
4995  ASSERT(!t0.is(scratch_));
4996  ASSERT(!t0.is(index_));
4997  ASSERT(!t0.is(result_));
4998  ASSERT(!t0.is(object_));
4999
5000  // If the receiver is a smi trigger the non-string case.
5001  __ JumpIfSmi(object_, receiver_not_string_);
5002
5003  // Fetch the instance type of the receiver into result register.
5004  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5005  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5006  // If the receiver is not a string trigger the non-string case.
5007  __ And(t0, result_, Operand(kIsNotStringMask));
5008  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5009
5010  // If the index is non-smi trigger the non-smi case.
5011  __ JumpIfNotSmi(index_, &index_not_smi_);
5012
5013  // Put smi-tagged index into scratch register.
5014  __ mov(scratch_, index_);
5015  __ bind(&got_smi_index_);
5016
5017  // Check for index out of range.
5018  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
5019  __ Branch(index_out_of_range_, ls, t0, Operand(scratch_));
5020
5021  // We need special handling for non-flat strings.
5022  STATIC_ASSERT(kSeqStringTag == 0);
5023  __ And(t0, result_, Operand(kStringRepresentationMask));
5024  __ Branch(&flat_string, eq, t0, Operand(zero_reg));
5025
5026  // Handle non-flat strings.
5027  __ And(result_, result_, Operand(kStringRepresentationMask));
5028  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5029  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
5030  __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
5031  __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
5032
5033  // ConsString.
5034  // Check whether the right hand side is the empty string (i.e. if
5035  // this is really a flat string in a cons string). If that is not
5036  // the case we would rather go to the runtime system now to flatten
5037  // the string.
5038  Label assure_seq_string;
5039  __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
5040  __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
5041  __ Branch(&call_runtime_, ne, result_, Operand(t0));
5042
5043  // Get the first of the two strings and load its instance type.
5044  __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
5045  __ jmp(&assure_seq_string);
5046
5047  // SlicedString, unpack and add offset.
5048  __ bind(&sliced_string);
5049  __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
5050  __ addu(scratch_, scratch_, result_);
5051  __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
5052
5053  // Assure that we are dealing with a sequential string. Go to runtime if not.
5054  __ bind(&assure_seq_string);
5055  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5056  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5057  // Check that parent is not an external string. Go to runtime otherwise.
5058  STATIC_ASSERT(kSeqStringTag == 0);
5059
5060  __ And(t0, result_, Operand(kStringRepresentationMask));
5061  __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
5062
5063  // Check for 1-byte or 2-byte string.
5064  __ bind(&flat_string);
5065  STATIC_ASSERT(kAsciiStringTag != 0);
5066  __ And(t0, result_, Operand(kStringEncodingMask));
5067  __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
5068
5069  // 2-byte string.
5070  // Load the 2-byte character code into the result register. We can
5071  // add without shifting since the smi tag size is the log2 of the
5072  // number of bytes in a two-byte character.
5073  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
5074  __ Addu(scratch_, object_, Operand(scratch_));
5075  __ lhu(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
5076  __ Branch(&got_char_code);
5077
5078  // ASCII string.
5079  // Load the byte into the result register.
5080  __ bind(&ascii_string);
5081
5082  __ srl(t0, scratch_, kSmiTagSize);
5083  __ Addu(scratch_, object_, t0);
5084
5085  __ lbu(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
5086
5087  __ bind(&got_char_code);
5088  __ sll(result_, result_, kSmiTagSize);
5089  __ bind(&exit_);
5090}
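
// Illustrative sketch (not part of the generator): the fast path above is
// roughly charCodeAt for flat strings, in pseudo-C++ (all bailouts go to the
// labels handled in GenerateSlow):
//
//   if (!object->IsString() || !index->IsSmi()) bail out;
//   if (index >= object->length()) index out of range;
//   resolve a flat cons or sliced string to its sequential backing string;
//   result = two_byte ? chars16[index] : chars8[index];  // Then Smi-tag it.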
5091
5092
5093void StringCharCodeAtGenerator::GenerateSlow(
5094    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5095  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5096
5097  // Index is not a smi.
5098  __ bind(&index_not_smi_);
5099  // If index is a heap number, try converting it to an integer.
5100  __ CheckMap(index_,
5101              scratch_,
5102              Heap::kHeapNumberMapRootIndex,
5103              index_not_number_,
5104              DONT_DO_SMI_CHECK);
5105  call_helper.BeforeCall(masm);
5106  // Consumed by runtime conversion function:
5107  __ Push(object_, index_, index_);
5108  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5109    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5110  } else {
5111    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5112    // NumberToSmi discards numbers that are not exact integers.
5113    __ CallRuntime(Runtime::kNumberToSmi, 1);
5114  }
5115
5116  // Save the conversion result before the pop instructions below
5117  // have a chance to overwrite it.
5118
5119  __ Move(scratch_, v0);
5120
5121  __ pop(index_);
5122  __ pop(object_);
5123  // Reload the instance type.
5124  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5125  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5126  call_helper.AfterCall(masm);
5127  // If index is still not a smi, it must be out of range.
5128  __ JumpIfNotSmi(scratch_, index_out_of_range_);
5129  // Otherwise, return to the fast path.
5130  __ Branch(&got_smi_index_);
5131
5132  // Call runtime. We get here when the receiver is a string and the
5133  // index is a number, but the code of getting the actual character
5134  // is too complex (e.g., when the string needs to be flattened).
5135  __ bind(&call_runtime_);
5136  call_helper.BeforeCall(masm);
5137  __ Push(object_, index_);
5138  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5139
5140  __ Move(result_, v0);
5141
5142  call_helper.AfterCall(masm);
5143  __ jmp(&exit_);
5144
5145  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5146}
5147
5148
5149// -------------------------------------------------------------------------
5150// StringCharFromCodeGenerator
5151
5152void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5153  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5154
5155  ASSERT(!t0.is(result_));
5156  ASSERT(!t0.is(code_));
5157
5158  STATIC_ASSERT(kSmiTag == 0);
5159  STATIC_ASSERT(kSmiShiftSize == 0);
5160  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5161  __ And(t0,
5162         code_,
5163         Operand(kSmiTagMask |
5164                 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5165  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
5166
5167  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5168  // At this point code register contains smi tagged ASCII char code.
5169  STATIC_ASSERT(kSmiTag == 0);
5170  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5171  __ Addu(result_, result_, t0);
5172  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5173  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5174  __ Branch(&slow_case_, eq, result_, Operand(t0));
5175  __ bind(&exit_);
5176}
5177
5178
5179void StringCharFromCodeGenerator::GenerateSlow(
5180    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5181  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5182
5183  __ bind(&slow_case_);
5184  call_helper.BeforeCall(masm);
5185  __ push(code_);
5186  __ CallRuntime(Runtime::kCharFromCode, 1);
5187  __ Move(result_, v0);
5188
5189  call_helper.AfterCall(masm);
5190  __ Branch(&exit_);
5191
5192  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5193}
5194
5195
5196// -------------------------------------------------------------------------
5197// StringCharAtGenerator
5198
5199void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5200  char_code_at_generator_.GenerateFast(masm);
5201  char_from_code_generator_.GenerateFast(masm);
5202}
5203
5204
5205void StringCharAtGenerator::GenerateSlow(
5206    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5207  char_code_at_generator_.GenerateSlow(masm, call_helper);
5208  char_from_code_generator_.GenerateSlow(masm, call_helper);
5209}
5210
5211
5212class StringHelper : public AllStatic {
5213 public:
5214  // Generate code for copying characters using a simple loop. This should only
5215  // be used in places where the number of characters is small and the
5216  // additional setup and checking in GenerateCopyCharactersLong adds too much
5217  // overhead. Copying of overlapping regions is not supported.
5218  // Dest register ends at the position after the last character written.
5219  static void GenerateCopyCharacters(MacroAssembler* masm,
5220                                     Register dest,
5221                                     Register src,
5222                                     Register count,
5223                                     Register scratch,
5224                                     bool ascii);
5225
5226  // Generate code for copying a large number of characters. This function
5227  // is allowed to spend extra time setting up conditions to make copying
5228  // faster. Copying of overlapping regions is not supported.
5229  // Dest register ends at the position after the last character written.
5230  static void GenerateCopyCharactersLong(MacroAssembler* masm,
5231                                         Register dest,
5232                                         Register src,
5233                                         Register count,
5234                                         Register scratch1,
5235                                         Register scratch2,
5236                                         Register scratch3,
5237                                         Register scratch4,
5238                                         Register scratch5,
5239                                         int flags);
5240
5241
5242  // Probe the symbol table for a two character string. If the string is
5243  // not found by probing a jump to the label not_found is performed. This jump
5244  // does not guarantee that the string is not in the symbol table. If the
5245  // string is found the code falls through with the string in register r0.
5246  // Contents of both c1 and c2 registers are modified. At the exit c1 is
5247  // guaranteed to contain halfword with low and high bytes equal to
5248  // initial contents of c1 and c2 respectively.
5249  static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5250                                                   Register c1,
5251                                                   Register c2,
5252                                                   Register scratch1,
5253                                                   Register scratch2,
5254                                                   Register scratch3,
5255                                                   Register scratch4,
5256                                                   Register scratch5,
5257                                                   Label* not_found);
5258
5259  // Generate string hash.
5260  static void GenerateHashInit(MacroAssembler* masm,
5261                               Register hash,
5262                               Register character);
5263
5264  static void GenerateHashAddCharacter(MacroAssembler* masm,
5265                                       Register hash,
5266                                       Register character);
5267
5268  static void GenerateHashGetHash(MacroAssembler* masm,
5269                                  Register hash);
5270
5271 private:
5272  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
5273};
5274
5275
5276void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5277                                          Register dest,
5278                                          Register src,
5279                                          Register count,
5280                                          Register scratch,
5281                                          bool ascii) {
5282  Label loop;
5283  Label done;
5284  // This loop just copies one character at a time, as it is only used for
5285  // very short strings.
5286  if (!ascii) {
5287    __ addu(count, count, count);
5288  }
5289  __ Branch(&done, eq, count, Operand(zero_reg));
5290  __ addu(count, dest, count);  // Count now points to the last dest byte.
5291
5292  __ bind(&loop);
5293  __ lbu(scratch, MemOperand(src));
5294  __ addiu(src, src, 1);
5295  __ sb(scratch, MemOperand(dest));
5296  __ addiu(dest, dest, 1);
5297  __ Branch(&loop, lt, dest, Operand(count));
5298
5299  __ bind(&done);
5300}
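
// Illustrative sketch (not part of the helper): ignoring registers, the loop
// above is a plain forward byte copy; for two-byte strings the count is doubled
// first, so it always moves `count` bytes:
//
//   while (dest < dest_end) *dest++ = *src++;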
5301
5302
5303enum CopyCharactersFlags {
5304  COPY_ASCII = 1,
5305  DEST_ALWAYS_ALIGNED = 2
5306};
5307
5308
5309void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5310                                              Register dest,
5311                                              Register src,
5312                                              Register count,
5313                                              Register scratch1,
5314                                              Register scratch2,
5315                                              Register scratch3,
5316                                              Register scratch4,
5317                                              Register scratch5,
5318                                              int flags) {
5319  bool ascii = (flags & COPY_ASCII) != 0;
5320  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5321
5322  if (dest_always_aligned && FLAG_debug_code) {
5323    // Check that destination is actually word aligned if the flag says
5324    // that it is.
5325    __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5326    __ Check(eq,
5327             "Destination of copy not aligned.",
5328             scratch4,
5329             Operand(zero_reg));
5330  }
5331
5332  const int kReadAlignment = 4;
5333  const int kReadAlignmentMask = kReadAlignment - 1;
5334  // Ensure that reading an entire aligned word containing the last character
5335  // of a string will not read outside the allocated area (because we pad up
5336  // to kObjectAlignment).
5337  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5338  // Assumes word reads and writes are little endian.
5339  // Nothing to do for zero characters.
5340  Label done;
5341
5342  if (!ascii) {
5343    __ addu(count, count, count);
5344  }
5345  __ Branch(&done, eq, count, Operand(zero_reg));
5346
5347  Label byte_loop;
5348  // Must copy at least eight bytes, otherwise just do it one byte at a time.
5349  __ Subu(scratch1, count, Operand(8));
5350  __ Addu(count, dest, Operand(count));
5351  Register limit = count;  // Read until src equals this.
5352  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5353
5354  if (!dest_always_aligned) {
5355    // Align dest by byte copying. Copies between zero and three bytes.
5356    __ And(scratch4, dest, Operand(kReadAlignmentMask));
5357    Label dest_aligned;
5358    __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5359    Label aligned_loop;
5360    __ bind(&aligned_loop);
5361    __ lbu(scratch1, MemOperand(src));
5362    __ addiu(src, src, 1);
5363    __ sb(scratch1, MemOperand(dest));
5364    __ addiu(dest, dest, 1);
5365    __ addiu(scratch4, scratch4, 1);
5366    __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5367    __ bind(&dest_aligned);
5368  }
5369
5370  Label simple_loop;
5371
5372  __ And(scratch4, src, Operand(kReadAlignmentMask));
5373  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5374
5375  // Loop for src/dst that are not aligned the same way.
5376  // This loop uses lwl and lwr instructions. These instructions
5377  // depend on the endianness, and the implementation assumes little-endian.
5378  {
5379    Label loop;
5380    __ bind(&loop);
5381    __ lwr(scratch1, MemOperand(src));
5382    __ Addu(src, src, Operand(kReadAlignment));
5383    __ lwl(scratch1, MemOperand(src, -1));
5384    __ sw(scratch1, MemOperand(dest));
5385    __ Addu(dest, dest, Operand(kReadAlignment));
5386    __ Subu(scratch2, limit, dest);
5387    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5388  }
5389
5390  __ Branch(&byte_loop);
5391
5392  // Simple loop.
5393  // Copy words from src to dest, until less than four bytes left.
5394  // Both src and dest are word aligned.
5395  __ bind(&simple_loop);
5396  {
5397    Label loop;
5398    __ bind(&loop);
5399    __ lw(scratch1, MemOperand(src));
5400    __ Addu(src, src, Operand(kReadAlignment));
5401    __ sw(scratch1, MemOperand(dest));
5402    __ Addu(dest, dest, Operand(kReadAlignment));
5403    __ Subu(scratch2, limit, dest);
5404    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5405  }
5406
5407  // Copy bytes from src to dest until dest hits limit.
5408  __ bind(&byte_loop);
5409  // Test if dest has already reached the limit.
5410  __ Branch(&done, ge, dest, Operand(limit));
5411  __ lbu(scratch1, MemOperand(src));
5412  __ addiu(src, src, 1);
5413  __ sb(scratch1, MemOperand(dest));
5414  __ addiu(dest, dest, 1);
5415  __ Branch(&byte_loop);
5416
5417  __ bind(&done);
5418}
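
// Illustrative sketch (not part of the helper): the long copy above follows a
// memcpy-style strategy, roughly:
//
//   if (count < 8 bytes)           copy byte by byte;
//   else {
//     byte-copy until dest is word aligned (unless DEST_ALWAYS_ALIGNED);
//     if (src is word aligned)     copy word by word (lw/sw);
//     else                         copy with unaligned loads (lwr/lwl) + sw;
//     byte-copy the remaining tail;
//   }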
5419
5420
5421void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5422                                                        Register c1,
5423                                                        Register c2,
5424                                                        Register scratch1,
5425                                                        Register scratch2,
5426                                                        Register scratch3,
5427                                                        Register scratch4,
5428                                                        Register scratch5,
5429                                                        Label* not_found) {
5430  // Register scratch3 is the general scratch register in this function.
5431  Register scratch = scratch3;
5432
5433  // Make sure that both characters are not digits, as such strings have a
5434  // different hash algorithm. Don't try to look for these in the symbol table.
5435  Label not_array_index;
5436  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5437  __ Branch(&not_array_index,
5438            Ugreater,
5439            scratch,
5440            Operand(static_cast<int>('9' - '0')));
5441  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5442
5443  // If the check failed, combine both characters into a single halfword.
5444  // This is required by the contract of the method: code at the
5445  // not_found branch expects this combination in c1 register.
5446  Label tmp;
5447  __ sll(scratch1, c2, kBitsPerByte);
5448  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5449  __ Or(c1, c1, scratch1);
5450  __ bind(&tmp);
5451  __ Branch(not_found,
5452            Uless_equal,
5453            scratch,
5454            Operand(static_cast<int>('9' - '0')));
5455
5456  __ bind(&not_array_index);
5457  // Calculate the two character string hash.
5458  Register hash = scratch1;
5459  StringHelper::GenerateHashInit(masm, hash, c1);
5460  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5461  StringHelper::GenerateHashGetHash(masm, hash);
5462
5463  // Collect the two characters in a register.
5464  Register chars = c1;
5465  __ sll(scratch, c2, kBitsPerByte);
5466  __ Or(chars, chars, scratch);
5467
5468  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5469  // hash:  hash of two character string.
5470
5471  // Load symbol table.
5472  // Load address of first element of the symbol table.
5473  Register symbol_table = c2;
5474  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5475
5476  Register undefined = scratch4;
5477  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5478
5479  // Calculate capacity mask from the symbol table capacity.
5480  Register mask = scratch2;
5481  __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5482  __ sra(mask, mask, 1);
5483  __ Addu(mask, mask, -1);
5484
5485  // Calculate untagged address of the first element of the symbol table.
5486  Register first_symbol_table_element = symbol_table;
5487  __ Addu(first_symbol_table_element, symbol_table,
5488         Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5489
5490  // Registers.
5491  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5492  // hash:  hash of two character string
5493  // mask:  capacity mask
5494  // first_symbol_table_element: address of the first element of
5495  //                             the symbol table
5496  // undefined: the undefined object
5497  // scratch: -
5498
5499  // Perform a number of probes in the symbol table.
5500  static const int kProbes = 4;
5501  Label found_in_symbol_table;
5502  Label next_probe[kProbes];
5503  Register candidate = scratch5;  // Scratch register contains candidate.
5504  for (int i = 0; i < kProbes; i++) {
5505    // Calculate entry in symbol table.
5506    if (i > 0) {
5507      __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5508    } else {
5509      __ mov(candidate, hash);
5510    }
5511
5512    __ And(candidate, candidate, Operand(mask));
5513
5514    // Load the entry from the symbol table.
5515    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5516    __ sll(scratch, candidate, kPointerSizeLog2);
5517    __ Addu(scratch, scratch, first_symbol_table_element);
5518    __ lw(candidate, MemOperand(scratch));
5519
5520    // If entry is undefined no string with this hash can be found.
5521    Label is_string;
5522    __ GetObjectType(candidate, scratch, scratch);
5523    __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5524
5525    __ Branch(not_found, eq, undefined, Operand(candidate));
5526    // Must be null (deleted entry).
5527    if (FLAG_debug_code) {
5528      __ LoadRoot(scratch, Heap::kNullValueRootIndex);
5529      __ Assert(eq, "oddball in symbol table is not undefined or null",
5530          scratch, Operand(candidate));
5531    }
5532    __ jmp(&next_probe[i]);
5533
5534    __ bind(&is_string);
5535
5536    // Check that the candidate is a non-external ASCII string.  The instance
5537    // type is still in the scratch register from the GetObjectType
5538    // operation.
5539    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5540
5541    // If length is not 2 the string is not a candidate.
5542    __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5543    __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5544
5545    // Check if the two characters match.
5546    // Assumes that the halfword load is little endian.
5547    __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5548    __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5549    __ bind(&next_probe[i]);
5550  }
5551
5552  // No matching 2 character string found by probing.
5553  __ jmp(not_found);
5554
5555  // Scratch register contains result when we fall through to here.
5556  Register result = candidate;
5557  __ bind(&found_in_symbol_table);
5558  __ mov(v0, result);
5559}
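
// Illustrative sketch (not part of the helper): the probes above perform an
// open-addressed lookup in the symbol table, roughly:
//
//   for (int i = 0; i < kProbes; i++) {
//     int entry = (hash + SymbolTable::GetProbeOffset(i)) & capacity_mask;
//     Object* candidate = symbol_table[entry];
//     if (candidate == undefined) goto not_found;        // Hash not present.
//     if (candidate is a sequential ASCII string &&
//         candidate->length() == 2 &&
//         its two characters == chars) return candidate;  // Falls through.
//   }
//   goto not_found;  // Give up after kProbes attempts.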
5560
5561
5562void StringHelper::GenerateHashInit(MacroAssembler* masm,
5563                                      Register hash,
5564                                      Register character) {
5565  // hash = character + (character << 10);
5566  __ sll(hash, character, 10);
5567  __ addu(hash, hash, character);
5568  // hash ^= hash >> 6;
5569  __ sra(at, hash, 6);
5570  __ xor_(hash, hash, at);
5571}
5572
5573
5574void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5575                                              Register hash,
5576                                              Register character) {
5577  // hash += character;
5578  __ addu(hash, hash, character);
5579  // hash += hash << 10;
5580  __ sll(at, hash, 10);
5581  __ addu(hash, hash, at);
5582  // hash ^= hash >> 6;
5583  __ sra(at, hash, 6);
5584  __ xor_(hash, hash, at);
5585}
5586
5587
5588void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5589                                         Register hash) {
5590  // hash += hash << 3;
5591  __ sll(at, hash, 3);
5592  __ addu(hash, hash, at);
5593  // hash ^= hash >> 11;
5594  __ sra(at, hash, 11);
5595  __ xor_(hash, hash, at);
5596  // hash += hash << 15;
5597  __ sll(at, hash, 15);
5598  __ addu(hash, hash, at);
5599
5600  // if (hash == 0) hash = 27;
5601  __ ori(at, zero_reg, 27);
5602  __ movz(hash, at, hash);
5603}
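
// Illustrative sketch (not part of the generators): taken together, the three
// helpers above compute the string hash used by the symbol table probe. A
// standalone C++ sketch (the generated code uses arithmetic right shifts; plain
// unsigned shifts are shown here for simplicity):
//
//   unsigned StringHashSketch(const unsigned char* chars, int length) {
//     unsigned hash = 0;
//     for (int i = 0; i < length; i++) {
//       hash += chars[i];      // GenerateHashInit / GenerateHashAddCharacter.
//       hash += hash << 10;
//       hash ^= hash >> 6;
//     }
//     hash += hash << 3;       // GenerateHashGetHash.
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     return hash == 0 ? 27 : hash;
//   }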
5604
5605
5606void SubStringStub::Generate(MacroAssembler* masm) {
5607  Label sub_string_runtime;
5608  // Stack frame on entry.
5609  //  ra: return address
5610  //  sp[0]: to
5611  //  sp[4]: from
5612  //  sp[8]: string
5613
5614  // This stub is called from the native-call %_SubString(...), so
5615  // nothing can be assumed about the arguments. It is tested that:
5616  //  "string" is a sequential string,
5617  //  both "from" and "to" are smis, and
5618  //  0 <= from <= to <= string.length.
5619  // If any of these assumptions fail, we call the runtime system.
5620
5621  static const int kToOffset = 0 * kPointerSize;
5622  static const int kFromOffset = 1 * kPointerSize;
5623  static const int kStringOffset = 2 * kPointerSize;
5624
5625  Register to = t2;
5626  Register from = t3;
5627
5628  if (FLAG_string_slices) {
5629    __ nop();  // Jumping as the first instruction would crash code generation.
5630    __ jmp(&sub_string_runtime);
5631  }
5632
5633  // Check bounds and smi-ness.
5634  __ lw(to, MemOperand(sp, kToOffset));
5635  __ lw(from, MemOperand(sp, kFromOffset));
5636  STATIC_ASSERT(kFromOffset == kToOffset + 4);
5637  STATIC_ASSERT(kSmiTag == 0);
5638  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5639
5640  __ JumpIfNotSmi(from, &sub_string_runtime);
5641  __ JumpIfNotSmi(to, &sub_string_runtime);
5642
5643  __ sra(a3, from, kSmiTagSize);  // Remove smi tag.
5644  __ sra(t5, to, kSmiTagSize);  // Remove smi tag.
5645
5646  // a3: from index (untagged smi)
5647  // t5: to index (untagged smi)
5648
5649  __ Branch(&sub_string_runtime, lt, a3, Operand(zero_reg));  // From < 0.
5650
5651  __ subu(a2, t5, a3);
5652  __ Branch(&sub_string_runtime, gt, a3, Operand(t5));  // Fail if from > to.
5653
5654  // Special handling of sub-strings of length 1 and 2. One character strings
5655  // are handled in the runtime system (looked up in the single character
5656  // cache). Two character strings are looked for in the symbol cache.
5657  __ Branch(&sub_string_runtime, lt, a2, Operand(2));
5658
5659  // Both to and from are smis.
5660
5661  // a2: result string length
5662  // a3: from index (untagged smi)
5663  // t2: (a.k.a. to): to (smi)
5664  // t3: (a.k.a. from): from offset (smi)
5665  // t5: to index (untagged smi)
5666
5667  // Make sure first argument is a sequential (or flat) string.
5668  __ lw(t1, MemOperand(sp, kStringOffset));
5669  __ Branch(&sub_string_runtime, eq, t1, Operand(kSmiTagMask));
5670
5671  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
5672  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
5673  __ And(t4, a1, Operand(kIsNotStringMask));
5674
5675  __ Branch(&sub_string_runtime, ne, t4, Operand(zero_reg));
5676
5677  // a1: instance type
5678  // a2: result string length
5679  // a3: from index (untagged smi)
5680  // t1: string
5681  // t2: (a.k.a. to): to (smi)
5682  // t3: (a.k.a. from): from offset (smi)
5683  // t5: to index (untagged smi)
5684
5685  Label seq_string;
5686  __ And(t0, a1, Operand(kStringRepresentationMask));
5687  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
5688  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
5689
5690  // External strings go to runtime.
5691  __ Branch(&sub_string_runtime, gt, t0, Operand(kConsStringTag));
5692
5693  // Sequential strings are handled directly.
5694  __ Branch(&seq_string, lt, t0, Operand(kConsStringTag));
5695
5696  // Cons string. Try to recurse (once) on the first substring.
5697  // (This adds a little more generality than necessary to handle flattened
5698  // cons strings, but not much).
5699  __ lw(t1, FieldMemOperand(t1, ConsString::kFirstOffset));
5700  __ lw(t0, FieldMemOperand(t1, HeapObject::kMapOffset));
5701  __ lbu(a1, FieldMemOperand(t0, Map::kInstanceTypeOffset));
5702  STATIC_ASSERT(kSeqStringTag == 0);
5703  // Cons and External strings go to runtime.
5704  __ Branch(&sub_string_runtime, ne, a1, Operand(kStringRepresentationMask));
5705
5706  // Definitely a sequential string.
5707  __ bind(&seq_string);
5708
5709  // a1: instance type
5710  // a2: result string length
5711  // a3: from index (untagged smi)
5712  // t1: string
5713  // t2: (a.k.a. to): to (smi)
5714  // t3: (a.k.a. from): from offset (smi)
5715  // t5: to index (untagged smi)
5716
5717  __ lw(t0, FieldMemOperand(t1, String::kLengthOffset));
5718  __ Branch(&sub_string_runtime, lt, t0, Operand(to));  // Fail if to > length.
5719  to = no_reg;
5720
5721  // a1: instance type
5722  // a2: result string length
5723  // a3: from index (untagged smi)
5724  // t1: string
5725  // t3: (a.k.a. from): from offset (smi)
5726  // t5: to index (untagged smi)
5727
5728  // Check for flat ASCII string.
5729  Label non_ascii_flat;
5730  STATIC_ASSERT(kTwoByteStringTag == 0);
5731
5732  __ And(t4, a1, Operand(kStringEncodingMask));
5733  __ Branch(&non_ascii_flat, eq, t4, Operand(zero_reg));
5734
5735  Label result_longer_than_two;
5736  __ Branch(&result_longer_than_two, gt, a2, Operand(2));
5737
5738  // Sub string of length 2 requested.
5739  // Get the two characters forming the sub string.
5740  __ Addu(t1, t1, Operand(a3));
5741  __ lbu(a3, FieldMemOperand(t1, SeqAsciiString::kHeaderSize));
5742  __ lbu(t0, FieldMemOperand(t1, SeqAsciiString::kHeaderSize + 1));
5743
5744  // Try to look up a two-character string in the symbol table.
5745  Label make_two_character_string;
5746  StringHelper::GenerateTwoCharacterSymbolTableProbe(
5747      masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
5748  Counters* counters = masm->isolate()->counters();
5749  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5750  __ Addu(sp, sp, Operand(3 * kPointerSize));
5751  __ Ret();
5752
5753
5754  // a2: result string length.
5755  // a3: two characters combined into halfword in little endian byte order.
5756  __ bind(&make_two_character_string);
5757  __ AllocateAsciiString(v0, a2, t0, t1, t4, &sub_string_runtime);
5758  __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
5759  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5760  __ Addu(sp, sp, Operand(3 * kPointerSize));
5761  __ Ret();
5762
5763  __ bind(&result_longer_than_two);
5764
5765  // Allocate the result.
5766  __ AllocateAsciiString(v0, a2, t4, t0, a1, &sub_string_runtime);
5767
5768  // v0: result string.
5769  // a2: result string length.
5770  // a3: from index (untagged smi)
5771  // t1: string.
5772  // t3: (a.k.a. from): from offset (smi)
5773  // Locate first character of result.
5774  __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5775  // Locate 'from' character of string.
5776  __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5777  __ Addu(t1, t1, Operand(a3));
5778
5779  // v0: result string.
5780  // a1: first character of result string.
5781  // a2: result string length.
5782  // t1: first character of sub string to copy.
5783  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
5784  StringHelper::GenerateCopyCharactersLong(
5785      masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
5786  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5787  __ Addu(sp, sp, Operand(3 * kPointerSize));
5788  __ Ret();
5789
5790  __ bind(&non_ascii_flat);
5791  // a2: result string length.
5792  // t1: string.
5793  // t3: (a.k.a. from): from offset (smi)
5794  // Check for flat two byte string.
5795
5796  // Allocate the result.
5797  __ AllocateTwoByteString(v0, a2, a1, a3, t0, &sub_string_runtime);
5798
5799  // v0: result string.
5800  // a2: result string length.
5801  // t1: string.
5802  // Locate first character of result.
5803  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5804  // Locate 'from' character of string.
5805  __ Addu(t1, t1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5806  // As "from" is a smi it is already multiplied by 2, which matches the size
5807  // of a two-byte character.
5808  __ Addu(t1, t1, Operand(from));
5809  from = no_reg;
5810
5811  // v0: result string.
5812  // a1: first character of result.
5813  // a2: result length.
5814  // t1: first character of string to copy.
5815  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
5816  StringHelper::GenerateCopyCharactersLong(
5817      masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
5818  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
5819  __ Addu(sp, sp, Operand(3 * kPointerSize));
5820  __ Ret();
5821
5822  // Just jump to runtime to create the sub string.
5823  __ bind(&sub_string_runtime);
5824  __ TailCallRuntime(Runtime::kSubString, 3, 1);
5825}
5826
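// Rough outline of the fast path generated above (illustrative sketch with
// hypothetical helpers, not actual stub code):
//
//   if (!from->IsSmi() || !to->IsSmi()) goto runtime;
//   int start = untag(from), end = untag(to), length = end - start;
//   if (start < 0 || start > end || length < 2) goto runtime;
//   if (string is not sequential, even after one cons hop) goto runtime;
//   if (end > string->length()) goto runtime;
//   if (length == 2) probe the symbol table; else allocate and copy chars.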
5827
5828void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5829                                                      Register left,
5830                                                      Register right,
5831                                                      Register scratch1,
5832                                                      Register scratch2,
5833                                                      Register scratch3) {
5834  Register length = scratch1;
5835
5836  // Compare lengths.
5837  Label strings_not_equal, check_zero_length;
5838  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
5839  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5840  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
5841  __ bind(&strings_not_equal);
5842  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
5843  __ Ret();
5844
5845  // Check if the length is zero.
5846  Label compare_chars;
5847  __ bind(&check_zero_length);
5848  STATIC_ASSERT(kSmiTag == 0);
5849  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
5850  __ li(v0, Operand(Smi::FromInt(EQUAL)));
5851  __ Ret();
5852
5853  // Compare characters.
5854  __ bind(&compare_chars);
5855
5856  GenerateAsciiCharsCompareLoop(masm,
5857                                left, right, length, scratch2, scratch3, v0,
5858                                &strings_not_equal);
5859
5860  // Characters are equal.
5861  __ li(v0, Operand(Smi::FromInt(EQUAL)));
5862  __ Ret();
5863}
5864
5865
5866void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
5867                                                        Register left,
5868                                                        Register right,
5869                                                        Register scratch1,
5870                                                        Register scratch2,
5871                                                        Register scratch3,
5872                                                        Register scratch4) {
5873  Label result_not_equal, compare_lengths;
5874  // Find minimum length and length difference.
5875  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
5876  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
5877  __ Subu(scratch3, scratch1, Operand(scratch2));
5878  Register length_delta = scratch3;
5879  __ slt(scratch4, scratch2, scratch1);
5880  __ movn(scratch1, scratch2, scratch4);
5881  Register min_length = scratch1;
5882  STATIC_ASSERT(kSmiTag == 0);
5883  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
5884
5885  // Compare loop.
5886  GenerateAsciiCharsCompareLoop(masm,
5887                                left, right, min_length, scratch2, scratch4, v0,
5888                                &result_not_equal);
5889
5890  // Compare lengths - strings up to min-length are equal.
5891  __ bind(&compare_lengths);
5892  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
5893  // Use length_delta as result if it's zero.
5894  __ mov(scratch2, length_delta);
5895  __ mov(scratch4, zero_reg);
5896  __ mov(v0, zero_reg);
5897
5898  __ bind(&result_not_equal);
5899  // Conditionally update the result based on either length_delta or
5900  // the last comparison performed in the loop above.
5901  Label ret;
5902  __ Branch(&ret, eq, scratch2, Operand(scratch4));
5903  __ li(v0, Operand(Smi::FromInt(GREATER)));
5904  __ Branch(&ret, gt, scratch2, Operand(scratch4));
5905  __ li(v0, Operand(Smi::FromInt(LESS)));
5906  __ bind(&ret);
5907  __ Ret();
5908}
5909
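// The result selection above reduces to (illustrative sketch): if the common
// prefix is equal, the sign of the length difference decides, otherwise the
// first mismatching character pair decides.
//
//   int32_t cmp = prefix_equal ? (left_length - right_length)
//                              : (left_char - right_char);
//   result = (cmp == 0) ? EQUAL : (cmp > 0) ? GREATER : LESS;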
5910
5911void StringCompareStub::GenerateAsciiCharsCompareLoop(
5912    MacroAssembler* masm,
5913    Register left,
5914    Register right,
5915    Register length,
5916    Register scratch1,
5917    Register scratch2,
5918    Register scratch3,
5919    Label* chars_not_equal) {
5920  // Change index to run from -length to -1 by adding length to string
5921  // start. This means that loop ends when index reaches zero, which
5922  // doesn't need an additional compare.
5923  __ SmiUntag(length);
5924  __ Addu(scratch1, length,
5925          Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5926  __ Addu(left, left, Operand(scratch1));
5927  __ Addu(right, right, Operand(scratch1));
5928  __ Subu(length, zero_reg, length);
5929  Register index = length;  // index = -length;
5930
5931
5932  // Compare loop.
5933  Label loop;
5934  __ bind(&loop);
5935  __ Addu(scratch3, left, index);
5936  __ lbu(scratch1, MemOperand(scratch3));
5937  __ Addu(scratch3, right, index);
5938  __ lbu(scratch2, MemOperand(scratch3));
5939  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
5940  __ Addu(index, index, 1);
5941  __ Branch(&loop, ne, index, Operand(zero_reg));
5942}
5943
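// Equivalent C loop for the code above (illustrative sketch): both string
// pointers are advanced past the last character, so the negative index
// reaching zero ends the loop without an extra bounds compare.
//
//   const uint8_t* left_end = left_chars + length;
//   const uint8_t* right_end = right_chars + length;
//   for (int index = -length; index != 0; index++) {
//     if (left_end[index] != right_end[index]) goto chars_not_equal;
//   }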
5944
5945void StringCompareStub::Generate(MacroAssembler* masm) {
5946  Label runtime;
5947
5948  Counters* counters = masm->isolate()->counters();
5949
5950  // Stack frame on entry.
5951  //  sp[0]: right string
5952  //  sp[4]: left string
5953  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
5954  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
5955
5956  Label not_same;
5957  __ Branch(&not_same, ne, a0, Operand(a1));
5958  STATIC_ASSERT(EQUAL == 0);
5959  STATIC_ASSERT(kSmiTag == 0);
5960  __ li(v0, Operand(Smi::FromInt(EQUAL)));
5961  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
5962  __ Addu(sp, sp, Operand(2 * kPointerSize));
5963  __ Ret();
5964
5965  __ bind(&not_same);
5966
5967  // Check that both objects are sequential ASCII strings.
5968  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
5969
5970  // Compare flat ASCII strings natively. Remove arguments from stack first.
5971  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
5972  __ Addu(sp, sp, Operand(2 * kPointerSize));
5973  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
5974
5975  __ bind(&runtime);
5976  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5977}
5978
5979
5980void StringAddStub::Generate(MacroAssembler* masm) {
5981  Label string_add_runtime, call_builtin;
5982  Builtins::JavaScript builtin_id = Builtins::ADD;
5983
5984  Counters* counters = masm->isolate()->counters();
5985
5986  // Stack on entry:
5987  // sp[0]: second argument (right).
5988  // sp[4]: first argument (left).
5989
5990  // Load the two arguments.
5991  __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
5992  __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
5993
5994  // Make sure that both arguments are strings if not known in advance.
5995  if (flags_ == NO_STRING_ADD_FLAGS) {
5996    __ JumpIfEitherSmi(a0, a1, &string_add_runtime);
5997    // Load instance types.
5998    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
5999    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6000    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6001    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6002    STATIC_ASSERT(kStringTag == 0);
6003    // If either is not a string, go to runtime.
6004    __ Or(t4, t0, Operand(t1));
6005    __ And(t4, t4, Operand(kIsNotStringMask));
6006    __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
6007  } else {
6008    // Here at least one of the arguments is definitely a string.
6009    // We convert the one that is not known to be a string.
6010    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6011      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6012      GenerateConvertArgument(
6013          masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6014      builtin_id = Builtins::STRING_ADD_RIGHT;
6015    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6016      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6017      GenerateConvertArgument(
6018          masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6019      builtin_id = Builtins::STRING_ADD_LEFT;
6020    }
6021  }
6022
6023  // Both arguments are strings.
6024  // a0: first string
6025  // a1: second string
6026  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6027  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6028  {
6029    Label strings_not_empty;
6030    // Check if either of the strings is empty. In that case return the other.
6031    // These tests use a zero-length check on the string length, which is a Smi.
6032    // Assert that Smi::FromInt(0) is really 0.
6033    STATIC_ASSERT(kSmiTag == 0);
6034    ASSERT(Smi::FromInt(0) == 0);
6035    __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6036    __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6037    __ mov(v0, a0);       // Assume we'll return first string (from a0).
6038    __ movz(v0, a1, a2);  // If first is empty, return second (from a1).
6039    __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
6040    __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
6041    __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
6042    __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6043
6044    __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6045    __ Addu(sp, sp, Operand(2 * kPointerSize));
6046    __ Ret();
6047
6048    __ bind(&strings_not_empty);
6049  }
6050
6051  // Untag both string-lengths.
6052  __ sra(a2, a2, kSmiTagSize);
6053  __ sra(a3, a3, kSmiTagSize);
6054
6055  // Both strings are non-empty.
6056  // a0: first string
6057  // a1: second string
6058  // a2: length of first string
6059  // a3: length of second string
6060  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6061  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6062  // Look at the length of the result of adding the two strings.
6063  Label string_add_flat_result, longer_than_two;
6064  // Adding two lengths can't overflow.
6065  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6066  __ Addu(t2, a2, Operand(a3));
6067  // Use the symbol table when adding two one character strings, as it
6068  // helps later optimizations to return a symbol here.
6069  __ Branch(&longer_than_two, ne, t2, Operand(2));
6070
6071  // Check that both strings are non-external ASCII strings.
6072  if (flags_ != NO_STRING_ADD_FLAGS) {
6073    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6074    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6075    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6076    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6077  }
6078  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6079                                                 &string_add_runtime);
6080
6081  // Get the two characters forming the new string.
6082  __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6083  __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6084
6085  // Try to look up the two-character string in the symbol table. If it is
6086  // not found, just allocate a new one.
6087  Label make_two_character_string;
6088  StringHelper::GenerateTwoCharacterSymbolTableProbe(
6089      masm, a2, a3, t2, t3, t0, t1, t4, &make_two_character_string);
6090  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6091  __ Addu(sp, sp, Operand(2 * kPointerSize));
6092  __ Ret();
6093
6094  __ bind(&make_two_character_string);
6095  // The resulting string has length 2 and the first characters of the two
6096  // strings are combined into a single halfword in the a2 register.
6097  // So we can fill the resulting string without two loops, using a single
6098  // halfword store instruction (which assumes that the processor is in
6099  // little-endian mode).
6100  __ li(t2, Operand(2));
6101  __ AllocateAsciiString(v0, t2, t0, t1, t4, &string_add_runtime);
6102  __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6103  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6104  __ Addu(sp, sp, Operand(2 * kPointerSize));
6105  __ Ret();
6106
6107  __ bind(&longer_than_two);
6108  // Check if resulting string will be flat.
6109  __ Branch(&string_add_flat_result, lt, t2,
6110           Operand(String::kMinNonFlatLength));
6111  // Handle exceptionally long strings in the runtime system.
6112  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6113  ASSERT(IsPowerOf2(String::kMaxLength + 1));
6114  // kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
6115  __ Branch(&string_add_runtime, hs, t2, Operand(String::kMaxLength + 1));
6116
6117  // If result is not supposed to be flat, allocate a cons string object.
6118  // If both strings are ASCII the result is an ASCII cons string.
6119  if (flags_ != NO_STRING_ADD_FLAGS) {
6120    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6121    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6122    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6123    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6124  }
6125  Label non_ascii, allocated, ascii_data;
6126  STATIC_ASSERT(kTwoByteStringTag == 0);
6127  // Branch to non_ascii if either string-encoding field is zero (non-ascii).
6128  __ And(t4, t0, Operand(t1));
6129  __ And(t4, t4, Operand(kStringEncodingMask));
6130  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6131
6132  // Allocate an ASCII cons string.
6133  __ bind(&ascii_data);
6134  __ AllocateAsciiConsString(t3, t2, t0, t1, &string_add_runtime);
6135  __ bind(&allocated);
6136  // Fill the fields of the cons string.
6137  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
6138  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
6139  __ mov(v0, t3);
6140  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6141  __ Addu(sp, sp, Operand(2 * kPointerSize));
6142  __ Ret();
6143
6144  __ bind(&non_ascii);
6145  // At least one of the strings is two-byte. Check whether it happens
6146  // to contain only ASCII characters.
6147  // t0: first instance type.
6148  // t1: second instance type.
6149  // Branch to ascii_data if _both_ instances have kAsciiDataHintMask set.
6150  __ And(at, t0, Operand(kAsciiDataHintMask));
6151  __ and_(at, at, t1);
6152  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6153
6154  __ xor_(t0, t0, t1);
6155  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6156  __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6157  __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6158
6159  // Allocate a two byte cons string.
6160  __ AllocateTwoByteConsString(t3, t2, t0, t1, &string_add_runtime);
6161  __ Branch(&allocated);
6162
6163  // Handle creating a flat result. First check that both strings are
6164  // sequential and that they have the same encoding.
6165  // a0: first string
6166  // a1: second string
6167  // a2: length of first string
6168  // a3: length of second string
6169  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6170  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6171  // t2: sum of lengths.
6172  __ bind(&string_add_flat_result);
6173  if (flags_ != NO_STRING_ADD_FLAGS) {
6174    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6175    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6176    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6177    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6178  }
6179  // Check that both strings are sequential, meaning that we
6180  // branch to runtime if either string tag is non-zero.
6181  STATIC_ASSERT(kSeqStringTag == 0);
6182  __ Or(t4, t0, Operand(t1));
6183  __ And(t4, t4, Operand(kStringRepresentationMask));
6184  __ Branch(&string_add_runtime, ne, t4, Operand(zero_reg));
6185
6186  // Now check if both strings have the same encoding (ASCII/Two-byte).
6187  // a0: first string
6188  // a1: second string
6189  // a2: length of first string
6190  // a3: length of second string
6191  // t0: first string instance type
6192  // t1: second string instance type
6193  // t2: sum of lengths.
6194  Label non_ascii_string_add_flat_result;
6195  ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test.
6196  __ xor_(t3, t1, t0);
6197  __ And(t3, t3, Operand(kStringEncodingMask));
6198  __ Branch(&string_add_runtime, ne, t3, Operand(zero_reg));
6199  // And see if it's ASCII (0) or two-byte (1).
6200  __ And(t3, t0, Operand(kStringEncodingMask));
6201  __ Branch(&non_ascii_string_add_flat_result, eq, t3, Operand(zero_reg));
6202
6203  // Both strings are sequential ASCII strings. We also know that they are
6204  // short (since the sum of the lengths is less than kMinNonFlatLength).
6205  // t2: length of resulting flat string
6206  __ AllocateAsciiString(t3, t2, t0, t1, t4, &string_add_runtime);
6207  // Locate first character of result.
6208  __ Addu(t2, t3, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6209  // Locate first character of first argument.
6210  __ Addu(a0, a0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6211  // a0: first character of first string.
6212  // a1: second string.
6213  // a2: length of first string.
6214  // a3: length of second string.
6215  // t2: first character of result.
6216  // t3: result string.
6217  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, true);
6218
6219  // Load second argument and locate first character.
6220  __ Addu(a1, a1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6221  // a1: first character of second string.
6222  // a3: length of second string.
6223  // t2: next character of result.
6224  // t3: result string.
6225  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6226  __ mov(v0, t3);
6227  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6228  __ Addu(sp, sp, Operand(2 * kPointerSize));
6229  __ Ret();
6230
6231  __ bind(&non_ascii_string_add_flat_result);
6232  // Both strings are sequential two byte strings.
6233  // a0: first string.
6234  // a1: second string.
6235  // a2: length of first string.
6236  // a3: length of second string.
6237  // t2: sum of length of strings.
6238  __ AllocateTwoByteString(t3, t2, t0, t1, t4, &string_add_runtime);
6239  // a0: first string.
6240  // a1: second string.
6241  // a2: length of first string.
6242  // a3: length of second string.
6243  // t3: result string.
6244
6245  // Locate first character of result.
6246  __ Addu(t2, t3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6247  // Locate first character of first argument.
6248  __ Addu(a0, a0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6249
6250  // a0: first character of first string.
6251  // a1: second string.
6252  // a2: length of first string.
6253  // a3: length of second string.
6254  // t2: first character of result.
6255  // t3: result string.
6256  StringHelper::GenerateCopyCharacters(masm, t2, a0, a2, t0, false);
6257
6258  // Locate first character of second argument.
6259  __ Addu(a1, a1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6260
6261  // a1: first character of second string.
6262  // a3: length of second string.
6263  // t2: next character of result (after copy of first string).
6264  // t3: result string.
6265  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6266
6267  __ mov(v0, t3);
6268  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6269  __ Addu(sp, sp, Operand(2 * kPointerSize));
6270  __ Ret();
6271
6272  // Just jump to runtime to add the two strings.
6273  __ bind(&string_add_runtime);
6274  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
6275
6276  if (call_builtin.is_linked()) {
6277    __ bind(&call_builtin);
6278    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
6279  }
6280}
6281
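// Overall strategy of the stub above (illustrative sketch):
//
//   if (left is empty) return right;
//   if (right is empty) return left;
//   int sum = length(left) + length(right);
//   if (sum == 2) probe the symbol table or allocate a 2-character string;
//   else if (sum < String::kMinNonFlatLength) copy both into a flat string;
//   else if (sum <= String::kMaxLength) allocate a cons string;
//   else call the runtime.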
6282
6283void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
6284                                            int stack_offset,
6285                                            Register arg,
6286                                            Register scratch1,
6287                                            Register scratch2,
6288                                            Register scratch3,
6289                                            Register scratch4,
6290                                            Label* slow) {
6291  // First check if the argument is already a string.
6292  Label not_string, done;
6293  __ JumpIfSmi(arg, &not_string);
6294  __ GetObjectType(arg, scratch1, scratch1);
6295  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
6296
6297  // Check the number to string cache.
6298  Label not_cached;
6299  __ bind(&not_string);
6300  // Puts the cached result into scratch1.
6301  NumberToStringStub::GenerateLookupNumberStringCache(masm,
6302                                                      arg,
6303                                                      scratch1,
6304                                                      scratch2,
6305                                                      scratch3,
6306                                                      scratch4,
6307                                                      false,
6308                                                      &not_cached);
6309  __ mov(arg, scratch1);
6310  __ sw(arg, MemOperand(sp, stack_offset));
6311  __ jmp(&done);
6312
6313  // Check if the argument is a safe string wrapper.
6314  __ bind(&not_cached);
6315  __ JumpIfSmi(arg, slow);
6316  __ GetObjectType(arg, scratch1, scratch2);  // map -> scratch1.
6317  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
6318  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
6319  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
6320  __ And(scratch2, scratch2, scratch4);
6321  __ Branch(slow, ne, scratch2, Operand(scratch4));
6322  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
6323  __ sw(arg, MemOperand(sp, stack_offset));
6324
6325  __ bind(&done);
6326}
6327
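// In effect, the conversion above does (illustrative sketch):
//
//   if (arg is already a string) { /* leave it and its stack slot alone */ }
//   else if (the number-to-string cache has a result for arg)
//     arg = cached string, written back to its stack slot;
//   else if (arg is a JSValue wrapper marked string-wrapper-safe)
//     arg = wrapped value, written back to its stack slot;
//   else jump to |slow| (the string-add builtin).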
6328
6329void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6330  ASSERT(state_ == CompareIC::SMIS);
6331  Label miss;
6332  __ Or(a2, a1, a0);
6333  __ JumpIfNotSmi(a2, &miss);
6334
6335  if (GetCondition() == eq) {
6336    // For equality we do not care about the sign of the result.
6337    __ Subu(v0, a0, a1);
6338  } else {
6339    // Untag before subtracting to avoid handling overflow.
6340    __ SmiUntag(a1);
6341    __ SmiUntag(a0);
6342    __ Subu(v0, a1, a0);
6343  }
6344  __ Ret();
6345
6346  __ bind(&miss);
6347  GenerateMiss(masm);
6348}
6349
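// In C terms (illustrative sketch, with left in a1 and right in a0): for
// equality only the zero-ness of the difference matters, so the smi tags can
// stay; for ordering the operands are untagged first, so the 31-bit values
// cannot overflow the subtraction.
//
//   int32_t result = (cond == eq) ? (right - left)
//                                 : ((left >> 1) - (right >> 1));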
6350
6351void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6352  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6353
6354  Label generic_stub;
6355  Label unordered;
6356  Label miss;
6357  __ And(a2, a1, Operand(a0));
6358  __ JumpIfSmi(a2, &generic_stub);
6359
6360  __ GetObjectType(a0, a2, a2);
6361  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6362  __ GetObjectType(a1, a2, a2);
6363  __ Branch(&miss, ne, a2, Operand(HEAP_NUMBER_TYPE));
6364
6365  // Inlining the double comparison and falling back to the general compare
6366  // stub if NaN is involved or FPU is unsupported.
6367  if (CpuFeatures::IsSupported(FPU)) {
6368    CpuFeatures::Scope scope(FPU);
6369
6370    // Load left and right operand.
6371    __ Subu(a2, a1, Operand(kHeapObjectTag));
6372    __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
6373    __ Subu(a2, a0, Operand(kHeapObjectTag));
6374    __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
6375
6376    Label fpu_eq, fpu_lt, fpu_gt;
6377    // Compare operands (test if unordered).
6378    __ c(UN, D, f0, f2);
6379    // Don't base result on status bits when a NaN is involved.
6380    __ bc1t(&unordered);
6381    __ nop();
6382
6383    // Test if equal.
6384    __ c(EQ, D, f0, f2);
6385    __ bc1t(&fpu_eq);
6386    __ nop();
6387
6388    // Test if unordered or less (unordered case is already handled).
6389    __ c(ULT, D, f0, f2);
6390    __ bc1t(&fpu_lt);
6391    __ nop();
6392
6393    // Otherwise it's greater.
6394    __ bc1f(&fpu_gt);
6395    __ nop();
6396
6397    // Return a result of -1, 0, or 1.
6398    __ bind(&fpu_eq);
6399    __ li(v0, Operand(EQUAL));
6400    __ Ret();
6401
6402    __ bind(&fpu_lt);
6403    __ li(v0, Operand(LESS));
6404    __ Ret();
6405
6406    __ bind(&fpu_gt);
6407    __ li(v0, Operand(GREATER));
6408    __ Ret();
6409
6410    __ bind(&unordered);
6411  }
6412
6413  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
6414  __ bind(&generic_stub);
6415  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
6416
6417  __ bind(&miss);
6418  GenerateMiss(masm);
6419}
6420
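// The FPU path above boils down to (illustrative sketch, with x the value
// loaded from a1 and y the value loaded from a0):
//
//   if (isnan(x) || isnan(y)) jump to the generic CompareStub;  // Unordered.
//   result = (x == y) ? EQUAL : (x < y) ? LESS : GREATER;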
6421
6422void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6423  ASSERT(state_ == CompareIC::SYMBOLS);
6424  Label miss;
6425
6426  // Registers containing left and right operands respectively.
6427  Register left = a1;
6428  Register right = a0;
6429  Register tmp1 = a2;
6430  Register tmp2 = a3;
6431
6432  // Check that both operands are heap objects.
6433  __ JumpIfEitherSmi(left, right, &miss);
6434
6435  // Check that both operands are symbols.
6436  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6437  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6438  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6439  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6440  STATIC_ASSERT(kSymbolTag != 0);
6441  __ And(tmp1, tmp1, Operand(tmp2));
6442  __ And(tmp1, tmp1, kIsSymbolMask);
6443  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
6444  // Make sure a0 is non-zero. At this point input operands are
6445  // guaranteed to be non-zero.
6446  ASSERT(right.is(a0));
6447  STATIC_ASSERT(EQUAL == 0);
6448  STATIC_ASSERT(kSmiTag == 0);
6449  __ mov(v0, right);
6450  // Symbols are compared by identity.
6451  __ Ret(ne, left, Operand(right));
6452  __ li(v0, Operand(Smi::FromInt(EQUAL)));
6453  __ Ret();
6454
6455  __ bind(&miss);
6456  GenerateMiss(masm);
6457}
6458
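// Since symbols are compared by identity, the stub above effectively
// computes (illustrative sketch):
//
//   result = (left == right) ? Smi::FromInt(EQUAL)  // That is, 0.
//                            : right;               // Any non-zero value.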
6459
6460void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6461  ASSERT(state_ == CompareIC::STRINGS);
6462  Label miss;
6463
6464  // Registers containing left and right operands respectively.
6465  Register left = a1;
6466  Register right = a0;
6467  Register tmp1 = a2;
6468  Register tmp2 = a3;
6469  Register tmp3 = t0;
6470  Register tmp4 = t1;
6471  Register tmp5 = t2;
6472
6473  // Check that both operands are heap objects.
6474  __ JumpIfEitherSmi(left, right, &miss);
6475
6476  // Check that both operands are strings. This leaves the instance
6477  // types loaded in tmp1 and tmp2.
6478  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
6479  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
6480  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
6481  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
6482  STATIC_ASSERT(kNotStringTag != 0);
6483  __ Or(tmp3, tmp1, tmp2);
6484  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
6485  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
6486
6487  // Fast check for identical strings.
6488  Label left_ne_right;
6489  STATIC_ASSERT(EQUAL == 0);
6490  STATIC_ASSERT(kSmiTag == 0);
6491  __ Branch(&left_ne_right, ne, left, Operand(right), USE_DELAY_SLOT);
6492  __ mov(v0, zero_reg);  // In the delay slot.
6493  __ Ret();
6494  __ bind(&left_ne_right);
6495
6496  // Handle not identical strings.
6497
6498  // Check that both strings are symbols. If they are, we're done
6499  // because we already know they are not identical.
6500  ASSERT(GetCondition() == eq);
6501  STATIC_ASSERT(kSymbolTag != 0);
6502  __ And(tmp3, tmp1, Operand(tmp2));
6503  __ And(tmp5, tmp3, Operand(kIsSymbolMask));
6504  Label is_symbol;
6505  __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg), USE_DELAY_SLOT);
6506  __ mov(v0, a0);  // In the delay slot.
6507  // Make sure a0 is non-zero. At this point input operands are
6508  // guaranteed to be non-zero.
6509  ASSERT(right.is(a0));
6510  __ Ret();
6511  __ bind(&is_symbol);
6512
6513  // Check that both strings are sequential ASCII.
6514  Label runtime;
6515  __ JumpIfBothInstanceTypesAreNotSequentialAscii(tmp1, tmp2, tmp3, tmp4,
6516                                                  &runtime);
6517
6518  // Compare flat ASCII strings. Returns when done.
6519  StringCompareStub::GenerateFlatAsciiStringEquals(
6520      masm, left, right, tmp1, tmp2, tmp3);
6521
6522  // Handle more complex cases in runtime.
6523  __ bind(&runtime);
6524  __ Push(left, right);
6525  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6526
6527  __ bind(&miss);
6528  GenerateMiss(masm);
6529}
6530
6531
6532void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6533  ASSERT(state_ == CompareIC::OBJECTS);
6534  Label miss;
6535  __ And(a2, a1, Operand(a0));
6536  __ JumpIfSmi(a2, &miss);
6537
6538  __ GetObjectType(a0, a2, a2);
6539  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6540  __ GetObjectType(a1, a2, a2);
6541  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
6542
6543  ASSERT(GetCondition() == eq);
6544  __ Subu(v0, a0, Operand(a1));
6545  __ Ret();
6546
6547  __ bind(&miss);
6548  GenerateMiss(masm);
6549}
6550
6551
6552void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6553  __ Push(a1, a0);
6554  __ push(ra);
6555
6556  // Call the runtime system in a fresh internal frame.
6557  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6558                                             masm->isolate());
6559  __ EnterInternalFrame();
6560  __ Push(a1, a0);
6561  __ li(t0, Operand(Smi::FromInt(op_)));
6562  __ push(t0);
6563  __ CallExternalReference(miss, 3);
6564  __ LeaveInternalFrame();
6565  // Compute the entry point of the rewritten stub.
6566  __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
6567  // Restore registers.
6568  __ pop(ra);
6569  __ pop(a0);
6570  __ pop(a1);
6571  __ Jump(a2);
6572}
6573
6574
6575void DirectCEntryStub::Generate(MacroAssembler* masm) {
6576  // No need to pop or drop anything, LeaveExitFrame will restore the old
6577  // stack, thus dropping the allocated space for the return value.
6578  // The saved ra is after the reserved stack space for the 4 args.
6579  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
6580
6581  if (FLAG_debug_code && EnableSlowAsserts()) {
6582    // In case of an error the return address may point to a memory area
6583    // filled with kZapValue by the GC.
6584    // Dereference the address and check for this.
6585    __ lw(t0, MemOperand(t9));
6586    __ Assert(ne, "Received invalid return address.", t0,
6587        Operand(reinterpret_cast<uint32_t>(kZapValue)));
6588  }
6589  __ Jump(t9);
6590}
6591
6592
6593void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6594                                    ExternalReference function) {
6595  __ li(t9, Operand(function));
6596  this->GenerateCall(masm, t9);
6597}
6598
6599
6600void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
6601                                    Register target) {
6602  __ Move(t9, target);
6603  __ AssertStackIsAligned();
6604  // Allocate space for arg slots.
6605  __ Subu(sp, sp, kCArgsSlotsSize);
6606
6607  // Block the trampoline pool through the whole function to make sure the
6608  // number of generated instructions is constant.
6609  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
6610
6611  // We need to get the current 'pc' value, which is not available on MIPS.
6612  Label find_ra;
6613  masm->bal(&find_ra);  // ra = pc + 8.
6614  masm->nop();  // Branch delay slot nop.
6615  masm->bind(&find_ra);
6616
6617  const int kNumInstructionsToJump = 6;
6618  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
6619  // Push return address (accessible to GC through exit frame pc).
6620  // This spot for ra was reserved in EnterExitFrame.
6621  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
6622  masm->li(ra, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
6623                    RelocInfo::CODE_TARGET), true);
6624  // Call the function.
6625  masm->Jump(t9);
6626  // Make sure the stored 'ra' points to this position.
6627  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
6628}
6629
6630
6631MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
6632    MacroAssembler* masm,
6633    Label* miss,
6634    Label* done,
6635    Register receiver,
6636    Register properties,
6637    String* name,
6638    Register scratch0) {
6639  // If the names of the slots probed for the hash value (probes 1 to
6640  // kProbes - 1) are not equal to the name, and the kProbes-th slot is unused
6641  // (its name is the undefined value), then the hash table is guaranteed not
6642  // to contain the property. This holds even if some slots represent deleted
6643  // properties (their names are the null value).
6644  for (int i = 0; i < kInlinedProbes; i++) {
6645    // scratch0 points to properties hash.
6646    // Compute the masked index: (hash + i + i * i) & mask.
6647    Register index = scratch0;
6648    // Capacity is smi 2^n.
6649    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
6650    __ Subu(index, index, Operand(1));
6651    __ And(index, index, Operand(
6652         Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));
6653
6654    // Scale the index by multiplying by the entry size.
6655    ASSERT(StringDictionary::kEntrySize == 3);
6656    // index *= 3.
6657    __ mov(at, index);
6658    __ sll(index, index, 1);
6659    __ Addu(index, index, at);
6660
6661    Register entity_name = scratch0;
6662    // Having undefined at this place means the name is not contained.
6663    ASSERT_EQ(kSmiTagSize, 1);
6664    Register tmp = properties;
6665
6666    __ sll(scratch0, index, 1);
6667    __ Addu(tmp, properties, scratch0);
6668    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
6669
6670    ASSERT(!tmp.is(entity_name));
6671    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
6672    __ Branch(done, eq, entity_name, Operand(tmp));
6673
6674    if (i != kInlinedProbes - 1) {
6675      // Stop if found the property.
6676      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));
6677
6678      // Check if the entry name is not a symbol.
6679      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
6680      __ lbu(entity_name,
6681             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
6682      __ And(scratch0, entity_name, Operand(kIsSymbolMask));
6683      __ Branch(miss, eq, scratch0, Operand(zero_reg));
6684
6685      // Restore the properties.
6686      __ lw(properties,
6687            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6688    }
6689  }
6690
6691  const int spill_mask =
6692      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
6693       a2.bit() | a1.bit() | a0.bit());
6694
6695  __ MultiPush(spill_mask);
6696  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
6697  __ li(a1, Operand(Handle<String>(name)));
6698  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
6699  MaybeObject* result = masm->TryCallStub(&stub);
6700  if (result->IsFailure()) return result;
6701  __ MultiPop(spill_mask);
6702
6703  __ Branch(done, eq, v0, Operand(zero_reg));
6704  __ Branch(miss, ne, v0, Operand(zero_reg));
6705  return result;
6706}
6707
6708
6709// Probe the string dictionary in the |elements| register. Jump to the
6710// |done| label if a property with the given name is found. Jump to
6711// the |miss| label otherwise.
6712// If lookup was successful |scratch2| will be equal to elements + 4 * index.
6713void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6714                                                        Label* miss,
6715                                                        Label* done,
6716                                                        Register elements,
6717                                                        Register name,
6718                                                        Register scratch1,
6719                                                        Register scratch2) {
6720  // Assert that name contains a string.
6721  if (FLAG_debug_code) __ AbortIfNotString(name);
6722
6723  // Compute the capacity mask.
6724  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
6725  __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
6726  __ Subu(scratch1, scratch1, Operand(1));
6727
6728  // Generate an unrolled loop that performs a few probes before
6729  // giving up. Measurements done on Gmail indicate that 2 probes
6730  // cover ~93% of loads from dictionaries.
6731  for (int i = 0; i < kInlinedProbes; i++) {
6732    // Compute the masked index: (hash + i + i * i) & mask.
6733    __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
6734    if (i > 0) {
6735      // Add the probe offset (i + i * i) left shifted to avoid right shifting
6736      // the hash in a separate instruction. The value hash + i + i * i is right
6737      // shifted in the following and instruction.
6738      ASSERT(StringDictionary::GetProbeOffset(i) <
6739             1 << (32 - String::kHashFieldOffset));
6740      __ Addu(scratch2, scratch2, Operand(
6741           StringDictionary::GetProbeOffset(i) << String::kHashShift));
6742    }
6743    __ srl(scratch2, scratch2, String::kHashShift);
6744    __ And(scratch2, scratch1, scratch2);
6745
6746    // Scale the index by multiplying by the element size.
6747    ASSERT(StringDictionary::kEntrySize == 3);
6748    // scratch2 = scratch2 * 3.
6749
6750    __ mov(at, scratch2);
6751    __ sll(scratch2, scratch2, 1);
6752    __ Addu(scratch2, scratch2, at);
6753
6754    // Check if the key is identical to the name.
6755    __ sll(at, scratch2, 2);
6756    __ Addu(scratch2, elements, at);
6757    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
6758    __ Branch(done, eq, name, Operand(at));
6759  }
6760
6761  const int spill_mask =
6762      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
6763       a3.bit() | a2.bit() | a1.bit() | a0.bit()) &
6764      ~(scratch1.bit() | scratch2.bit());
6765
6766  __ MultiPush(spill_mask);
6767  __ Move(a0, elements);
6768  __ Move(a1, name);
6769  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
6770  __ CallStub(&stub);
6771  __ mov(scratch2, a2);
6772  __ MultiPop(spill_mask);
6773
6774  __ Branch(done, ne, v0, Operand(zero_reg));
6775  __ Branch(miss, eq, v0, Operand(zero_reg));
6776}
6777
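// The inlined probes above follow the quadratic probe sequence described in
// the comments, (hash + i + i * i) & mask, roughly (illustrative sketch):
//
//   int mask = capacity - 1;  // Capacity is a power of two.
//   for (int i = 0; i < kInlinedProbes; i++) {
//     int entry = (hash + probe_offset(i)) & mask;
//     if (elements[entry * StringDictionary::kEntrySize] == name) goto done;
//   }
//   // Remaining probes are handled by the StringDictionaryLookupStub call.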
6778
6779void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6780  // Registers:
6781  //  result: holds the result of the lookup (v0).
6782  //  dictionary: StringDictionary to probe (a0).
6783  //  key: the name to look up (a1).
6784  //  index: will hold the index of the entry if lookup is successful;
6785  //         might alias with result.
6786  // Returns:
6787  //  result is zero if lookup failed, non-zero otherwise.
6788
6789  Register result = v0;
6790  Register dictionary = a0;
6791  Register key = a1;
6792  Register index = a2;
6793  Register mask = a3;
6794  Register hash = t0;
6795  Register undefined = t1;
6796  Register entry_key = t2;
6797
6798  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
6799
6800  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
6801  __ sra(mask, mask, kSmiTagSize);
6802  __ Subu(mask, mask, Operand(1));
6803
6804  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
6805
6806  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
6807
6808  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
6809    // Compute the masked index: (hash + i + i * i) & mask.
6810    // Capacity is smi 2^n.
6811    if (i > 0) {
6812      // Add the probe offset (i + i * i) left shifted to avoid right shifting
6813      // the hash in a separate instruction. The value hash + i + i * i is right
6814      // shifted in the following and instruction.
6815      ASSERT(StringDictionary::GetProbeOffset(i) <
6816             1 << (32 - String::kHashFieldOffset));
6817      __ Addu(index, hash, Operand(
6818           StringDictionary::GetProbeOffset(i) << String::kHashShift));
6819    } else {
6820      __ mov(index, hash);
6821    }
6822    __ srl(index, index, String::kHashShift);
6823    __ And(index, mask, index);
6824
6825    // Scale the index by multiplying by the entry size.
6826    ASSERT(StringDictionary::kEntrySize == 3);
6827    // index *= 3.
6828    __ mov(at, index);
6829    __ sll(index, index, 1);
6830    __ Addu(index, index, at);
6831
6832
6833    ASSERT_EQ(kSmiTagSize, 1);
6834    __ sll(index, index, 2);
6835    __ Addu(index, index, dictionary);
6836    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
6837
6838    // Having undefined at this place means the name is not contained.
6839    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
6840
6841    // Stop if found the property.
6842    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
6843
6844    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
6845      // Check if the entry name is not a symbol.
6846      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
6847      __ lbu(entry_key,
6848             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
6849      __ And(result, entry_key, Operand(kIsSymbolMask));
6850      __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
6851    }
6852  }
6853
6854  __ bind(&maybe_in_dictionary);
6855  // If we are doing negative lookup then probing failure should be
6856  // treated as a lookup success. For positive lookup probing failure
6857  // should be treated as lookup failure.
6858  if (mode_ == POSITIVE_LOOKUP) {
6859    __ mov(result, zero_reg);
6860    __ Ret();
6861  }
6862
6863  __ bind(&in_dictionary);
6864  __ li(result, 1);
6865  __ Ret();
6866
6867  __ bind(&not_in_dictionary);
6868  __ mov(result, zero_reg);
6869  __ Ret();
6870}
6871
6872
6873#undef __
6874
6875} }  // namespace v8::internal
6876
6877#endif  // V8_TARGET_ARCH_MIPS
6878