// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "runtime.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


void Builtins::Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
                                BuiltinExtraArguments extra_args) {
  // ----------- S t a t e -------------
  //  -- a0                 : number of arguments excluding receiver
  //  -- a1                 : called function (only guaranteed when
  //  --                      extra_args requires it)
  //  -- cp                 : context
  //  -- sp[0]              : last argument
  //  -- ...
  //  -- sp[4 * (argc - 1)] : first argument
  //  -- sp[4 * argc]       : receiver
  // -----------------------------------

  // Insert extra arguments.
  int num_extra_args = 0;
  if (extra_args == NEEDS_CALLED_FUNCTION) {
    num_extra_args = 1;
    __ push(a1);
  } else {
    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
  }

  // JumpToExternalReference expects s0 to contain the number of arguments
  // including the receiver and the extra arguments.
  __ Addu(s0, a0, num_extra_args + 1);
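  // s1 = s0 * kPointerSize - kPointerSize: the byte offset from sp to the
  // first argument, used by the C entry stub to locate argv.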
  __ sll(s1, s0, kPointerSizeLog2);
  __ Subu(s1, s1, kPointerSize);
  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}


// Load the built-in InternalArray function from the current context.
static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
                                              Register result) {
  // Load the global context.
  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(result,
        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
  // Load the InternalArray function from the global context.
  __ lw(result,
        MemOperand(result,
                   Context::SlotOffset(
                       Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
}


// Load the built-in Array function from the current context.
static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
  // Load the global context.
  __ lw(result, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(result,
        FieldMemOperand(result, GlobalObject::kGlobalContextOffset));
  // Load the Array function from the global context.
  __ lw(result,
        MemOperand(result,
                   Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}


// Allocate an empty JSArray. The allocated array is put into the result
// register. An elements backing store is allocated with size initial_capacity
// and filled with the hole values.
static void AllocateEmptyJSArray(MacroAssembler* masm,
                                 Register array_function,
                                 Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Register scratch3,
                                 Label* gc_required) {
  const int initial_capacity = JSArray::kPreallocatedArrayElements;
  STATIC_ASSERT(initial_capacity >= 0);
  __ LoadInitialArrayMap(array_function, scratch2, scratch1);

  // Allocate the JSArray object together with space for a fixed array with the
  // requested elements.
  int size = JSArray::kSize;
  if (initial_capacity > 0) {
    size += FixedArray::SizeFor(initial_capacity);
  }
  __ AllocateInNewSpace(size,
                        result,
                        scratch2,
                        scratch3,
                        gc_required,
                        TAG_OBJECT);
  // Allocated the JSArray. Now initialize the fields except for the elements
  // array.
  // result: JSObject
  // scratch1: initial map
  // scratch2: start of next object
  __ sw(scratch1, FieldMemOperand(result, JSObject::kMapOffset));
  __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
  __ sw(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
  // Field JSArray::kElementsOffset is initialized later.
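  // A length of zero has the same bit pattern as Smi::FromInt(0).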
  __ mov(scratch3, zero_reg);
  __ sw(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));

  if (initial_capacity == 0) {
    __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));
    return;
  }

  // Calculate the location of the elements array and set elements array member
  // of the JSArray.
  // result: JSObject
  // scratch2: start of next object
  __ Addu(scratch1, result, Operand(JSArray::kSize));
  __ sw(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));

  // Clear the heap tag on the elements array.
  __ And(scratch1, scratch1, Operand(~kHeapObjectTagMask));

  // Initialize the FixedArray and fill it with holes. FixedArray length is
  // stored as a smi.
  // result: JSObject
  // scratch1: elements array (untagged)
  // scratch2: start of next object
  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
  STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset);
  __ sw(scratch3, MemOperand(scratch1));
  __ Addu(scratch1, scratch1, kPointerSize);
  __ li(scratch3, Operand(Smi::FromInt(initial_capacity)));
  STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset);
  __ sw(scratch3, MemOperand(scratch1));
  __ Addu(scratch1, scratch1, kPointerSize);

  // Fill the FixedArray with the hole value. Inline the code if short.
  STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize);
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  static const int kLoopUnfoldLimit = 4;
  if (initial_capacity <= kLoopUnfoldLimit) {
    for (int i = 0; i < initial_capacity; i++) {
      __ sw(scratch3, MemOperand(scratch1, i * kPointerSize));
    }
  } else {
    Label loop, entry;
    __ Addu(scratch2, scratch1, Operand(initial_capacity * kPointerSize));
    __ Branch(&entry);
    __ bind(&loop);
    __ sw(scratch3, MemOperand(scratch1));
    __ Addu(scratch1, scratch1, kPointerSize);
    __ bind(&entry);
    __ Branch(&loop, lt, scratch1, Operand(scratch2));
  }
}


// Allocate a JSArray with the number of elements stored in a register. The
// register array_function holds the built-in Array function and the register
// array_size holds the size of the array as a smi. The allocated array is put
// into the result register, and the beginning and end of the FixedArray
// elements storage are put into registers elements_array_storage and
// elements_array_end (see below for when that is not the case). If the
// parameter fill_with_hole is true the allocated elements backing store is
// filled with the hole values, otherwise it is left uninitialized. When the
// backing store is filled the register elements_array_storage is scratched.
static void AllocateJSArray(MacroAssembler* masm,
                            Register array_function,  // Array function.
                            Register array_size,  // As a smi, cannot be 0.
                            Register result,
                            Register elements_array_storage,
                            Register elements_array_end,
                            Register scratch1,
                            Register scratch2,
                            bool fill_with_hole,
                            Label* gc_required) {
  // Load the initial map from the array function.
  __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);

  if (FLAG_debug_code) {  // Assert that array size is not zero.
    __ Assert(
        ne, "array size is unexpectedly 0", array_size, Operand(zero_reg));
  }

  // Allocate the JSArray object together with space for a FixedArray with the
  // requested number of elements.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ li(elements_array_end,
        (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize);
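  // array_size is a smi; untagging it gives the element count, which equals
  // the number of payload words to allocate.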
  __ sra(scratch1, array_size, kSmiTagSize);
  __ Addu(elements_array_end, elements_array_end, scratch1);
  __ AllocateInNewSpace(
      elements_array_end,
      result,
      scratch1,
      scratch2,
      gc_required,
      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

  // Allocated the JSArray. Now initialize the fields except for the elements
  // array.
  // result: JSObject
  // elements_array_storage: initial map
  // array_size: size of array (smi)
  __ sw(elements_array_storage, FieldMemOperand(result, JSObject::kMapOffset));
  __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex);
  __ sw(elements_array_storage,
        FieldMemOperand(result, JSArray::kPropertiesOffset));
  // Field JSArray::kElementsOffset is initialized later.
  __ sw(array_size, FieldMemOperand(result, JSArray::kLengthOffset));

  // Calculate the location of the elements array and set elements array member
  // of the JSArray.
  // result: JSObject
  // array_size: size of array (smi)
  __ Addu(elements_array_storage, result, Operand(JSArray::kSize));
  __ sw(elements_array_storage,
        FieldMemOperand(result, JSArray::kElementsOffset));

  // Clear the heap tag on the elements array.
  __ And(elements_array_storage,
         elements_array_storage,
         Operand(~kHeapObjectTagMask));
  // Initialize the fixed array and fill it with holes. FixedArray length is
  // stored as a smi.
  // result: JSObject
  // elements_array_storage: elements array (untagged)
  // array_size: size of array (smi)
  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
  __ sw(scratch1, MemOperand(elements_array_storage));
  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);

  // Length of the FixedArray is the number of pre-allocated elements if
  // the actual JSArray has length 0 and the size of the JSArray for non-empty
  // JSArrays. The length of a FixedArray is stored as a smi.
  STATIC_ASSERT(kSmiTag == 0);

  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
  __ sw(array_size, MemOperand(elements_array_storage));
  __ Addu(elements_array_storage, elements_array_storage, kPointerSize);

  // Calculate elements array and elements array end.
  // result: JSObject
  // elements_array_storage: elements array element storage
  // array_size: smi-tagged size of elements array
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sll(elements_array_end, array_size, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(elements_array_end, elements_array_storage, elements_array_end);

  // Fill the allocated FixedArray with the hole value if requested.
  // result: JSObject
  // elements_array_storage: elements array element storage
  // elements_array_end: start of next object
  if (fill_with_hole) {
    Label loop, entry;
    __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex);
    __ Branch(&entry);
    __ bind(&loop);
    __ sw(scratch1, MemOperand(elements_array_storage));
    __ Addu(elements_array_storage, elements_array_storage, kPointerSize);

    __ bind(&entry);
    __ Branch(&loop, lt, elements_array_storage, Operand(elements_array_end));
  }
}


// Create a new array for the built-in Array function. This function allocates
// the JSArray object and the FixedArray elements array and initializes these.
// If the Array cannot be constructed in native code the runtime is called. This
// function assumes the following state:
//   a0: argc
//   a1: constructor (built-in Array function)
//   ra: return address
//   sp[0]: last argument
// This function is used for both construct and normal calls of Array. The only
// difference between handling a construct call and a normal call is that for a
// construct call the constructor function in a1 needs to be preserved for
// entering the generic code. In both cases argc in a0 needs to be preserved.
// Both registers are preserved by this code so no need to differentiate between
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
                            Label* call_generic_code) {
  Counters* counters = masm->isolate()->counters();
  Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array,
      has_non_smi_element, finish, cant_transition_map, not_double;

  // Check for array construction with zero arguments or one.
  __ Branch(&argc_one_or_more, ne, a0, Operand(zero_reg));
  // Handle construction of an empty array.
  __ bind(&empty_array);
  AllocateEmptyJSArray(masm,
                       a1,
                       a2,
                       a3,
                       t0,
                       t1,
                       call_generic_code);
  __ IncrementCounter(counters->array_function_native(), 1, a3, t0);
  // Set up return value, remove receiver from stack and return.
  __ mov(v0, a2);
  __ Addu(sp, sp, Operand(kPointerSize));
  __ Ret();

  // Check for one argument. Bail out if the argument is not a smi or if it
  // is negative.
  __ bind(&argc_one_or_more);
  __ Branch(&argc_two_or_more, ne, a0, Operand(1));

  STATIC_ASSERT(kSmiTag == 0);
  __ lw(a2, MemOperand(sp));  // Get the argument from the stack.
  __ Branch(&not_empty_array, ne, a2, Operand(zero_reg));
  __ Drop(1);  // Adjust stack.
  __ mov(a0, zero_reg);  // Treat this as a call with argc of zero.
  __ Branch(&empty_array);

  __ bind(&not_empty_array);
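  // a3 is non-zero if the argument is negative or not a smi.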
  __ And(a3, a2, Operand(kIntptrSignBit | kSmiTagMask));
  __ Branch(call_generic_code, ne, a3, Operand(zero_reg));

  // Handle construction of an empty array of a certain size. Bail out if size
  // is too large to actually allocate an elements array.
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(call_generic_code, Ugreater_equal, a2,
            Operand(JSObject::kInitialMaxFastElementArray << kSmiTagSize));

  // a0: argc
  // a1: constructor
  // a2: array_size (smi)
  // sp[0]: argument
  AllocateJSArray(masm,
                  a1,
                  a2,
                  a3,
                  t0,
                  t1,
                  t2,
                  t3,
                  true,
                  call_generic_code);
  __ IncrementCounter(counters->array_function_native(), 1, a2, t0);

  // Set up return value, remove receiver and argument from stack and return.
  __ mov(v0, a3);
  __ Addu(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Handle construction of an array from a list of arguments.
  __ bind(&argc_two_or_more);
  __ sll(a2, a0, kSmiTagSize);  // Convert argc to a smi.

  // a0: argc
  // a1: constructor
  // a2: array_size (smi)
  // sp[0]: last argument
  AllocateJSArray(masm,
                  a1,
                  a2,
                  a3,
                  t0,
                  t1,
                  t2,
                  t3,
                  false,
                  call_generic_code);
  __ IncrementCounter(counters->array_function_native(), 1, a2, t2);

  // Fill arguments as array elements. Copy from the top of the stack (last
  // element) to the array backing store filling it backwards. Note:
  // elements_array_end points after the backing store.
  // a0: argc
  // a3: JSArray
  // t0: elements_array storage start (untagged)
  // t1: elements_array_end (untagged)
  // sp[0]: last argument

  Label loop, entry;
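  // The mov below executes in the branch delay slot (USE_DELAY_SLOT), so t3
  // is set to sp before the jump to entry is taken.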
  __ Branch(USE_DELAY_SLOT, &entry);
  __ mov(t3, sp);
  __ bind(&loop);
  __ lw(a2, MemOperand(t3));
  if (FLAG_smi_only_arrays) {
    __ JumpIfNotSmi(a2, &has_non_smi_element);
  }
  __ Addu(t3, t3, kPointerSize);
  __ Addu(t1, t1, -kPointerSize);
  __ sw(a2, MemOperand(t1));
  __ bind(&entry);
  __ Branch(&loop, lt, t0, Operand(t1));

  __ bind(&finish);
  __ mov(sp, t3);

  // Remove caller arguments and receiver from the stack, set up the return
  // value, and return.
  // a0: argc
  // a3: JSArray
  // sp[0]: receiver
  __ Addu(sp, sp, Operand(kPointerSize));
  __ mov(v0, a3);
  __ Ret();

  __ bind(&has_non_smi_element);
  // Double values are handled by the runtime.
  __ CheckMap(
      a2, t5, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK);
  __ bind(&cant_transition_map);
  __ UndoAllocationInNewSpace(a3, t0);
  __ Branch(call_generic_code);

  __ bind(&not_double);
  // Transition FAST_SMI_ONLY_ELEMENTS to FAST_ELEMENTS.
  // a3: JSArray
  __ lw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
                                         FAST_ELEMENTS,
                                         a2,
                                         t5,
                                         &cant_transition_map);
  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ RecordWriteField(a3,
                      HeapObject::kMapOffset,
                      a2,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  Label loop2;
  __ bind(&loop2);
  __ lw(a2, MemOperand(t3));
  __ Addu(t3, t3, kPointerSize);
  __ Subu(t1, t1, kPointerSize);
  __ sw(a2, MemOperand(t1));
  __ Branch(&loop2, lt, t0, Operand(t1));
  __ Branch(&finish);
}


void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0     : number of arguments
  //  -- ra     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------
  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;

  // Get the InternalArray function.
  GenerateLoadInternalArrayFunction(masm, a1);

  if (FLAG_debug_code) {
    // Initial map for the builtin InternalArray functions should be maps.
    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    __ And(t0, a2, Operand(kSmiTagMask));
    __ Assert(ne, "Unexpected initial map for InternalArray function",
              t0, Operand(zero_reg));
    __ GetObjectType(a2, a3, t0);
    __ Assert(eq, "Unexpected initial map for InternalArray function",
              t0, Operand(MAP_TYPE));
  }

  // Run the native code for the InternalArray function called as a normal
  // function.
  ArrayNativeCode(masm, &generic_array_code);

  // Jump to the generic array code if the specialized code cannot handle the
  // construction.
  __ bind(&generic_array_code);

  Handle<Code> array_code =
      masm->isolate()->builtins()->InternalArrayCodeGeneric();
  __ Jump(array_code, RelocInfo::CODE_TARGET);
}


void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0     : number of arguments
  //  -- ra     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------
  Label generic_array_code;

  // Get the Array function.
  GenerateLoadArrayFunction(masm, a1);

  if (FLAG_debug_code) {
    // Initial map for the builtin Array functions should be maps.
    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    __ And(t0, a2, Operand(kSmiTagMask));
    __ Assert(ne, "Unexpected initial map for Array function (1)",
              t0, Operand(zero_reg));
    __ GetObjectType(a2, a3, t0);
    __ Assert(eq, "Unexpected initial map for Array function (2)",
              t0, Operand(MAP_TYPE));
  }

  // Run the native code for the Array function called as a normal function.
  ArrayNativeCode(masm, &generic_array_code);

  // Jump to the generic array code if the specialized code cannot handle
  // the construction.
  __ bind(&generic_array_code);

  Handle<Code> array_code =
      masm->isolate()->builtins()->ArrayCodeGeneric();
  __ Jump(array_code, RelocInfo::CODE_TARGET);
}


void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0     : number of arguments
  //  -- a1     : constructor function
  //  -- ra     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------
  Label generic_constructor;

  if (FLAG_debug_code) {
    // The array construct code is only set for the builtin and internal
    // Array functions which always have a map.
    // Initial map for the builtin Array function should be a map.
    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    __ And(t0, a2, Operand(kSmiTagMask));
    __ Assert(ne, "Unexpected initial map for Array function (3)",
              t0, Operand(zero_reg));
    __ GetObjectType(a2, a3, t0);
    __ Assert(eq, "Unexpected initial map for Array function (4)",
              t0, Operand(MAP_TYPE));
  }

  // Run the native code for the Array function called as a constructor.
  ArrayNativeCode(masm, &generic_constructor);

  // Jump to the generic construct code in case the specialized code cannot
  // handle the construction.
  __ bind(&generic_constructor);

  Handle<Code> generic_construct_stub =
      masm->isolate()->builtins()->JSConstructStubGeneric();
  __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}


void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0                     : number of arguments
  //  -- a1                     : constructor function
  //  -- ra                     : return address
  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
  //  -- sp[argc * 4]           : receiver
  // -----------------------------------
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);

  Register function = a1;
  if (FLAG_debug_code) {
    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
    __ Assert(eq, "Unexpected String function", function, Operand(a2));
  }

  // Load the first argument into a0 and get rid of the rest.
  Label no_arguments;
  __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
  // The first argument is at sp[(argc - 1) * 4].
  __ Subu(a0, a0, Operand(1));
  __ sll(a0, a0, kPointerSizeLog2);
  __ Addu(sp, a0, sp);
  __ lw(a0, MemOperand(sp));
  // sp now points to args[0]; drop args[0] and the receiver.
  __ Drop(2);

  Register argument = a2;
  Label not_cached, argument_is_string;
  NumberToStringStub::GenerateLookupNumberStringCache(
      masm,
      a0,        // Input.
      argument,  // Result.
      a3,        // Scratch.
      t0,        // Scratch.
      t1,        // Scratch.
      false,     // Is it a Smi?
      &not_cached);
  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
  __ bind(&argument_is_string);

  // ----------- S t a t e -------------
  //  -- a2     : argument converted to string
  //  -- a1     : constructor function
  //  -- ra     : return address
  // -----------------------------------

  Label gc_required;
  __ AllocateInNewSpace(JSValue::kSize,
                        v0,  // Result.
                        a3,  // Scratch.
                        t0,  // Scratch.
                        &gc_required,
                        TAG_OBJECT);

  // Initialize the String object.
  Register map = a3;
  __ LoadGlobalFunctionInitialMap(function, map, t0);
  if (FLAG_debug_code) {
    __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
    __ Assert(eq, "Unexpected string wrapper instance size",
        t0, Operand(JSValue::kSize >> kPointerSizeLog2));
    __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
    __ Assert(eq, "Unexpected unused properties of string wrapper",
        t0, Operand(zero_reg));
  }
  __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));

  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));

  __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));

  // Ensure the object is fully initialized.
  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
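  // The map, properties, elements, and value fields fill all four words.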

  __ Ret();

  // The argument was not found in the number to string cache. Check
  // if it's a string already before calling the conversion builtin.
  Label convert_argument;
  __ bind(&not_cached);
  __ JumpIfSmi(a0, &convert_argument);

  // Is it a String?
  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ And(t0, a3, Operand(kIsNotStringMask));
  __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
  __ mov(argument, a0);
  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
  __ Branch(&argument_is_string);

  // Invoke the conversion builtin and put the result into a2.
  __ bind(&convert_argument);
  __ push(function);  // Preserve the function.
  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(a0);  // The argument to convert is in a0.
    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
  }
  __ pop(function);
  __ mov(argument, v0);
  __ Branch(&argument_is_string);

  // Load the empty string into a2, remove the receiver from the
  // stack, and jump back to the case where the argument is a string.
  __ bind(&no_arguments);
  __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
  __ Drop(1);
  __ Branch(&argument_is_string);

  // At this point the argument is already a string. Call runtime to
  // create a string wrapper.
  __ bind(&gc_required);
  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(argument);
    __ CallRuntime(Runtime::kNewStringWrapper, 1);
  }
  __ Ret();
}


static void Generate_JSConstructStubHelper(MacroAssembler* masm,
                                           bool is_api_function,
                                           bool count_constructions) {
  // ----------- S t a t e -------------
  //  -- a0     : number of arguments
  //  -- a1     : constructor function
  //  -- ra     : return address
  //  -- sp[...]: constructor arguments
  // -----------------------------------

  // Should never count constructions for api objects.
  ASSERT(!is_api_function || !count_constructions);

  Isolate* isolate = masm->isolate();

  // Enter a construct frame.
  {
    FrameScope scope(masm, StackFrame::CONSTRUCT);

    // Preserve the two incoming parameters on the stack.
    __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
    __ MultiPushReversed(a0.bit() | a1.bit());

    // Use t7 to hold undefined, which is used in several places below.
    __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);

    Label rt_call, allocated;
    // Try to allocate the object without transitioning into C code. If any of
    // the preconditions is not met, the code bails out to the runtime call.
    if (FLAG_inline_new) {
      Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
      ExternalReference debug_step_in_fp =
          ExternalReference::debug_step_in_fp_address(isolate);
      __ li(a2, Operand(debug_step_in_fp));
      __ lw(a2, MemOperand(a2));
      __ Branch(&rt_call, ne, a2, Operand(zero_reg));
#endif

      // Load the initial map and verify that it is in fact a map.
      // a1: constructor function
      __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
      __ JumpIfSmi(a2, &rt_call);
      __ GetObjectType(a2, a3, t4);
      __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));

      // Check that the constructor is not constructing a JSFunction (see
      // comments in Runtime_NewObject in runtime.cc), in which case the
      // initial map's instance type would be JS_FUNCTION_TYPE.
      // a1: constructor function
      // a2: initial map
      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
      __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));

      if (count_constructions) {
        Label allocate;
        // Decrease generous allocation count.
        __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
        MemOperand constructor_count =
            FieldMemOperand(a3, SharedFunctionInfo::kConstructionCountOffset);
        __ lbu(t0, constructor_count);
        __ Subu(t0, t0, Operand(1));
        __ sb(t0, constructor_count);
        __ Branch(&allocate, ne, t0, Operand(zero_reg));

        __ Push(a1, a2);

        __ push(a1);  // Constructor.
        // The call will replace the stub, so the countdown is only done once.
        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);

        __ pop(a2);
        __ pop(a1);

        __ bind(&allocate);
      }

      // Now allocate the JSObject on the heap.
      // a1: constructor function
      // a2: initial map
      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
      __ AllocateInNewSpace(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);

      // Allocated the JSObject, now initialize the fields. Map is set to
      // initial map and properties and elements are set to empty fixed array.
      // a1: constructor function
      // a2: initial map
      // a3: object size
      // t4: JSObject (not tagged)
      __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
      __ mov(t5, t4);
      __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
      __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
      __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
      __ Addu(t5, t5, Operand(3 * kPointerSize));
      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);

      // Fill all the in-object properties with appropriate filler.
      // a1: constructor function
      // a2: initial map
      // a3: object size (in words)
      // t4: JSObject (not tagged)
      // t5: First in-object property of JSObject (not tagged)
      __ sll(t0, a3, kPointerSizeLog2);
      __ addu(t6, t4, t0);   // End of object.
      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
      __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
      if (count_constructions) {
        __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
        __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
               kBitsPerByte);
        __ sll(t0, a0, kPointerSizeLog2);
        __ addu(a0, t5, t0);
        // a0: offset of first field after pre-allocated fields
        if (FLAG_debug_code) {
          __ Assert(le, "Unexpected number of pre-allocated property fields.",
              a0, Operand(t6));
        }
        __ InitializeFieldsWithFiller(t5, a0, t7);
        // To allow for truncation.
        __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
      }
      __ InitializeFieldsWithFiller(t5, t6, t7);

      // Add the object tag to make the JSObject real, so that we can continue
      // and jump into the continuation code at any time from now on. Any
      // failures need to undo the allocation, so that the heap is in a
      // consistent state and verifiable.
      __ Addu(t4, t4, Operand(kHeapObjectTag));

      // Check if a non-empty properties array is needed. Continue with the
      // allocated object if not; fall through to the runtime call if it is.
      // a1: constructor function
      // t4: JSObject
      // t5: start of next object (not tagged)
      __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
      // The instance sizes field contains both the pre-allocated property
      // fields and the in-object properties.
      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
      __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
             kBitsPerByte);
      __ Addu(a3, a3, Operand(t6));
      __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
             kBitsPerByte);
      __ subu(a3, a3, t6);

      // Done if no extra properties are to be allocated.
      __ Branch(&allocated, eq, a3, Operand(zero_reg));
      __ Assert(greater_equal, "Property allocation count failed.",
          a3, Operand(zero_reg));

      // Scale the number of elements by pointer size and add the header for
      // FixedArrays to the start of the next object calculation from above.
      // a1: constructor
      // a3: number of elements in properties array
      // t4: JSObject
      // t5: start of next object
      __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
      __ AllocateInNewSpace(
          a0,
          t5,
          t6,
          a2,
          &undo_allocation,
          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));

      // Initialize the FixedArray.
      // a1: constructor
      // a3: number of elements in properties array (untagged)
      // t4: JSObject
      // t5: start of next object
      __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
      __ mov(a2, t5);
      __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
      __ sll(a0, a3, kSmiTagSize);
      __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
      __ Addu(a2, a2, Operand(2 * kPointerSize));

      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);

      // Initialize the fields to undefined.
      // a1: constructor
      // a2: First element of FixedArray (not tagged)
      // a3: number of elements in properties array
      // t4: JSObject
      // t5: FixedArray (not tagged)
      __ sll(t3, a3, kPointerSizeLog2);
      __ addu(t6, a2, t3);  // End of object.
      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
      { Label loop, entry;
        if (count_constructions) {
          __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
        } else if (FLAG_debug_code) {
          __ LoadRoot(t8, Heap::kUndefinedValueRootIndex);
          __ Assert(eq, "Undefined value not loaded.", t7, Operand(t8));
        }
        __ jmp(&entry);
        __ bind(&loop);
        __ sw(t7, MemOperand(a2));
        __ addiu(a2, a2, kPointerSize);
        __ bind(&entry);
        __ Branch(&loop, less, a2, Operand(t6));
      }

      // Store the initialized FixedArray into the properties field of
      // the JSObject.
      // a1: constructor function
      // t4: JSObject
      // t5: FixedArray (not tagged)
      __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
      __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));

      // Continue with JSObject being successfully allocated.
      // a1: constructor function
      // t4: JSObject
      __ jmp(&allocated);

      // Undo the setting of the new top so that the heap is verifiable. For
      // example, the map's unused properties potentially do not match the
      // allocated object's unused properties.
      // t4: JSObject (previous new top)
      __ bind(&undo_allocation);
      __ UndoAllocationInNewSpace(t4, t5);
    }

    __ bind(&rt_call);
    // Allocate the new receiver object using the runtime call.
    // a1: constructor function
    __ push(a1);  // Argument for Runtime_NewObject.
    __ CallRuntime(Runtime::kNewObject, 1);
    __ mov(t4, v0);

    // Receiver for constructor call allocated.
    // t4: JSObject
    __ bind(&allocated);
    __ push(t4);
    __ push(t4);

    // Reload the constructor and the number of arguments from the stack.
    // sp[0]: receiver
    // sp[1]: receiver
    // sp[2]: constructor function
    // sp[3]: number of arguments (smi-tagged)
    __ lw(a1, MemOperand(sp, 2 * kPointerSize));
    __ lw(a3, MemOperand(sp, 3 * kPointerSize));

    // Set up pointer to last argument.
    __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));

    // Set up number of arguments for function call below.
    __ srl(a0, a3, kSmiTagSize);

    // Copy arguments and receiver to the expression stack.
    // a0: number of arguments
    // a1: constructor function
    // a2: address of last argument (caller sp)
    // a3: number of arguments (smi-tagged)
    // sp[0]: receiver
    // sp[1]: receiver
    // sp[2]: constructor function
    // sp[3]: number of arguments (smi-tagged)
    Label loop, entry;
    __ jmp(&entry);
    __ bind(&loop);
    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(t0, a2, Operand(t0));
    __ lw(t1, MemOperand(t0));
    __ push(t1);
    __ bind(&entry);
    __ Addu(a3, a3, Operand(-2));
    __ Branch(&loop, greater_equal, a3, Operand(zero_reg));

    // Call the function.
    // a0: number of arguments
    // a1: constructor function
    if (is_api_function) {
      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
      Handle<Code> code =
          masm->isolate()->builtins()->HandleApiCallConstruct();
      ParameterCount expected(0);
      __ InvokeCode(code, expected, expected,
                    RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
    } else {
      ParameterCount actual(a0);
      __ InvokeFunction(a1, actual, CALL_FUNCTION,
                        NullCallWrapper(), CALL_AS_METHOD);
    }

    // Store offset of return address for deoptimizer.
    if (!is_api_function && !count_constructions) {
      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
    }

    // Restore context from the frame.
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));

    // If the result is an object (in the ECMA sense), we should get rid
    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
    // on page 74.
    Label use_receiver, exit;

    // If the result is a smi, it is *not* an object in the ECMA sense.
    // v0: result
    // sp[0]: receiver (newly allocated object)
    // sp[1]: constructor function
    // sp[2]: number of arguments (smi-tagged)
    __ JumpIfSmi(v0, &use_receiver);

    // If the type of the result (stored in its map) is less than
    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
    __ GetObjectType(v0, a3, a3);
    __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

    // Throw away the result of the constructor invocation and use the
    // on-stack receiver as the result.
    __ bind(&use_receiver);
    __ lw(v0, MemOperand(sp));

    // Remove receiver from the stack, remove caller arguments, and
    // return.
    __ bind(&exit);
    // v0: result
    // sp[0]: receiver (newly allocated object)
    // sp[1]: constructor function
    // sp[2]: number of arguments (smi-tagged)
    __ lw(a1, MemOperand(sp, 2 * kPointerSize));

    // Leave construct frame.
  }

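  // a1: number of arguments (smi-tagged)
  // Drop the arguments and the receiver from the caller's stack.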
  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(sp, sp, t0);
  __ Addu(sp, sp, kPointerSize);
  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
  __ Ret();
}


void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
  Generate_JSConstructStubHelper(masm, false, true);
}


void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
  Generate_JSConstructStubHelper(masm, false, false);
}


void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
  Generate_JSConstructStubHelper(masm, true, false);
}


static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
                                             bool is_construct) {
  // Called from JSEntryStub::GenerateBody.

  // ----------- S t a t e -------------
  //  -- a0: code entry
  //  -- a1: function
  //  -- a2: receiver_pointer
  //  -- a3: argc
  //  -- s0: argv
  // -----------------------------------

  // Clear the context before we push it when entering the JS frame.
  __ mov(cp, zero_reg);

  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Set up the context from the function argument.
    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

    // Push the function and the receiver onto the stack.
    __ Push(a1, a2);

    // Copy arguments to the stack in a loop.
    // a3: argc
    // s0: argv, i.e. points to first arg
    Label loop, entry;
    __ sll(t0, a3, kPointerSizeLog2);
    __ addu(t2, s0, t0);
    __ b(&entry);
    __ nop();   // Branch delay slot nop.
    // t2 points past last arg.
    __ bind(&loop);
    __ lw(t0, MemOperand(s0));  // Read next parameter.
    __ addiu(s0, s0, kPointerSize);
    __ lw(t0, MemOperand(t0));  // Dereference handle.
    __ push(t0);  // Push parameter.
    __ bind(&entry);
    __ Branch(&loop, ne, s0, Operand(t2));

    // Initialize all JavaScript callee-saved registers, since they will be
    // seen by the garbage collector as part of handlers.
    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
    __ mov(s1, t0);
    __ mov(s2, t0);
    __ mov(s3, t0);
    __ mov(s4, t0);
    __ mov(s5, t0);
    // s6 holds the root address. Do not clobber.
    // s7 is cp. Do not init.

    // Invoke the code and pass argc as a0.
    __ mov(a0, a3);
    if (is_construct) {
      CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
      __ CallStub(&stub);
    } else {
      ParameterCount actual(a0);
      __ InvokeFunction(a1, actual, CALL_FUNCTION,
                        NullCallWrapper(), CALL_AS_METHOD);
    }

    // Leave internal frame.
  }

  __ Jump(ra);
}


void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, false);
}


void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
  Generate_JSEntryTrampolineHelper(masm, true);
}


void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Preserve the function.
    __ push(a1);
    // Push call kind information.
    __ push(t1);

    // Push the function on the stack as the argument to the runtime function.
    __ push(a1);
    // Call the runtime function.
    __ CallRuntime(Runtime::kLazyCompile, 1);
    // Calculate the entry point.
    __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);

    // Restore call kind information.
    __ pop(t1);
    // Restore saved function.
    __ pop(a1);

    // Tear down temporary frame.
  }

  // Do a tail-call of the compiled function.
  __ Jump(t9);
}


void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
  // Enter an internal frame.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Preserve the function.
    __ push(a1);
    // Push call kind information.
    __ push(t1);

    // Push the function on the stack as the argument to the runtime function.
    __ push(a1);
    __ CallRuntime(Runtime::kLazyRecompile, 1);
    // Calculate the entry point.
    __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag));

    // Restore call kind information.
    __ pop(t1);
    // Restore saved function.
    __ pop(a1);

    // Tear down temporary frame.
  }

  // Do a tail-call of the compiled function.
  __ Jump(t9);
}


static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                             Deoptimizer::BailoutType type) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Pass the deoptimization type to the runtime system.
    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
    __ push(a0);
    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
  }

  // Get the full codegen state from the stack and untag it -> t2.
  __ lw(t2, MemOperand(sp, 0 * kPointerSize));
  __ SmiUntag(t2);
  // Switch on the state.
  Label with_tos_register, unknown_state;
  __ Branch(&with_tos_register,
            ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
  __ Ret();

  __ bind(&with_tos_register);
  __ lw(v0, MemOperand(sp, 1 * kPointerSize));
  __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));

  __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
  __ Ret();

  __ bind(&unknown_state);
  __ stop("no cases left");
}


void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
}


void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
}


void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
  // For now, we are relying on the fact that Runtime::NotifyOSR
  // doesn't do any garbage collection which allows us to save/restore
  // the registers without worrying about which of them contain
  // pointers. This seems a bit fragile.
  RegList saved_regs =
      (kJSCallerSaved | kCalleeSaved | ra.bit() | fp.bit()) & ~sp.bit();
  __ MultiPush(saved_regs);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kNotifyOSR, 0);
  }
  __ MultiPop(saved_regs);
  __ Ret();
}

void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
  CpuFeatures::TryForceFeatureScope scope(FPU);
  if (!CpuFeatures::IsSupported(FPU)) {
    __ Abort("Unreachable code: Cannot optimize without FPU support.");
    return;
  }

  // Lookup the function in the JavaScript frame and push it as an
  // argument to the on-stack replacement function.
  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(a0);
    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
  }

  // If the result was -1 it means that we couldn't optimize the
  // function. Just return and continue in the unoptimized version.
  __ Ret(eq, v0, Operand(Smi::FromInt(-1)));

  // Untag the AST id and push it on the stack.
  __ SmiUntag(v0);
  __ push(v0);

  // Generate the code for doing the frame-to-frame translation using
  // the deoptimizer infrastructure.
  Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR);
  generator.Generate();
}


void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // 1. Make sure we have at least one argument.
  // a0: actual number of arguments
  { Label done;
    __ Branch(&done, ne, a0, Operand(zero_reg));
    __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
    __ push(t2);
    __ Addu(a0, a0, Operand(1));
    __ bind(&done);
  }

  // 2. Get the function to call (passed as receiver) from the stack, check
  //    if it is a function.
  // a0: actual number of arguments
  Label slow, non_function;
  __ sll(at, a0, kPointerSizeLog2);
  __ addu(at, sp, at);
  __ lw(a1, MemOperand(at));
  __ JumpIfSmi(a1, &non_function);
  __ GetObjectType(a1, a2, a2);
  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));

  // 3a. Patch the first argument if necessary when calling a function.
  // a0: actual number of arguments
  // a1: function
  Label shift_arguments;
  __ li(t0, Operand(0, RelocInfo::NONE));  // Indicate regular JS_FUNCTION.
  { Label convert_to_object, use_global_receiver, patch_receiver;
    // Change context eagerly in case we need the global receiver.
    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

    // Do not transform the receiver for strict mode functions.
    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
    __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
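    // The compiler hints are stored as a smi, hence the extra kSmiTagSize in
    // the bit positions tested below.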
    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                                 kSmiTagSize)));
    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));

    // Do not transform the receiver for natives (compiler hints are in a3).
1347    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
1348    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
1349
1350    // Compute the receiver in non-strict mode.
1351    // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
1352    __ sll(at, a0, kPointerSizeLog2);
1353    __ addu(a2, sp, at);
1354    __ lw(a2, MemOperand(a2, -kPointerSize));
1355    // a0: actual number of arguments
1356    // a1: function
1357    // a2: first argument
1358    __ JumpIfSmi(a2, &convert_to_object, t2);
1359
1360    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
1361    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
1362    __ LoadRoot(a3, Heap::kNullValueRootIndex);
1363    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
1364
1365    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
1366    __ GetObjectType(a2, a3, a3);
1367    __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1368
1369    __ bind(&convert_to_object);
1370    // Enter an internal frame in order to preserve argument count.
1371    {
1372      FrameScope scope(masm, StackFrame::INTERNAL);
1373      __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
1374      __ push(a0);
1375
1376      __ push(a2);
1377      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
1378      __ mov(a2, v0);
1379
1380      __ pop(a0);
1381      __ sra(a0, a0, kSmiTagSize);  // Un-tag.
1382      // Leave internal frame.
1383    }
1384    // Restore the function to a1, and the flag to t0.
1385    __ sll(at, a0, kPointerSizeLog2);
1386    __ addu(at, sp, at);
1387    __ lw(a1, MemOperand(at));
1388    __ li(t0, Operand(0, RelocInfo::NONE));
1389    __ Branch(&patch_receiver);
1390
1391    // Use the global receiver object from the called function as the
1392    // receiver.
1393    __ bind(&use_global_receiver);
1394    const int kGlobalIndex =
1395        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
1396    __ lw(a2, FieldMemOperand(cp, kGlobalIndex));
1397    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
1398    __ lw(a2, FieldMemOperand(a2, kGlobalIndex));
1399    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
1400
1401    __ bind(&patch_receiver);
1402    __ sll(at, a0, kPointerSizeLog2);
1403    __ addu(a3, sp, at);
1404    __ sw(a2, MemOperand(a3, -kPointerSize));
1405
1406    __ Branch(&shift_arguments);
1407  }
1408
1409  // 3b. Check for function proxy.
1410  __ bind(&slow);
1411  __ li(t0, Operand(1, RelocInfo::NONE));  // Indicate function proxy.
1412  __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
1413
1414  __ bind(&non_function);
1415  __ li(t0, Operand(2, RelocInfo::NONE));  // Indicate non-function.
1416
1417  // 3c. Patch the first argument when calling a non-function.  The
1418  //     CALL_NON_FUNCTION builtin expects the non-function callee as
1419  //     receiver, so overwrite the first argument which will ultimately
1420  //     become the receiver.
1421  // a0: actual number of arguments
1422  // a1: function
1423  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
1424  __ sll(at, a0, kPointerSizeLog2);
1425  __ addu(a2, sp, at);
1426  __ sw(a1, MemOperand(a2, -kPointerSize));
1427
1428  // 4. Shift arguments and return address one slot down on the stack
1429  //    (overwriting the original receiver).  Adjust argument count to make
1430  //    the original first argument the new receiver.
1431  // a0: actual number of arguments
1432  // a1: function
1433  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
  __ bind(&shift_arguments);
  { Label loop;
    // Calculate the copy start address (destination). Copy end address is sp.
    __ sll(at, a0, kPointerSizeLog2);
    __ addu(a2, sp, at);

    __ bind(&loop);
    __ lw(at, MemOperand(a2, -kPointerSize));
    __ sw(at, MemOperand(a2));
    __ Subu(a2, a2, Operand(kPointerSize));
    __ Branch(&loop, ne, a2, Operand(sp));
    // Adjust the actual number of arguments and remove the top element
    // (which is a copy of the last argument).
    __ Subu(a0, a0, Operand(1));
    __ Pop();
  }

  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
  //     or a function proxy via CALL_FUNCTION_PROXY.
  // a0: actual number of arguments
  // a1: function
  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
  { Label function, non_proxy;
    __ Branch(&function, eq, t0, Operand(zero_reg));
    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
    __ mov(a2, zero_reg);
    __ SetCallKind(t1, CALL_AS_METHOD);
    __ Branch(&non_proxy, ne, t0, Operand(1));

    __ push(a1);  // Re-add proxy object as additional argument.
    __ Addu(a0, a0, Operand(1));
    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);

    __ bind(&non_proxy);
    __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
    __ bind(&function);
  }

  // 5b. Get the code to call from the function and check that the number of
  //     expected arguments matches what we're providing.  If so, jump
  //     (tail-call) to the code in register a3 without checking arguments.
  // a0: actual number of arguments
  // a1: function
  __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ lw(a2,
         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
  __ sra(a2, a2, kSmiTagSize);
  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  __ SetCallKind(t1, CALL_AS_METHOD);
  // Check formal and actual parameter counts.
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET, ne, a2, Operand(a0));

  ParameterCount expected(0);
  __ InvokeCode(a3, expected, expected, JUMP_FUNCTION,
                NullCallWrapper(), CALL_AS_METHOD);
}


void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
  const int kIndexOffset    = -5 * kPointerSize;
  const int kLimitOffset    = -4 * kPointerSize;
  const int kArgsOffset     =  2 * kPointerSize;
  const int kRecvOffset     =  3 * kPointerSize;
  const int kFunctionOffset =  4 * kPointerSize;
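
  // Illustrative layout (an editorial addition, not original code): relative
  // to the internal frame built below, these offsets address roughly
  //   fp[ 4 * kPointerSize] : function         (kFunctionOffset)
  //   fp[ 3 * kPointerSize] : receiver         (kRecvOffset)
  //   fp[ 2 * kPointerSize] : arguments array  (kArgsOffset)
  //   fp[-4 * kPointerSize] : limit as a smi   (kLimitOffset)
  //   fp[-5 * kPointerSize] : index as a smi   (kIndexOffset)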

  {
    FrameScope frame_scope(masm, StackFrame::INTERNAL);
    __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
    __ push(a0);
    __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
    __ push(a0);
    // Returns (in v0) the number of arguments to copy to the stack, as a smi.
    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);

    // Check the stack for overflow. We are not trying to catch
    // interruptions (e.g. debug break and preemption) here, so the "real stack
    // limit" is checked.
    Label okay;
    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
    // Make a2 the space we have left. The stack might already have overflowed
    // here, which will cause a2 to become negative.
    __ subu(a2, sp, a2);
    // Check if the arguments will overflow the stack.
    __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
    __ Branch(&okay, gt, a2, Operand(t3));  // Signed comparison.
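    // Note (an editorial addition, not original code): a2 = sp - real_limit
    // is the remaining headroom and t3 = n_args * kPointerSize is the space
    // required, so execution falls through to the overflow path unless
    // a2 > t3.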

    // Out of stack space.
    __ lw(a1, MemOperand(fp, kFunctionOffset));
    __ push(a1);
    __ push(v0);
    __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
    // End of stack check.

    // Push current limit and index.
    __ bind(&okay);
    __ push(v0);  // Limit.
    __ mov(a1, zero_reg);  // Initial index.
    __ push(a1);

    // Get the receiver.
    __ lw(a0, MemOperand(fp, kRecvOffset));

    // Check that the function is a JS function (otherwise it must be a proxy).
    Label push_receiver;
    __ lw(a1, MemOperand(fp, kFunctionOffset));
    __ GetObjectType(a1, a2, a2);
    __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));

    // Change context eagerly to get the right global object if necessary.
    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
    // Load the shared function info while the function is still in a1.
    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));

    // Compute the receiver.
    // Do not transform the receiver for strict mode functions.
    Label call_to_object, use_global_receiver;
    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                                 kSmiTagSize)));
    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));

    // Do not transform the receiver for natives (the compiler hints are
    // already in a2).
    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));

    // Compute the receiver in non-strict mode.
    __ JumpIfSmi(a0, &call_to_object);
    __ LoadRoot(a1, Heap::kNullValueRootIndex);
    __ Branch(&use_global_receiver, eq, a0, Operand(a1));
    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
    __ Branch(&use_global_receiver, eq, a0, Operand(a2));

    // Check if the receiver is already a JavaScript object.
    // a0: receiver
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ GetObjectType(a0, a1, a1);
    __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));

    // Convert the receiver to a regular object.
    // a0: receiver
    __ bind(&call_to_object);
    __ push(a0);
    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
    __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
    __ Branch(&push_receiver);

    // Use the current global receiver object as the receiver.
    __ bind(&use_global_receiver);
    const int kGlobalOffset =
        Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
    __ lw(a0, FieldMemOperand(cp, kGlobalOffset));
    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
    __ lw(a0, FieldMemOperand(a0, kGlobalOffset));
    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));

    // Push the receiver.
    // a0: receiver
    __ bind(&push_receiver);
    __ push(a0);

    // Copy all arguments from the array to the stack.
    Label entry, loop;
    __ lw(a0, MemOperand(fp, kIndexOffset));
    __ Branch(&entry);

    // Load the current argument from the arguments array and push it to the
    // stack.
    // a0: current argument index
    __ bind(&loop);
    __ lw(a1, MemOperand(fp, kArgsOffset));
    __ push(a1);
    __ push(a0);

    // Call the runtime to access the property in the arguments array.
    __ CallRuntime(Runtime::kGetProperty, 2);
    __ push(v0);

    // Advance the index (kept as a smi) for the next iteration.
    __ lw(a0, MemOperand(fp, kIndexOffset));
    __ Addu(a0, a0, Operand(1 << kSmiTagSize));
    __ sw(a0, MemOperand(fp, kIndexOffset));

    // Test if the copy loop has finished copying all the elements from the
    // arguments object.
    __ bind(&entry);
    __ lw(a1, MemOperand(fp, kLimitOffset));
    __ Branch(&loop, ne, a0, Operand(a1));
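    // Illustrative summary (an editorial addition, not original code): the
    // loop above is roughly
    //   for (index = 0; index != limit; index++)
    //     push(%GetProperty(args, index));
    // with index and limit kept as smis in their frame slots.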

    // Invoke the function.
    Label call_proxy;
    ParameterCount actual(a0);
    __ sra(a0, a0, kSmiTagSize);
    __ lw(a1, MemOperand(fp, kFunctionOffset));
    __ GetObjectType(a1, a2, a2);
    __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));

    __ InvokeFunction(a1, actual, CALL_FUNCTION,
                      NullCallWrapper(), CALL_AS_METHOD);

    frame_scope.GenerateLeaveFrame();
    __ Ret(USE_DELAY_SLOT);
    __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.

    // Invoke the function proxy.
    __ bind(&call_proxy);
    __ push(a1);  // Add function proxy as last argument.
    __ Addu(a0, a0, Operand(1));
    __ li(a2, Operand(0, RelocInfo::NONE));
    __ SetCallKind(t1, CALL_AS_METHOD);
    __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
    // Tear down the internal frame and remove function, receiver and args.
  }

  __ Ret(USE_DELAY_SLOT);
  __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
}


static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
  __ sll(a0, a0, kSmiTagSize);
  __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
  __ Addu(fp, sp, Operand(3 * kPointerSize));
}
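
// Illustrative sketch (an editorial addition, not original code): after
// EnterArgumentsAdaptorFrame the adaptor frame looks roughly like
//   fp[ 1 * kPointerSize] : ra
//   fp[ 0 * kPointerSize] : caller's fp
//   fp[-1 * kPointerSize] : ARGUMENTS_ADAPTOR frame marker (a smi)
//   fp[-2 * kPointerSize] : function (from a1)
//   fp[-3 * kPointerSize] : actual argument count (smi-tagged a0)
// which is why LeaveArgumentsAdaptorFrame reloads the count from
// fp[-3 * kPointerSize].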


static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- v0 : result being passed through
  // -----------------------------------
  // Get the number of arguments passed (as a smi), tear down the frame and
  // then drop the parameters and the receiver from the caller's stack.
  __ lw(a1, MemOperand(fp, -3 * kPointerSize));
  __ mov(sp, fp);
  __ MultiPop(fp.bit() | ra.bit());
  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(sp, sp, t0);
  // Adjust for the receiver.
  __ Addu(sp, sp, Operand(kPointerSize));
}


void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  // State setup as expected by MacroAssembler::InvokePrologue.
  // ----------- S t a t e -------------
  //  -- a0: actual arguments count
  //  -- a1: function (passed through to callee)
  //  -- a2: expected arguments count
  //  -- a3: callee code entry
  //  -- t1: call kind information
  // -----------------------------------

  Label invoke, dont_adapt_arguments;

  Label enough, too_few;
  __ Branch(&dont_adapt_arguments, eq,
      a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
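  // Note (an editorial addition, not original code): the sentinel marks
  // functions that handle any actual argument count themselves, so no
  // adaptor frame is needed for them.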
  // We use Uless as the number of arguments should always be greater than 0.
  __ Branch(&too_few, Uless, a0, Operand(a2));

  {  // Enough parameters: actual >= expected.
    // a0: actual number of arguments as a smi
    // a1: function
    // a2: expected number of arguments
    // a3: code entry to call
    __ bind(&enough);
    EnterArgumentsAdaptorFrame(masm);

    // Calculate copy start address into a0 and copy end address into a2.
    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(a0, fp, a0);
    // Adjust for return address and receiver.
    __ Addu(a0, a0, Operand(2 * kPointerSize));
    // Compute copy end address.
    __ sll(a2, a2, kPointerSizeLog2);
    __ subu(a2, a0, a2);
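
    // Note (an editorial addition, not original code): a0 now points at the
    // receiver slot (the highest word copied) and a2 at the last expected
    // argument, so the loop below pushes expected + 1 words in total.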

    // Copy the arguments (including the receiver) to the new stack frame.
    // a0: copy start address
    // a1: function
    // a2: copy end address
    // a3: code entry to call

    Label copy;
    __ bind(&copy);
    __ lw(t0, MemOperand(a0));
    __ push(t0);
    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
    __ addiu(a0, a0, -kPointerSize);  // In delay slot.

    __ jmp(&invoke);
  }

  {  // Too few parameters: actual < expected.
    __ bind(&too_few);
    EnterArgumentsAdaptorFrame(masm);

    // Calculate the copy start address into a0; the copy end address is
    // computed into t3 below.
    // a0: actual number of arguments as a smi
    // a1: function
    // a2: expected number of arguments
    // a3: code entry to call
    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
    __ Addu(a0, fp, a0);
    // Adjust for return address and receiver.
    __ Addu(a0, a0, Operand(2 * kPointerSize));
    // Compute copy end address. Also adjust for return address.
    __ Addu(t3, fp, kPointerSize);
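
    // Note (an editorial addition, not original code): the loop below copies
    // words from a0 (the receiver slot) down to t3 + kPointerSize, i.e. the
    // receiver plus all actual arguments, actual + 1 words in total.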

    // Copy the arguments (including the receiver) to the new stack frame.
    // a0: copy start address
    // a1: function
    // a2: expected number of arguments
    // a3: code entry to call
    // t3: copy end address
    Label copy;
    __ bind(&copy);
    __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
    __ Subu(sp, sp, kPointerSize);
    __ Subu(a0, a0, kPointerSize);
    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
    __ sw(t0, MemOperand(sp));  // In the delay slot.

    // Fill the remaining expected arguments with undefined.
    // a1: function
    // a2: expected number of arguments
    // a3: code entry to call
    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
    __ sll(t2, a2, kPointerSizeLog2);
    __ Subu(a2, fp, Operand(t2));
    __ Addu(a2, a2, Operand(-4 * kPointerSize));  // Adjust for frame.
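
    // Note (an editorial addition, not original code): a2 now holds the
    // final stack pointer, fp - (expected + 4) * kPointerSize, so the fill
    // loop below pushes exactly expected - actual undefined values.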

    Label fill;
    __ bind(&fill);
    __ Subu(sp, sp, kPointerSize);
    __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
    __ sw(t0, MemOperand(sp));  // In the delay slot.
  }

  // Call the entry point.
  __ bind(&invoke);

  __ Call(a3);

  // Store offset of return address for deoptimizer.
  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());

  // Exit frame and return.
  LeaveArgumentsAdaptorFrame(masm);
  __ Ret();


  // -------------------------------------------
  // Don't adapt arguments.
  // -------------------------------------------
  __ bind(&dont_adapt_arguments);
  __ Jump(a3);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS
