// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in rax.
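  // Fast path sketch: smis are already numbers, so if the smi-tag test below
  // indicates a smi, the argument is returned unchanged; only heap objects
  // need the map check (assuming the standard x64 smi encoding, where the
  // tag bits are clear for smis).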
  Label check_heap_number, call_builtin;
  __ SmiTest(rax);
  __ j(not_zero, &check_heap_number, Label::kNear);
  __ Ret();

  __ bind(&check_heap_number);
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &call_builtin, Label::kNear);
  __ Ret();

  __ bind(&call_builtin);
  __ pop(rcx);  // Pop return address.
  __ push(rax);
  __ push(rcx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in rsi.
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ movq(rdx, Operand(rsp, 1 * kPointerSize));

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
  __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
  __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(rcx);  // Temporarily remove return address.
  __ pop(rdx);
  __ push(rsi);
  __ push(rdx);
  __ PushRoot(Heap::kFalseValueRootIndex);
  __ push(rcx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

  // Set up the fixed slots.
  __ Set(rbx, 0);  // Set to NULL.
  __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
  __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);

  // Copy the global object from the previous context.
  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
  }

  // Return and remove the on-stack parameter.
  __ movq(rsi, rax);
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [rsp + (1 * kPointerSize)]: function
  // [rsp + (2 * kPointerSize)]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));

  // Get the serialized scope info from the stack.
  __ movq(rbx, Operand(rsp, 2 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure, which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ cmpq(rcx, Immediate(0));
    __ Assert(equal, message);
  }
  __ movq(rcx, GlobalObjectOperand());
  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);

  // Copy the global object from the previous context.
  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
  }

  // Return and remove the on-stack parameter.
  __ movq(rsi, rax);
  __ ret(2 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  //
  // rcx: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;
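  // For example, with length == 4 and tagged (fast) elements, size is
  // JSArray::kSize + FixedArray::SizeFor(4): the array header plus a fixed
  // array header plus four pointer-sized slots, all carved out of one chunk.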
240
241  // Allocate both the JS array and the elements array in one big
242  // allocation. This avoids multiple limit checks.
243  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
244
245  // Copy the JS array part.
246  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
247    if ((i != JSArray::kElementsOffset) || (length == 0)) {
248      __ movq(rbx, FieldOperand(rcx, i));
249      __ movq(FieldOperand(rax, i), rbx);
250    }
251  }
252
253  if (length > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ lea(rdx, Operand(rax, JSArray::kSize));
    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);

    // Copy the elements array.
    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
      for (int i = 0; i < elements_size; i += kPointerSize) {
        __ movq(rbx, FieldOperand(rcx, i));
        __ movq(FieldOperand(rdx, i), rbx);
      }
    } else {
      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
      int i;
      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
        __ movq(rbx, FieldOperand(rcx, i));
        __ movq(FieldOperand(rdx, i), rbx);
      }
      while (i < elements_size) {
        __ movsd(xmm0, FieldOperand(rcx, i));
        __ movsd(FieldOperand(rdx, i), xmm0);
        i += kDoubleSize;
      }
      ASSERT(i == elements_size);
    }
  }
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [rsp + kPointerSize]: constant elements.
  // [rsp + (2 * kPointerSize)]: literal index.
  // [rsp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into rcx and check if we need to create a
  // boilerplate.
  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
  __ movq(rax, Operand(rsp, 2 * kPointerSize));
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
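  // SmiToIndex sketch (assuming the standard x64 smi encoding): the literal
  // index keeps its 32-bit integer in the upper half of rax, so the macro
  // shifts it down and rescales it by kPointerSizeLog2, producing the scaled
  // index used in the FieldOperand below.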
  __ movq(rcx,
          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
  Label slow_case;
  __ j(equal, &slow_case);

  FastCloneShallowArrayStub::Mode mode = mode_;
  // rcx is boilerplate object.
  Factory* factory = masm->isolate()->factory();
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
           factory->fixed_cow_array_map());
    __ j(not_equal, &check_fast_elements);
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);

    __ bind(&check_fast_elements);
    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
           factory->fixed_array_map());
    __ j(not_equal, &double_elements);
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(rcx);
    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                   expected_map_index);
    __ Assert(equal, message);
    __ pop(rcx);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [rsp + kPointerSize]: object literal flags.
  // [rsp + (2 * kPointerSize)]: constant properties.
  // [rsp + (3 * kPointerSize)]: literal index.
  // [rsp + (4 * kPointerSize)]: literals array.

  // Load boilerplate object into rcx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
  __ movq(rax, Operand(rsp, 3 * kPointerSize));
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
  __ movq(rcx,
          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
  __ j(equal, &slow_case);

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
  __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
  __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
  __ j(not_equal, &slow_case);

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ movq(rbx, FieldOperand(rcx, i));
    __ movq(FieldOperand(rax, i), rbx);
  }

  // Return and remove the on-stack parameters.
  __ ret(4 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}


// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
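// In JS terms the stub computes, roughly (a sketch, not the exact algorithm):
//   ToBoolean(x) == !(x === undefined || x === null || x === false ||
//                     x === 0 || x === "" || x !== x /* NaN */)
// restricted to the types recorded in types_; any other input patches the
// stub via GenerateTypeTransition.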
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  Label patch;
  const Register argument = rax;
  const Register map = rdx;

  if (!types_.IsEmpty()) {
    __ movq(argument, Operand(rsp, 1 * kPointerSize));
  }

  // undefined -> false
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);

  // Boolean -> its value
  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);

  // 'null' -> false.
  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);

  if (types_.Contains(SMI)) {
    // Smis: 0 -> false, all other -> true
    Label not_smi;
    __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
    // argument contains the correct return value already
    if (!tos_.is(argument)) {
      __ movq(tos_, argument);
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_smi);
  } else if (types_.NeedsMap()) {
    // If we need a map later and have a Smi -> patch.
    __ JumpIfSmi(argument, &patch, Label::kNear);
  }

  if (types_.NeedsMap()) {
    __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));

    if (types_.CanBeUndetectable()) {
      __ testb(FieldOperand(map, Map::kBitFieldOffset),
               Immediate(1 << Map::kIsUndetectable));
      // Undetectable -> false.
      Label not_undetectable;
      __ j(zero, &not_undetectable, Label::kNear);
      __ Set(tos_, 0);
      __ ret(1 * kPointerSize);
      __ bind(&not_undetectable);
    }
  }

  if (types_.Contains(SPEC_OBJECT)) {
    // spec object -> true.
    Label not_js_object;
    __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
    __ j(below, &not_js_object, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, 1);
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_js_object);
  }

  if (types_.Contains(STRING)) {
    // String value -> false iff empty.
    Label not_string;
    __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
    __ j(above_equal, &not_string, Label::kNear);
    __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
    __ ret(1 * kPointerSize);  // the string length is OK as the return value
    __ bind(&not_string);
  }

  if (types_.Contains(HEAP_NUMBER)) {
    // heap number -> false iff +0, -0, or NaN.
    Label not_heap_number, false_result;
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &not_heap_number, Label::kNear);
    __ xorps(xmm0, xmm0);
    __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
    __ j(zero, &false_result, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, 1);
    }
    __ ret(1 * kPointerSize);
    __ bind(&false_result);
    __ Set(tos_, 0);
    __ ret(1 * kPointerSize);
    __ bind(&not_heap_number);
  }

  __ bind(&patch);
  GenerateTypeTransition(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  __ PushCallerSaved(save_doubles_);
  const int argument_count = 1;
  __ PrepareCallCFunction(argument_count);
#ifdef _WIN64
  __ LoadAddress(rcx, ExternalReference::isolate_address());
#else
  __ LoadAddress(rdi, ExternalReference::isolate_address());
#endif

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  __ PopCallerSaved(save_doubles_);
  __ ret(0);
}


void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                 Type type,
                                 Heap::RootListIndex value,
                                 bool result) {
  const Register argument = rax;
  if (types_.Contains(type)) {
    // If we see an expected oddball, return its ToBoolean value in tos_.
    Label different_value;
    __ CompareRoot(argument, value);
    __ j(not_equal, &different_value, Label::kNear);
    if (!result) {
      // If we have to return zero, there is no way around clearing tos_.
      __ Set(tos_, 0);
    } else if (!tos_.is(argument)) {
      // If we have to return non-zero, we can re-use the argument if it is the
      // same register as the result, because we never see Smi-zero here.
      __ Set(tos_, 1);
    }
    __ ret(1 * kPointerSize);
    __ bind(&different_value);
  }
}


void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Get return address, operand is now on top of stack.
  __ Push(Smi::FromInt(tos_.code()));
  __ Push(Smi::FromInt(types_.ToByte()));
  __ push(rcx);  // Push return address.
  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
      3,
      1);
}


class FloatingPointHelper : public AllStatic {
 public:
  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
  // If the operands are not both numbers, jump to not_numbers.
  // Leaves rdx and rax unchanged.  SmiOperands assumes both are smis.
  // NumberOperands assumes both are smis or heap numbers.
  static void LoadSSE2SmiOperands(MacroAssembler* masm);
  static void LoadSSE2NumberOperands(MacroAssembler* masm);
  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                      Label* not_numbers);

  // Takes the operands in rdx and rax and loads them as integers in rax
  // and rcx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             Label* operand_conversion_failure,
                             Register heap_number_map);
  // As above, but we know the operands to be numbers. In that case,
  // conversion can't fail.
  static void LoadNumbersAsIntegers(MacroAssembler* masm);

  // Tries to convert two values to smis losslessly.
  // This fails if either argument is neither a Smi nor a HeapNumber,
  // or if it's a HeapNumber with a value that can't be converted
  // losslessly to a Smi. In that case, control transitions to the
  // on_not_smis label.
  // On success, either control goes to the on_success label (if one is
  // provided), or it falls through at the end of the code (if on_success
  // is NULL).
  // On success, both first and second hold Smi tagged values.
  // One of first or second must be non-Smi when entering.
  static void NumbersToSmis(MacroAssembler* masm,
                            Register first,
                            Register second,
                            Register scratch1,
                            Register scratch2,
                            Register scratch3,
                            Label* on_success,
                            Label* on_not_smis);
};


// Get the integer part of a heap number.
// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
void IntegerConvert(MacroAssembler* masm,
                    Register result,
                    Register source) {
  // Result may be rcx. If result and source are the same register, source will
  // be overwritten.
  ASSERT(!result.is(rdi) && !result.is(rbx));
  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
  // cvttsd2si (32-bit version) directly.
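  // IEEE-754 double layout recap, for the bit fiddling below (standard
  // format): bit 63 is the sign, bits 62..52 the biased exponent
  // (HeapNumber::kExponentBits == 11, HeapNumber::kExponentBias == 1023),
  // bits 51..0 the mantissa. For example, 2^70 has unbiased exponent 70 and
  // takes the 63..83 path further down.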
  Register double_exponent = rbx;
  Register double_value = rdi;
  Label done, exponent_63_plus;
  // Get double and extract exponent.
  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
  // Clear result preemptively, in case we need to return zero.
  __ xorl(result, result);
  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
  // Double the value to remove the sign bit, shift the exponent down to the
  // least significant bits, and subtract the bias to get the unshifted,
  // unbiased exponent.
  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
  // Check whether the exponent is too big for a 63 bit unsigned integer.
  __ cmpl(double_exponent, Immediate(63));
  __ j(above_equal, &exponent_63_plus, Label::kNear);
  // Handle exponent range 0..62.
  __ cvttsd2siq(result, xmm0);
  __ jmp(&done, Label::kNear);

  __ bind(&exponent_63_plus);
  // Exponent negative or 63+.
  __ cmpl(double_exponent, Immediate(83));
  // If the exponent is negative or above 83, the number contains no
  // significant bits in the range 0..2^31, so the result is zero, and the
  // result register already holds zero.
  __ j(above, &done, Label::kNear);

  // Exponent in range 63..83.
  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
  // the least significant exponent-52 bits.

  // Negate low bits of mantissa if value is negative.
  __ addq(double_value, double_value);  // Move sign bit to carry.
  __ sbbl(result, result);  // And convert carry to -1 in result register.
  // If double_value is negative, compute (double_value - 1) ^ -1; otherwise
  // compute (double_value - 0) ^ 0.
  __ addl(double_value, result);
  // Do xor in opposite directions depending on where we want the result
  // (depending on whether result is rcx or not).

  if (result.is(rcx)) {
    __ xorl(double_value, result);
    // Left shift mantissa by (exponent - mantissabits - 1) to save the
    // bits that have positional values below 2^32 (the extra -1 comes from the
    // doubling done above to move the sign bit into the carry flag).
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(double_value);
    __ movl(result, double_value);
  } else {
    // As the then-branch, but move double_value to result before shifting.
    __ xorl(result, double_value);
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(result);
  }

  __ bind(&done);
}


void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case UnaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
      break;
  }
}


void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Save return address.

  __ push(rax);  // The operand.
  __ Push(Smi::FromInt(op_));
  __ Push(Smi::FromInt(mode_));
  __ Push(Smi::FromInt(operand_type_));

  __ push(rcx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateSmiStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateSmiStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label slow;
  GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* slow,
                                     Label::Distance non_smi_near,
                                     Label::Distance slow_near) {
  Label done;
  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
  __ SmiNeg(rax, rax, &done, Label::kNear);
  __ jmp(slow, slow_near);
  __ bind(&done);
  __ ret(0);
}


void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
                                        Label* non_smi,
                                        Label::Distance non_smi_near) {
  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
  __ SmiNot(rax, rax);
  __ ret(0);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateHeapNumberStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateHeapNumberStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateHeapNumberStubBitNot(
    MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Operand is a float, negate its value by flipping the sign bit.
  if (mode_ == UNARY_OVERWRITE) {
    __ Set(kScratchRegister, 0x01);
    __ shl(kScratchRegister, Immediate(63));
    __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
  } else {
    // Allocate a heap number before calculating the answer,
    // so we don't have an untagged double around during GC.
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(rax);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ movq(rcx, rax);
      __ pop(rax);
    }
    __ bind(&heapnumber_allocated);
    // rcx: allocated 'empty' number

    // Copy the double value to the new heap number, flipping the sign.
    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Set(kScratchRegister, 0x01);
    __ shl(kScratchRegister, Immediate(63));
    __ xor_(rdx, kScratchRegister);  // Flip sign.
    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
    __ movq(rax, rcx);
  }
  __ ret(0);
}


void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
                                               Label* slow) {
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Convert the heap number in rax to an untagged integer in rcx.
  IntegerConvert(masm, rax, rax);

  // Do the bitwise operation and smi tag the result.
  __ notl(rax);
  __ Integer32ToSmi(rax, rax);
  __ ret(0);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateGenericStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateGenericStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
  // Handle the slow case by jumping to the JavaScript builtin.
  __ pop(rcx);  // Pop return address.
  __ push(rax);
  __ push(rcx);  // Push return address.
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }
  stream->Add("UnaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              UnaryOpIC::GetName(operand_type_));
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Save return address.
  __ push(rdx);
  __ push(rax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ Push(Smi::FromInt(MinorKey()));
  __ Push(Smi::FromInt(op_));
  __ Push(Smi::FromInt(operands_type_));

  __ push(rcx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


void BinaryOpStub::Generate(MacroAssembler* masm) {
  // Explicitly allow generation of nested stubs. It is safe here because
  // generation code does not use any raw pointers.
  AllowStubCallsScope allow_stub_calls(masm, true);

  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case BinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case BinaryOpIC::INT32:
      UNREACHABLE();
      // The int32 case is identical to the Smi case.  We avoid creating this
      // ic state on x64.
      break;
    case BinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case BinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case BinaryOpIC::BOTH_STRING:
      GenerateBothStringStub(masm);
      break;
    case BinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case BinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }
  stream->Add("BinaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              BinaryOpIC::GetName(operands_type_));
}


void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {

  // Arguments to BinaryOpStub are in rdx and rax.
  Register left = rdx;
  Register right = rax;

  // We only generate heapnumber answers for overflowing calculations
  // for the four basic arithmetic operations and logical right shift by 0.
  bool generate_inline_heapnumber_results =
      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
      (op_ == Token::ADD || op_ == Token::SUB ||
       op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
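  // Example: SHR is on this list because a logical shift by zero of an
  // operand with the sign bit set, e.g. -1 >>> 0 == 4294967295, produces a
  // uint32 that does not fit the 32-bit smi payload, so a heap number result
  // must be possible.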

  // Smi check of both operands.  If op is BIT_OR, the check is delayed
  // until after the OR operation.
  Label not_smis;
  Label use_fp_on_smis;
  Label fail;

  if (op_ != Token::BIT_OR) {
    Comment smi_check_comment(masm, "-- Smi check arguments");
    __ JumpIfNotBothSmi(left, right, &not_smis);
  }

  Label smi_values;
  __ bind(&smi_values);
  // Perform the operation.
  Comment perform_smi(masm, "-- Perform smi operation");
  switch (op_) {
    case Token::ADD:
      ASSERT(right.is(rax));
      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
      break;

    case Token::SUB:
      __ SmiSub(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    case Token::MUL:
      ASSERT(right.is(rax));
      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
      break;

    case Token::DIV:
      // SmiDiv will not accept left in rdx or right in rax.
      left = rcx;
      right = rbx;
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiDiv(rax, left, right, &use_fp_on_smis);
      break;

    case Token::MOD:
      // SmiMod will not accept left in rdx or right in rax.
      left = rcx;
      right = rbx;
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiMod(rax, left, right, &use_fp_on_smis);
      break;

    case Token::BIT_OR: {
      ASSERT(right.is(rax));
      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
      break;
    }
    case Token::BIT_XOR:
      ASSERT(right.is(rax));
      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(rax));
      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
      break;

    case Token::SHL:
      __ SmiShiftLeft(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SAR:
      __ SmiShiftArithmeticRight(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SHR:
      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in rax.  Some operations have registers pushed.
  __ ret(0);

  if (use_fp_on_smis.is_linked()) {
    // 6. For some operations emit inline code to perform floating point
    //    operations on known smis (e.g., if the result of the operation
    //    overflowed the smi range).
    __ bind(&use_fp_on_smis);
    if (op_ == Token::DIV || op_ == Token::MOD) {
      // Restore left and right to rdx and rax.
      __ movq(rdx, rcx);
      __ movq(rax, rbx);
    }

    if (generate_inline_heapnumber_results) {
      __ AllocateHeapNumber(rcx, rbx, slow);
      Comment perform_float(masm, "-- Perform float operation on smis");
      if (op_ == Token::SHR) {
        __ SmiToInteger32(left, left);
        __ cvtqsi2sd(xmm0, left);
      } else {
        FloatingPointHelper::LoadSSE2SmiOperands(masm);
        switch (op_) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
      }
      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
      __ movq(rax, rcx);
      __ ret(0);
    } else {
      __ jmp(&fail);
    }
  }

  // 7. Non-smi operands reach the end of the code generated by
  //    GenerateSmiCode, and fall through to subsequent code,
  //    with the operands in rdx and rax.
  //    But first we check if non-smi values are HeapNumbers holding
  //    values that could be smi.
  __ bind(&not_smis);
  Comment done_comment(masm, "-- Enter non-smi code");
  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
                                     &smi_values, &fail);
  __ jmp(&smi_values);
  __ bind(&fail);
}


void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
                                             Label* allocation_failure,
                                             Label* non_numeric_failure) {
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);

      switch (op_) {
        case Token::ADD: __ addsd(xmm0, xmm1); break;
        case Token::SUB: __ subsd(xmm0, xmm1); break;
        case Token::MUL: __ mulsd(xmm0, xmm1); break;
        case Token::DIV: __ divsd(xmm0, xmm1); break;
        default: UNREACHABLE();
      }
      GenerateHeapResultAllocation(masm, allocation_failure);
      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
      __ ret(0);
      break;
    }
    case Token::MOD: {
      // For MOD we jump to the allocation_failure label, to call runtime.
      __ jmp(allocation_failure);
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      Label non_smi_shr_result;
      Register heap_number_map = r9;
      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
                                          heap_number_map);
      switch (op_) {
        case Token::BIT_OR:  __ orl(rax, rcx); break;
        case Token::BIT_AND: __ andl(rax, rcx); break;
        case Token::BIT_XOR: __ xorl(rax, rcx); break;
        case Token::SAR: __ sarl_cl(rax); break;
        case Token::SHL: __ shll_cl(rax); break;
        case Token::SHR: {
          __ shrl_cl(rax);
          // Check if result is negative. This can only happen for a shift
          // by zero.
          __ testl(rax, rax);
          __ j(negative, &non_smi_shr_result);
          break;
        }
        default: UNREACHABLE();
      }
      STATIC_ASSERT(kSmiValueSize == 32);
      // Tag smi result and return.
      __ Integer32ToSmi(rax, rax);
      __ Ret();

      // Logical shift right can produce an unsigned int32 that is not
      // an int32, and so is not in the smi range.  Allocate a heap number
      // in that case.
      if (op_ == Token::SHR) {
        __ bind(&non_smi_shr_result);
        Label allocation_failed;
        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
        // Allocate heap number in new space.
        // Not using AllocateHeapNumber macro in order to reuse
        // already loaded heap_number_map.
        __ AllocateInNewSpace(HeapNumber::kSize,
                              rax,
                              rdx,
                              no_reg,
                              &allocation_failed,
                              TAG_OBJECT);
        // Set the map.
        if (FLAG_debug_code) {
          __ AbortIfNotRootValue(heap_number_map,
                                 Heap::kHeapNumberMapRootIndex,
                                 "HeapNumberMap register clobbered.");
        }
        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
                heap_number_map);
        __ cvtqsi2sd(xmm0, rbx);
        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
        __ Ret();

        __ bind(&allocation_failed);
        // We need tagged values in rdx and rax for the following code,
        // not int32 in rax and rcx.
        __ Integer32ToSmi(rax, rcx);
        __ Integer32ToSmi(rdx, rbx);
        __ jmp(allocation_failure);
      }
      break;
    }
    default: UNREACHABLE(); break;
  }
  // No fall-through from this generated code.
  if (FLAG_debug_code) {
    __ Abort("Unexpected fall-through in "
             "BinaryOpStub::GenerateFloatingPointCode.");
  }
}


void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);
  Label left_not_string, call_runtime;

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &left_not_string, Label::kNear);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &left_not_string, Label::kNear);
  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_left_stub);

  // Left operand is not a string, test right.
  __ bind(&left_not_string);
  __ JumpIfSmi(right, &call_runtime, Label::kNear);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime, Label::kNear);

  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_right_stub);

  // Neither argument is a string.
  __ bind(&call_runtime);
}


void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
  GenerateRegisterArgsPush(masm);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label call_runtime;
  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only allow smi results.
    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  GenerateTypeTransition(masm);

  if (call_runtime.is_linked()) {
    __ bind(&call_runtime);
    GenerateCallRuntimeCode(masm);
  }
}


void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // Try to add the arguments as strings; otherwise, transition to the
  // generic BinaryOpIC type.
  GenerateStringAddCode(masm);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime);

  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  Label call_runtime;

  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateStringAddCode(masm);
  }

  // Convert oddball arguments to numbers.
  Label check, done;
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &check, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(rdx, rdx);
  } else {
    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
  }
  __ jmp(&done, Label::kNear);
  __ bind(&check);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &done, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(rax, rax);
  } else {
    __ LoadRoot(rax, Heap::kNanValueRootIndex);
  }
  __ bind(&done);

  GenerateHeapNumberStub(masm);
}


void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label gc_required, not_number;
  GenerateFloatingPointCode(masm, &gc_required, &not_number);

  __ bind(&not_number);
  GenerateTypeTransition(masm);

  __ bind(&gc_required);
  GenerateCallRuntimeCode(masm);
}


void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime;

  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

  GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    GenerateStringAddCode(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntimeCode(masm);
}


void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
                                                Label* alloc_failure) {
  Label skip_allocation;
  OverwriteMode mode = mode_;
  switch (mode) {
    case OVERWRITE_LEFT: {
      // If the argument in rdx is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rdx, &skip_allocation);
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rdx can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rdx, rbx);
      __ bind(&skip_allocation);
      // Use object in rdx as a result holder.
      __ movq(rax, rdx);
      break;
    }
    case OVERWRITE_RIGHT:
      // If the argument in rax is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rax, &skip_allocation);
      // Fall through!
    case NO_OVERWRITE:
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rax can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rax, rbx);
      __ bind(&skip_allocation);
      break;
    default: UNREACHABLE();
  }
}


void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ pop(rcx);
  __ push(rdx);
  __ push(rax);
  __ push(rcx);
}


void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // TAGGED case:
  //   Input:
  //     rsp[8]: argument (should be number).
  //     rsp[0]: return address.
  //   Output:
  //     rax: tagged double result.
  // UNTAGGED case:
  //   Input:
1507  //     rsp[0]: return address.
1508  //     xmm1: untagged double input argument
1509  //   Output:
1510  //     xmm1: untagged double result.
1511
1512  Label runtime_call;
1513  Label runtime_call_clear_stack;
1514  Label skip_cache;
1515  const bool tagged = (argument_type_ == TAGGED);
1516  if (tagged) {
1517    Label input_not_smi, loaded;
1518    // Test that rax is a number.
1519    __ movq(rax, Operand(rsp, kPointerSize));
1520    __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
1521    // Input is a smi. Untag and load it onto the FPU stack.
1522    // Then load the bits of the double into rbx.
1523    __ SmiToInteger32(rax, rax);
1524    __ subq(rsp, Immediate(kDoubleSize));
1525    __ cvtlsi2sd(xmm1, rax);
1526    __ movsd(Operand(rsp, 0), xmm1);
1527    __ movq(rbx, xmm1);
1528    __ movq(rdx, xmm1);
1529    __ fld_d(Operand(rsp, 0));
1530    __ addq(rsp, Immediate(kDoubleSize));
1531    __ jmp(&loaded, Label::kNear);
1532
1533    __ bind(&input_not_smi);
1534    // Check if input is a HeapNumber.
1535    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
1536    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
1537    __ j(not_equal, &runtime_call);
1538    // Input is a HeapNumber. Push it on the FPU stack and load its
1539    // bits into rbx.
1540    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1541    __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
1542    __ movq(rdx, rbx);
1543
1544    __ bind(&loaded);
1545  } else {  // UNTAGGED.
1546    __ movq(rbx, xmm1);
1547    __ movq(rdx, xmm1);
1548  }

  // ST[0] == double value, if TAGGED.
  // rbx = bits of double value.
  // rdx = also bits of double value.
  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
  //   h = h0 = bits ^ (bits >> 32);
  //   h ^= h >> 16;
  //   h ^= h >> 8;
  //   h = h & (cacheSize - 1);
  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
  __ sar(rdx, Immediate(32));
  __ xorl(rdx, rbx);
  __ movl(rcx, rdx);
  __ movl(rax, rdx);
  __ movl(rdi, rdx);
  __ sarl(rdx, Immediate(8));
  __ sarl(rcx, Immediate(16));
  __ sarl(rax, Immediate(24));
  __ xorl(rcx, rdx);
  __ xorl(rax, rdi);
  __ xorl(rcx, rax);
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));

  // ST[0] == double value.
  // rbx = bits of double value.
  // rcx = TranscendentalCache::hash(double value).
  ExternalReference cache_array =
      ExternalReference::transcendental_cache_array_address(masm->isolate());
  __ movq(rax, cache_array);
  int cache_array_index =
      type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
  __ movq(rax, Operand(rax, cache_array_index));
  // rax points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ testq(rax, rax);
  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
#ifdef DEBUG
  // Check that the layout of cache elements matches expectations.
  {  // NOLINT - the linter doesn't like a single brace on a line.
    TranscendentalCache::SubCache::Element test_elem[2];
    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    // Two uint32_t's and a pointer per element.
    CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
    CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
    CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
    CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
  }
#endif
  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
  __ addl(rcx, rcx);
  __ lea(rcx, Operand(rax, rcx, times_8, 0));
  // Check if cache matches: Double value is stored in uint32_t[2] array.
  Label cache_miss;
  __ cmpq(rbx, Operand(rcx, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Cache hit!
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
  __ movq(rax, Operand(rcx, 2 * kIntSize));
  if (tagged) {
    __ fstp(0);  // Clear FPU stack.
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }

  __ bind(&cache_miss);
  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
  // Update cache with new value.
  if (tagged) {
    __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
  } else {  // UNTAGGED.
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
  }
  GenerateOperation(masm, type_);
  __ movq(Operand(rcx, 0), rbx);
  __ movq(Operand(rcx, 2 * kIntSize), rax);
  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
  if (tagged) {
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();

    // Skip cache and return answer directly, only in untagged case.
    __ bind(&skip_cache);
    __ subq(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), xmm1);
    __ fld_d(Operand(rsp, 0));
    GenerateOperation(masm, type_);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(xmm1, Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    // We return the value in xmm1 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      // Allocate an unused object bigger than a HeapNumber.
      __ Push(Smi::FromInt(2 * kDoubleSize));
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }

  // Call runtime, doing whatever allocation and cleanup is necessary.
  if (tagged) {
    __ bind(&runtime_call_clear_stack);
    __ fstp(0);
    __ bind(&runtime_call);
    __ TailCallExternalReference(
        ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
  } else {  // UNTAGGED.
    __ bind(&runtime_call_clear_stack);
    __ bind(&runtime_call);
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(rax);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }
}
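

// For reference, a C++ sketch of the cache probe implemented above. The
// element layout matches the DEBUG checks (two uint32_t's holding the bits
// of the input double, plus a result pointer); the names and the kCacheSize
// value are assumptions for illustration, not the real SubCache declaration.
struct SketchCacheElement {
  uint32_t in[2];  // Bits of the input double.
  void* output;    // Cached result; a HeapNumber in the real cache.
};

static uint32_t SketchCacheHash(uint64_t bits) {
  // Same mixing as the sar/xor sequence above: fold the high word into the
  // low word, then fold in two more byte-shifted copies and mask. After the
  // final mask this equals (h0 ^ h0>>8 ^ h0>>16 ^ h0>>24) & (cacheSize - 1).
  uint32_t h = static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  h ^= h >> 16;
  h ^= h >> 8;
  return h & (512 - 1);  // Assuming kCacheSize == 512 (a power of two).
}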


Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
  switch (type_) {
    // Add more cases when necessary.
    case TranscendentalCache::SIN: return Runtime::kMath_sin;
    case TranscendentalCache::COS: return Runtime::kMath_cos;
    case TranscendentalCache::TAN: return Runtime::kMath_tan;
    case TranscendentalCache::LOG: return Runtime::kMath_log;
    default:
      UNIMPLEMENTED();
      return Runtime::kAbort;
  }
}


void TranscendentalCacheStub::GenerateOperation(
    MacroAssembler* masm, TranscendentalCache::Type type) {
  // Registers:
  // rax: Newly allocated HeapNumber, which must be preserved.
  // rbx: Bits of input double. Must be preserved.
  // rcx: Pointer to cache entry. Must be preserved.
  // st(0): Input double.
  Label done;
  if (type == TranscendentalCache::SIN ||
      type == TranscendentalCache::COS ||
      type == TranscendentalCache::TAN) {
    // fsin, fcos and fptan all require arguments in the range +/-2^63 and
    // return NaN for infinities and NaN. They can share all code except
    // the actual fsin/fcos/fptan operation.
    Label in_range;
    // If the argument is outside the range -2^63..2^63, fsin/fcos doesn't
    // work. We must reduce it to the appropriate range.
    __ movq(rdi, rbx);
    // Move exponent and sign bits to low bits.
    __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
    // Remove sign bit.
    __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
    int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
    __ cmpl(rdi, Immediate(supported_exponent_limit));
    __ j(below, &in_range);
    // Check for infinity and NaN. Both result in NaN for sin/cos/tan.
    __ cmpl(rdi, Immediate(0x7ff));
    Label non_nan_result;
    __ j(not_equal, &non_nan_result, Label::kNear);
    // Input is +/-Infinity or NaN. Result is NaN.
    __ fstp(0);
    // NaN is represented by 0x7ff8000000000000.
    __ subq(rsp, Immediate(kPointerSize));
    __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
    __ movl(Operand(rsp, 0), Immediate(0x00000000));
    __ fld_d(Operand(rsp, 0));
    __ addq(rsp, Immediate(kPointerSize));
    __ jmp(&done);

    __ bind(&non_nan_result);

    // Use fprem1 to restrict the argument to the range +/-2*PI.
    __ movq(rdi, rax);  // Save rax before using fnstsw_ax.
    __ fldpi();
    __ fadd(0);
    __ fld(1);
    // FPU Stack: input, 2*pi, input.
    {
      Label no_exceptions;
      __ fwait();
      __ fnstsw_ax();
      // Clear if Illegal Operand or Zero Division exceptions are set.
      __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
      __ j(zero, &no_exceptions);
      __ fnclex();
      __ bind(&no_exceptions);
    }

    // Compute st(0) % st(1).
    {
      Label partial_remainder_loop;
      __ bind(&partial_remainder_loop);
      __ fprem1();
      __ fwait();
      __ fnstsw_ax();
      __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
      // If C2 is set, computation only has partial result. Loop to
      // continue computation.
      __ j(not_zero, &partial_remainder_loop);
    }
    // FPU Stack: input, 2*pi, input % 2*pi
    __ fstp(2);
    // FPU Stack: input % 2*pi, 2*pi
    __ fstp(0);
    // FPU Stack: input % 2*pi
    __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
    __ bind(&in_range);
    switch (type) {
      case TranscendentalCache::SIN:
        __ fsin();
        break;
      case TranscendentalCache::COS:
        __ fcos();
        break;
      case TranscendentalCache::TAN:
        // FPTAN calculates the tangent into st(0) and pushes 1.0 onto the
        // FP register stack.
        __ fptan();
        __ fstp(0);  // Pop the pushed 1.0 off the FP register stack.
        break;
      default:
        UNREACHABLE();
    }
    __ bind(&done);
  } else {
    ASSERT(type == TranscendentalCache::LOG);
    __ fldln2();
    __ fxch();
    __ fyl2x();
  }
}
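

// A rough scalar sketch of the trig path above (hypothetical helper; assumes
// the C library's fmod and sin are available). fsin/fcos/fptan only handle
// arguments in +/-2^63; infinities and NaN produce NaN, and large finite
// inputs are first reduced to +/-2*pi. (fprem1 actually computes the IEEE
// remainder in partial steps, but for a periodic function an angle reduced
// either way is equivalent.)
static double SketchReducedSin(double input) {
  const double two_pi = 6.283185307179586;
  if (input - input != 0.0) {
    // Input is +/-Infinity or NaN; the stub materializes the canonical NaN
    // 0x7ff8000000000000 in this case.
    return input - input;
  }
  double reduced = fmod(input, two_pi);  // The fprem1 loop's net effect.
  return sin(reduced);                   // The fsin instruction.
}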


// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
  // Check float operands.
  Label done;
  Label rax_is_smi;
  Label rax_is_object;
  Label rdx_is_object;

  __ JumpIfNotSmi(rdx, &rdx_is_object);
  __ SmiToInteger32(rdx, rdx);
  __ JumpIfSmi(rax, &rax_is_smi);

  __ bind(&rax_is_object);
  IntegerConvert(masm, rcx, rax);  // Uses rdi, rcx and rbx.
  __ jmp(&done);

  __ bind(&rdx_is_object);
  IntegerConvert(masm, rdx, rdx);  // Uses rdi, rcx and rbx.
  __ JumpIfNotSmi(rax, &rax_is_object);
  __ bind(&rax_is_smi);
  __ SmiToInteger32(rcx, rax);

  __ bind(&done);
  __ movl(rax, rdx);
}


// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
                                         Label* conversion_failure,
                                         Register heap_number_map) {
  // Check float operands.
  Label arg1_is_object, check_undefined_arg1;
  Label arg2_is_object, check_undefined_arg2;
  Label load_arg2, done;

  __ JumpIfNotSmi(rdx, &arg1_is_object);
  __ SmiToInteger32(r8, rdx);
  __ jmp(&load_arg2);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg1);
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(r8, 0);
  __ jmp(&load_arg2);

  __ bind(&arg1_is_object);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg1);
  // Get the untagged integer version of the rdx heap number in r8.
  IntegerConvert(masm, r8, rdx);

  // Here r8 has the untagged integer, rax has a Smi or a heap number.
  __ bind(&load_arg2);
  // Test if arg2 is a Smi.
  __ JumpIfNotSmi(rax, &arg2_is_object);
  __ SmiToInteger32(rcx, rax);
  __ jmp(&done);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg2);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(rcx, 0);
  __ jmp(&done);

  __ bind(&arg2_is_object);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg2);
  // Get the untagged integer version of the rax heap number in rcx.
  IntegerConvert(masm, rcx, rax);
  __ bind(&done);
  __ movl(rax, r8);
}
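

// The conversion contract implemented above, as a C++ sketch (names are
// hypothetical; the real code dispatches on tagged values rather than on
// flags). For bit operations ECMA-262 demands: smis are used directly,
// undefined becomes zero (section 9.5), heap numbers are truncated to
// int32, and everything else fails over to the runtime.
static bool SketchLoadAsInt32(bool is_smi, bool is_undefined,
                              bool is_heap_number, int32_t smi_value,
                              double heap_number_value, int32_t* result) {
  if (is_smi) {
    *result = smi_value;  // SmiToInteger32.
    return true;
  }
  if (is_undefined) {
    *result = 0;  // Set(reg, 0), per ECMA-262 section 9.5.
    return true;
  }
  if (is_heap_number) {
    // IntegerConvert truncates modulo 2^32; the plain cast below only
    // models the common in-range case.
    *result = static_cast<int32_t>(heap_number_value);
    return true;
  }
  return false;  // Jump to conversion_failure.
}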


void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);
}


void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
  // Load operand in rdx into xmm0.
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1.
  __ JumpIfSmi(rax, &load_smi_rax);
  __ bind(&load_nonsmi_rax);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);

  __ bind(&done);
}


void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                  Label* not_numbers) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
  // Load operand in rdx into xmm0, or branch to not_numbers.
  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1, or branch to not_numbers.
  __ JumpIfSmi(rax, &load_smi_rax);

  __ bind(&load_nonsmi_rax);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);
  __ bind(&done);
}
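

// In scalar terms, each operand load above is (hypothetical helper): a smi
// is untagged and widened to a double, a heap number's IEEE value is read
// directly, and in the "unknown" variant anything else jumps to not_numbers.
static bool SketchLoadOperandAsDouble(bool is_smi, bool is_heap_number,
                                      int32_t smi_value, double heap_value,
                                      double* result) {
  if (is_smi) {
    *result = static_cast<double>(smi_value);  // cvtlsi2sd.
    return true;
  }
  if (is_heap_number) {
    *result = heap_value;  // movsd from HeapNumber::kValueOffset.
    return true;
  }
  return false;  // Only reachable in LoadSSE2UnknownOperands.
}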


void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
                                        Register first,
                                        Register second,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Label* on_success,
                                        Label* on_not_smis) {
  Register heap_number_map = scratch3;
  Register smi_result = scratch1;
  Label done;

  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  Label first_smi;
  __ JumpIfSmi(first, &first_smi, Label::kNear);
  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, on_not_smis);
  // Convert HeapNumber to smi if possible.
  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
  __ movq(scratch2, xmm0);
  __ cvttsd2siq(smi_result, xmm0);
  // Check if conversion was successful by converting back and
  // comparing to the original double's bits.
  __ cvtlsi2sd(xmm1, smi_result);
  __ movq(kScratchRegister, xmm1);
  __ cmpq(scratch2, kScratchRegister);
  __ j(not_equal, on_not_smis);
  __ Integer32ToSmi(first, smi_result);

  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
  __ bind(&first_smi);
  if (FLAG_debug_code) {
    // Second should be non-smi if we get here.
    __ AbortIfSmi(second);
  }
  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, on_not_smis);
  // Convert second to smi, if possible.
  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
  __ movq(scratch2, xmm0);
  __ cvttsd2siq(smi_result, xmm0);
  __ cvtlsi2sd(xmm1, smi_result);
  __ movq(kScratchRegister, xmm1);
  __ cmpq(scratch2, kScratchRegister);
  __ j(not_equal, on_not_smis);
  __ Integer32ToSmi(second, smi_result);
  if (on_success != NULL) {
    __ jmp(on_success);
  } else {
    __ bind(&done);
  }
}
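

// A C++ sketch of the round-trip test used above to decide whether a heap
// number is really an int32 (helper name hypothetical; assumes memcpy is
// available). The cvttsd2siq/cvtlsi2sd pair is modeled by casts (which are
// undefined in C++ for out-of-range inputs, where the instruction instead
// yields the "indefinite" value and the bit comparison fails); comparing the
// original bits with the reconverted bits rejects fractions, out-of-range
// values, NaN, and also -0, whose sign bit does not survive the round trip.
static bool SketchDoubleToInt32(double value, int32_t* result) {
  int32_t converted = static_cast<int32_t>(value);  // cvttsd2siq (truncate).
  double back = static_cast<double>(converted);     // cvtlsi2sd.
  uint64_t value_bits, back_bits;
  memcpy(&value_bits, &value, sizeof(value_bits));
  memcpy(&back_bits, &back, sizeof(back_bits));
  if (value_bits != back_bits) return false;  // Not exactly an int32.
  *result = converted;
  return true;
}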


void MathPowStub::Generate(MacroAssembler* masm) {
  // Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
  const Register exponent = rdx;
#else
  const Register exponent = rdi;
#endif
  const Register base = rax;
  const Register scratch = rcx;
  const XMMRegister double_result = xmm3;
  const XMMRegister double_base = xmm2;
  const XMMRegister double_exponent = xmm1;
  const XMMRegister double_scratch = xmm4;

  Label call_runtime, done, exponent_not_smi, int_exponent;

  // Save 1 in double_result - we need this several times later on.
  __ movq(scratch, Immediate(1));
  __ cvtlsi2sd(double_result, scratch);

  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack.
    __ movq(base, Operand(rsp, 2 * kPointerSize));
    __ movq(exponent, Operand(rsp, 1 * kPointerSize));
    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);

    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent, Label::kNear);

    __ bind(&base_is_smi);
    __ SmiToInteger32(base, base);
    __ cvtlsi2sd(double_base, base);
    __ bind(&unpack_exponent);

    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label fast_power;
    // Detect integer exponents stored as double.
    __ cvttsd2si(exponent, double_exponent);
    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
    __ cmpl(exponent, Immediate(0x80000000u));
    __ j(equal, &call_runtime);
    __ cvtlsi2sd(double_scratch, exponent);
    // Already ruled out NaNs for exponent.
    __ ucomisd(double_exponent, double_scratch);
    __ j(equal, &int_exponent);

    if (exponent_type_ == ON_STACK) {
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead.  We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label continue_sqrt, continue_rsqrt, not_plus_half;
      // Test for 0.5.
      // Load double_scratch with 0.5.
      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
      __ movq(double_scratch, scratch);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &not_plus_half, Label::kNear);

      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal.  However, it also sets the carry flag.
      __ j(not_equal, &continue_sqrt, Label::kNear);
      __ j(carry, &continue_sqrt, Label::kNear);

      // Set result to Infinity in the special case.
      __ xorps(double_result, double_result);
      __ subsd(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&continue_sqrt);
      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
      __ xorps(double_scratch, double_scratch);
      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
      __ sqrtsd(double_result, double_scratch);
      __ jmp(&done);

      // Test for -0.5.
      __ bind(&not_plus_half);
      // Load double_scratch with -0.5 by subtracting 1.
      __ subsd(double_scratch, double_result);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &fast_power, Label::kNear);

      // Calculates reciprocal of square root of base.  Check for the special
      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal.  However, it also sets the carry flag.
      __ j(not_equal, &continue_rsqrt, Label::kNear);
      __ j(carry, &continue_rsqrt, Label::kNear);

      // Set result to 0 in the special case.
      __ xorps(double_result, double_result);
      __ jmp(&done);

      __ bind(&continue_rsqrt);
      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
      __ xorps(double_exponent, double_exponent);
      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
      __ sqrtsd(double_exponent, double_exponent);
      __ divsd(double_result, double_exponent);
      __ jmp(&done);
    }

    // Using FPU instructions to calculate power.
    Label fast_power_failed;
    __ bind(&fast_power);
    __ fnclex();  // Clear flags to catch exceptions later.
    // Transfer (B)ase and (E)xponent onto the FPU register stack.
    __ subq(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), double_exponent);
    __ fld_d(Operand(rsp, 0));  // E
    __ movsd(Operand(rsp, 0), double_base);
    __ fld_d(Operand(rsp, 0));  // B, E

    // Exponent is in st(1) and base is in st(0).
    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
    // FYL2X calculates st(1) * log2(st(0))
    __ fyl2x();    // X
    __ fld(0);     // X, X
    __ frndint();  // rnd(X), X
    __ fsub(1);    // rnd(X), X-rnd(X)
    __ fxch(1);    // X - rnd(X), rnd(X)
    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
    // FSCALE calculates st(0) * 2^st(1)
    __ fscale();   // 2^X, rnd(X)
    __ fstp(1);
    // Bail out to runtime in case of exceptions in the status word.
    __ fnstsw_ax();
    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
    __ j(not_zero, &fast_power_failed, Label::kNear);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(double_result, Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&done);

    __ bind(&fast_power_failed);
    __ fninit();
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&call_runtime);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);
  const XMMRegister double_scratch2 = double_exponent;
  // Back up exponent as we need to check if exponent is negative later.
  __ movq(scratch, exponent);  // Back up exponent.
  __ movsd(double_scratch, double_base);  // Back up base.
  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.

  // Get absolute value of exponent.
  Label no_neg, while_true, no_multiply;
  __ testl(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ negl(scratch);
  __ bind(&no_neg);

  __ bind(&while_true);
  __ shrl(scratch, Immediate(1));
  __ j(not_carry, &no_multiply, Label::kNear);
  __ mulsd(double_result, double_scratch);
  __ bind(&no_multiply);

  __ mulsd(double_scratch, double_scratch);
  __ j(not_zero, &while_true);  // Flags are still set by the shrl above.

  // If the exponent is negative, return 1/result.
  __ testl(exponent, exponent);
  __ j(greater, &done);
  __ divsd(double_scratch2, double_result);
  __ movsd(double_result, double_scratch2);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ xorps(double_scratch2, double_scratch2);
  __ ucomisd(double_scratch2, double_result);
  // double_exponent aliased as double_scratch2 has already been overwritten
  // and may not have contained the exponent value in the first place when the
  // input was a smi.  We reset it with exponent value before bailing out.
  __ j(not_equal, &done);
  __ cvtlsi2sd(double_exponent, exponent);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as a heap number in rax.
    __ bind(&done);
    __ AllocateHeapNumber(rax, rcx, &call_runtime);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(2 * kPointerSize);
  } else {
    __ bind(&call_runtime);
    // Move base to the correct argument register.  Exponent is already in xmm1.
    __ movsd(xmm0, double_base);
    ASSERT(double_exponent.is(xmm1));
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(2);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()), 2);
    }
    // Return value is in xmm0.
    __ movsd(double_result, xmm0);
    // Restore context register.
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(0);
  }
}
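

// The non-integral fast path above evaluates B^E as 2^(E * log2(B)),
// splitting X = E * log2(B) into rnd(X) and X - rnd(X) so that f2xm1 and
// fscale can be applied; the integer-exponent path is classic
// square-and-multiply. A C++ sketch of the latter (hypothetical helper):
static double SketchIntPow(double base, int exponent) {
  double result = 1.0;  // double_result starts at 1.
  double power = base;  // double_scratch backs up the base.
  // Absolute value, written to avoid overflow when exponent == INT_MIN.
  unsigned bits = exponent < 0 ? static_cast<unsigned>(-(exponent + 1)) + 1
                               : static_cast<unsigned>(exponent);
  while (bits != 0) {
    if (bits & 1) result *= power;  // mulsd(double_result, double_scratch).
    power *= power;                 // mulsd(double_scratch, double_scratch).
    bits >>= 1;                     // shrl(scratch, Immediate(1)).
  }
  // Negative exponents return 1/result; the stub then re-checks a zero
  // result because, with subnormals, x^-y == (1/x)^y does not always hold.
  return exponent < 0 ? 1.0 / result : result;
}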


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in rdx and the parameter count is in rax.

  // The displacement is used for skipping the frame pointer on the
  // stack. It is the offset of the last parameter (if any) relative
  // to the frame pointer.
  static const int kDisplacement = 1 * kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(rdx, &slow);

  // Check if the calling frame is an arguments adaptor frame.  We look at the
  // context offset, and if the frame is not a regular one, then we find a
  // Smi instead of the context.  We can't use SmiCompare here, because that
  // only works for comparing two smis.
  Label adaptor;
  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register rax. Use unsigned comparison to get negative
  // check for free.
  __ cmpq(rdx, rax);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
  __ Ret();

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmpq(rdx, rcx);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
  __ Ret();

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ pop(rbx);  // Return address.
  __ push(rdx);
  __ push(rbx);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
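

// The fast path above in C++ terms (hypothetical model): the key is bounds-
// checked against the parameter count (or, for adaptor frames, the actual
// argument count), and the slot is then read relative to the frame pointer.
static bool SketchReadArgument(const intptr_t* frame_args, uint32_t count,
                               int32_t key, intptr_t* result) {
  // Unsigned comparison rejects negative keys and out-of-range keys in one
  // test, exactly like the cmpq/j(above_equal) pairs above.
  if (static_cast<uint32_t>(key) >= count) return false;  // Slow case.
  // Arguments are pushed in call order, so the last argument is closest to
  // the frame pointer; element key therefore lives count - 1 - key slots in.
  *result = frame_args[count - 1 - key];
  return true;
}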


void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  // Stack layout:
  //  rsp[0] : return address
  //  rsp[8] : number of parameters (tagged)
  //  rsp[16] : receiver displacement
  //  rsp[24] : function
  // Registers used over the whole function:
  //  rbx: the mapped parameter count (untagged)
  //  rax: the allocated object (tagged).

  Factory* factory = masm->isolate()->factory();

  __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
  // rbx = parameter count (untagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  Label adaptor_frame, try_allocate;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ movq(rcx, rbx);
  __ jmp(&try_allocate, Label::kNear);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ SmiToInteger64(rcx,
                    Operand(rdx,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movq(Operand(rsp, 2 * kPointerSize), rdx);

  // rbx = parameter count (untagged)
  // rcx = argument count (untagged)
  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
  __ cmpq(rbx, rcx);
  __ j(less_equal, &try_allocate, Label::kNear);
  __ movq(rbx, rcx);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  Label no_parameter_map;
  __ xor_(r8, r8);
  __ testq(rbx, rbx);
  __ j(zero, &no_parameter_map, Label::kNear);
  __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
  __ bind(&no_parameter_map);

  // 2. Backing store.
  __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ addq(r8, Immediate(Heap::kArgumentsObjectSize));

  // Do the allocation of all three objects in one go.
  __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);

  // rax = address of new object(s) (tagged)
  // rcx = argument count (untagged)
  // Get the arguments boilerplate from the current (global) context into rdi.
  Label has_mapped_parameters, copy;
  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
  __ testq(rbx, rbx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);

  const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
  __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
  __ jmp(&copy, Label::kNear);

  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
  __ bind(&has_mapped_parameters);
  __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
  __ bind(&copy);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (untagged)
  // rdi = address of boilerplate object (tagged)
  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ movq(rdx, FieldOperand(rdi, i));
    __ movq(FieldOperand(rax, i), rdx);
  }

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ movq(rdx, Operand(rsp, 3 * kPointerSize));
  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsCalleeIndex * kPointerSize),
          rdx);

  // Use the length (smi tagged) and set that as an in-object property too.
  // Note: rcx is tagged from here on.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ Integer32ToSmi(rcx, rcx);
  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, rdi will point there, otherwise to the
  // backing store.
  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (tagged)
  // rdi = address of parameter map or backing store (tagged)

  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ testq(rbx, rbx);
  __ j(zero, &skip_parameter_map);

  __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
  // rbx contains the untagged parameter count. Add 2 and tag to write.
  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
  __ Integer64PlusConstantToSmi(r9, rbx, 2);
  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
  __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;

  // Load tagged parameter count into r9.
  __ Integer32ToSmi(r9, rbx);
  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
  __ addq(r8, Operand(rsp, 1 * kPointerSize));
  __ subq(r8, r9);
  __ Move(r11, factory->the_hole_value());
  __ movq(rdx, rdi);
  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  // r9 = loop variable (tagged)
  // r8 = mapping index (tagged)
  // r11 = the hole value
  // rdx = address of parameter map (tagged)
  // rdi = address of backing store (tagged)
  __ jmp(&parameters_test, Label::kNear);

  __ bind(&parameters_loop);
  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
  __ SmiToInteger64(kScratchRegister, r9);
  __ movq(FieldOperand(rdx, kScratchRegister,
                       times_pointer_size,
                       kParameterMapHeaderSize),
          r8);
  __ movq(FieldOperand(rdi, kScratchRegister,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r11);
  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
  __ bind(&parameters_test);
  __ SmiTest(r9);
  __ j(not_zero, &parameters_loop, Label::kNear);

  __ bind(&skip_parameter_map);

  // rcx = argument count (tagged)
  // rdi = address of backing store (tagged)
  // Copy arguments header and remaining slots (if there are any).
  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
          factory->fixed_array_map());
  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

  Label arguments_loop, arguments_test;
  __ movq(r8, rbx);
  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
  // Untag rcx for the loop below.
  __ SmiToInteger64(rcx, rcx);
  __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
  __ subq(rdx, kScratchRegister);
  __ jmp(&arguments_test, Label::kNear);

  __ bind(&arguments_loop);
  __ subq(rdx, Immediate(kPointerSize));
  __ movq(r9, Operand(rdx, 0));
  __ movq(FieldOperand(rdi, r8,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r9);
  __ addq(r8, Immediate(1));

  __ bind(&arguments_test);
  __ cmpq(r8, rcx);
  __ j(less, &arguments_loop, Label::kNear);

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  // rcx = argument count (untagged)
  __ bind(&runtime);
  __ Integer32ToSmi(rcx, rcx);
  __ movq(Operand(rsp, 1 * kPointerSize), rcx);  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
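

// A sketch of the size computation behind the single allocation above,
// using the constants from this file (helper name hypothetical): one block
// holds the arguments object, an optional parameter map (with two extra
// slots for the context and the backing store), and the backing store.
static int SketchAliasedArgumentsSize(int mapped_count, int argument_count) {
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  int size = 0;
  if (mapped_count > 0) {  // testq(rbx, rbx) / j(zero, &no_parameter_map).
    size = kParameterMapHeaderSize + mapped_count * kPointerSize;
  }
  size += FixedArray::kHeaderSize + argument_count * kPointerSize;
  return size + Heap::kArgumentsObjectSize;
}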


void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
  // rsp[0] : return address
  // rsp[8] : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &runtime);

  // Patch the arguments.length and the parameters pointer.
  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movq(Operand(rsp, 2 * kPointerSize), rdx);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}


void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // rsp[0] : return address
  // rsp[8] : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // Get the length from the frame.
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
  __ SmiToInteger64(rcx, rcx);
  __ jmp(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movq(Operand(rsp, 2 * kPointerSize), rdx);

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ testq(rcx, rcx);
  __ j(zero, &add_arguments_object, Label::kNear);
  __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));

  // Do the allocation of both objects in one go.
  __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);

  // Get the arguments boilerplate from the current (global) context.
  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
  const int offset =
      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
  __ movq(rdi, Operand(rdi, offset));

  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ movq(rbx, FieldOperand(rdi, i));
    __ movq(FieldOperand(rax, i), rbx);
  }

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // If there are no actual arguments, we're done.
  Label done;
  __ testq(rcx, rcx);
  __ j(zero, &done);

  // Get the parameters pointer from the stack.
  __ movq(rdx, Operand(rsp, 2 * kPointerSize));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);

  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
  // Untag the length for the loop below.
  __ SmiToInteger64(rcx, rcx);

  // Copy the fixed array slots.
  Label loop;
  __ bind(&loop);
  __ movq(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
  __ addq(rdi, Immediate(kPointerSize));
  __ subq(rdx, Immediate(kPointerSize));
  __ decq(rcx);
  __ j(not_zero, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
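

// The strict-mode sibling of the size computation above, for comparison
// (hypothetical helper): no parameter map is ever needed, and the elements
// array is omitted entirely when there are no actual arguments.
static int SketchStrictArgumentsSize(int argument_count) {
  int size = 0;
  if (argument_count > 0) {  // testq(rcx, rcx) / j(zero, &add_arguments_object).
    size = FixedArray::kHeaderSize + argument_count * kPointerSize;
  }
  return size + Heap::kArgumentsObjectSizeStrict;
}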
2656
2657
2658void RegExpExecStub::Generate(MacroAssembler* masm) {
2659  // Just jump directly to runtime if native RegExp is not selected at compile
2660  // time or if regexp entry in generated code is turned off runtime switch or
2661  // at compilation.
2662#ifdef V8_INTERPRETED_REGEXP
2663  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2664#else  // V8_INTERPRETED_REGEXP
2665
2666  // Stack frame on entry.
2667  //  rsp[0]: return address
2668  //  rsp[8]: last_match_info (expected JSArray)
2669  //  rsp[16]: previous index
2670  //  rsp[24]: subject string
2671  //  rsp[32]: JSRegExp object
2672
2673  static const int kLastMatchInfoOffset = 1 * kPointerSize;
2674  static const int kPreviousIndexOffset = 2 * kPointerSize;
2675  static const int kSubjectOffset = 3 * kPointerSize;
2676  static const int kJSRegExpOffset = 4 * kPointerSize;
2677
2678  Label runtime;
2679  // Ensure that a RegExp stack is allocated.
2680  Isolate* isolate = masm->isolate();
2681  ExternalReference address_of_regexp_stack_memory_address =
2682      ExternalReference::address_of_regexp_stack_memory_address(isolate);
2683  ExternalReference address_of_regexp_stack_memory_size =
2684      ExternalReference::address_of_regexp_stack_memory_size(isolate);
2685  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
2686  __ testq(kScratchRegister, kScratchRegister);
2687  __ j(zero, &runtime);
2688
2689  // Check that the first argument is a JSRegExp object.
2690  __ movq(rax, Operand(rsp, kJSRegExpOffset));
2691  __ JumpIfSmi(rax, &runtime);
2692  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
2693  __ j(not_equal, &runtime);
2694  // Check that the RegExp has been compiled (data contains a fixed array).
2695  __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
2696  if (FLAG_debug_code) {
2697    Condition is_smi = masm->CheckSmi(rax);
2698    __ Check(NegateCondition(is_smi),
2699        "Unexpected type for RegExp data, FixedArray expected");
2700    __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
2701    __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
2702  }
2703
2704  // rax: RegExp data (FixedArray)
2705  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2706  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
2707  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
2708  __ j(not_equal, &runtime);
2709
2710  // rax: RegExp data (FixedArray)
2711  // Check that the number of captures fit in the static offsets vector buffer.
2712  __ SmiToInteger32(rdx,
2713                    FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
2714  // Calculate number of capture registers (number_of_captures + 1) * 2.
2715  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
2716  // Check that the static offsets vector buffer is large enough.
2717  __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
2718  __ j(above, &runtime);
2719
2720  // rax: RegExp data (FixedArray)
2721  // rdx: Number of capture registers
2722  // Check that the second argument is a string.
2723  __ movq(rdi, Operand(rsp, kSubjectOffset));
2724  __ JumpIfSmi(rdi, &runtime);
2725  Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
2726  __ j(NegateCondition(is_string), &runtime);
2727
2728  // rdi: Subject string.
2729  // rax: RegExp data (FixedArray).
2730  // rdx: Number of capture registers.
2731  // Check that the third argument is a positive smi less than the string
2732  // length. A negative value will be greater (unsigned comparison).
2733  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
2734  __ JumpIfNotSmi(rbx, &runtime);
2735  __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
2736  __ j(above_equal, &runtime);
2737
2738  // rax: RegExp data (FixedArray)
2739  // rdx: Number of capture registers
2740  // Check that the fourth object is a JSArray object.
2741  __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
2742  __ JumpIfSmi(rdi, &runtime);
2743  __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
2744  __ j(not_equal, &runtime);
2745  // Check that the JSArray is in fast case.
2746  __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
2747  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
2748  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
2749                 Heap::kFixedArrayMapRootIndex);
2750  __ j(not_equal, &runtime);
2751  // Check that the last match info has space for the capture registers and the
2752  // additional information. Ensure no overflow in add.
2753  STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
2754  __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
2755  __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
2756  __ cmpl(rdx, rdi);
2757  __ j(greater, &runtime);
2758
2759  // Reset offset for possibly sliced string.
2760  __ Set(r14, 0);
2761  // rax: RegExp data (FixedArray)
2762  // Check the representation and encoding of the subject string.
2763  Label seq_ascii_string, seq_two_byte_string, check_code;
2764  __ movq(rdi, Operand(rsp, kSubjectOffset));
2765  // Make a copy of the original subject string.
2766  __ movq(r15, rdi);
2767  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2768  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2769  // First check for flat two byte string.
2770  __ andb(rbx, Immediate(kIsNotStringMask |
2771                         kStringRepresentationMask |
2772                         kStringEncodingMask |
2773                         kShortExternalStringMask));
2774  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
2775  __ j(zero, &seq_two_byte_string, Label::kNear);
2776  // Any other flat string must be a flat ASCII string.  None of the following
2777  // string type tests will succeed if subject is not a string or a short
2778  // external string.
2779  __ andb(rbx, Immediate(kIsNotStringMask |
2780                         kStringRepresentationMask |
2781                         kShortExternalStringMask));
2782  __ j(zero, &seq_ascii_string, Label::kNear);
2783
2784  // rbx: whether subject is a string and if yes, its string representation
2785  // Check for flat cons string or sliced string.
2786  // A flat cons string is a cons string where the second part is the empty
2787  // string. In that case the subject string is just the first part of the cons
2788  // string. Also in this case the first part of the cons string is known to be
2789  // a sequential string or an external string.
2790  // In the case of a sliced string its offset has to be taken into account.
2791  Label cons_string, external_string, check_encoding;
2792  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2793  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2794  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2795  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2796  __ cmpq(rbx, Immediate(kExternalStringTag));
2797  __ j(less, &cons_string, Label::kNear);
2798  __ j(equal, &external_string);
2799
2800  // Catch non-string subject or short external string.
2801  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2802  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
2803  __ j(not_zero, &runtime);
2804
2805  // String is sliced.
2806  __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
2807  __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
2808  // r14: slice offset
2809  // r15: original subject string
2810  // rdi: parent string
2811  __ jmp(&check_encoding, Label::kNear);
2812  // String is a cons string, check whether it is flat.
2813  __ bind(&cons_string);
2814  __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
2815                 Heap::kEmptyStringRootIndex);
2816  __ j(not_equal, &runtime);
2817  __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
2818  // rdi: first part of cons string or parent of sliced string.
2819  // rbx: map of first part of cons string or map of parent of sliced string.
2820  // Is first part of cons or parent of slice a flat two byte string?
2821  __ bind(&check_encoding);
2822  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2823  __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
2824           Immediate(kStringRepresentationMask | kStringEncodingMask));
2825  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
2826  __ j(zero, &seq_two_byte_string, Label::kNear);
2827  // Any other flat string must be sequential ASCII or external.
2828  __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
2829           Immediate(kStringRepresentationMask));
2830  __ j(not_zero, &external_string);
2831
2832  __ bind(&seq_ascii_string);
2833  // rdi: subject string (sequential ASCII)
2834  // rax: RegExp data (FixedArray)
2835  __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
2836  __ Set(rcx, 1);  // Type is ASCII.
2837  __ jmp(&check_code, Label::kNear);
2838
2839  __ bind(&seq_two_byte_string);
2840  // rdi: subject string (flat two-byte)
2841  // rax: RegExp data (FixedArray)
2842  __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
2843  __ Set(rcx, 0);  // Type is two byte.
2844
2845  __ bind(&check_code);
2846  // Check that the irregexp code has been generated for the actual string
2847  // encoding. If it has, the field contains a code object otherwise it contains
2848  // smi (code flushing support)
2849  __ JumpIfSmi(r11, &runtime);
2850
2851  // rdi: subject string
2852  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
2853  // r11: code
2854  // Load used arguments before starting to push arguments for call to native
2855  // RegExp code to avoid handling changing stack height.
2856  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
2857
2858  // rdi: subject string
2859  // rbx: previous index
2860  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
2861  // r11: code
2862  // All checks done. Now push arguments for native regexp code.
2863  Counters* counters = masm->isolate()->counters();
2864  __ IncrementCounter(counters->regexp_entry_native(), 1);
2865
2866  // Isolates: note we add an additional parameter here (isolate pointer).
2867  static const int kRegExpExecuteArguments = 8;
2868  int argument_slots_on_stack =
2869      masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
2870  __ EnterApiExitFrame(argument_slots_on_stack);
2871
2872  // Argument 8: Pass current isolate address.
2873  // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2874  //     Immediate(ExternalReference::isolate_address()));
  __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
  __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
          kScratchRegister);

  // Argument 7: Indicate that this is a direct call from JavaScript.
  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
          Immediate(1));

  // Argument 6: Start (high end) of backtracking stack memory area.
  __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
  __ movq(r9, Operand(kScratchRegister, 0));
  __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
  __ addq(r9, Operand(kScratchRegister, 0));
  // Argument 6 passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
#endif

  // Argument 5: static offsets vector buffer.
  __ LoadAddress(r8,
                 ExternalReference::address_of_static_offsets_vector(isolate));
  // Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
#endif

  // First four arguments are passed in registers on both Linux and Windows.
#ifdef _WIN64
  Register arg4 = r9;
  Register arg3 = r8;
  Register arg2 = rdx;
  Register arg1 = rcx;
#else
  Register arg4 = rcx;
  Register arg3 = rdx;
  Register arg2 = rsi;
  Register arg1 = rdi;
#endif

  // Keep track of aliasing between argX defined above and the registers used.
  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
  // r11: code
  // r14: slice offset
  // r15: original subject string

  // Argument 2: Previous index.
  __ movq(arg2, rbx);

  // Argument 4: End of string data
  // Argument 3: Start of string data
  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
  // Prepare start and end index of the input.
  // Load the length from the original sliced string if that is the case.
  __ addq(rbx, r14);
  __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
  __ addq(r14, arg3);  // Using arg3 as scratch.
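  // E.g. for a slice at offset 5 into its parent, a previous index of 2 and
  // a subject length of 10: start = 2 + 5 = 7 and end = 5 + 10 = 15, both
  // relative to the parent string (r14 is zero when the subject is unsliced).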

  // rbx: start index of the input
  // r14: end index of the input
  // r15: original subject string
  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
  __ j(zero, &setup_two_byte, Label::kNear);
  __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
  __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
  __ jmp(&setup_rest, Label::kNear);
  __ bind(&setup_two_byte);
  __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
  __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
  __ bind(&setup_rest);

  // Argument 1: Original subject string.
  // The original subject string is still available in r15, which was loaded
  // before the exit frame was entered and is not clobbered by the argument
  // setup above, so it can be passed directly.
  __ movq(arg1, r15);

  // Locate the code entry and call it.
  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(r11);

  __ LeaveApiExitFrame();

  // Check the result.
  Label success;
  Label exception;
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
  __ j(equal, &success, Label::kNear);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
  __ j(equal, &exception);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
  // If none of the above, it can only be retry.
  // Handle that in the runtime system.
  __ j(not_equal, &runtime);

  // For failure return null.
  __ LoadRoot(rax, Heap::kNullValueRootIndex);
  __ ret(4 * kPointerSize);

  // Load RegExp data.
  __ bind(&success);
  __ movq(rax, Operand(rsp, kJSRegExpOffset));
  __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
  __ SmiToInteger32(rax,
                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  __ leal(rdx, Operand(rax, rax, times_1, 2));
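  // E.g. a regexp with 3 capture groups needs (3 + 1) * 2 = 8 capture
  // registers: start and end indices for the whole match plus one pair per
  // capture group.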

  // rdx: Number of capture registers
  // Load last_match_info which is still known to be a fast case JSArray.
  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
  __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));

  // rbx: last_match_info backing store (FixedArray)
  // rdx: number of capture registers
  // Store the capture count.
  __ Integer32ToSmi(kScratchRegister, rdx);
  __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
          kScratchRegister);
  // Store last subject and last input.
  __ movq(rax, Operand(rsp, kSubjectOffset));
  __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastSubjectOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);
  __ movq(rax, Operand(rsp, kSubjectOffset));
  __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastInputOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  __ LoadAddress(rcx,
                 ExternalReference::address_of_static_offsets_vector(isolate));

  // rbx: last_match_info backing store (FixedArray)
  // rcx: offsets vector
  // rdx: number of capture registers
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ bind(&next_capture);
  __ subq(rdx, Immediate(1));
  __ j(negative, &done, Label::kNear);
  // Read the value from the static offsets vector buffer and make it a smi.
  __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
  __ Integer32ToSmi(rdi, rdi);
  // Store the smi value in the last match info.
  __ movq(FieldOperand(rbx,
                       rdx,
                       times_pointer_size,
                       RegExpImpl::kFirstCaptureOffset),
          rdi);
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
  __ ret(4 * kPointerSize);

  __ bind(&exception);
  // Result must now be exception. If there is no pending exception already,
  // a stack overflow (on the backtrack stack) was detected in RegExp code
  // but the exception has not been created yet. Handle that in the runtime
  // system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  ExternalReference pending_exception_address(
      Isolate::kPendingExceptionAddress, isolate);
  Operand pending_exception_operand =
      masm->ExternalOperand(pending_exception_address, rbx);
  __ movq(rax, pending_exception_operand);
  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
  __ cmpq(rax, rdx);
  __ j(equal, &runtime);
  __ movq(pending_exception_operand, rdx);

  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
  Label termination_exception;
  __ j(equal, &termination_exception, Label::kNear);
  __ Throw(rax);

  __ bind(&termination_exception);
  __ ThrowUncatchable(rax);

  // External string.  Short external strings have already been ruled out.
  // rdi: subject string (expected to be external)
  // rbx: scratch
  __ bind(&external_string);
  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(rbx, Immediate(kIsIndirectStringMask));
    __ Assert(zero, "external string expected, but not found");
  }
  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(rbx, Immediate(kStringEncodingMask));
  __ j(not_zero, &seq_ascii_string);
  __ jmp(&seq_two_byte_string);

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif  // V8_INTERPRETED_REGEXP
}


void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  __ movq(r8, Operand(rsp, kPointerSize * 3));
  __ JumpIfNotSmi(r8, &slowcase);
  __ SmiToInteger32(rbx, r8);
  __ cmpl(rbx, Immediate(kMaxInlineLength));
  __ j(above, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  // Allocate RegExpResult followed by FixedArray with size in rbx.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
                        times_pointer_size,
                        rbx,  // In: Number of elements.
                        rax,  // Out: Start of allocation (tagged).
                        rcx,  // Out: End of allocation.
                        rdx,  // Scratch register
                        &slowcase,
                        TAG_OBJECT);
  // rax: Start of allocated area, object-tagged.
  // rbx: Number of array elements as int32.
  // r8: Number of array elements as smi.

  // Set JSArray map to global.regexp_result_map().
  __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
  __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);

  // Set empty properties FixedArray.
  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);

  // Set elements to point to FixedArray allocated right after the JSArray.
  __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);

  // Set input, index and length fields from arguments.
  __ movq(r8, Operand(rsp, kPointerSize * 1));
  __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
  __ movq(r8, Operand(rsp, kPointerSize * 2));
  __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
  __ movq(r8, Operand(rsp, kPointerSize * 3));
  __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);

  // Fill out the elements FixedArray.
  // rax: JSArray.
  // rcx: FixedArray.
  // rbx: Number of elements in array as int32.

  // Set map.
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
  // Set length.
  __ Integer32ToSmi(rdx, rbx);
  __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
  // Fill contents of fixed-array with the-hole.
  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
  __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
  // Fill fixed array elements with hole.
  // rax: JSArray.
  // rbx: Number of elements in array that remains to be filled, as int32.
  // rcx: Start of elements in FixedArray.
  // rdx: the hole.
  Label loop;
  __ testl(rbx, rbx);
  __ bind(&loop);
  __ j(less_equal, &done);  // Jump if rbx is negative or zero.
  __ subl(rbx, Immediate(1));
  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
  __ jmp(&loop);

  __ bind(&done);
  __ ret(3 * kPointerSize);

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}


void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ SmiToInteger32(
      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  __ shrl(mask, Immediate(1));
  __ subq(mask, Immediate(1));  // Make mask.
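  // E.g. a number string cache of FixedArray length 128 holds 64
  // (number, string) entry pairs, giving a mask of 63.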

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  Factory* factory = masm->isolate()->factory();
  if (!object_is_smi) {
    __ JumpIfSmi(object, &is_smi);
    __ CheckMap(object,
                factory->heap_number_map(),
                not_found,
                DONT_DO_SMI_CHECK);

    STATIC_ASSERT(8 == kDoubleSize);
    __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
    GenerateConvertHashCodeToIndex(masm, scratch, mask);

    Register index = scratch;
    Register probe = mask;
    __ movq(probe,
            FieldOperand(number_string_cache,
                         index,
                         times_1,
                         FixedArray::kHeaderSize));
    __ JumpIfSmi(probe, not_found);
    __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
    __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
    __ ucomisd(xmm0, xmm1);
    __ j(parity_even, not_found);  // Bail out if NaN is involved.
    __ j(not_equal, not_found);  // The cache did not contain this value.
    __ jmp(&load_result_from_cache);
  }

  __ bind(&is_smi);
  __ SmiToInteger32(scratch, object);
  GenerateConvertHashCodeToIndex(masm, scratch, mask);

  Register index = scratch;
  // Check if the entry is the smi we are looking for.
  __ cmpq(object,
          FieldOperand(number_string_cache,
                       index,
                       times_1,
                       FixedArray::kHeaderSize));
  __ j(not_equal, not_found);

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ movq(result,
          FieldOperand(number_string_cache,
                       index,
                       times_1,
                       FixedArray::kHeaderSize + kPointerSize));
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->number_to_string_native(), 1);
}


void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
                                                        Register hash,
                                                        Register mask) {
  __ and_(hash, mask);
  // Each entry in the string cache consists of two pointer-sized fields, but
  // the times_twice_pointer_size (multiply by 16) scale factor is not
  // supported by the addressing modes on the x64 platform, so the entry
  // index has to be premultiplied before the lookup.
  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
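  // E.g. with kPointerSizeLog2 == 3 the masked hash is scaled by 16, the
  // byte size of one two-field entry, so it can be used directly as a
  // times_1 index into the cache.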
}


void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ movq(rbx, Operand(rsp, kPointerSize));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
  __ ret(1 * kPointerSize);

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}


static int NegativeComparisonResult(Condition cc) {
  ASSERT(cc != equal);
  ASSERT((cc == less) || (cc == less_equal)
      || (cc == greater) || (cc == greater_equal));
  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}


void CompareStub::Generate(MacroAssembler* masm) {
  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));

  Label check_unequal_objects, done;
  Factory* factory = masm->isolate()->factory();

  // Compare two smis if required.
  if (include_smi_compare_) {
    Label non_smi, smi_done;
    __ JumpIfNotBothSmi(rax, rdx, &non_smi);
    __ subq(rdx, rax);
    __ j(no_overflow, &smi_done);
    __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
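    // E.g. rdx = Smi(2^30), rax = Smi(-2^30): the 64-bit subtraction
    // overflows to a negative value although the true difference is
    // positive; NOT flips the sign bit, yielding a correctly-signed result.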
    __ bind(&smi_done);
    __ movq(rax, rdx);
    __ ret(0);
    __ bind(&non_smi);
  } else if (FLAG_debug_code) {
    Label ok;
    __ JumpIfNotSmi(rdx, &ok);
    __ JumpIfNotSmi(rax, &ok);
    __ Abort("CompareStub: smi operands");
    __ bind(&ok);
  }

  // The compare stub returns a positive, negative, or zero 64-bit integer
  // value in rax, corresponding to the result of comparing the two inputs.
  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Two identical objects are equal unless they are both NaN or undefined.
  {
    Label not_identical;
    __ cmpq(rax, rdx);
    __ j(not_equal, &not_identical, Label::kNear);

    if (cc_ != equal) {
      // Check for undefined.  undefined OP undefined is false even though
      // undefined == undefined.
      Label check_for_nan;
      __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &check_for_nan, Label::kNear);
      __ Set(rax, NegativeComparisonResult(cc_));
      __ ret(0);
      __ bind(&check_for_nan);
    }

    // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
    // so we do the second best thing - test it ourselves.
    // Note: if cc_ != equal, never_nan_nan_ is not used.
    // We cannot set rax to EQUAL until just before return because
    // rax must be unchanged on jump to not_identical.
    if (never_nan_nan_ && (cc_ == equal)) {
      __ Set(rax, EQUAL);
      __ ret(0);
    } else {
      Label heap_number;
      // If it's not a heap number, then return equal for the (in)equality
      // operator.
      __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
             factory->heap_number_map());
      __ j(equal, &heap_number, Label::kNear);
      if (cc_ != equal) {
        // Call runtime on identical objects.  Otherwise return equal.
        __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
        __ j(above_equal, &not_identical, Label::kNear);
      }
      __ Set(rax, EQUAL);
      __ ret(0);

      __ bind(&heap_number);
      // It is a heap number, so return equal if it's not NaN.
      // For NaN, return 1 for every condition except greater and
      // greater-equal.  Return -1 for them, so the comparison yields
      // false for all conditions except not-equal.
      __ Set(rax, EQUAL);
      __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
      __ ucomisd(xmm0, xmm0);
      __ setcc(parity_even, rax);
      // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
      if (cc_ == greater_equal || cc_ == greater) {
        __ neg(rax);
      }
      __ ret(0);
    }

    __ bind(&not_identical);
  }

  if (cc_ == equal) {  // Both strict and non-strict.
    Label slow;  // Fallthrough label.

    // If we're doing a strict equality comparison, we don't have to do
    // type conversion, so we generate code to do fast comparison for objects
    // and oddballs. Non-smi numbers and strings still go through the usual
    // slow-case code.
    if (strict_) {
      // If either is a Smi (we know that not both are), then they can only
      // be equal if the other is a HeapNumber. If so, use the slow case.
      {
        Label not_smis;
        __ SelectNonSmi(rbx, rax, rdx, &not_smis);

        // Check if the non-smi operand is a heap number.
        __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
               factory->heap_number_map());
        // If heap number, handle it in the slow case.
        __ j(equal, &slow);
        // Return non-equal.  ebx (the lower half of rbx) is not zero.
        __ movq(rax, rbx);
        __ ret(0);

        __ bind(&not_smis);
      }

      // If either operand is a JSObject or an oddball value, then they are
      // not equal since their pointers are different.
      // There is no test for undetectability in strict equality.

      // If the first object is a JS object, we have done pointer comparison.
      STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
      Label first_non_object;
      __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
      __ j(below, &first_non_object, Label::kNear);
      // Return non-zero (eax (not rax) is not zero)
      Label return_not_equal;
      STATIC_ASSERT(kHeapObjectTag != 0);
      __ bind(&return_not_equal);
      __ ret(0);

      __ bind(&first_non_object);
      // Check for oddballs: true, false, null, undefined.
      __ CmpInstanceType(rcx, ODDBALL_TYPE);
      __ j(equal, &return_not_equal);

      __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
      __ j(above_equal, &return_not_equal);

      // Check for oddballs: true, false, null, undefined.
      __ CmpInstanceType(rcx, ODDBALL_TYPE);
      __ j(equal, &return_not_equal);

      // Fall through to the general case.
    }
    __ bind(&slow);
  }

  // Generate the number comparison code.
  if (include_number_compare_) {
    Label non_number_comparison;
    Label unordered;
    FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
    __ xorl(rax, rax);
    __ xorl(rcx, rcx);
    __ ucomisd(xmm0, xmm1);

    // Don't base result on EFLAGS when a NaN is involved.
    __ j(parity_even, &unordered, Label::kNear);
    // Return a result of -1, 0, or 1, based on EFLAGS.
    __ setcc(above, rax);
    __ setcc(below, rcx);
    __ subq(rax, rcx);
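    // E.g. if xmm0 compares below xmm1, only the below flag is set, so
    // rax = 0 and rcx = 1, and rax - rcx yields -1 (LESS); equal inputs
    // leave both registers at zero (EQUAL).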
    __ ret(0);

    // If one of the numbers was NaN, then the result is always false.
    // The cc is never not-equal.
    __ bind(&unordered);
    ASSERT(cc_ != not_equal);
    if (cc_ == less || cc_ == less_equal) {
      __ Set(rax, 1);
    } else {
      __ Set(rax, -1);
    }
    __ ret(0);

    // The number comparison code did not provide a valid result.
    __ bind(&non_number_comparison);
  }

  // Fast negative check for symbol-to-symbol equality.
  Label check_for_strings;
  if (cc_ == equal) {
    BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
    BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);

    // We've already checked for object identity, so if both operands
    // are symbols they aren't equal. Register eax (not rax) already holds a
    // non-zero value, which indicates not equal, so just return.
    __ ret(0);
  }

  __ bind(&check_for_strings);

  __ JumpIfNotBothSequentialAsciiStrings(
      rdx, rax, rcx, rbx, &check_unequal_objects);

  // Inline comparison of ASCII strings.
  if (cc_ == equal) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     rdx,
                                                     rax,
                                                     rcx,
                                                     rbx);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       rdx,
                                                       rax,
                                                       rcx,
                                                       rbx,
                                                       rdi,
                                                       r8);
  }

#ifdef DEBUG
  __ Abort("Unexpected fall-through from string comparison");
#endif

  __ bind(&check_unequal_objects);
  if (cc_ == equal && !strict_) {
    // Not strict equality.  Objects are unequal if
    // they are both JSObjects and not undetectable,
    // and their pointers are different.
    Label not_both_objects, return_unequal;
    // At most one is a smi, so we can test for smi by adding the two.
    // A smi plus a heap object has the low bit set, a heap object plus
    // a heap object has the low bit clear.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagMask == 1);
    __ lea(rcx, Operand(rax, rdx, times_1, 0));
    __ testb(rcx, Immediate(kSmiTagMask));
    __ j(not_zero, &not_both_objects, Label::kNear);
    __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
    __ j(below, &not_both_objects, Label::kNear);
    __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
    __ j(below, &not_both_objects, Label::kNear);
    __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    __ j(zero, &return_unequal, Label::kNear);
    __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    __ j(zero, &return_unequal, Label::kNear);
    // The objects are both undetectable, so they both compare as the value
    // undefined, and are equal.
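    // (Undetectable objects exist to emulate document.all, which must
    // compare equal to both undefined and null.)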
    __ Set(rax, EQUAL);
    __ bind(&return_unequal);
    // Return non-equal by returning the non-zero object pointer in rax,
    // or return equal if we fell through to here.
    __ ret(0);
    __ bind(&not_both_objects);
  }

  // Push arguments below the return address to prepare jump to builtin.
  __ pop(rcx);
  __ push(rdx);
  __ push(rax);

  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript builtin;
  if (cc_ == equal) {
    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    builtin = Builtins::COMPARE;
    __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
  }

  // Restore return address on the stack.
  __ push(rcx);

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
}


void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
                                    Label* label,
                                    Register object,
                                    Register scratch) {
  __ JumpIfSmi(object, label);
  __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
  __ movzxbq(scratch,
             FieldOperand(scratch, Map::kInstanceTypeOffset));
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
  __ testb(scratch, Immediate(kIsSymbolMask));
  __ j(zero, label);
}


void StackCheckStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}


void InterruptStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
}


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a global property cell.  Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // rbx : cache cell for call target
  // rdi : the function to call
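  //
  // Cache state transitions (sketch):
  //   uninitialized --first call--------------> monomorphic (the JSFunction)
  //   monomorphic   --call w/ other function--> megamorphic
  //   megamorphic   --any call----------------> megamorphic (terminal state)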
  Isolate* isolate = masm->isolate();
  Label initialize, done;

  // Load the cache state into rcx.
  __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  __ cmpq(rcx, rdi);
  __ j(equal, &done, Label::kNear);
  __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
  __ j(equal, &done, Label::kNear);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
  __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
  __ j(equal, &initialize, Label::kNear);
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
          TypeFeedbackCells::MegamorphicSentinel(isolate));
  __ jmp(&done, Label::kNear);

  // An uninitialized cache is patched with the function.
  __ bind(&initialize);
  __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
  // No need for a write barrier here - cells are rescanned.

  __ bind(&done);
}


void CallFunctionStub::Generate(MacroAssembler* masm) {
  // rdi : the function to call
  // rbx : cache cell for call target
  Label slow, non_function;

  // The receiver might implicitly be the global object. This is
  // indicated by passing the hole as the receiver to the call
  // function stub.
  if (ReceiverMightBeImplicit()) {
    Label call;
    // Get the receiver from the stack.
    // +1 ~ return address
    __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
    // Call as function is indicated with the hole.
    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
    __ j(not_equal, &call, Label::kNear);
    // Patch the receiver on the stack with the global receiver object.
    __ movq(rbx, GlobalObjectOperand());
    __ movq(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rbx);
    __ bind(&call);
  }

  // Check that the function really is a JavaScript function.
  __ JumpIfSmi(rdi, &non_function);
  // Goto slow case if we do not have a function.
  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
  __ j(not_equal, &slow);

  // Fast-case: Just invoke the function.
  ParameterCount actual(argc_);

  if (ReceiverMightBeImplicit()) {
    Label call_as_function;
    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
    __ j(equal, &call_as_function);
    __ InvokeFunction(rdi,
                      actual,
                      JUMP_FUNCTION,
                      NullCallWrapper(),
                      CALL_AS_METHOD);
    __ bind(&call_as_function);
  }
  __ InvokeFunction(rdi,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    CALL_AS_FUNCTION);

  // Slow-case: Non-function called.
  __ bind(&slow);
  // Check for function proxy.
  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
  __ j(not_equal, &non_function);
  __ pop(rcx);
  __ push(rdi);  // put proxy as additional argument under return address
  __ push(rcx);
  __ Set(rax, argc_ + 1);
  __ Set(rbx, 0);
  __ SetCallKind(rcx, CALL_AS_METHOD);
  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
  {
    Handle<Code> adaptor =
      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
    __ jmp(adaptor, RelocInfo::CODE_TARGET);
  }

  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
  // of the original receiver from the call site).
  __ bind(&non_function);
  __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
  __ Set(rax, argc_);
  __ Set(rbx, 0);
  __ SetCallKind(rcx, CALL_AS_METHOD);
  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
  Handle<Code> adaptor =
      Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
  __ Jump(adaptor, RelocInfo::CODE_TARGET);
}


void CallConstructStub::Generate(MacroAssembler* masm) {
  // rax : number of arguments
  // rbx : cache cell for call target
  // rdi : constructor function
  Label slow, non_function_call;

  // Check that function is not a smi.
  __ JumpIfSmi(rdi, &non_function_call);
  // Check that function is a JSFunction.
  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
  __ j(not_equal, &slow);

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Jump to the function-specific construct stub.
  __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
  __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
  __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
  __ jmp(rbx);

  // rdi: called object
  // rax: number of arguments
  // rcx: object map
  Label do_call;
  __ bind(&slow);
  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
  __ j(not_equal, &non_function_call);
  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);

  __ bind(&non_function_call);
  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing rax).
  __ Set(rbx, 0);
  __ SetCallKind(rcx, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


bool CEntryStub::NeedsImmovableCode() {
  return false;
}


bool CEntryStub::IsPregenerated() {
#ifdef _WIN64
  return result_size_ == 1;
#else
  return true;
#endif
}


void CodeStub::GenerateStubsAheadOfTime() {
  CEntryStub::GenerateAheadOfTime();
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
  // It is important that the store buffer overflow stubs are generated first.
  RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
}


void CodeStub::GenerateFPStubs() {
}


void CEntryStub::GenerateAheadOfTime() {
  CEntryStub stub(1, kDontSaveFPRegs);
  stub.GetCode()->set_is_pregenerated(true);
  CEntryStub save_doubles(1, kSaveFPRegs);
  save_doubles.GetCode()->set_is_pregenerated(true);
}


void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_termination_exception,
                              Label* throw_out_of_memory_exception,
                              bool do_gc,
                              bool always_allocate_scope) {
  // rax: result parameter for PerformGC, if any.
  // rbx: pointer to C function  (C callee-saved).
  // rbp: frame pointer  (restored after C call).
  // rsp: stack pointer  (restored after C call).
  // r14: number of arguments including receiver (C callee-saved).
  // r15: pointer to the first argument (C callee-saved).
  //      This pointer is reused in LeaveExitFrame(), so it is stored in a
  //      callee-saved register.

  // Simple results returned in rax (both AMD64 and Win64 calling conventions).
  // Complex results must be written to address passed as first argument.
  // AMD64 calling convention: a struct of two pointers in rax+rdx

  // Check stack alignment.
  if (FLAG_debug_code) {
    __ CheckStackAlignment();
  }

  if (do_gc) {
    // Pass failure code returned from last attempt as first argument to
    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
    // stack is known to be aligned. This function takes one argument which is
    // passed in register.
#ifdef _WIN64
    __ movq(rcx, rax);
#else  // _WIN64
    __ movq(rdi, rax);
#endif
    __ movq(kScratchRegister,
            FUNCTION_ADDR(Runtime::PerformGC),
            RelocInfo::RUNTIME_ENTRY);
    __ call(kScratchRegister);
  }

  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
  if (always_allocate_scope) {
    Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
    __ incl(scope_depth_operand);
  }

  // Call C function.
#ifdef _WIN64
  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
  // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
  __ movq(StackSpaceOperand(0), r14);  // argc.
  __ movq(StackSpaceOperand(1), r15);  // argv.
  if (result_size_ < 2) {
    // Pass a pointer to the Arguments object as the first argument.
    // Return result in single register (rax).
    __ lea(rcx, StackSpaceOperand(0));
    __ LoadAddress(rdx, ExternalReference::isolate_address());
  } else {
    ASSERT_EQ(2, result_size_);
    // Pass a pointer to the result location as the first argument.
    __ lea(rcx, StackSpaceOperand(2));
    // Pass a pointer to the Arguments object as the second argument.
    __ lea(rdx, StackSpaceOperand(0));
    __ LoadAddress(r8, ExternalReference::isolate_address());
  }

#else  // _WIN64
  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
  __ movq(rdi, r14);  // argc.
  __ movq(rsi, r15);  // argv.
  __ movq(rdx, ExternalReference::isolate_address());
#endif
  __ call(rbx);
  // Result is in rax - do not destroy this register!

  if (always_allocate_scope) {
    Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
    __ decl(scope_depth_operand);
  }

  // Check for failure result.
  Label failure_returned;
  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
#ifdef _WIN64
  // If return value is on the stack, pop it to registers.
  if (result_size_ > 1) {
    ASSERT_EQ(2, result_size_);
    // Read result values stored on stack. Result is stored
    // above the four argument mirror slots and the two
    // Arguments object slots.
    __ movq(rax, Operand(rsp, 6 * kPointerSize));
    __ movq(rdx, Operand(rsp, 7 * kPointerSize));
  }
#endif
  __ lea(rcx, Operand(rax, 1));
  // Lower 2 bits of rcx are 0 iff rax has failure tag.
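  // (A failure object has low bits 0b11, so adding 1 clears both; a heap
  // object's 0b01 becomes 0b10 and a smi's low bit 0 becomes 1, so neither
  // passes the test below.)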
  __ testl(rcx, Immediate(kFailureTagMask));
  __ j(zero, &failure_returned);

  // Exit the JavaScript to C++ exit frame.
  __ LeaveExitFrame(save_doubles_);
  __ ret(0);

  // Handling of failure.
  __ bind(&failure_returned);

  Label retry;
  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ j(zero, &retry, Label::kNear);

  // Special handling of out of memory exceptions.
  __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
  __ cmpq(rax, kScratchRegister);
  __ j(equal, throw_out_of_memory_exception);

  // Retrieve the pending exception and clear the variable.
  ExternalReference pending_exception_address(
      Isolate::kPendingExceptionAddress, masm->isolate());
  Operand pending_exception_operand =
      masm->ExternalOperand(pending_exception_address);
  __ movq(rax, pending_exception_operand);
  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
  __ movq(pending_exception_operand, rdx);

  // Special handling of termination exceptions which are uncatchable
  // by javascript code.
  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
  __ j(equal, throw_termination_exception);

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  // Retry.
  __ bind(&retry);
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // rax: number of arguments including receiver
  // rbx: pointer to C function  (C callee-saved)
  // rbp: frame pointer of calling JS frame (restored after C call)
  // rsp: stack pointer  (restored after C call)
  // rsi: current context (restored)

  // NOTE: Invocations of builtins may return failure objects
  // instead of a proper result. The builtin entry handles
  // this by performing a garbage collection and retrying the
  // builtin once.

  // Enter the exit frame that transitions from JavaScript to C++.
#ifdef _WIN64
  int arg_stack_space = (result_size_ < 2 ? 2 : 4);
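  // Two slots spill argc and argv (the Arguments object); the two extra
  // slots, when present, receive a two-pointer result that Win64 returns
  // through a hidden pointer rather than in rax:rdx.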
#else
  int arg_stack_space = 0;
#endif
  __ EnterExitFrame(arg_stack_space, save_doubles_);

  // rax: Holds the context at this point, but should not be used.
  //      On entry to code generated by GenerateCore, it must hold
  //      a failure result if the collect_garbage argument to GenerateCore
  //      is true.  This failure result can be the result of code
  //      generated by a previous call to GenerateCore.  The value
  //      of rax is then passed to Runtime::PerformGC.
  // rbx: pointer to builtin function  (C callee-saved).
  // rbp: frame pointer of exit frame  (restored after C call).
  // rsp: stack pointer (restored after C call).
  // r14: number of arguments including receiver (C callee-saved).
  // r15: argv pointer (C callee-saved).

  Label throw_normal_exception;
  Label throw_termination_exception;
  Label throw_out_of_memory_exception;

  // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               false,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ movq(rax, failure, RelocInfo::NONE);
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               true);

  __ bind(&throw_out_of_memory_exception);
  // Set external caught exception to false.
  Isolate* isolate = masm->isolate();
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
                                    isolate);
  __ Set(rax, static_cast<int64_t>(false));
  __ Store(external_caught, rax);

  // Set pending exception and rax to out of memory exception.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      isolate);
  __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
  __ Store(pending_exception, rax);
  // Fall through to the next label.

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(rax);

  __ bind(&throw_normal_exception);
  __ Throw(rax);
}


void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  Label invoke, handler_entry, exit;
  Label not_outermost_js, not_outermost_js_2;
  {  // NOLINT. Scope block confuses linter.
    MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
    // Set up frame.
    __ push(rbp);
    __ movq(rbp, rsp);

    // Push the stack frame type marker twice.
    int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
    // The scratch register is neither callee-saved nor an argument register
    // on any platform. It's free to use at this point.
    // Cannot use smi-register for loading yet.
    __ movq(kScratchRegister,
            reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
            RelocInfo::NONE);
    __ push(kScratchRegister);  // context slot
    __ push(kScratchRegister);  // function slot
    // Save callee-saved registers (X64/Win64 calling conventions).
    __ push(r12);
    __ push(r13);
    __ push(r14);
    __ push(r15);
#ifdef _WIN64
    __ push(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
    __ push(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
    __ push(rbx);
    // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
    // callee save as well.

    // Set up the roots and smi constant registers.
    // Needs to be done before any further smi loads.
    __ InitializeSmiConstantRegister();
    __ InitializeRootRegister();
  }

  Isolate* isolate = masm->isolate();

  // Save copies of the top frame descriptor on the stack.
  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
  {
    Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
    __ push(c_entry_fp_operand);
  }

  // If this is the outermost JS call, set js_entry_sp value.
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ Load(rax, js_entry_sp);
  __ testq(rax, rax);
  __ j(not_zero, &not_outermost_js);
  __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ movq(rax, rbp);
  __ Store(js_entry_sp, rax);
  Label cont;
  __ jmp(&cont);
  __ bind(&not_outermost_js);
  __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
  __ bind(&cont);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);
  __ bind(&handler_entry);
  handler_offset_ = handler_entry.pos();
  // Caught exception: Store result (exception) in the pending exception
  // field in the JSEnv and return a failure sentinel.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      isolate);
  __ Store(pending_exception, rax);
  __ movq(rax, Failure::Exception(), RelocInfo::NONE);
  __ jmp(&exit);

  // Invoke: Link this frame into the handler chain.  There's only one
  // handler block in this code object, so its index is 0.
  __ bind(&invoke);
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);

  // Clear any pending exceptions.
  __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
  __ Store(pending_exception, rax);

  // Fake a receiver (NULL).
  __ push(Immediate(0));  // receiver

  // Invoke the function by calling through JS entry trampoline builtin and
  // pop the faked function when we return. We load the address from an
  // external reference instead of inlining the call target address directly
  // in the code, because the builtin stubs may not have been generated yet
  // at the time this code is generated.
  if (is_construct) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ Load(rax, construct_entry);
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
    __ Load(rax, entry);
  }
  __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
  __ call(kScratchRegister);

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);
  // Check if the current stack frame is marked as the outermost JS frame.
  __ pop(rbx);
  __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
  __ j(not_equal, &not_outermost_js_2);
  __ movq(kScratchRegister, js_entry_sp);
  __ movq(Operand(kScratchRegister, 0), Immediate(0));
  __ bind(&not_outermost_js_2);

  // Restore the top frame descriptor from the stack.
  { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
    __ pop(c_entry_fp_operand);
  }

  // Restore callee-saved registers (X64 conventions).
  __ pop(rbx);
#ifdef _WIN64
  // Callee-saved in Win64 ABI; arguments/volatile in AMD64 ABI.
  __ pop(rsi);
  __ pop(rdi);
#endif
  __ pop(r15);
  __ pop(r14);
  __ pop(r13);
  __ pop(r12);
  __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers

  // Restore frame pointer and return.
  __ pop(rbp);
  __ ret(0);
}


void InstanceofStub::Generate(MacroAssembler* masm) {
  // Implements "value instanceof function" operator.
  // Expected input state with no inline cache:
  //   rsp[0] : return address
  //   rsp[1] : function pointer
  //   rsp[2] : value
  // Expected input state with an inline one-element cache:
  //   rsp[0] : return address
  //   rsp[1] : offset from return address to location of inline cache
  //   rsp[2] : function pointer
  //   rsp[3] : value
  // Returns a bitwise zero to indicate that the value
  // is an instance of the function and anything else to
  // indicate that the value is not an instance.
4165
4166  static const int kOffsetToMapCheckValue = 2;
4167  static const int kOffsetToResultValue = 18;
4168  // The last 4 bytes of the instruction sequence
4169  //   movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
4170  //   Move(kScratchRegister, FACTORY->the_hole_value())
4171  // in front of the hole value address.
4172  static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
4173  // The last 4 bytes of the instruction sequence
4174  //   __ j(not_equal, &cache_miss);
4175  //   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
4176  // before the offset of the hole value in the root array.
4177  static const unsigned int kWordBeforeResultValue = 0x458B4909;
4178  // Only the inline check flag is supported on X64.
4179  ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
4180  int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
4181
4182  // Get the object - go slow case if it's a smi.
4183  Label slow;
4184
4185  __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
4186  __ JumpIfSmi(rax, &slow);
4187
4188  // Check that the left hand is a JS object. Leave its map in rax.
4189  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
4190  __ j(below, &slow);
4191  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
4192  __ j(above, &slow);
4193
4194  // Get the prototype of the function.
4195  __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
4196  // rdx is function, rax is map.
4197
4198  // If there is a call site cache don't look in the global cache, but do the
4199  // real lookup and update the call site cache.
4200  if (!HasCallSiteInlineCheck()) {
4201    // Look up the function and the map in the instanceof cache.
4202    Label miss;
4203    __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4204    __ j(not_equal, &miss, Label::kNear);
4205    __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4206    __ j(not_equal, &miss, Label::kNear);
4207    __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4208    __ ret(2 * kPointerSize);
4209    __ bind(&miss);
4210  }
4211
4212  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
4213
4214  // Check that the function prototype is a JS object.
4215  __ JumpIfSmi(rbx, &slow);
4216  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
4217  __ j(below, &slow);
4218  __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
4219  __ j(above, &slow);
4220
4221  // Register mapping:
4222  //   rax is object map.
4223  //   rdx is function.
4224  //   rbx is function prototype.
4225  if (!HasCallSiteInlineCheck()) {
4226    __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4227    __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4228  } else {
4229    // Get return address and delta to inlined map check.
4230    __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4231    __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4232    if (FLAG_debug_code) {
4233      __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
4234      __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
4235      __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
4236    }
4237    __ movq(kScratchRegister,
4238            Operand(kScratchRegister, kOffsetToMapCheckValue));
4239    __ movq(Operand(kScratchRegister, 0), rax);
4240  }
4241
4242  __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
4243
4244  // Loop through the prototype chain looking for the function prototype.
4245  Label loop, is_instance, is_not_instance;
4246  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
4247  __ bind(&loop);
4248  __ cmpq(rcx, rbx);
4249  __ j(equal, &is_instance, Label::kNear);
4250  __ cmpq(rcx, kScratchRegister);
4251  // The code at is_not_instance assumes that kScratchRegister contains a
4252  // non-zero GCable value (the null object in this case).
4253  __ j(equal, &is_not_instance, Label::kNear);
4254  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
4255  __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
4256  __ jmp(&loop);
4257
4258  __ bind(&is_instance);
4259  if (!HasCallSiteInlineCheck()) {
4260    __ xorl(rax, rax);
4261    // Store bitwise zero in the cache.  This is a Smi in GC terms.
4262    STATIC_ASSERT(kSmiTag == 0);
4263    __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4264  } else {
4265    // Store offset of true in the root array at the inline check site.
4266    int true_offset = 0x100 +
4267        (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4268    // Assert it is a 1-byte signed value.
4269    ASSERT(true_offset >= 0 && true_offset < 0x100);
4270    __ movl(rax, Immediate(true_offset));
4271    __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4272    __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4273    __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4274    if (FLAG_debug_code) {
4275      __ movl(rax, Immediate(kWordBeforeResultValue));
4276      __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4277      __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
4278    }
4279    __ Set(rax, 0);
4280  }
4281  __ ret(2 * kPointerSize + extra_stack_space);
4282
4283  __ bind(&is_not_instance);
4284  if (!HasCallSiteInlineCheck()) {
4285    // We have to store a non-zero value in the cache.
4286    __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
4287  } else {
4288    // Store offset of false in the root array at the inline check site.
4289    int false_offset = 0x100 +
4290        (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4291    // Assert it is a 1-byte signed value.
4292    ASSERT(false_offset >= 0 && false_offset < 0x100);
4293    __ movl(rax, Immediate(false_offset));
4294    __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4295    __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4296    __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4297    if (FLAG_debug_code) {
4298      __ movl(rax, Immediate(kWordBeforeResultValue));
4299      __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4300      __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
4301    }
4302  }
4303  __ ret(2 * kPointerSize + extra_stack_space);
4304
4305  // Slow-case: Go through the JavaScript implementation.
4306  __ bind(&slow);
4307  if (HasCallSiteInlineCheck()) {
4308    // Remove extra value from the stack.
4309    __ pop(rcx);
4310    __ pop(rax);
4311    __ push(rcx);
4312  }
4313  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4314}
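

// Editorial sketch (not part of the generated stub): the semantics of the
// prototype-chain walk above, over a hypothetical object type standing in
// for real heap objects. NULL plays the role of the null value that
// terminates the chain.
namespace {

struct SketchObject {
  SketchObject* prototype;  // NULL-terminated chain.
};

static bool SketchIsInstance(SketchObject* object,
                             SketchObject* function_prototype) {
  for (SketchObject* p = object->prototype; p != NULL; p = p->prototype) {
    if (p == function_prototype) return true;  // The is_instance case.
  }
  return false;  // The is_not_instance case.
}

}  // namespace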
4315
4316
4317// Passing arguments in registers is not supported.
4318Register InstanceofStub::left() { return no_reg; }
4319
4320
4321Register InstanceofStub::right() { return no_reg; }
4322
4323
4324int CompareStub::MinorKey() {
4325  // Encode the parameters in a unique 16-bit value. To avoid duplicate
4326  // stubs, the never-NaN-NaN condition is only taken into account if the
4327  // condition is equal.
4328  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
4329  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4330  return ConditionField::encode(static_cast<unsigned>(cc_))
4331         | RegisterField::encode(false)    // lhs_ and rhs_ are not used
4332         | StrictField::encode(strict_)
4333         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
4334         | IncludeNumberCompareField::encode(include_number_compare_)
4335         | IncludeSmiCompareField::encode(include_smi_compare_);
4336}
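

// An illustrative sketch of the bit-field packing done above. The field
// widths are assumptions for the example; the real ConditionField,
// StrictField, etc. are BitField instantiations declared with the stub.
namespace {

static int SketchCompareMinorKey(unsigned cc, bool strict,
                                 bool never_nan_nan, bool include_smi) {
  // Condition code in the low 12 bits, one assumed flag bit each above that.
  return static_cast<int>(cc)
         | (strict ? 1 : 0) << 12
         | (never_nan_nan ? 1 : 0) << 13
         | (include_smi ? 1 : 0) << 14;
}

}  // namespace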
4337
4338
4339// Unfortunately you have to run without snapshots to see most of these
4340// names in the profile since most compare stubs end up in the snapshot.
4341void CompareStub::PrintName(StringStream* stream) {
4342  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4343  const char* cc_name;
4344  switch (cc_) {
4345    case less: cc_name = "LT"; break;
4346    case greater: cc_name = "GT"; break;
4347    case less_equal: cc_name = "LE"; break;
4348    case greater_equal: cc_name = "GE"; break;
4349    case equal: cc_name = "EQ"; break;
4350    case not_equal: cc_name = "NE"; break;
4351    default: cc_name = "UnknownCondition"; break;
4352  }
4353  bool is_equality = cc_ == equal || cc_ == not_equal;
4354  stream->Add("CompareStub_%s", cc_name);
4355  if (strict_ && is_equality) stream->Add("_STRICT");
4356  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
4357  if (!include_number_compare_) stream->Add("_NO_NUMBER");
4358  if (!include_smi_compare_) stream->Add("_NO_SMI");
4359}
4360
4361
4362// -------------------------------------------------------------------------
4363// StringCharCodeAtGenerator
4364
4365void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
4371  // If the receiver is a smi trigger the non-string case.
4372  __ JumpIfSmi(object_, receiver_not_string_);
4373
4374  // Fetch the instance type of the receiver into result register.
4375  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4376  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4377  // If the receiver is not a string trigger the non-string case.
4378  __ testb(result_, Immediate(kIsNotStringMask));
4379  __ j(not_zero, receiver_not_string_);
4380
4381  // If the index is non-smi trigger the non-smi case.
4382  __ JumpIfNotSmi(index_, &index_not_smi_);
4383  __ bind(&got_smi_index_);
4384
4385  // Check for index out of range.
4386  __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
4387  __ j(above_equal, index_out_of_range_);
4388
4389  __ SmiToInteger32(index_, index_);
4390
4391  StringCharLoadGenerator::Generate(
4392      masm, object_, index_, result_, &call_runtime_);
4393
4394  __ Integer32ToSmi(result_, result_);
4395  __ bind(&exit_);
4396}
4397
4398
4399void StringCharCodeAtGenerator::GenerateSlow(
4400    MacroAssembler* masm,
4401    const RuntimeCallHelper& call_helper) {
4402  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
4403
4404  Factory* factory = masm->isolate()->factory();
4405  // Index is not a smi.
4406  __ bind(&index_not_smi_);
4407  // If index is a heap number, try converting it to an integer.
4408  __ CheckMap(index_,
4409              factory->heap_number_map(),
4410              index_not_number_,
4411              DONT_DO_SMI_CHECK);
4412  call_helper.BeforeCall(masm);
4413  __ push(object_);
4414  __ push(index_);  // Consumed by runtime conversion function.
4415  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
4416    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
4417  } else {
4418    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
4419    // NumberToSmi discards numbers that are not exact integers.
4420    __ CallRuntime(Runtime::kNumberToSmi, 1);
4421  }
4422  if (!index_.is(rax)) {
4423    // Save the conversion result before the pop instructions below
4424    // have a chance to overwrite it.
4425    __ movq(index_, rax);
4426  }
4427  __ pop(object_);
4428  // Reload the instance type.
4429  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4430  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4431  call_helper.AfterCall(masm);
4432  // If index is still not a smi, it must be out of range.
4433  __ JumpIfNotSmi(index_, index_out_of_range_);
4434  // Otherwise, return to the fast path.
4435  __ jmp(&got_smi_index_);
4436
4437  // Call the runtime. We get here when the receiver is a string and the
4438  // index is a number, but getting the actual character is too complex
4439  // (e.g., when the string needs to be flattened).
4440  __ bind(&call_runtime_);
4441  call_helper.BeforeCall(masm);
4442  __ push(object_);
4443  __ Integer32ToSmi(index_, index_);
4444  __ push(index_);
4445  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
4446  if (!result_.is(rax)) {
4447    __ movq(result_, rax);
4448  }
4449  call_helper.AfterCall(masm);
4450  __ jmp(&exit_);
4451
4452  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
4453}
4454
4455
4456// -------------------------------------------------------------------------
4457// StringCharFromCodeGenerator
4458
4459void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
4460  // Fast case of Heap::LookupSingleCharacterStringFromCode.
4461  __ JumpIfNotSmi(code_, &slow_case_);
4462  __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
4463  __ j(above, &slow_case_);
4464
4465  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4466  SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
4467  __ movq(result_, FieldOperand(result_, index.reg, index.scale,
4468                                FixedArray::kHeaderSize));
4469  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
4470  __ j(equal, &slow_case_);
4471  __ bind(&exit_);
4472}
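

// Editorial sketch of the fast case above, over a hypothetical cache array:
// a code no greater than the maximal one-byte char code indexes a premade
// single-character string; a missing entry (NULL here, undefined in the
// stub) means the slow case must run.
namespace {

static const char* SketchCharFromCode(const char* const* cache, int code,
                                      int max_ascii_char_code) {
  if (code < 0 || code > max_ascii_char_code) return NULL;  // slow_case_
  return cache[code];  // NULL also sends the stub to slow_case_.
}

}  // namespace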
4473
4474
4475void StringCharFromCodeGenerator::GenerateSlow(
4476    MacroAssembler* masm,
4477    const RuntimeCallHelper& call_helper) {
4478  __ Abort("Unexpected fallthrough to CharFromCode slow case");
4479
4480  __ bind(&slow_case_);
4481  call_helper.BeforeCall(masm);
4482  __ push(code_);
4483  __ CallRuntime(Runtime::kCharFromCode, 1);
4484  if (!result_.is(rax)) {
4485    __ movq(result_, rax);
4486  }
4487  call_helper.AfterCall(masm);
4488  __ jmp(&exit_);
4489
4490  __ Abort("Unexpected fallthrough from CharFromCode slow case");
4491}
4492
4493
4494// -------------------------------------------------------------------------
4495// StringCharAtGenerator
4496
4497void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
4498  char_code_at_generator_.GenerateFast(masm);
4499  char_from_code_generator_.GenerateFast(masm);
4500}
4501
4502
4503void StringCharAtGenerator::GenerateSlow(
4504    MacroAssembler* masm,
4505    const RuntimeCallHelper& call_helper) {
4506  char_code_at_generator_.GenerateSlow(masm, call_helper);
4507  char_from_code_generator_.GenerateSlow(masm, call_helper);
4508}
4509
4510
4511void StringAddStub::Generate(MacroAssembler* masm) {
4512  Label call_runtime, call_builtin;
4513  Builtins::JavaScript builtin_id = Builtins::ADD;
4514
4515  // Load the two arguments.
4516  __ movq(rax, Operand(rsp, 2 * kPointerSize));  // First argument (left).
4517  __ movq(rdx, Operand(rsp, 1 * kPointerSize));  // Second argument (right).
4518
4519  // Make sure that both arguments are strings if not known in advance.
4520  if (flags_ == NO_STRING_ADD_FLAGS) {
4521    __ JumpIfSmi(rax, &call_runtime);
4522    __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
4523    __ j(above_equal, &call_runtime);
4524
4525    // First argument is a string, test second.
4526    __ JumpIfSmi(rdx, &call_runtime);
4527    __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
4528    __ j(above_equal, &call_runtime);
4529  } else {
4530    // Here at least one of the arguments is definitely a string.
4531    // We convert the one that is not known to be a string.
4532    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
4533      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
4534      GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
4535                              &call_builtin);
4536      builtin_id = Builtins::STRING_ADD_RIGHT;
4537    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
4538      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
4539      GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
4540                              &call_builtin);
4541      builtin_id = Builtins::STRING_ADD_LEFT;
4542    }
4543  }
4544
4545  // Both arguments are strings.
4546  // rax: first string
4547  // rdx: second string
4548  // Check if either of the strings is empty. In that case return the other.
4549  Label second_not_zero_length, both_not_zero_length;
4550  __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
4551  __ SmiTest(rcx);
4552  __ j(not_zero, &second_not_zero_length, Label::kNear);
4553  // Second string is empty, result is first string which is already in rax.
4554  Counters* counters = masm->isolate()->counters();
4555  __ IncrementCounter(counters->string_add_native(), 1);
4556  __ ret(2 * kPointerSize);
4557  __ bind(&second_not_zero_length);
4558  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
4559  __ SmiTest(rbx);
4560  __ j(not_zero, &both_not_zero_length, Label::kNear);
4561  // First string is empty, result is second string which is in rdx.
4562  __ movq(rax, rdx);
4563  __ IncrementCounter(counters->string_add_native(), 1);
4564  __ ret(2 * kPointerSize);
4565
4566  // Both strings are non-empty.
4567  // rax: first string
4568  // rbx: length of first string
4569  // rcx: length of second string
4570  // rdx: second string
4571  // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
4572  // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
4573  Label string_add_flat_result, longer_than_two;
4574  __ bind(&both_not_zero_length);
4575
4576  // If the arguments were known to be strings, the maps were not loaded into
4577  // r8 and r9 by the code above.
4578  if (flags_ != NO_STRING_ADD_FLAGS) {
4579    __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
4580    __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
4581  }
4582  // Get the instance types of the two strings as they will be needed soon.
4583  __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
4584  __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
4585
4586  // Look at the length of the result of adding the two strings.
4587  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
4588  __ SmiAdd(rbx, rbx, rcx);
4589  // Use the symbol table when adding two one-character strings, as it
4590  // helps later optimizations to return a symbol here.
4591  __ SmiCompare(rbx, Smi::FromInt(2));
4592  __ j(not_equal, &longer_than_two);
4593
4594  // Check that both strings are non-external ASCII strings.
4595  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
4596                                                  &call_runtime);
4597
4598  // Get the two characters forming the sub string.
4599  __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4600  __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4601
4602  // Try to look up the two-character string in the symbol table. If it is
4603  // not found, just allocate a new one.
4604  Label make_two_character_string, make_flat_ascii_string;
4605  StringHelper::GenerateTwoCharacterSymbolTableProbe(
4606      masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
4607  __ IncrementCounter(counters->string_add_native(), 1);
4608  __ ret(2 * kPointerSize);
4609
4610  __ bind(&make_two_character_string);
4611  __ Set(rdi, 2);
4612  __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
4613  // rbx - first byte: first character
4614  // rbx - second byte: *maybe* second character
4615  // Make sure that the second byte of rbx contains the second character.
4616  __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4617  __ shll(rcx, Immediate(kBitsPerByte));
4618  __ orl(rbx, rcx);
4619  // Write both characters to the new string.
4620  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
4621  __ IncrementCounter(counters->string_add_native(), 1);
4622  __ ret(2 * kPointerSize);
4623
4624  __ bind(&longer_than_two);
4625  // Check if resulting string will be flat.
4626  __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
4627  __ j(below, &string_add_flat_result);
4628  // Handle exceptionally long strings in the runtime system.
4629  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4630  __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
4631  __ j(above, &call_runtime);
4632
4633  // If result is not supposed to be flat, allocate a cons string object. If
4634  // both strings are ASCII the result is an ASCII cons string.
4635  // rax: first string
4636  // rbx: length of resulting flat string
4637  // rdx: second string
4638  // r8: instance type of first string
4639  // r9: instance type of second string
4640  Label non_ascii, allocated, ascii_data;
4641  __ movl(rcx, r8);
4642  __ and_(rcx, r9);
4643  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
4644  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4645  __ testl(rcx, Immediate(kStringEncodingMask));
4646  __ j(zero, &non_ascii);
4647  __ bind(&ascii_data);
4648  // Allocate an ASCII cons string.
4649  __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
4650  __ bind(&allocated);
4651  // Fill the fields of the cons string.
4652  __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
4653  __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
4654          Immediate(String::kEmptyHashField));
4655  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
4656  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
4657  __ movq(rax, rcx);
4658  __ IncrementCounter(counters->string_add_native(), 1);
4659  __ ret(2 * kPointerSize);
4660  __ bind(&non_ascii);
4661  // At least one of the strings is two-byte. Check whether it happens
4662  // to contain only ASCII characters.
4663  // rcx: first instance type AND second instance type.
4664  // r8: first instance type.
4665  // r9: second instance type.
4666  __ testb(rcx, Immediate(kAsciiDataHintMask));
4667  __ j(not_zero, &ascii_data);
4668  __ xor_(r8, r9);
4669  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
4670  __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4671  __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4672  __ j(equal, &ascii_data);
4673  // Allocate a two byte cons string.
4674  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
4675  __ jmp(&allocated);
4676
4677  // We cannot encounter sliced strings or cons strings here since:
4678  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
4679  // Handle creating a flat result from either external or sequential strings.
4680  // Locate the first characters' locations.
4681  // rax: first string
4682  // rbx: length of resulting flat string as smi
4683  // rdx: second string
4684  // r8: instance type of first string
4685  // r9: instance type of second string
4686  Label first_prepared, second_prepared;
4687  Label first_is_sequential, second_is_sequential;
4688  __ bind(&string_add_flat_result);
4689
4690  __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
4691  // r14: length of first string
4692  STATIC_ASSERT(kSeqStringTag == 0);
4693  __ testb(r8, Immediate(kStringRepresentationMask));
4694  __ j(zero, &first_is_sequential, Label::kNear);
4695  // Rule out short external string and load string resource.
4696  STATIC_ASSERT(kShortExternalStringTag != 0);
4697  __ testb(r8, Immediate(kShortExternalStringMask));
4698  __ j(not_zero, &call_runtime);
4699  __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
4700  __ jmp(&first_prepared, Label::kNear);
4701  __ bind(&first_is_sequential);
4702  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4703  __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4704  __ bind(&first_prepared);
4705
4706  // Check whether both strings have same encoding.
4707  __ xorl(r8, r9);
4708  __ testb(r8, Immediate(kStringEncodingMask));
4709  __ j(not_zero, &call_runtime);
4710
4711  __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
4712  // r15: length of second string
4713  STATIC_ASSERT(kSeqStringTag == 0);
4714  __ testb(r9, Immediate(kStringRepresentationMask));
4715  __ j(zero, &second_is_sequential, Label::kNear);
4716  // Rule out short external string and load string resource.
4717  STATIC_ASSERT(kShortExternalStringTag != 0);
4718  __ testb(r9, Immediate(kShortExternalStringMask));
4719  __ j(not_zero, &call_runtime);
4720  __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
4721  __ jmp(&second_prepared, Label::kNear);
4722  __ bind(&second_is_sequential);
4723  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4724  __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4725  __ bind(&second_prepared);
4726
4727  Label non_ascii_string_add_flat_result;
4728  // r9: instance type of second string
4729  // First string and second string have the same encoding.
4730  STATIC_ASSERT(kTwoByteStringTag == 0);
4731  __ SmiToInteger32(rbx, rbx);
4732  __ testb(r9, Immediate(kStringEncodingMask));
4733  __ j(zero, &non_ascii_string_add_flat_result);
4734
4735  __ bind(&make_flat_ascii_string);
4736  // Both strings are ASCII strings. As they are short, they are both flat.
4737  __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
4738  // rax: result string
4739  // Locate first character of result.
4740  __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4741  // rcx: first char of first string
4742  // rbx: first character of result
4743  // r14: length of first string
4744  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
4745  // rbx: next character of result
4746  // rdx: first char of second string
4747  // r15: length of second string
4748  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
4749  __ IncrementCounter(counters->string_add_native(), 1);
4750  __ ret(2 * kPointerSize);
4751
4752  __ bind(&non_ascii_string_add_flat_result);
4753  // Both strings are two-byte strings. As they are short, they are both flat.
4754  __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
4755  // rax: result string
4756  // Locate first character of result.
4757  __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
4758  // rcx: first char of first string
4759  // rbx: first character of result
4760  // r14: length of first string
4761  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
4762  // rbx: next character of result
4763  // rdx: first char of second string
4764  // r15: length of second string
4765  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
4766  __ IncrementCounter(counters->string_add_native(), 1);
4767  __ ret(2 * kPointerSize);
4768
4769  // Just jump to runtime to add the two strings.
4770  __ bind(&call_runtime);
4771  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4772
4773  if (call_builtin.is_linked()) {
4774    __ bind(&call_builtin);
4775    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4776  }
4777}
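

// Editorial sketch of the dispatch implemented above, for two strings of
// lengths la and lb. kSketchConsMinLength stands in for
// ConsString::kMinLength; all names here are illustrative, not V8 API.
namespace {

static const int kSketchConsMinLength = 13;

enum SketchAddStrategy {
  ADD_RETURN_LEFT,      // Second string empty: return the first.
  ADD_RETURN_RIGHT,     // First string empty: return the second.
  ADD_INTERN_TWO_CHAR,  // Length-2 result: probe the symbol table.
  ADD_MAKE_CONS,        // Long result: build an O(1) cons string.
  ADD_COPY_FLAT         // Short result: copy both parts into a flat string.
};

static SketchAddStrategy SketchChooseAddStrategy(int la, int lb) {
  if (lb == 0) return ADD_RETURN_LEFT;
  if (la == 0) return ADD_RETURN_RIGHT;
  if (la + lb == 2) return ADD_INTERN_TWO_CHAR;
  if (la + lb >= kSketchConsMinLength) return ADD_MAKE_CONS;
  return ADD_COPY_FLAT;
}

}  // namespace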
4778
4779
4780void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4781                                            int stack_offset,
4782                                            Register arg,
4783                                            Register scratch1,
4784                                            Register scratch2,
4785                                            Register scratch3,
4786                                            Label* slow) {
4787  // First check if the argument is already a string.
4788  Label not_string, done;
4789  __ JumpIfSmi(arg, &not_string);
4790  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
4791  __ j(below, &done);
4792
4793  // Check the number to string cache.
4794  Label not_cached;
4795  __ bind(&not_string);
4796  // Puts the cached result into scratch1.
4797  NumberToStringStub::GenerateLookupNumberStringCache(masm,
4798                                                      arg,
4799                                                      scratch1,
4800                                                      scratch2,
4801                                                      scratch3,
4802                                                      false,
4803                                                      &not_cached);
4804  __ movq(arg, scratch1);
4805  __ movq(Operand(rsp, stack_offset), arg);
4806  __ jmp(&done);
4807
4808  // Check if the argument is a safe string wrapper.
4809  __ bind(&not_cached);
4810  __ JumpIfSmi(arg, slow);
4811  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
4812  __ j(not_equal, slow);
4813  __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
4814           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
4815  __ j(zero, slow);
4816  __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
4817  __ movq(Operand(rsp, stack_offset), arg);
4818
4819  __ bind(&done);
4820}
4821
4822
4823void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
4824                                          Register dest,
4825                                          Register src,
4826                                          Register count,
4827                                          bool ascii) {
4828  Label loop;
4829  __ bind(&loop);
4830  // This loop just copies one character at a time, as it is only used for very
4831  // short strings.
4832  if (ascii) {
4833    __ movb(kScratchRegister, Operand(src, 0));
4834    __ movb(Operand(dest, 0), kScratchRegister);
4835    __ incq(src);
4836    __ incq(dest);
4837  } else {
4838    __ movzxwl(kScratchRegister, Operand(src, 0));
4839    __ movw(Operand(dest, 0), kScratchRegister);
4840    __ addq(src, Immediate(2));
4841    __ addq(dest, Immediate(2));
4842  }
4843  __ decl(count);
4844  __ j(not_zero, &loop);
4845}
4846
4847
4848void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
4849                                             Register dest,
4850                                             Register src,
4851                                             Register count,
4852                                             bool ascii) {
4853  // Copy characters using rep movs of quadwords. Copy the remaining
4854  // characters byte by byte after running rep movs; the destination is not
4855  // aligned first.
4856  // Count is positive int32, dest and src are character pointers.
4857  ASSERT(dest.is(rdi));  // rep movs destination
4858  ASSERT(src.is(rsi));  // rep movs source
4859  ASSERT(count.is(rcx));  // rep movs count
4860
4861  // Nothing to do for zero characters.
4862  Label done;
4863  __ testl(count, count);
4864  __ j(zero, &done, Label::kNear);
4865
4866  // Make count the number of bytes to copy.
4867  if (!ascii) {
4868    STATIC_ASSERT(2 == sizeof(uc16));
4869    __ addl(count, count);
4870  }
4871
4872  // Don't enter the rep movs if there are fewer than 8 bytes to copy.
4873  Label last_bytes;
4874  __ testl(count, Immediate(~7));
4875  __ j(zero, &last_bytes, Label::kNear);
4876
4877  // Copy from rsi to rdi using the rep movs instruction.
4878  __ movl(kScratchRegister, count);
4879  __ shr(count, Immediate(3));  // Number of quadwords to copy.
4880  __ repmovsq();
4881
4882  // Find number of bytes left.
4883  __ movl(count, kScratchRegister);
4884  __ and_(count, Immediate(7));
4885
4886  // Check if there are more bytes to copy.
4887  __ bind(&last_bytes);
4888  __ testl(count, count);
4889  __ j(zero, &done, Label::kNear);
4890
4891  // Copy remaining characters.
4892  Label loop;
4893  __ bind(&loop);
4894  __ movb(kScratchRegister, Operand(src, 0));
4895  __ movb(Operand(dest, 0), kScratchRegister);
4896  __ incq(src);
4897  __ incq(dest);
4898  __ decl(count);
4899  __ j(not_zero, &loop);
4900
4901  __ bind(&done);
4902}
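

// Editorial sketch of the copy strategy above: move eight bytes at a time
// (the rep movsq part), then finish the remaining count & 7 bytes one at a
// time. memcpy stands in for the unaligned quadword moves; this assumes the
// <stdint.h>/<string.h>-style declarations that v8.h already pulls in.
namespace {

static void SketchCopyBytes(uint8_t* dest, const uint8_t* src, int count) {
  int quads = count >> 3;  // Number of quadwords to copy.
  for (int i = 0; i < quads; i++) {
    uint64_t block;
    memcpy(&block, src, sizeof(block));  // One 8-byte block per iteration.
    memcpy(dest, &block, sizeof(block));
    src += sizeof(block);
    dest += sizeof(block);
  }
  for (int i = 0; i < (count & 7); i++) {
    dest[i] = src[i];  // Tail bytes, like the byte loop above.
  }
}

}  // namespace
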
4903
4904void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
4905                                                        Register c1,
4906                                                        Register c2,
4907                                                        Register scratch1,
4908                                                        Register scratch2,
4909                                                        Register scratch3,
4910                                                        Register scratch4,
4911                                                        Label* not_found) {
4912  // Register scratch3 is the general scratch register in this function.
4913  Register scratch = scratch3;
4914
4915  // Make sure that both characters are not digits, as such strings have a
4916  // different hash algorithm. Don't try to look for these in the symbol table.
4917  Label not_array_index;
4918  __ leal(scratch, Operand(c1, -'0'));
4919  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4920  __ j(above, &not_array_index, Label::kNear);
4921  __ leal(scratch, Operand(c2, -'0'));
4922  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4923  __ j(below_equal, not_found);
4924
4925  __ bind(&not_array_index);
4926  // Calculate the two character string hash.
4927  Register hash = scratch1;
4928  GenerateHashInit(masm, hash, c1, scratch);
4929  GenerateHashAddCharacter(masm, hash, c2, scratch);
4930  GenerateHashGetHash(masm, hash, scratch);
4931
4932  // Collect the two characters in a register.
4933  Register chars = c1;
4934  __ shl(c2, Immediate(kBitsPerByte));
4935  __ orl(chars, c2);
4936
4937  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
4938  // hash:  hash of two character string.
4939
4940  // Load the symbol table.
4941  Register symbol_table = c2;
4942  __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
4943
4944  // Calculate capacity mask from the symbol table capacity.
4945  Register mask = scratch2;
4946  __ SmiToInteger32(mask,
4947                    FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
4948  __ decl(mask);
4949
4950  Register map = scratch4;
4951
4952  // Registers
4953  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
4954  // hash:         hash of two character string (32-bit int)
4955  // symbol_table: symbol table
4956  // mask:         capacity mask (32-bit int)
4957  // map:          -
4958  // scratch:      -
4959
4960  // Perform a number of probes in the symbol table.
4961  static const int kProbes = 4;
4962  Label found_in_symbol_table;
4963  Label next_probe[kProbes];
4964  Register candidate = scratch;  // Scratch register contains candidate.
4965  for (int i = 0; i < kProbes; i++) {
4966    // Calculate entry in symbol table.
4967    __ movl(scratch, hash);
4968    if (i > 0) {
4969      __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
4970    }
4971    __ andl(scratch, mask);
4972
4973    // Load the entry from the symbol table.
4974    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
4975    __ movq(candidate,
4976            FieldOperand(symbol_table,
4977                         scratch,
4978                         times_pointer_size,
4979                         SymbolTable::kElementsStartOffset));
4980
4981    // If the entry is undefined, no string with this hash can be found.
4982    Label is_string;
4983    __ CmpObjectType(candidate, ODDBALL_TYPE, map);
4984    __ j(not_equal, &is_string, Label::kNear);
4985
4986    __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
4987    __ j(equal, not_found);
4988    // Must be the hole (deleted entry).
4989    if (FLAG_debug_code) {
4990      __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
4991      __ cmpq(kScratchRegister, candidate);
4992      __ Assert(equal, "oddball in symbol table is not undefined or the hole");
4993    }
4994    __ jmp(&next_probe[i]);
4995
4996    __ bind(&is_string);
4997
4998    // If length is not 2 the string is not a candidate.
4999    __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
5000                  Smi::FromInt(2));
5001    __ j(not_equal, &next_probe[i]);
5002
5003    // We use kScratchRegister as a temporary register on the assumption that
5004    // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
5005    Register temp = kScratchRegister;
5006
5007    // Check that the candidate is a non-external ASCII string.
5008    __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
5009    __ JumpIfInstanceTypeIsNotSequentialAscii(
5010        temp, temp, &next_probe[i]);
5011
5012    // Check if the two characters match.
5013    __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
5014    __ andl(temp, Immediate(0x0000ffff));
5015    __ cmpl(chars, temp);
5016    __ j(equal, &found_in_symbol_table);
5017    __ bind(&next_probe[i]);
5018  }
5019
5020  // No matching two-character string found by probing.
5021  __ jmp(not_found);
5022
5023  // Scratch register contains result when we fall through to here.
5024  Register result = candidate;
5025  __ bind(&found_in_symbol_table);
5026  if (!result.is(rax)) {
5027    __ movq(rax, result);
5028  }
5029}
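

// Editorial sketch of the probe sequence above: up to four lookups at
// (hash + offset(i)) & mask, where each slot may hold undefined (give up),
// the hole (keep probing), or a string that must have length 2, be
// sequential ASCII, and match both characters. The flat uint16_t table and
// the quadratic offset schedule are simplifying assumptions, not the real
// SymbolTable layout.
namespace {

static int SketchProbeTwoCharTable(const uint16_t* entries, int capacity,
                                   uint32_t hash, uint16_t chars) {
  const int kSketchProbes = 4;
  uint32_t mask = static_cast<uint32_t>(capacity - 1);  // Capacity is 2^n.
  for (int i = 0; i < kSketchProbes; i++) {
    uint32_t offset = static_cast<uint32_t>((i + i * i) >> 1);  // Assumed.
    uint32_t index = (hash + offset) & mask;
    if (entries[index] == chars) return static_cast<int>(index);
  }
  return -1;  // not_found: the caller allocates a fresh string instead.
}

}  // namespace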
5030
5031
5032void StringHelper::GenerateHashInit(MacroAssembler* masm,
5033                                    Register hash,
5034                                    Register character,
5035                                    Register scratch) {
5036  // hash = (seed + character) + ((seed + character) << 10);
5037  __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
5038  __ SmiToInteger32(scratch, scratch);
5039  __ addl(scratch, character);
5040  __ movl(hash, scratch);
5041  __ shll(scratch, Immediate(10));
5042  __ addl(hash, scratch);
5043  // hash ^= hash >> 6;
5044  __ movl(scratch, hash);
5045  __ shrl(scratch, Immediate(6));
5046  __ xorl(hash, scratch);
5047}
5048
5049
5050void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5051                                            Register hash,
5052                                            Register character,
5053                                            Register scratch) {
5054  // hash += character;
5055  __ addl(hash, character);
5056  // hash += hash << 10;
5057  __ movl(scratch, hash);
5058  __ shll(scratch, Immediate(10));
5059  __ addl(hash, scratch);
5060  // hash ^= hash >> 6;
5061  __ movl(scratch, hash);
5062  __ shrl(scratch, Immediate(6));
5063  __ xorl(hash, scratch);
5064}
5065
5066
5067void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5068                                       Register hash,
5069                                       Register scratch) {
5070  // hash += hash << 3;
5071  __ leal(hash, Operand(hash, hash, times_8, 0));
5072  // hash ^= hash >> 11;
5073  __ movl(scratch, hash);
5074  __ shrl(scratch, Immediate(11));
5075  __ xorl(hash, scratch);
5076  // hash += hash << 15;
5077  __ movl(scratch, hash);
5078  __ shll(scratch, Immediate(15));
5079  __ addl(hash, scratch);
5080
5081  __ andl(hash, Immediate(String::kHashBitMask));
5082
5083  // if (hash == 0) hash = 27;
5084  Label hash_not_zero;
5085  __ j(not_zero, &hash_not_zero);
5086  __ Set(hash, StringHasher::kZeroHash);
5087  __ bind(&hash_not_zero);
5088}
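

// The three generators above emit the Jenkins one-at-a-time hash, seeded
// from the heap's hash seed. An equivalent C++ sketch, with hash_bit_mask
// standing in for String::kHashBitMask and 27 for StringHasher::kZeroHash:
namespace {

static uint32_t SketchStringHash(uint32_t seed, const uint8_t* chars,
                                 int length, uint32_t hash_bit_mask) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    hash += chars[i];  // GenerateHashInit / GenerateHashAddCharacter.
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;  // GenerateHashGetHash.
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= hash_bit_mask;
  return hash == 0 ? 27 : hash;  // if (hash == 0) hash = kZeroHash;
}

}  // namespace
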
5089
5090void SubStringStub::Generate(MacroAssembler* masm) {
5091  Label runtime;
5092
5093  // Stack frame on entry.
5094  //  rsp[0]: return address
5095  //  rsp[8]: to
5096  //  rsp[16]: from
5097  //  rsp[24]: string
5098
5099  const int kToOffset = 1 * kPointerSize;
5100  const int kFromOffset = kToOffset + kPointerSize;
5101  const int kStringOffset = kFromOffset + kPointerSize;
5102  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
5103
5104  // Make sure first argument is a string.
5105  __ movq(rax, Operand(rsp, kStringOffset));
5106  STATIC_ASSERT(kSmiTag == 0);
5107  __ testl(rax, Immediate(kSmiTagMask));
5108  __ j(zero, &runtime);
5109  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
5110  __ j(NegateCondition(is_string), &runtime);
5111
5112  // rax: string
5113  // rbx: instance type
5114  // Calculate length of sub string using the smi values.
5115  Label result_longer_than_two;
5116  __ movq(rcx, Operand(rsp, kToOffset));
5117  __ movq(rdx, Operand(rsp, kFromOffset));
5118  __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
5119
5120  __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
5121  __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
5122  Label not_original_string;
5123  __ j(not_equal, &not_original_string, Label::kNear);
5124  Counters* counters = masm->isolate()->counters();
5125  __ IncrementCounter(counters->sub_string_native(), 1);
5126  __ ret(kArgumentsSize);
5127  __ bind(&not_original_string);
5128  // Special handling of sub-strings of length 1 and 2. One-character strings
5129  // are handled in the runtime system (looked up in the single-character
5130  // cache). Two-character strings are looked up in the symbol table.
5131  __ SmiToInteger32(rcx, rcx);
5132  __ cmpl(rcx, Immediate(2));
5133  __ j(greater, &result_longer_than_two);
5134  __ j(less, &runtime);
5135
5136  // Sub string of length 2 requested.
5137  // rax: string
5138  // rbx: instance type
5139  // rcx: sub string length (value is 2)
5140  // rdx: from index (smi)
5141  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);
5142
5143  // Get the two characters forming the sub string.
5144  __ SmiToInteger32(rdx, rdx);  // From index is no longer a smi.
5145  __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
5146  __ movzxbq(rdi,
5147             FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));
5148
5149  // Try to lookup two character string in symbol table.
5150  Label make_two_character_string;
5151  StringHelper::GenerateTwoCharacterSymbolTableProbe(
5152      masm, rbx, rdi, r9, r11, r14, r15, &make_two_character_string);
5153  __ IncrementCounter(counters->sub_string_native(), 1);
5154  __ ret(3 * kPointerSize);
5155
5156  __ bind(&make_two_character_string);
5157  // Set up registers for allocating the two character string.
5158  __ movzxwq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
5159  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
5160  __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
5161  __ IncrementCounter(counters->sub_string_native(), 1);
5162  __ ret(3 * kPointerSize);
5163
5164  __ bind(&result_longer_than_two);
5165  // rax: string
5166  // rbx: instance type
5167  // rcx: sub string length
5168  // rdx: from index (smi)
5169  // Deal with different string types: update the index if necessary
5170  // and put the underlying string into rdi.
5171  Label underlying_unpacked, sliced_string, seq_or_external_string;
5172  // If the string is not indirect, it can only be sequential or external.
5173  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
5174  STATIC_ASSERT(kIsIndirectStringMask != 0);
5175  __ testb(rbx, Immediate(kIsIndirectStringMask));
5176  __ j(zero, &seq_or_external_string, Label::kNear);
5177
5178  __ testb(rbx, Immediate(kSlicedNotConsMask));
5179  __ j(not_zero, &sliced_string, Label::kNear);
5180  // Cons string.  Check whether it is flat, then fetch first part.
5181  // Flat cons strings have an empty second part.
5182  __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
5183                 Heap::kEmptyStringRootIndex);
5184  __ j(not_equal, &runtime);
5185  __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
5186  // Update instance type.
5187  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5188  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5189  __ jmp(&underlying_unpacked, Label::kNear);
5190
5191  __ bind(&sliced_string);
5192  // Sliced string.  Fetch parent and correct start index by offset.
5193  __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
5194  __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
5195  // Update instance type.
5196  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5197  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5198  __ jmp(&underlying_unpacked, Label::kNear);
5199
5200  __ bind(&seq_or_external_string);
5201  // Sequential or external string.  Just move string to the correct register.
5202  __ movq(rdi, rax);
5203
5204  __ bind(&underlying_unpacked);
5205
5206  if (FLAG_string_slices) {
5207    Label copy_routine;
5208    // rdi: underlying subject string
5209    // rbx: instance type of underlying subject string
5210    // rdx: adjusted start index (smi)
5211    // rcx: length
5212    // If coming from the make_two_character_string path, the string
5213    // is too short to be sliced anyway.
5214    __ cmpq(rcx, Immediate(SlicedString::kMinLength));
5215    // Short slice.  Copy instead of slicing.
5216    __ j(less, &copy_routine);
5217    // Allocate new sliced string.  At this point we do not reload the instance
5218    // type including the string encoding because we simply rely on the info
5219    // provided by the original string.  It does not matter if the original
5220    // string's encoding is wrong because we always have to recheck encoding of
5221    // the newly created string's parent anyway, due to externalized strings.
5222    Label two_byte_slice, set_slice_header;
5223    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5224    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5225    __ testb(rbx, Immediate(kStringEncodingMask));
5226    __ j(zero, &two_byte_slice, Label::kNear);
5227    __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
5228    __ jmp(&set_slice_header, Label::kNear);
5229    __ bind(&two_byte_slice);
5230    __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
5231    __ bind(&set_slice_header);
5232    __ Integer32ToSmi(rcx, rcx);
5233    __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
5234    __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
5235           Immediate(String::kEmptyHashField));
5236    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
5237    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
5238    __ IncrementCounter(counters->sub_string_native(), 1);
5239    __ ret(kArgumentsSize);
5240
5241    __ bind(&copy_routine);
5242  }
5243
5244  // rdi: underlying subject string
5245  // rbx: instance type of underlying subject string
5246  // rdx: adjusted start index (smi)
5247  // rcx: length
5248  // The subject string can only be an external or a sequential string of
5249  // either encoding at this point.
5250  Label two_byte_sequential, sequential_string;
5251  STATIC_ASSERT(kExternalStringTag != 0);
5252  STATIC_ASSERT(kSeqStringTag == 0);
5253  __ testb(rbx, Immediate(kExternalStringTag));
5254  __ j(zero, &sequential_string);
5255
5256  // Handle external string.
5257  // Rule out short external strings.
5258  STATIC_CHECK(kShortExternalStringTag != 0);
5259  __ testb(rbx, Immediate(kShortExternalStringMask));
5260  __ j(not_zero, &runtime);
5261  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
5262  // Move the pointer so that offset-wise, it looks like a sequential string.
5263  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5264  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5265
5266  __ bind(&sequential_string);
5267  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
5268  __ testb(rbx, Immediate(kStringEncodingMask));
5269  __ j(zero, &two_byte_sequential);
5270
5271  // Allocate the result.
5272  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
5273
5274  // rax: result string
5275  // rcx: result string length
5276  __ movq(r14, rsi);  // rsi is used by the following code.
5277  {  // Locate character of sub string start.
5278    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
5279    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5280                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
5281  }
5282  // Locate first character of result.
5283  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
5284
5285  // rax: result string
5286  // rcx: result length
5287  // rdi: first character of result
5288  // rsi: character of sub string start
5289  // r14: original value of rsi
5290  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
5291  __ movq(rsi, r14);  // Restore rsi.
5292  __ IncrementCounter(counters->sub_string_native(), 1);
5293  __ ret(kArgumentsSize);
5294
5295  __ bind(&two_byte_sequential);
5296  // Allocate the result.
5297  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
5298
5299  // rax: result string
5300  // rcx: result string length
5301  __ movq(r14, rsi);  // rsi is used by the following code.
5302  {  // Locate character of sub string start.
5303    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
5304    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5305                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
5306  }
5307  // Locate first character of result.
5308  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
5309
5310  // rax: result string
5311  // rcx: result length
5312  // rdi: first character of result
5313  // rsi: character of sub string start
5314  // r14: original value of rsi
5315  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
5316  __ movq(rsi, r14);  // Restore rsi.
5317  __ IncrementCounter(counters->sub_string_native(), 1);
5318  __ ret(kArgumentsSize);
5319
5320  // Just jump to runtime to create the sub string.
5321  __ bind(&runtime);
5322  __ TailCallRuntime(Runtime::kSubString, 3, 1);
5323}
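

// Editorial sketch of the fast-path dispatch above for a substring
// [from, to) of a string of the given length. kSketchSliceMinLength stands
// in for SlicedString::kMinLength; all names here are illustrative.
namespace {

static const int kSketchSliceMinLength = 13;

enum SketchSubStringStrategy {
  SUB_RETURN_ORIGINAL,  // The range covers the whole string.
  SUB_RUNTIME,          // Length 0 or 1: single-character cache in runtime.
  SUB_INTERN_TWO_CHAR,  // Length 2: symbol table probe.
  SUB_MAKE_SLICE,       // Long enough: SlicedString (FLAG_string_slices).
  SUB_COPY_CHARS        // Otherwise: copy into a fresh flat string.
};

static SketchSubStringStrategy SketchChooseSubString(int length,
                                                     int from, int to) {
  int sub_length = to - from;
  if (sub_length == length) return SUB_RETURN_ORIGINAL;
  if (sub_length < 2) return SUB_RUNTIME;
  if (sub_length == 2) return SUB_INTERN_TWO_CHAR;
  if (sub_length >= kSketchSliceMinLength) return SUB_MAKE_SLICE;
  return SUB_COPY_CHARS;
}

}  // namespace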
5324
5325
5326void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5327                                                      Register left,
5328                                                      Register right,
5329                                                      Register scratch1,
5330                                                      Register scratch2) {
5331  Register length = scratch1;
5332
5333  // Compare lengths.
5334  Label check_zero_length;
5335  __ movq(length, FieldOperand(left, String::kLengthOffset));
5336  __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
5337  __ j(equal, &check_zero_length, Label::kNear);
5338  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5339  __ ret(0);
5340
5341  // Check if the length is zero.
5342  Label compare_chars;
5343  __ bind(&check_zero_length);
5344  STATIC_ASSERT(kSmiTag == 0);
5345  __ SmiTest(length);
5346  __ j(not_zero, &compare_chars, Label::kNear);
5347  __ Move(rax, Smi::FromInt(EQUAL));
5348  __ ret(0);
5349
5350  // Compare characters.
5351  __ bind(&compare_chars);
5352  Label strings_not_equal;
5353  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
5354                                &strings_not_equal, Label::kNear);
5355
5356  // Characters are equal.
5357  __ Move(rax, Smi::FromInt(EQUAL));
5358  __ ret(0);
5359
5360  // Characters are not equal.
5361  __ bind(&strings_not_equal);
5362  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5363  __ ret(0);
5364}
5365
5366
5367void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
5368                                                        Register left,
5369                                                        Register right,
5370                                                        Register scratch1,
5371                                                        Register scratch2,
5372                                                        Register scratch3,
5373                                                        Register scratch4) {
5374  // Ensure that you can always subtract a string length from a non-negative
5375  // number (e.g. another length).
5376  STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
5377
5378  // Find minimum length and length difference.
5379  __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
5380  __ movq(scratch4, scratch1);
5381  __ SmiSub(scratch4,
5382            scratch4,
5383            FieldOperand(right, String::kLengthOffset));
5384  // Register scratch4 now holds left.length - right.length.
5385  const Register length_difference = scratch4;
5386  Label left_shorter;
5387  __ j(less, &left_shorter, Label::kNear);
5388  // The right string isn't longer than the left one.
5389  // Get the right string's length by subtracting the (non-negative) difference
5390  // from the left string's length.
5391  __ SmiSub(scratch1, scratch1, length_difference);
5392  __ bind(&left_shorter);
5393  // Register scratch1 now holds Min(left.length, right.length).
5394  const Register min_length = scratch1;
5395
5396  Label compare_lengths;
5397  // If min-length is zero, go directly to comparing lengths.
5398  __ SmiTest(min_length);
5399  __ j(zero, &compare_lengths, Label::kNear);
5400
5401  // Compare loop.
5402  Label result_not_equal;
5403  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
5404                                &result_not_equal, Label::kNear);
5405
5406  // Completed loop without finding different characters.
5407  // Compare lengths (precomputed).
5408  __ bind(&compare_lengths);
5409  __ SmiTest(length_difference);
5410  __ j(not_zero, &result_not_equal, Label::kNear);
5411
5412  // Result is EQUAL.
5413  __ Move(rax, Smi::FromInt(EQUAL));
5414  __ ret(0);
5415
5416  Label result_greater;
5417  __ bind(&result_not_equal);
5418  // Unequal comparison of left to right, either character or length.
5419  __ j(greater, &result_greater, Label::kNear);
5420
5421  // Result is LESS.
5422  __ Move(rax, Smi::FromInt(LESS));
5423  __ ret(0);
5424
5425  // Result is GREATER.
5426  __ bind(&result_greater);
5427  __ Move(rax, Smi::FromInt(GREATER));
5428  __ ret(0);
5429}
5430
5431
5432void StringCompareStub::GenerateAsciiCharsCompareLoop(
5433    MacroAssembler* masm,
5434    Register left,
5435    Register right,
5436    Register length,
5437    Register scratch,
5438    Label* chars_not_equal,
5439    Label::Distance near_jump) {
5440  // Change index to run from -length to -1 by adding length to string
5441  // start. This means that loop ends when index reaches zero, which
5442  // doesn't need an additional compare.
5443  __ SmiToInteger32(length, length);
5444  __ lea(left,
5445         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
5446  __ lea(right,
5447         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
5448  __ neg(length);
5449  Register index = length;  // index = -length;
5450
5451  // Compare loop.
5452  Label loop;
5453  __ bind(&loop);
5454  __ movb(scratch, Operand(left, index, times_1, 0));
5455  __ cmpb(scratch, Operand(right, index, times_1, 0));
5456  __ j(not_equal, chars_not_equal, near_jump);
5457  __ incq(index);
5458  __ j(not_zero, &loop);
5459}
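

// Editorial sketch of the indexing trick above: point both strings one past
// their data and run an index from -length up to 0, so the loop's exit test
// is just the zero flag from incrementing the index.
namespace {

static bool SketchFlatCharsEqual(const char* left, const char* right,
                                 int length) {
  left += length;   // Like the two lea instructions above.
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;  // chars_not_equal
  }
  return true;
}

}  // namespace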
5460
5461
5462void StringCompareStub::Generate(MacroAssembler* masm) {
5463  Label runtime;
5464
5465  // Stack frame on entry.
5466  //  rsp[0]: return address
5467  //  rsp[8]: right string
5468  //  rsp[16]: left string
5469
5470  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // left
5471  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
5472
5473  // Check for identity.
5474  Label not_same;
5475  __ cmpq(rdx, rax);
5476  __ j(not_equal, &not_same, Label::kNear);
5477  __ Move(rax, Smi::FromInt(EQUAL));
5478  Counters* counters = masm->isolate()->counters();
5479  __ IncrementCounter(counters->string_compare_native(), 1);
5480  __ ret(2 * kPointerSize);
5481
5482  __ bind(&not_same);
5483
5484  // Check that both are sequential ASCII strings.
5485  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
5486
5487  // Inline comparison of ASCII strings.
5488  __ IncrementCounter(counters->string_compare_native(), 1);
5489  // Drop arguments from the stack.
5490  __ pop(rcx);
5491  __ addq(rsp, Immediate(2 * kPointerSize));
5492  __ push(rcx);
5493  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
5494
5495  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
5496  // tagged as a small integer.
5497  __ bind(&runtime);
5498  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5499}
5500
5501
5502void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
5503  ASSERT(state_ == CompareIC::SMIS);
5504  Label miss;
5505  __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
5506
5507  if (GetCondition() == equal) {
5508    // For equality we do not care about the sign of the result.
5509    __ subq(rax, rdx);
5510  } else {
5511    Label done;
5512    __ subq(rdx, rax);
5513    __ j(no_overflow, &done, Label::kNear);
5514    // Correct sign of result in case of overflow.
5515    __ SmiNot(rdx, rdx);
5516    __ bind(&done);
5517    __ movq(rax, rdx);
5518  }
5519  __ ret(0);
5520
5521  __ bind(&miss);
5522  GenerateMiss(masm);
5523}
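

// Why the SmiNot correction above works, sketched with narrower integers:
// when left - right overflows, the result has the wrong sign, and bitwise
// NOT (x -> -x - 1) flips the sign while keeping the value non-zero, which
// is all a comparison result needs.
namespace {

static int32_t SketchSmiCompare(int32_t left, int32_t right) {
  int64_t wide = static_cast<int64_t>(left) - right;  // Exact difference.
  int32_t narrow = static_cast<int32_t>(wide);
  if (wide != narrow) narrow = ~narrow;  // Overflow: correct the sign.
  return narrow;  // Negative, zero, or positive, like rax after the stub.
}

}  // namespace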
5524
5525
5526void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
5527  ASSERT(state_ == CompareIC::HEAP_NUMBERS);
5528
5529  Label generic_stub;
5530  Label unordered, maybe_undefined1, maybe_undefined2;
5531  Label miss;
5532  Condition either_smi = masm->CheckEitherSmi(rax, rdx);
5533  __ j(either_smi, &generic_stub, Label::kNear);
5534
5535  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
5536  __ j(not_equal, &maybe_undefined1, Label::kNear);
5537  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
5538  __ j(not_equal, &maybe_undefined2, Label::kNear);
5539
5540  // Load the left and right operands.
5541  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
5542  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
5543
5544  // Compare the operands.
5545  __ ucomisd(xmm0, xmm1);
5546
5547  // Don't base result on EFLAGS when a NaN is involved.
5548  __ j(parity_even, &unordered, Label::kNear);
5549
5550  // Return a result of -1, 0, or 1, based on EFLAGS.
5551  // Use mov, because xor would destroy the flag register.
5552  __ movl(rax, Immediate(0));
5553  __ movl(rcx, Immediate(0));
5554  __ setcc(above, rax);  // Add one to zero if carry clear and not equal.
5555  __ sbbq(rax, rcx);  // Subtract one if below (aka. carry set).
5556  __ ret(0);
5557
5558  __ bind(&unordered);
5559  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
5560  __ bind(&generic_stub);
5561  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
5562
5563  __ bind(&maybe_undefined1);
5564  if (Token::IsOrderedRelationalCompareOp(op_)) {
5565    __ Cmp(rax, masm->isolate()->factory()->undefined_value());
5566    __ j(not_equal, &miss);
5567    __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
5568    __ j(not_equal, &maybe_undefined2, Label::kNear);
5569    __ jmp(&unordered);
5570  }
5571
5572  __ bind(&maybe_undefined2);
5573  if (Token::IsOrderedRelationalCompareOp(op_)) {
5574    __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
5575    __ j(equal, &unordered);
5576  }
5577
5578  __ bind(&miss);
5579  GenerateMiss(masm);
5580}
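

// Editorial sketch of the branch-free flag arithmetic above: setcc(above)
// produces 1 when left > right, and the sbb consumes the carry (set when
// left < right), yielding -1, 0, or 1. NaN operands take the unordered path
// instead, as in the stub.
namespace {

static int SketchDoubleCompare(double left, double right, bool* unordered) {
  *unordered = (left != left) || (right != right);  // The parity_even check.
  if (*unordered) return 0;  // The stub tail-calls the generic CompareStub.
  int above = left > right ? 1 : 0;  // setcc(above, rax).
  int below = left < right ? 1 : 0;  // The borrow consumed by sbbq.
  return above - below;
}

}  // namespace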
5581
5582
5583void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
5584  ASSERT(state_ == CompareIC::SYMBOLS);
5585  ASSERT(GetCondition() == equal);
5586
5587  // Registers containing left and right operands respectively.
5588  Register left = rdx;
5589  Register right = rax;
5590  Register tmp1 = rcx;
5591  Register tmp2 = rbx;
5592
5593  // Check that both operands are heap objects.
5594  Label miss;
5595  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5596  __ j(cond, &miss, Label::kNear);
5597
5598  // Check that both operands are symbols.
5599  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5600  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5601  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5602  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5603  STATIC_ASSERT(kSymbolTag != 0);
5604  __ and_(tmp1, tmp2);
5605  __ testb(tmp1, Immediate(kIsSymbolMask));
5606  __ j(zero, &miss, Label::kNear);
5607
5608  // Symbols are compared by identity.
5609  Label done;
5610  __ cmpq(left, right);
5611  // Make sure rax is non-zero. At this point input operands are
5612  // guaranteed to be non-zero.
5613  ASSERT(right.is(rax));
5614  __ j(not_equal, &done, Label::kNear);
5615  STATIC_ASSERT(EQUAL == 0);
5616  STATIC_ASSERT(kSmiTag == 0);
5617  __ Move(rax, Smi::FromInt(EQUAL));
5618  __ bind(&done);
5619  __ ret(0);
5620
5621  __ bind(&miss);
5622  GenerateMiss(masm);
5623}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRINGS);
  Label miss;

  bool equality = Token::IsEqualityOp(op_);

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;
  Register tmp1 = rcx;
  Register tmp2 = rbx;
  Register tmp3 = rdi;

  // Check that both operands are heap objects.
  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
  __ j(cond, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  __ movq(tmp3, tmp1);
  STATIC_ASSERT(kNotStringTag != 0);
  __ or_(tmp3, tmp2);
  __ testb(tmp3, Immediate(kIsNotStringMask));
  __ j(not_zero, &miss);

  // Fast check for identical strings.
  Label not_same;
  __ cmpq(left, right);
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Move(rax, Smi::FromInt(EQUAL));
  __ ret(0);

  // Handle non-identical strings.
  __ bind(&not_same);

  // Check that both strings are symbols. If they are, we're done
  // because we already know they are not identical.
  if (equality) {
    Label do_compare;
    STATIC_ASSERT(kSymbolTag != 0);
    __ and_(tmp1, tmp2);
    __ testb(tmp1, Immediate(kIsSymbolMask));
    __ j(zero, &do_compare, Label::kNear);
    // rax already holds a valid non-equal result: it contains the right
    // operand, which is guaranteed to be a non-zero heap object here.
    ASSERT(right.is(rax));
    __ ret(0);
    __ bind(&do_compare);
  }

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);

  // Compare flat ASCII strings. Returns when done.
  if (equality) {
    StringCompareStub::GenerateFlatAsciiStringEquals(
        masm, left, right, tmp1, tmp2);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(
        masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
  }

  // Handle more complex cases in the runtime.
  __ bind(&runtime);
  __ pop(tmp1);  // Return address.
  __ push(left);
  __ push(right);
  __ push(tmp1);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  Label miss;
  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
  __ j(either_smi, &miss, Label::kNear);

  __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
  __ j(not_equal, &miss, Label::kNear);
  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
  __ j(not_equal, &miss, Label::kNear);

  ASSERT(GetCondition() == equal);
  __ subq(rax, rdx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
  __ j(either_smi, &miss, Label::kNear);

  __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
  __ Cmp(rcx, known_map_);
  __ j(not_equal, &miss, Label::kNear);
  __ Cmp(rbx, known_map_);
  __ j(not_equal, &miss, Label::kNear);

  __ subq(rax, rdx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    ExternalReference miss =
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());

    FrameScope scope(masm, StackFrame::INTERNAL);
    // Preserve the operands across the call ...
    __ push(rdx);
    __ push(rax);
    // ... and push them again as arguments for the miss handler.
    __ push(rdx);
    __ push(rax);
    __ Push(Smi::FromInt(op_));
    __ CallExternalReference(miss, 3);

    // Compute the entry point of the rewritten stub.
    __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
    // Restore the original operands.
    __ pop(rax);
    __ pop(rdx);
  }

  // Do a tail call to the rewritten stub.
  __ jmp(rdi);
}


void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register properties,
                                                        Handle<String> name,
                                                        Register r0) {
  // If the names of the slots in the range from 1 to kProbes - 1 for the
  // hash value are not equal to the name, and the kProbes-th slot is unused
  // (its name is the undefined value), then the hash table is guaranteed not
  // to contain the property. This holds even if some slots represent deleted
  // properties (their names are the hole value).
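
  // In C-like terms, one inlined probe below is roughly the following
  // (an illustrative sketch only; the real offsets come from
  // StringDictionary::GetProbeOffset and the layout constants used below):
  //   int mask = capacity - 1;                      // Capacity is 2^n.
  //   int index = (hash + probe_offset(i)) & mask;
  //   Object* key = entry_at(index * kEntrySize);
  //   if (key == undefined) goto done;              // Free slot: absent.
  //   if (key == *name) goto miss;                  // Found the property.
  //   if (key != the_hole && !key->IsSymbol()) goto miss;  // Can't decide.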
  for (int i = 0; i < kInlinedProbes; i++) {
    // r0 is used to hold the masked index computed below.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = r0;
    // Capacity is a smi and always a power of two.
    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
    __ decl(index);
    __ and_(index,
            Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.

    Register entity_name = r0;
    // An undefined entry at this position means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    __ movq(entity_name, Operand(properties,
                                 index,
                                 times_pointer_size,
                                 kElementsStartOffset - kHeapObjectTag));
    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
    __ j(equal, done);

    // Stop if we found the property.
    __ Cmp(entity_name, Handle<String>(name));
    __ j(equal, miss);

    Label the_hole;
    // Check for the hole and skip.
    __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
    __ j(equal, &the_hole, Label::kNear);

    // Bail out if the entry name is not a symbol.
    __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
    __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
             Immediate(kIsSymbolMask));
    __ j(zero, miss);

    __ bind(&the_hole);
  }

  StringDictionaryLookupStub stub(properties,
                                  r0,
                                  r0,
                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
  __ Push(Handle<Object>(name));
  __ push(Immediate(name->Hash()));
  __ CallStub(&stub);
  __ testq(r0, r0);
  __ j(not_zero, miss);
  __ jmp(done);
}


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r1|. Jump to the |miss| label
// otherwise.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register r0,
                                                        Register r1) {
  ASSERT(!elements.is(r0));
  ASSERT(!elements.is(r1));
  ASSERT(!name.is(r0));
  ASSERT(!name.is(r1));

  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  // Compute the capacity mask.
  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
  __ decl(r0);

  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
    __ shrl(r1, Immediate(String::kHashShift));
    if (i > 0) {
      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(r1, r0);

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3

    // Check if the key is identical to the name.
    __ cmpq(name, Operand(elements, r1, times_pointer_size,
                          kElementsStartOffset - kHeapObjectTag));
    __ j(equal, done);
  }

  StringDictionaryLookupStub stub(elements,
                                  r0,
                                  r1,
                                  POSITIVE_LOOKUP);
  __ push(name);
  __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
  __ shrl(r0, Immediate(String::kHashShift));
  __ push(r0);
  __ CallStub(&stub);

  __ testq(r0, r0);
  __ j(zero, miss);
  __ jmp(done);
}
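
// The inline probes above read the string's precomputed hash instead of
// rehashing. In rough C++ terms (a hedged sketch; field and shift names as
// used above, helper names illustrative):
//   uint32_t hash = name->hash_field() >> String::kHashShift;
//   uint32_t index = (hash + probe_offset(i)) & (capacity - 1);
//   if (key_at(index) == name) goto done;  // Identity compare is enough,
// since dictionary keys are symbols and equal symbols are the same object.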


void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // Stack frame on entry:
  //  rsp[0 * kPointerSize]: return address.
  //  rsp[1 * kPointerSize]: key's hash.
  //  rsp[2 * kPointerSize]: key.
  // Registers:
  //  dictionary_: StringDictionary to probe.
  //  result_: used as scratch.
  //  index_: will hold an index of entry if lookup is successful.
  //          might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non zero otherwise.

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  Register scratch = result_;

  // Compute the capacity mask and push it for the probe loop below.
  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
  __ decl(scratch);
  __ push(scratch);

  // If the names of the slots in the range from 1 to kProbes - 1 for the
  // hash value are not equal to the name, and the kProbes-th slot is unused
  // (its name is the undefined value), then the hash table is guaranteed not
  // to contain the property. This holds even if some slots represent deleted
  // properties (their names are the hole value).
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ movq(scratch, Operand(rsp, 2 * kPointerSize));
    if (i > 0) {
      __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(rsp, 0));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.

    // An undefined entry at this position means the name is not contained.
    __ movq(scratch, Operand(dictionary_,
                             index_,
                             times_pointer_size,
                             kElementsStartOffset - kHeapObjectTag));

    __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
    __ j(equal, &not_in_dictionary);

    // Stop if we found the property.
    __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
    __ j(equal, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // If we hit a non-symbol key during negative lookup we have to bail
      // out, as this key might be equal to the key we are looking for.

      // Check if the entry name is not a symbol.
      __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
      __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
               Immediate(kIsSymbolMask));
      __ j(zero, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup, then probing failure should be
  // treated as a lookup success. For a positive lookup, probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ movq(scratch, Immediate(0));
    __ Drop(1);
    __ ret(2 * kPointerSize);
  }

  __ bind(&in_dictionary);
  __ movq(scratch, Immediate(1));
  __ Drop(1);
  __ ret(2 * kPointerSize);

  __ bind(&not_in_dictionary);
  __ movq(scratch, Immediate(0));
  __ Drop(1);
  __ ret(2 * kPointerSize);
}


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};


#define REG(Name) { kRegister_ ## Name ## _Code }

struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
  // Used in CompileStoreGlobal.
  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField and
  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
  // GenerateStoreField calls the stub with two different permutations of
  // registers.  This is the second.
  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
  // StoreIC::GenerateNormal via GenerateDictionaryStore.
  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
  // KeyedStoreIC::GenerateGeneric.
  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET },
  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET },
  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
};

#undef REG

bool RecordWriteStub::IsPregenerated() {
  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}
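
// A hedged usage sketch: a RecordWriteStub counts as pregenerated only if it
// uses one of the fixed register triples above together with kDontSaveFPRegs.
// For example (illustrative only, mirroring the RegExpExecStub entry):
//   RecordWriteStub stub(rbx, rax, rdi, EMIT_REMEMBERED_SET, kDontSaveFPRegs);
//   // stub.IsPregenerated() would return true for this combination.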


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode()->set_is_pregenerated(true);
  StoreBufferOverflowStub stub2(kSaveFPRegs);
  stub2.GetCode()->set_is_pregenerated(true);
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode()->set_is_pregenerated(true);
  }
}


// Takes the input in 3 registers: address_, value_, and object_.  A pointer
// to the value has just been written into the object, and now this stub makes
// sure we keep the GC informed.  The word in the object where the value has
// been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call.  We patch them back
  // and forth between compare instructions (which act as nops in this
  // position) and the real branches when we start and stop incremental heap
  // marking.  See RecordWriteStub::Patch for details.
  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
  __ jmp(&skip_to_incremental_compacting, Label::kFar);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  masm->set_byte_at(0, kTwoByteNopInstruction);
  masm->set_byte_at(2, kFiveByteNopInstruction);
}
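
// A hedged sketch of the patching described above (the real logic lives in
// RecordWriteStub::Patch): the stub starts with a 2-byte instruction at
// offset 0 and a 5-byte instruction at offset 2, each either a short/near
// jump or a same-length "nop" (actually a compare that just falls through):
//   STORE_BUFFER_ONLY:       nop2, nop5  -> neither incremental path taken.
//   INCREMENTAL:             jmp2, nop5  -> first jump taken.
//   INCREMENTAL_COMPACTION:  nop2, jmp5  -> second jump taken.
// Flipping a mode changes only one opcode byte per instruction, so the stub
// can be retargeted in place without regenerating code.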
6109
6110
6111void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
6112  regs_.Save(masm);
6113
6114  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
6115    Label dont_need_remembered_set;
6116
6117    __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
6118    __ JumpIfNotInNewSpace(regs_.scratch0(),
6119                           regs_.scratch0(),
6120                           &dont_need_remembered_set);
6121
6122    __ CheckPageFlag(regs_.object(),
6123                     regs_.scratch0(),
6124                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
6125                     not_zero,
6126                     &dont_need_remembered_set);
6127
6128    // First notify the incremental marker if necessary, then update the
6129    // remembered set.
6130    CheckNeedsToInformIncrementalMarker(
6131        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
6132    InformIncrementalMarker(masm, mode);
6133    regs_.Restore(masm);
6134    __ RememberedSetHelper(object_,
6135                           address_,
6136                           value_,
6137                           save_fp_regs_mode_,
6138                           MacroAssembler::kReturnAtEnd);
6139
6140    __ bind(&dont_need_remembered_set);
6141  }
6142
6143  CheckNeedsToInformIncrementalMarker(
6144      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
6145  InformIncrementalMarker(masm, mode);
6146  regs_.Restore(masm);
6147  __ ret(0);
6148}
6149
6150
6151void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
6152  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
6153#ifdef _WIN64
6154  Register arg3 = r8;
6155  Register arg2 = rdx;
6156  Register arg1 = rcx;
6157#else
6158  Register arg3 = rdx;
6159  Register arg2 = rsi;
6160  Register arg1 = rdi;
6161#endif
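  // (The split above follows the platform C calling conventions: the Win64
  // ABI passes the first three integer arguments in rcx, rdx, r8, while the
  // System V AMD64 ABI used elsewhere passes them in rdi, rsi, rdx.)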
  Register address =
      arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
  ASSERT(!address.is(regs_.object()));
  ASSERT(!address.is(arg1));
  __ Move(address, regs_.address());
  __ Move(arg1, regs_.object());
  if (mode == INCREMENTAL_COMPACTION) {
    // TODO(gc) Can we just set address arg2 in the beginning?
    __ Move(arg2, address);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ movq(arg2, Operand(address, 0));
  }
  __ LoadAddress(arg3, ExternalReference::isolate_address());
  int argument_count = 3;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
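
// A hedged sketch of the C signature the three arguments above are marshalled
// for (names are illustrative; see the ExternalReference definitions for the
// actual record-write functions):
//   void RecordWriteFromCode(HeapObject* object,      // arg1
//                            Object** slot_or_value,  // arg2, mode-dependent
//                            Isolate* isolate);       // arg3
// In INCREMENTAL mode arg2 is the value loaded from the slot; in
// INCREMENTAL_COMPACTION mode it is the slot address itself.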


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_object;

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(),
                 regs_.scratch0(),
                 regs_.scratch1(),
                 &on_black,
                 Label::kNear);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     zero,
                     &ensure_not_white,
                     Label::kNear);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     zero,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need an extra register for this, so we push the object register
  // temporarily.
  __ push(regs_.object());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    &need_incremental_pop_object,
                    Label::kNear);
  __ pop(regs_.object());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&need_incremental_pop_object);
  __ pop(regs_.object());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}
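
// (A hedged background note on the checks above: under tri-color incremental
// marking, a black (already scanned) object must not end up pointing to a
// white (unmarked) value, or the value could be collected while still
// reachable. EnsureNotWhite handles the easy cases in place and jumps out
// for values that the incremental marker itself has to be told about.)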


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : element value to store
  //  -- rbx    : array literal
  //  -- rdi    : map of array literal
  //  -- rcx    : element index as smi
  //  -- rdx    : array literal index in function
  //  -- rsp[0] : return address
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  __ CheckFastElements(rdi, &double_elements);

  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
  __ JumpIfSmi(rax, &smi_element);
  __ CheckFastSmiOnlyElements(rdi, &fast_elements);

  // Storing into the array literal requires an elements transition. Call
  // into the runtime.

  __ bind(&slow_elements);
  __ pop(rdi);  // Pop the return address; it is pushed back below so the
                // tail call returns to the right place.
  __ push(rbx);
  __ push(rcx);
  __ push(rax);
  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
  __ push(rdx);
  __ push(rdi);  // Push the return address back so the tail call returns to
                 // the right place.
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
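  // (A hedged note on the runtime call: the five arguments pushed above
  // reach Runtime::kStoreArrayLiteralElement in this order, assuming the
  // usual left-to-right argument layout: the array literal, the element
  // index, the value, the function's literals array, and the literal index.)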

  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ SmiToInteger32(kScratchRegister, rcx);
  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
  __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
                           FixedArrayBase::kHeaderSize));
  __ movq(Operand(rcx, 0), rax);
  // Update the write barrier for the array store.
  __ RecordWrite(rbx, rcx, rax,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ ret(0);

  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
  // FAST_ELEMENTS, and value is Smi.
  __ bind(&smi_element);
  __ SmiToInteger32(kScratchRegister, rcx);
  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
  __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
                       FixedArrayBase::kHeaderSize), rax);
  __ ret(0);

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);

  __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
  __ SmiToInteger32(r11, rcx);
  __ StoreNumberToDoubleElements(rax,
                                 r9,
                                 r11,
                                 xmm0,
                                 &slow_elements);
  __ ret(0);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64