1// Copyright 2013 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if V8_TARGET_ARCH_X64
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "regexp-macro-assembler.h"
35#include "stub-cache.h"
36#include "runtime.h"
37
38namespace v8 {
39namespace internal {
40
41
42void ToNumberStub::InitializeInterfaceDescriptor(
43    Isolate* isolate,
44    CodeStubInterfaceDescriptor* descriptor) {
45  static Register registers[] = { rax };
46  descriptor->register_param_count_ = 1;
47  descriptor->register_params_ = registers;
48  descriptor->deoptimization_handler_ = NULL;
49}
50
51
52void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
53    Isolate* isolate,
54    CodeStubInterfaceDescriptor* descriptor) {
55  static Register registers[] = { rax, rbx, rcx };
56  descriptor->register_param_count_ = 3;
57  descriptor->register_params_ = registers;
58  descriptor->deoptimization_handler_ =
59      Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
60}
61
62
63void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
64    Isolate* isolate,
65    CodeStubInterfaceDescriptor* descriptor) {
66  static Register registers[] = { rax, rbx, rcx, rdx };
67  descriptor->register_param_count_ = 4;
68  descriptor->register_params_ = registers;
69  descriptor->deoptimization_handler_ =
70      Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
71}
72
73
74void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
75    Isolate* isolate,
76    CodeStubInterfaceDescriptor* descriptor) {
77  static Register registers[] = { rbx };
78  descriptor->register_param_count_ = 1;
79  descriptor->register_params_ = registers;
80  descriptor->deoptimization_handler_ = NULL;
81}
82
83
84void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
85    Isolate* isolate,
86    CodeStubInterfaceDescriptor* descriptor) {
87  static Register registers[] = { rdx, rax };
88  descriptor->register_param_count_ = 2;
89  descriptor->register_params_ = registers;
90  descriptor->deoptimization_handler_ =
91      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
92}
93
94
95void LoadFieldStub::InitializeInterfaceDescriptor(
96    Isolate* isolate,
97    CodeStubInterfaceDescriptor* descriptor) {
98  static Register registers[] = { rax };
99  descriptor->register_param_count_ = 1;
100  descriptor->register_params_ = registers;
101  descriptor->deoptimization_handler_ = NULL;
102}
103
104
105void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
106    Isolate* isolate,
107    CodeStubInterfaceDescriptor* descriptor) {
108  static Register registers[] = { rdx };
109  descriptor->register_param_count_ = 1;
110  descriptor->register_params_ = registers;
111  descriptor->deoptimization_handler_ = NULL;
112}
113
114
115void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
116    Isolate* isolate,
117    CodeStubInterfaceDescriptor* descriptor) {
118  static Register registers[] = { rdx, rcx, rax };
119  descriptor->register_param_count_ = 3;
120  descriptor->register_params_ = registers;
121  descriptor->deoptimization_handler_ =
122      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
123}
124
125
126void TransitionElementsKindStub::InitializeInterfaceDescriptor(
127    Isolate* isolate,
128    CodeStubInterfaceDescriptor* descriptor) {
129  static Register registers[] = { rax, rbx };
130  descriptor->register_param_count_ = 2;
131  descriptor->register_params_ = registers;
132  descriptor->deoptimization_handler_ =
133      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
134}
135
136
137static void InitializeArrayConstructorDescriptor(
138    Isolate* isolate,
139    CodeStubInterfaceDescriptor* descriptor,
140    int constant_stack_parameter_count) {
141  // register state
142  // rax -- number of arguments
143  // rdi -- function
144  // rbx -- type info cell with elements kind
145  static Register registers[] = { rdi, rbx };
146  descriptor->register_param_count_ = 2;
147  if (constant_stack_parameter_count != 0) {
148    // stack param count needs (constructor pointer, and single argument)
149    descriptor->stack_parameter_count_ = &rax;
150  }
151  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
152  descriptor->register_params_ = registers;
153  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
154  descriptor->deoptimization_handler_ =
155      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
156}
157
158
159static void InitializeInternalArrayConstructorDescriptor(
160    Isolate* isolate,
161    CodeStubInterfaceDescriptor* descriptor,
162    int constant_stack_parameter_count) {
163  // register state
164  // rax -- number of arguments
165  // rdi -- constructor function
166  static Register registers[] = { rdi };
167  descriptor->register_param_count_ = 1;
168
169  if (constant_stack_parameter_count != 0) {
170    // stack param count needs (constructor pointer, and single argument)
171    descriptor->stack_parameter_count_ = &rax;
172  }
173  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
174  descriptor->register_params_ = registers;
175  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
176  descriptor->deoptimization_handler_ =
177      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
178}
179
180
181void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
182    Isolate* isolate,
183    CodeStubInterfaceDescriptor* descriptor) {
184  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
185}
186
187
188void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
189    Isolate* isolate,
190    CodeStubInterfaceDescriptor* descriptor) {
191  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
192}
193
194
195void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
196    Isolate* isolate,
197    CodeStubInterfaceDescriptor* descriptor) {
198  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
199}
200
201
202void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
203    Isolate* isolate,
204    CodeStubInterfaceDescriptor* descriptor) {
205  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
206}
207
208
209void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
210    Isolate* isolate,
211    CodeStubInterfaceDescriptor* descriptor) {
212  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
213}
214
215
216void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
217    Isolate* isolate,
218    CodeStubInterfaceDescriptor* descriptor) {
219  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
220}
221
222
223void CompareNilICStub::InitializeInterfaceDescriptor(
224    Isolate* isolate,
225    CodeStubInterfaceDescriptor* descriptor) {
226  static Register registers[] = { rax };
227  descriptor->register_param_count_ = 1;
228  descriptor->register_params_ = registers;
229  descriptor->deoptimization_handler_ =
230      FUNCTION_ADDR(CompareNilIC_Miss);
231  descriptor->SetMissHandler(
232      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
233}
234
235
236void ToBooleanStub::InitializeInterfaceDescriptor(
237    Isolate* isolate,
238    CodeStubInterfaceDescriptor* descriptor) {
239  static Register registers[] = { rax };
240  descriptor->register_param_count_ = 1;
241  descriptor->register_params_ = registers;
242  descriptor->deoptimization_handler_ =
243     FUNCTION_ADDR(ToBooleanIC_Miss);
244  descriptor->SetMissHandler(
245     ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
246}
247
248
249void StoreGlobalStub::InitializeInterfaceDescriptor(
250    Isolate* isolate,
251    CodeStubInterfaceDescriptor* descriptor) {
252  static Register registers[] = { rdx, rcx, rax };
253  descriptor->register_param_count_ = 3;
254  descriptor->register_params_ = registers;
255  descriptor->deoptimization_handler_ =
256      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
257}
258
259
260void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
261    Isolate* isolate,
262    CodeStubInterfaceDescriptor* descriptor) {
263  static Register registers[] = { rax, rbx, rcx, rdx };
264  descriptor->register_param_count_ = 4;
265  descriptor->register_params_ = registers;
266  descriptor->deoptimization_handler_ =
267      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
268}
269
270
271#define __ ACCESS_MASM(masm)
272
273
274void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
275  // Update the static counter each time a new code stub is generated.
276  Isolate* isolate = masm->isolate();
277  isolate->counters()->code_stubs()->Increment();
278
279  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
280  int param_count = descriptor->register_param_count_;
281  {
282    // Call the runtime system in a fresh internal frame.
283    FrameScope scope(masm, StackFrame::INTERNAL);
284    ASSERT(descriptor->register_param_count_ == 0 ||
285           rax.is(descriptor->register_params_[param_count - 1]));
286    // Push arguments
287    for (int i = 0; i < param_count; ++i) {
288      __ push(descriptor->register_params_[i]);
289    }
290    ExternalReference miss = descriptor->miss_handler();
291    __ CallExternalReference(miss, descriptor->register_param_count_);
292  }
293
294  __ Ret();
295}
296
297
298void FastNewClosureStub::Generate(MacroAssembler* masm) {
299  // Create a new closure from the given function info in new
300  // space. Set the context to the current context in rsi.
301  Counters* counters = masm->isolate()->counters();
302
303  Label gc;
304  __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
305
306  __ IncrementCounter(counters->fast_new_closure_total(), 1);
307
308  // Get the function info from the stack.
309  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
310
311  int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
312
313  // Compute the function map in the current native context and set that
314  // as the map of the allocated object.
315  __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
316  __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
317  __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index)));
318  __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx);
319
320  // Initialize the rest of the function. We don't have to update the
321  // write barrier because the allocated object is in new space.
322  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
323  __ LoadRoot(r8, Heap::kTheHoleValueRootIndex);
324  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
325  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
326  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
327  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), r8);
328  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
329  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
330  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
331
332  // Initialize the code pointer in the function to be the one
333  // found in the shared function info object.
334  // But first check if there is an optimized version for our context.
335  Label check_optimized;
336  Label install_unoptimized;
337  if (FLAG_cache_optimized_code) {
338    __ movq(rbx,
339            FieldOperand(rdx, SharedFunctionInfo::kOptimizedCodeMapOffset));
340    __ testq(rbx, rbx);
341    __ j(not_zero, &check_optimized, Label::kNear);
342  }
343  __ bind(&install_unoptimized);
344  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset),
345          rdi);  // Initialize with undefined.
346  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
347  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
348  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
349
350  // Return and remove the on-stack parameter.
351  __ ret(1 * kPointerSize);
352
353  __ bind(&check_optimized);
354
355  __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1);
356
357  // rcx holds native context, rbx points to fixed array of 3-element entries
358  // (native context, optimized code, literals).
359  // The optimized code map must never be empty, so check the first elements.
360  Label install_optimized;
361  // Speculatively move code object into edx.
362  __ movq(rdx, FieldOperand(rbx, SharedFunctionInfo::kFirstCodeSlot));
363  __ cmpq(rcx, FieldOperand(rbx, SharedFunctionInfo::kFirstContextSlot));
364  __ j(equal, &install_optimized);
365
366  // Iterate through the rest of map backwards. rdx holds an index.
367  Label loop;
368  Label restore;
369  __ movq(rdx, FieldOperand(rbx, FixedArray::kLengthOffset));
370  __ SmiToInteger32(rdx, rdx);
371  __ bind(&loop);
372  // Do not double check first entry.
373  __ cmpq(rdx, Immediate(SharedFunctionInfo::kSecondEntryIndex));
374  __ j(equal, &restore);
375  __ subq(rdx, Immediate(SharedFunctionInfo::kEntryLength));
376  __ cmpq(rcx, FieldOperand(rbx,
377                            rdx,
378                            times_pointer_size,
379                            FixedArray::kHeaderSize));
380  __ j(not_equal, &loop, Label::kNear);
381  // Hit: fetch the optimized code.
382  __ movq(rdx, FieldOperand(rbx,
383                            rdx,
384                            times_pointer_size,
385                            FixedArray::kHeaderSize + 1 * kPointerSize));
386
387  __ bind(&install_optimized);
388  __ IncrementCounter(counters->fast_new_closure_install_optimized(), 1);
389
390  // TODO(fschneider): Idea: store proper code pointers in the map and either
391  // unmangle them on marking or do nothing as the whole map is discarded on
392  // major GC anyway.
393  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
394  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);
395
396  // Now link a function into a list of optimized functions.
397  __ movq(rdx, ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST));
398
399  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdx);
400  // No need for write barrier as JSFunction (rax) is in the new space.
401
402  __ movq(ContextOperand(rcx, Context::OPTIMIZED_FUNCTIONS_LIST), rax);
403  // Store JSFunction (rax) into rdx before issuing write barrier as
404  // it clobbers all the registers passed.
405  __ movq(rdx, rax);
406  __ RecordWriteContextSlot(
407      rcx,
408      Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST),
409      rdx,
410      rbx,
411      kDontSaveFPRegs);
412
413  // Return and remove the on-stack parameter.
414  __ ret(1 * kPointerSize);
415
416  __ bind(&restore);
417  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
418  __ jmp(&install_unoptimized);
419
420  // Create a new closure through the slower runtime call.
421  __ bind(&gc);
422  __ PopReturnAddressTo(rcx);
423  __ pop(rdx);
424  __ push(rsi);
425  __ push(rdx);
426  __ PushRoot(Heap::kFalseValueRootIndex);
427  __ PushReturnAddressFrom(rcx);
428  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
429}
430
431
432void FastNewContextStub::Generate(MacroAssembler* masm) {
433  // Try to allocate the context in new space.
434  Label gc;
435  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
436  __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize,
437              rax, rbx, rcx, &gc, TAG_OBJECT);
438
439  // Get the function from the stack.
440  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
441
442  // Set up the object header.
443  __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
444  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
445  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
446
447  // Set up the fixed slots.
448  __ Set(rbx, 0);  // Set to NULL.
449  __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
450  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
451  __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
452
453  // Copy the global object from the previous context.
454  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
455  __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
456
457  // Initialize the rest of the slots to undefined.
458  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
459  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
460    __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
461  }
462
463  // Return and remove the on-stack parameter.
464  __ movq(rsi, rax);
465  __ ret(1 * kPointerSize);
466
467  // Need to collect. Call into runtime system.
468  __ bind(&gc);
469  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
470}
471
472
473void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
474  // Stack layout on entry:
475  //
476  // [rsp + (1 * kPointerSize)] : function
477  // [rsp + (2 * kPointerSize)] : serialized scope info
478
479  // Try to allocate the context in new space.
480  Label gc;
481  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
482  __ Allocate(FixedArray::SizeFor(length),
483              rax, rbx, rcx, &gc, TAG_OBJECT);
484
485  // Get the function from the stack.
486  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
487
488  // Get the serialized scope info from the stack.
489  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
490
491  // Set up the object header.
492  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
493  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
494  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
495
496  // If this block context is nested in the native context we get a smi
497  // sentinel instead of a function. The block context should get the
498  // canonical empty function of the native context as its closure which
499  // we still have to look up.
500  Label after_sentinel;
501  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
502  if (FLAG_debug_code) {
503    __ cmpq(rcx, Immediate(0));
504    __ Assert(equal, kExpected0AsASmiSentinel);
505  }
506  __ movq(rcx, GlobalObjectOperand());
507  __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset));
508  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
509  __ bind(&after_sentinel);
510
511  // Set up the fixed slots.
512  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
513  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
514  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);
515
516  // Copy the global object from the previous context.
517  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
518  __ movq(ContextOperand(rax, Context::GLOBAL_OBJECT_INDEX), rbx);
519
520  // Initialize the rest of the slots to the hole value.
521  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
522  for (int i = 0; i < slots_; i++) {
523    __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
524  }
525
526  // Return and remove the on-stack parameter.
527  __ movq(rsi, rax);
528  __ ret(2 * kPointerSize);
529
530  // Need to collect. Call into runtime system.
531  __ bind(&gc);
532  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
533}
534
535
536void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
537  __ PushCallerSaved(save_doubles_);
538  const int argument_count = 1;
539  __ PrepareCallCFunction(argument_count);
540  __ LoadAddress(arg_reg_1,
541                 ExternalReference::isolate_address(masm->isolate()));
542
543  AllowExternalCallThatCantCauseGC scope(masm);
544  __ CallCFunction(
545      ExternalReference::store_buffer_overflow_function(masm->isolate()),
546      argument_count);
547  __ PopCallerSaved(save_doubles_);
548  __ ret(0);
549}
550
551
552class FloatingPointHelper : public AllStatic {
553 public:
554  enum ConvertUndefined {
555    CONVERT_UNDEFINED_TO_ZERO,
556    BAILOUT_ON_UNDEFINED
557  };
558  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
559  // If the operands are not both numbers, jump to not_numbers.
560  // Leaves rdx and rax unchanged.  SmiOperands assumes both are smis.
561  // NumberOperands assumes both are smis or heap numbers.
562  static void LoadSSE2SmiOperands(MacroAssembler* masm);
563  static void LoadSSE2NumberOperands(MacroAssembler* masm);
564  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
565                                      Label* not_numbers);
566
567  // Takes the operands in rdx and rax and loads them as integers in rax
568  // and rcx.
569  static void LoadAsIntegers(MacroAssembler* masm,
570                             Label* operand_conversion_failure,
571                             Register heap_number_map);
572  // As above, but we know the operands to be numbers. In that case,
573  // conversion can't fail.
574  static void LoadNumbersAsIntegers(MacroAssembler* masm);
575
576  // Tries to convert two values to smis losslessly.
577  // This fails if either argument is not a Smi nor a HeapNumber,
578  // or if it's a HeapNumber with a value that can't be converted
579  // losslessly to a Smi. In that case, control transitions to the
580  // on_not_smis label.
581  // On success, either control goes to the on_success label (if one is
582  // provided), or it falls through at the end of the code (if on_success
583  // is NULL).
584  // On success, both first and second holds Smi tagged values.
585  // One of first or second must be non-Smi when entering.
586  static void NumbersToSmis(MacroAssembler* masm,
587                            Register first,
588                            Register second,
589                            Register scratch1,
590                            Register scratch2,
591                            Register scratch3,
592                            Label* on_success,
593                            Label* on_not_smis,
594                            ConvertUndefined convert_undefined);
595};
596
597
598void DoubleToIStub::Generate(MacroAssembler* masm) {
599    Register input_reg = this->source();
600    Register final_result_reg = this->destination();
601    ASSERT(is_truncating());
602
603    Label check_negative, process_64_bits, done;
604
605    int double_offset = offset();
606
607    // Account for return address and saved regs if input is rsp.
608    if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
609
610    MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
611    MemOperand exponent_operand(MemOperand(input_reg,
612                                           double_offset + kDoubleSize / 2));
613
614    Register scratch1;
615    Register scratch_candidates[3] = { rbx, rdx, rdi };
616    for (int i = 0; i < 3; i++) {
617      scratch1 = scratch_candidates[i];
618      if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
619    }
620
621    // Since we must use rcx for shifts below, use some other register (rax)
622    // to calculate the result if ecx is the requested return register.
623    Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
624    // Save ecx if it isn't the return register and therefore volatile, or if it
625    // is the return register, then save the temp register we use in its stead
626    // for the result.
627    Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
628    __ push(scratch1);
629    __ push(save_reg);
630
631    bool stash_exponent_copy = !input_reg.is(rsp);
632    __ movl(scratch1, mantissa_operand);
633    __ movsd(xmm0, mantissa_operand);
634    __ movl(rcx, exponent_operand);
635    if (stash_exponent_copy) __ push(rcx);
636
637    __ andl(rcx, Immediate(HeapNumber::kExponentMask));
638    __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
639    __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
640    __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
641    __ j(below, &process_64_bits);
642
643    // Result is entirely in lower 32-bits of mantissa
644    int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
645    __ subl(rcx, Immediate(delta));
646    __ xorl(result_reg, result_reg);
647    __ cmpl(rcx, Immediate(31));
648    __ j(above, &done);
649    __ shll_cl(scratch1);
650    __ jmp(&check_negative);
651
652    __ bind(&process_64_bits);
653    __ cvttsd2siq(result_reg, xmm0);
654    __ jmp(&done, Label::kNear);
655
656    // If the double was negative, negate the integer result.
657    __ bind(&check_negative);
658    __ movl(result_reg, scratch1);
659    __ negl(result_reg);
660    if (stash_exponent_copy) {
661        __ cmpl(MemOperand(rsp, 0), Immediate(0));
662    } else {
663        __ cmpl(exponent_operand, Immediate(0));
664    }
665    __ cmovl(greater, result_reg, scratch1);
666
667    // Restore registers
668    __ bind(&done);
669    if (stash_exponent_copy) {
670        __ addq(rsp, Immediate(kDoubleSize));
671    }
672    if (!final_result_reg.is(result_reg)) {
673        ASSERT(final_result_reg.is(rcx));
674        __ movl(final_result_reg, result_reg);
675    }
676    __ pop(save_reg);
677    __ pop(scratch1);
678    __ ret(0);
679}
680
681
682void BinaryOpStub::Initialize() {}
683
684
685void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
686  __ PopReturnAddressTo(rcx);
687  __ push(rdx);
688  __ push(rax);
689  // Left and right arguments are now on top.
690  __ Push(Smi::FromInt(MinorKey()));
691
692  __ PushReturnAddressFrom(rcx);
693
694  // Patch the caller to an appropriate specialized stub and return the
695  // operation result to the caller of the stub.
696  __ TailCallExternalReference(
697      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
698                        masm->isolate()),
699      3,
700      1);
701}
702
703
704static void BinaryOpStub_GenerateSmiCode(
705    MacroAssembler* masm,
706    Label* slow,
707    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
708    Token::Value op) {
709
710  // Arguments to BinaryOpStub are in rdx and rax.
711  const Register left = rdx;
712  const Register right = rax;
713
714  // We only generate heapnumber answers for overflowing calculations
715  // for the four basic arithmetic operations and logical right shift by 0.
716  bool generate_inline_heapnumber_results =
717      (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
718      (op == Token::ADD || op == Token::SUB ||
719       op == Token::MUL || op == Token::DIV || op == Token::SHR);
720
721  // Smi check of both operands.  If op is BIT_OR, the check is delayed
722  // until after the OR operation.
723  Label not_smis;
724  Label use_fp_on_smis;
725  Label fail;
726
727  if (op != Token::BIT_OR) {
728    Comment smi_check_comment(masm, "-- Smi check arguments");
729    __ JumpIfNotBothSmi(left, right, &not_smis);
730  }
731
732  Label smi_values;
733  __ bind(&smi_values);
734  // Perform the operation.
735  Comment perform_smi(masm, "-- Perform smi operation");
736  switch (op) {
737    case Token::ADD:
738      ASSERT(right.is(rax));
739      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
740      break;
741
742    case Token::SUB:
743      __ SmiSub(left, left, right, &use_fp_on_smis);
744      __ movq(rax, left);
745      break;
746
747    case Token::MUL:
748      ASSERT(right.is(rax));
749      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
750      break;
751
752    case Token::DIV:
753      // SmiDiv will not accept left in rdx or right in rax.
754      __ movq(rbx, rax);
755      __ movq(rcx, rdx);
756      __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
757      break;
758
759    case Token::MOD:
760      // SmiMod will not accept left in rdx or right in rax.
761      __ movq(rbx, rax);
762      __ movq(rcx, rdx);
763      __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
764      break;
765
766    case Token::BIT_OR: {
767      ASSERT(right.is(rax));
768      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
769      break;
770      }
771    case Token::BIT_XOR:
772      ASSERT(right.is(rax));
773      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
774      break;
775
776    case Token::BIT_AND:
777      ASSERT(right.is(rax));
778      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
779      break;
780
781    case Token::SHL:
782      __ SmiShiftLeft(left, left, right);
783      __ movq(rax, left);
784      break;
785
786    case Token::SAR:
787      __ SmiShiftArithmeticRight(left, left, right);
788      __ movq(rax, left);
789      break;
790
791    case Token::SHR:
792      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
793      __ movq(rax, left);
794      break;
795
796    default:
797      UNREACHABLE();
798  }
799
800  // 5. Emit return of result in rax.  Some operations have registers pushed.
801  __ ret(0);
802
803  if (use_fp_on_smis.is_linked()) {
804    // 6. For some operations emit inline code to perform floating point
805    //    operations on known smis (e.g., if the result of the operation
806    //    overflowed the smi range).
807    __ bind(&use_fp_on_smis);
808    if (op == Token::DIV || op == Token::MOD) {
809      // Restore left and right to rdx and rax.
810      __ movq(rdx, rcx);
811      __ movq(rax, rbx);
812    }
813
814    if (generate_inline_heapnumber_results) {
815      __ AllocateHeapNumber(rcx, rbx, slow);
816      Comment perform_float(masm, "-- Perform float operation on smis");
817      if (op == Token::SHR) {
818        __ SmiToInteger32(left, left);
819        __ cvtqsi2sd(xmm0, left);
820      } else {
821        FloatingPointHelper::LoadSSE2SmiOperands(masm);
822        switch (op) {
823        case Token::ADD: __ addsd(xmm0, xmm1); break;
824        case Token::SUB: __ subsd(xmm0, xmm1); break;
825        case Token::MUL: __ mulsd(xmm0, xmm1); break;
826        case Token::DIV: __ divsd(xmm0, xmm1); break;
827        default: UNREACHABLE();
828        }
829      }
830      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
831      __ movq(rax, rcx);
832      __ ret(0);
833    } else {
834      __ jmp(&fail);
835    }
836  }
837
838  // 7. Non-smi operands reach the end of the code generated by
839  //    GenerateSmiCode, and fall through to subsequent code,
840  //    with the operands in rdx and rax.
841  //    But first we check if non-smi values are HeapNumbers holding
842  //    values that could be smi.
843  __ bind(&not_smis);
844  Comment done_comment(masm, "-- Enter non-smi code");
845  FloatingPointHelper::ConvertUndefined convert_undefined =
846      FloatingPointHelper::BAILOUT_ON_UNDEFINED;
847  // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
848  if (op == Token::BIT_AND ||
849      op == Token::BIT_OR ||
850      op == Token::BIT_XOR ||
851      op == Token::SAR ||
852      op == Token::SHL ||
853      op == Token::SHR) {
854    convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
855  }
856  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
857                                     &smi_values, &fail, convert_undefined);
858  __ jmp(&smi_values);
859  __ bind(&fail);
860}
861
862
863static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
864                                                      Label* alloc_failure,
865                                                      OverwriteMode mode);
866
867
868static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
869                                                   Label* allocation_failure,
870                                                   Label* non_numeric_failure,
871                                                   Token::Value op,
872                                                   OverwriteMode mode) {
873  switch (op) {
874    case Token::ADD:
875    case Token::SUB:
876    case Token::MUL:
877    case Token::DIV: {
878      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
879
880      switch (op) {
881        case Token::ADD: __ addsd(xmm0, xmm1); break;
882        case Token::SUB: __ subsd(xmm0, xmm1); break;
883        case Token::MUL: __ mulsd(xmm0, xmm1); break;
884        case Token::DIV: __ divsd(xmm0, xmm1); break;
885        default: UNREACHABLE();
886      }
887      BinaryOpStub_GenerateHeapResultAllocation(
888          masm, allocation_failure, mode);
889      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
890      __ ret(0);
891      break;
892    }
893    case Token::MOD: {
894      // For MOD we jump to the allocation_failure label, to call runtime.
895      __ jmp(allocation_failure);
896      break;
897    }
898    case Token::BIT_OR:
899    case Token::BIT_AND:
900    case Token::BIT_XOR:
901    case Token::SAR:
902    case Token::SHL:
903    case Token::SHR: {
904      Label non_smi_shr_result;
905      Register heap_number_map = r9;
906      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
907      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
908                                          heap_number_map);
909      switch (op) {
910        case Token::BIT_OR:  __ orl(rax, rcx); break;
911        case Token::BIT_AND: __ andl(rax, rcx); break;
912        case Token::BIT_XOR: __ xorl(rax, rcx); break;
913        case Token::SAR: __ sarl_cl(rax); break;
914        case Token::SHL: __ shll_cl(rax); break;
915        case Token::SHR: {
916          __ shrl_cl(rax);
917          // Check if result is negative. This can only happen for a shift
918          // by zero.
919          __ testl(rax, rax);
920          __ j(negative, &non_smi_shr_result);
921          break;
922        }
923        default: UNREACHABLE();
924      }
925      STATIC_ASSERT(kSmiValueSize == 32);
926      // Tag smi result and return.
927      __ Integer32ToSmi(rax, rax);
928      __ Ret();
929
930      // Logical shift right can produce an unsigned int32 that is not
931      // an int32, and so is not in the smi range.  Allocate a heap number
932      // in that case.
933      if (op == Token::SHR) {
934        __ bind(&non_smi_shr_result);
935        Label allocation_failed;
936        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
937        // Allocate heap number in new space.
938        // Not using AllocateHeapNumber macro in order to reuse
939        // already loaded heap_number_map.
940        __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
941                    TAG_OBJECT);
942        // Set the map.
943        __ AssertRootValue(heap_number_map,
944                           Heap::kHeapNumberMapRootIndex,
945                           kHeapNumberMapRegisterClobbered);
946        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
947                heap_number_map);
948        __ cvtqsi2sd(xmm0, rbx);
949        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
950        __ Ret();
951
952        __ bind(&allocation_failed);
953        // We need tagged values in rdx and rax for the following code,
954        // not int32 in rax and rcx.
955        __ Integer32ToSmi(rax, rcx);
956        __ Integer32ToSmi(rdx, rbx);
957        __ jmp(allocation_failure);
958      }
959      break;
960    }
961    default: UNREACHABLE(); break;
962  }
963  // No fall-through from this generated code.
964  if (FLAG_debug_code) {
965    __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
966  }
967}
968
969
970static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
971    MacroAssembler* masm) {
972  // Push arguments, but ensure they are under the return address
973  // for a tail call.
974  __ PopReturnAddressTo(rcx);
975  __ push(rdx);
976  __ push(rax);
977  __ PushReturnAddressFrom(rcx);
978}
979
980
981void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
982  ASSERT(op_ == Token::ADD);
983  Label left_not_string, call_runtime;
984
985  // Registers containing left and right operands respectively.
986  Register left = rdx;
987  Register right = rax;
988
989  // Test if left operand is a string.
990  __ JumpIfSmi(left, &left_not_string, Label::kNear);
991  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
992  __ j(above_equal, &left_not_string, Label::kNear);
993  StringAddStub string_add_left_stub(
994      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
995  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
996  __ TailCallStub(&string_add_left_stub);
997
998  // Left operand is not a string, test right.
999  __ bind(&left_not_string);
1000  __ JumpIfSmi(right, &call_runtime, Label::kNear);
1001  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
1002  __ j(above_equal, &call_runtime, Label::kNear);
1003
1004  StringAddStub string_add_right_stub(
1005      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
1006  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
1007  __ TailCallStub(&string_add_right_stub);
1008
1009  // Neither argument is a string.
1010  __ bind(&call_runtime);
1011}
1012
1013
1014void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1015  Label right_arg_changed, call_runtime;
1016
1017  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
1018    // It is guaranteed that the value will fit into a Smi, because if it
1019    // didn't, we wouldn't be here, see BinaryOp_Patch.
1020    __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
1021    __ j(not_equal, &right_arg_changed);
1022  }
1023
1024  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1025      result_type_ == BinaryOpIC::SMI) {
1026    // Only allow smi results.
1027    BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
1028  } else {
1029    // Allow heap number result and don't make a transition if a heap number
1030    // cannot be allocated.
1031    BinaryOpStub_GenerateSmiCode(
1032        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
1033  }
1034
1035  // Code falls through if the result is not returned as either a smi or heap
1036  // number.
1037  __ bind(&right_arg_changed);
1038  GenerateTypeTransition(masm);
1039
1040  if (call_runtime.is_linked()) {
1041    __ bind(&call_runtime);
1042    {
1043      FrameScope scope(masm, StackFrame::INTERNAL);
1044      GenerateRegisterArgsPush(masm);
1045      GenerateCallRuntime(masm);
1046    }
1047    __ Ret();
1048  }
1049}
1050
1051
1052void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1053  // The int32 case is identical to the Smi case.  We avoid creating this
1054  // ic state on x64.
1055  UNREACHABLE();
1056}
1057
1058
1059void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1060  Label call_runtime;
1061  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
1062  ASSERT(op_ == Token::ADD);
1063  // If both arguments are strings, call the string add stub.
1064  // Otherwise, do a transition.
1065
1066  // Registers containing left and right operands respectively.
1067  Register left = rdx;
1068  Register right = rax;
1069
1070  // Test if left operand is a string.
1071  __ JumpIfSmi(left, &call_runtime);
1072  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
1073  __ j(above_equal, &call_runtime);
1074
1075  // Test if right operand is a string.
1076  __ JumpIfSmi(right, &call_runtime);
1077  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
1078  __ j(above_equal, &call_runtime);
1079
1080  StringAddStub string_add_stub(
1081      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
1082  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
1083  __ TailCallStub(&string_add_stub);
1084
1085  __ bind(&call_runtime);
1086  GenerateTypeTransition(masm);
1087}
1088
1089
1090void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1091  Label call_runtime;
1092
1093  if (op_ == Token::ADD) {
1094    // Handle string addition here, because it is the only operation
1095    // that does not do a ToNumber conversion on the operands.
1096    GenerateAddStrings(masm);
1097  }
1098
1099  // Convert oddball arguments to numbers.
1100  Label check, done;
1101  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1102  __ j(not_equal, &check, Label::kNear);
1103  if (Token::IsBitOp(op_)) {
1104    __ xor_(rdx, rdx);
1105  } else {
1106    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
1107  }
1108  __ jmp(&done, Label::kNear);
1109  __ bind(&check);
1110  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1111  __ j(not_equal, &done, Label::kNear);
1112  if (Token::IsBitOp(op_)) {
1113    __ xor_(rax, rax);
1114  } else {
1115    __ LoadRoot(rax, Heap::kNanValueRootIndex);
1116  }
1117  __ bind(&done);
1118
1119  GenerateNumberStub(masm);
1120}
1121
1122
1123static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
1124                                       Register input,
1125                                       Label* fail) {
1126  Label ok;
1127  __ JumpIfSmi(input, &ok, Label::kNear);
1128  Register heap_number_map = r8;
1129  Register scratch1 = r9;
1130  Register scratch2 = r10;
1131  // HeapNumbers containing 32bit integer values are also allowed.
1132  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1133  __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
1134  __ j(not_equal, fail);
1135  __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
1136  // Convert, convert back, and compare the two doubles' bits.
1137  __ cvttsd2siq(scratch2, xmm0);
1138  __ cvtlsi2sd(xmm1, scratch2);
1139  __ movq(scratch1, xmm0);
1140  __ movq(scratch2, xmm1);
1141  __ cmpq(scratch1, scratch2);
1142  __ j(not_equal, fail);
1143  __ bind(&ok);
1144}
1145
1146
1147void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
1148  Label gc_required, not_number;
1149
1150  // It could be that only SMIs have been seen at either the left
1151  // or the right operand. For precise type feedback, patch the IC
1152  // again if this changes.
1153  if (left_type_ == BinaryOpIC::SMI) {
1154    BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
1155  }
1156  if (right_type_ == BinaryOpIC::SMI) {
1157    BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
1158  }
1159
1160  BinaryOpStub_GenerateFloatingPointCode(
1161      masm, &gc_required, &not_number, op_, mode_);
1162
1163  __ bind(&not_number);
1164  GenerateTypeTransition(masm);
1165
1166  __ bind(&gc_required);
1167  {
1168    FrameScope scope(masm, StackFrame::INTERNAL);
1169    GenerateRegisterArgsPush(masm);
1170    GenerateCallRuntime(masm);
1171  }
1172  __ Ret();
1173}
1174
1175
1176void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
1177  Label call_runtime, call_string_add_or_runtime;
1178
1179  BinaryOpStub_GenerateSmiCode(
1180      masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
1181
1182  BinaryOpStub_GenerateFloatingPointCode(
1183      masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
1184
1185  __ bind(&call_string_add_or_runtime);
1186  if (op_ == Token::ADD) {
1187    GenerateAddStrings(masm);
1188  }
1189
1190  __ bind(&call_runtime);
1191  {
1192    FrameScope scope(masm, StackFrame::INTERNAL);
1193    GenerateRegisterArgsPush(masm);
1194    GenerateCallRuntime(masm);
1195  }
1196  __ Ret();
1197}
1198
1199
1200static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
1201                                                      Label* alloc_failure,
1202                                                      OverwriteMode mode) {
1203  Label skip_allocation;
1204  switch (mode) {
1205    case OVERWRITE_LEFT: {
1206      // If the argument in rdx is already an object, we skip the
1207      // allocation of a heap number.
1208      __ JumpIfNotSmi(rdx, &skip_allocation);
1209      // Allocate a heap number for the result. Keep rax and rdx intact
1210      // for the possible runtime call.
1211      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
1212      // Now rdx can be overwritten losing one of the arguments as we are
1213      // now done and will not need it any more.
1214      __ movq(rdx, rbx);
1215      __ bind(&skip_allocation);
1216      // Use object in rdx as a result holder
1217      __ movq(rax, rdx);
1218      break;
1219    }
1220    case OVERWRITE_RIGHT:
1221      // If the argument in rax is already an object, we skip the
1222      // allocation of a heap number.
1223      __ JumpIfNotSmi(rax, &skip_allocation);
1224      // Fall through!
1225    case NO_OVERWRITE:
1226      // Allocate a heap number for the result. Keep rax and rdx intact
1227      // for the possible runtime call.
1228      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
1229      // Now rax can be overwritten losing one of the arguments as we are
1230      // now done and will not need it any more.
1231      __ movq(rax, rbx);
1232      __ bind(&skip_allocation);
1233      break;
1234    default: UNREACHABLE();
1235  }
1236}
1237
1238
1239void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1240  __ push(rdx);
1241  __ push(rax);
1242}
1243
1244
1245void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1246  // TAGGED case:
1247  //   Input:
1248  //     rsp[8] : argument (should be number).
1249  //     rsp[0] : return address.
1250  //   Output:
1251  //     rax: tagged double result.
1252  // UNTAGGED case:
1253  //   Input::
1254  //     rsp[0] : return address.
1255  //     xmm1   : untagged double input argument
1256  //   Output:
1257  //     xmm1   : untagged double result.
1258
1259  Label runtime_call;
1260  Label runtime_call_clear_stack;
1261  Label skip_cache;
1262  const bool tagged = (argument_type_ == TAGGED);
1263  if (tagged) {
1264    Label input_not_smi, loaded;
1265    // Test that rax is a number.
1266    __ movq(rax, Operand(rsp, kPointerSize));
1267    __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
1268    // Input is a smi. Untag and load it onto the FPU stack.
1269    // Then load the bits of the double into rbx.
1270    __ SmiToInteger32(rax, rax);
1271    __ subq(rsp, Immediate(kDoubleSize));
1272    __ cvtlsi2sd(xmm1, rax);
1273    __ movsd(Operand(rsp, 0), xmm1);
1274    __ movq(rbx, xmm1);
1275    __ movq(rdx, xmm1);
1276    __ fld_d(Operand(rsp, 0));
1277    __ addq(rsp, Immediate(kDoubleSize));
1278    __ jmp(&loaded, Label::kNear);
1279
1280    __ bind(&input_not_smi);
1281    // Check if input is a HeapNumber.
1282    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
1283    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
1284    __ j(not_equal, &runtime_call);
1285    // Input is a HeapNumber. Push it on the FPU stack and load its
1286    // bits into rbx.
1287    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1288    __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
1289    __ movq(rdx, rbx);
1290
1291    __ bind(&loaded);
1292  } else {  // UNTAGGED.
1293    __ movq(rbx, xmm1);
1294    __ movq(rdx, xmm1);
1295  }
1296
1297  // ST[0] == double value, if TAGGED.
1298  // rbx = bits of double value.
1299  // rdx = also bits of double value.
1300  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
1301  //   h = h0 = bits ^ (bits >> 32);
1302  //   h ^= h >> 16;
1303  //   h ^= h >> 8;
1304  //   h = h & (cacheSize - 1);
1305  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
1306  __ sar(rdx, Immediate(32));
1307  __ xorl(rdx, rbx);
1308  __ movl(rcx, rdx);
1309  __ movl(rax, rdx);
1310  __ movl(rdi, rdx);
1311  __ sarl(rdx, Immediate(8));
1312  __ sarl(rcx, Immediate(16));
1313  __ sarl(rax, Immediate(24));
1314  __ xorl(rcx, rdx);
1315  __ xorl(rax, rdi);
1316  __ xorl(rcx, rax);
1317  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
1318  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
1319
1320  // ST[0] == double value.
1321  // rbx = bits of double value.
1322  // rcx = TranscendentalCache::hash(double value).
1323  ExternalReference cache_array =
1324      ExternalReference::transcendental_cache_array_address(masm->isolate());
1325  __ movq(rax, cache_array);
1326  int cache_array_index =
1327      type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
1328  __ movq(rax, Operand(rax, cache_array_index));
1329  // rax points to the cache for the type type_.
1330  // If NULL, the cache hasn't been initialized yet, so go through runtime.
1331  __ testq(rax, rax);
1332  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
1333#ifdef DEBUG
1334  // Check that the layout of cache elements match expectations.
1335  {  // NOLINT - doesn't like a single brace on a line.
1336    TranscendentalCache::SubCache::Element test_elem[2];
1337    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
1338    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
1339    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
1340    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
1341    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
1342    // Two uint_32's and a pointer per element.
1343    CHECK_EQ(2 * kIntSize + 1 * kPointerSize,
1344             static_cast<int>(elem2_start - elem_start));
1345    CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
1346    CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
1347    CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
1348  }
1349#endif
1350  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
1351  __ addl(rcx, rcx);
1352  __ lea(rcx, Operand(rax, rcx, times_8, 0));
1353  // Check if cache matches: Double value is stored in uint32_t[2] array.
1354  Label cache_miss;
1355  __ cmpq(rbx, Operand(rcx, 0));
1356  __ j(not_equal, &cache_miss, Label::kNear);
1357  // Cache hit!
1358  Counters* counters = masm->isolate()->counters();
1359  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
1360  __ movq(rax, Operand(rcx, 2 * kIntSize));
1361  if (tagged) {
1362    __ fstp(0);  // Clear FPU stack.
1363    __ ret(kPointerSize);
1364  } else {  // UNTAGGED.
1365    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1366    __ Ret();
1367  }
1368
1369  __ bind(&cache_miss);
1370  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
1371  // Update cache with new value.
1372  if (tagged) {
1373  __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
1374  } else {  // UNTAGGED.
1375    __ AllocateHeapNumber(rax, rdi, &skip_cache);
1376    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
1377    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
1378  }
1379  GenerateOperation(masm, type_);
1380  __ movq(Operand(rcx, 0), rbx);
1381  __ movq(Operand(rcx, 2 * kIntSize), rax);
1382  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
1383  if (tagged) {
1384    __ ret(kPointerSize);
1385  } else {  // UNTAGGED.
1386    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1387    __ Ret();
1388
1389    // Skip cache and return answer directly, only in untagged case.
1390    __ bind(&skip_cache);
1391    __ subq(rsp, Immediate(kDoubleSize));
1392    __ movsd(Operand(rsp, 0), xmm1);
1393    __ fld_d(Operand(rsp, 0));
1394    GenerateOperation(masm, type_);
1395    __ fstp_d(Operand(rsp, 0));
1396    __ movsd(xmm1, Operand(rsp, 0));
1397    __ addq(rsp, Immediate(kDoubleSize));
1398    // We return the value in xmm1 without adding it to the cache, but
1399    // we cause a scavenging GC so that future allocations will succeed.
1400    {
1401      FrameScope scope(masm, StackFrame::INTERNAL);
1402      // Allocate an unused object bigger than a HeapNumber.
1403      __ Push(Smi::FromInt(2 * kDoubleSize));
1404      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
1405    }
1406    __ Ret();
1407  }
1408
1409  // Call runtime, doing whatever allocation and cleanup is necessary.
1410  if (tagged) {
1411    __ bind(&runtime_call_clear_stack);
1412    __ fstp(0);
1413    __ bind(&runtime_call);
1414    __ TailCallExternalReference(
1415        ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
1416  } else {  // UNTAGGED.
1417    __ bind(&runtime_call_clear_stack);
1418    __ bind(&runtime_call);
1419    __ AllocateHeapNumber(rax, rdi, &skip_cache);
1420    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
1421    {
1422      FrameScope scope(masm, StackFrame::INTERNAL);
1423      __ push(rax);
1424      __ CallRuntime(RuntimeFunction(), 1);
1425    }
1426    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1427    __ Ret();
1428  }
1429}
1430
1431
1432Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1433  switch (type_) {
1434    // Add more cases when necessary.
1435    case TranscendentalCache::SIN: return Runtime::kMath_sin;
1436    case TranscendentalCache::COS: return Runtime::kMath_cos;
1437    case TranscendentalCache::TAN: return Runtime::kMath_tan;
1438    case TranscendentalCache::LOG: return Runtime::kMath_log;
1439    default:
1440      UNIMPLEMENTED();
1441      return Runtime::kAbort;
1442  }
1443}
1444
1445
1446void TranscendentalCacheStub::GenerateOperation(
1447    MacroAssembler* masm, TranscendentalCache::Type type) {
1448  // Registers:
1449  // rax: Newly allocated HeapNumber, which must be preserved.
1450  // rbx: Bits of input double. Must be preserved.
1451  // rcx: Pointer to cache entry. Must be preserved.
1452  // st(0): Input double
1453  Label done;
1454  if (type == TranscendentalCache::SIN ||
1455      type == TranscendentalCache::COS ||
1456      type == TranscendentalCache::TAN) {
1457    // Both fsin and fcos require arguments in the range +/-2^63 and
1458    // return NaN for infinities and NaN. They can share all code except
1459    // the actual fsin/fcos operation.
1460    Label in_range;
1461    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
1462    // work. We must reduce it to the appropriate range.
1463    __ movq(rdi, rbx);
1464    // Move exponent and sign bits to low bits.
1465    __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
1466    // Remove sign bit.
1467    __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
1468    int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
1469    __ cmpl(rdi, Immediate(supported_exponent_limit));
1470    __ j(below, &in_range);
1471    // Check for infinity and NaN. Both return NaN for sin.
1472    __ cmpl(rdi, Immediate(0x7ff));
1473    Label non_nan_result;
1474    __ j(not_equal, &non_nan_result, Label::kNear);
1475    // Input is +/-Infinity or NaN. Result is NaN.
1476    __ fstp(0);
1477    // NaN is represented by 0x7ff8000000000000.
1478    __ subq(rsp, Immediate(kPointerSize));
1479    __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
1480    __ movl(Operand(rsp, 0), Immediate(0x00000000));
1481    __ fld_d(Operand(rsp, 0));
1482    __ addq(rsp, Immediate(kPointerSize));
1483    __ jmp(&done);
1484
1485    __ bind(&non_nan_result);
1486
1487    // Use fpmod to restrict argument to the range +/-2*PI.
1488    __ movq(rdi, rax);  // Save rax before using fnstsw_ax.
1489    __ fldpi();
1490    __ fadd(0);
1491    __ fld(1);
1492    // FPU Stack: input, 2*pi, input.
1493    {
1494      Label no_exceptions;
1495      __ fwait();
1496      __ fnstsw_ax();
1497      // Clear if Illegal Operand or Zero Division exceptions are set.
1498      __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
1499      __ j(zero, &no_exceptions);
1500      __ fnclex();
1501      __ bind(&no_exceptions);
1502    }
1503
1504    // Compute st(0) % st(1)
1505    {
1506      Label partial_remainder_loop;
1507      __ bind(&partial_remainder_loop);
1508      __ fprem1();
1509      __ fwait();
1510      __ fnstsw_ax();
1511      __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
1512      // If C2 is set, computation only has partial result. Loop to
1513      // continue computation.
1514      __ j(not_zero, &partial_remainder_loop);
1515  }
1516    // FPU Stack: input, 2*pi, input % 2*pi
1517    __ fstp(2);
1518    // FPU Stack: input % 2*pi, 2*pi,
1519    __ fstp(0);
1520    // FPU Stack: input % 2*pi
1521    __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
1522    __ bind(&in_range);
1523    switch (type) {
1524      case TranscendentalCache::SIN:
1525        __ fsin();
1526        break;
1527      case TranscendentalCache::COS:
1528        __ fcos();
1529        break;
1530      case TranscendentalCache::TAN:
1531        // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
1532        // FP register stack.
1533        __ fptan();
1534        __ fstp(0);  // Pop FP register stack.
1535        break;
1536      default:
1537        UNREACHABLE();
1538    }
1539    __ bind(&done);
1540  } else {
1541    ASSERT(type == TranscendentalCache::LOG);
1542    __ fldln2();
1543    __ fxch();
1544    __ fyl2x();
1545  }
1546}
1547
1548
1549// Input: rdx, rax are the left and right objects of a bit op.
1550// Output: rax, rcx are left and right integers for a bit op.
1551void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
1552  // Check float operands.
1553  Label done;
1554  Label rax_is_smi;
1555  Label rax_is_object;
1556  Label rdx_is_object;
1557
1558  __ JumpIfNotSmi(rdx, &rdx_is_object);
1559  __ SmiToInteger32(rdx, rdx);
1560  __ JumpIfSmi(rax, &rax_is_smi);
1561
1562  __ bind(&rax_is_object);
1563  DoubleToIStub stub1(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
1564                     true);
1565  __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
1566
1567  __ jmp(&done);
1568
1569  __ bind(&rdx_is_object);
1570  DoubleToIStub stub2(rdx, rdx, HeapNumber::kValueOffset - kHeapObjectTag,
1571                     true);
1572  __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
1573  __ JumpIfNotSmi(rax, &rax_is_object);
1574
1575  __ bind(&rax_is_smi);
1576  __ SmiToInteger32(rcx, rax);
1577
1578  __ bind(&done);
1579  __ movl(rax, rdx);
1580}
1581
1582
1583// Input: rdx, rax are the left and right objects of a bit op.
1584// Output: rax, rcx are left and right integers for a bit op.
1585// Jump to conversion_failure: rdx and rax are unchanged.
1586void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
1587                                         Label* conversion_failure,
1588                                         Register heap_number_map) {
1589  // Check float operands.
1590  Label arg1_is_object, check_undefined_arg1;
1591  Label arg2_is_object, check_undefined_arg2;
1592  Label load_arg2, done;
1593
1594  __ JumpIfNotSmi(rdx, &arg1_is_object);
1595  __ SmiToInteger32(r8, rdx);
1596  __ jmp(&load_arg2);
1597
1598  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1599  __ bind(&check_undefined_arg1);
1600  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1601  __ j(not_equal, conversion_failure);
1602  __ Set(r8, 0);
1603  __ jmp(&load_arg2);
1604
1605  __ bind(&arg1_is_object);
1606  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
1607  __ j(not_equal, &check_undefined_arg1);
1608  // Get the untagged integer version of the rdx heap number in rcx.
1609  DoubleToIStub stub1(rdx, r8, HeapNumber::kValueOffset - kHeapObjectTag,
1610                      true);
1611  __ call(stub1.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
1612
1613  // Here r8 has the untagged integer, rax has a Smi or a heap number.
1614  __ bind(&load_arg2);
1615  // Test if arg2 is a Smi.
1616  __ JumpIfNotSmi(rax, &arg2_is_object);
1617  __ SmiToInteger32(rcx, rax);
1618  __ jmp(&done);
1619
1620  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
1621  __ bind(&check_undefined_arg2);
1622  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
1623  __ j(not_equal, conversion_failure);
1624  __ Set(rcx, 0);
1625  __ jmp(&done);
1626
1627  __ bind(&arg2_is_object);
1628  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
1629  __ j(not_equal, &check_undefined_arg2);
1630  // Get the untagged integer version of the rax heap number in rcx.
1631  DoubleToIStub stub2(rax, rcx, HeapNumber::kValueOffset - kHeapObjectTag,
1632                      true);
1633  __ call(stub2.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
1634
1635  __ bind(&done);
1636  __ movl(rax, r8);
1637}
1638
1639
1640void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
1641  __ SmiToInteger32(kScratchRegister, rdx);
1642  __ cvtlsi2sd(xmm0, kScratchRegister);
1643  __ SmiToInteger32(kScratchRegister, rax);
1644  __ cvtlsi2sd(xmm1, kScratchRegister);
1645}
1646
1647
1648void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
1649  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
1650  // Load operand in rdx into xmm0.
1651  __ JumpIfSmi(rdx, &load_smi_rdx);
1652  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1653  // Load operand in rax into xmm1.
1654  __ JumpIfSmi(rax, &load_smi_rax);
1655  __ bind(&load_nonsmi_rax);
1656  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1657  __ jmp(&done);
1658
1659  __ bind(&load_smi_rdx);
1660  __ SmiToInteger32(kScratchRegister, rdx);
1661  __ cvtlsi2sd(xmm0, kScratchRegister);
1662  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1663
1664  __ bind(&load_smi_rax);
1665  __ SmiToInteger32(kScratchRegister, rax);
1666  __ cvtlsi2sd(xmm1, kScratchRegister);
1667
1668  __ bind(&done);
1669}
1670
1671
1672void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
1673                                                  Label* not_numbers) {
1674  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
1675  // Load operand in rdx into xmm0, or branch to not_numbers.
1676  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
1677  __ JumpIfSmi(rdx, &load_smi_rdx);
1678  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
1679  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
1680  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1681  // Load operand in rax into xmm1, or branch to not_numbers.
1682  __ JumpIfSmi(rax, &load_smi_rax);
1683
1684  __ bind(&load_nonsmi_rax);
1685  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
1686  __ j(not_equal, not_numbers);
1687  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1688  __ jmp(&done);
1689
1690  __ bind(&load_smi_rdx);
1691  __ SmiToInteger32(kScratchRegister, rdx);
1692  __ cvtlsi2sd(xmm0, kScratchRegister);
1693  __ JumpIfNotSmi(rax, &load_nonsmi_rax);
1694
1695  __ bind(&load_smi_rax);
1696  __ SmiToInteger32(kScratchRegister, rax);
1697  __ cvtlsi2sd(xmm1, kScratchRegister);
1698  __ bind(&done);
1699}
1700
1701
1702void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
1703                                        Register first,
1704                                        Register second,
1705                                        Register scratch1,
1706                                        Register scratch2,
1707                                        Register scratch3,
1708                                        Label* on_success,
1709                                        Label* on_not_smis,
1710                                        ConvertUndefined convert_undefined) {
1711  Register heap_number_map = scratch3;
1712  Register smi_result = scratch1;
1713  Label done, maybe_undefined_first, maybe_undefined_second, first_done;
1714
1715  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1716
1717  Label first_smi;
1718  __ JumpIfSmi(first, &first_smi, Label::kNear);
1719  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
1720  __ j(not_equal,
1721       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
1722           ? &maybe_undefined_first
1723           : on_not_smis);
1724  // Convert HeapNumber to smi if possible.
1725  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
1726  __ movq(scratch2, xmm0);
1727  __ cvttsd2siq(smi_result, xmm0);
1728  // Check if conversion was successful by converting back and
1729  // comparing to the original double's bits.
1730  __ cvtlsi2sd(xmm1, smi_result);
1731  __ movq(kScratchRegister, xmm1);
1732  __ cmpq(scratch2, kScratchRegister);
1733  __ j(not_equal, on_not_smis);
1734  __ Integer32ToSmi(first, smi_result);
1735
1736  __ bind(&first_done);
1737  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
1738  __ bind(&first_smi);
1739  __ AssertNotSmi(second);
1740  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
1741  __ j(not_equal,
1742       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
1743           ? &maybe_undefined_second
1744           : on_not_smis);
1745  // Convert second to smi, if possible.
1746  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
1747  __ movq(scratch2, xmm0);
1748  __ cvttsd2siq(smi_result, xmm0);
1749  __ cvtlsi2sd(xmm1, smi_result);
1750  __ movq(kScratchRegister, xmm1);
1751  __ cmpq(scratch2, kScratchRegister);
1752  __ j(not_equal, on_not_smis);
1753  __ Integer32ToSmi(second, smi_result);
1754  if (on_success != NULL) {
1755    __ jmp(on_success);
1756  } else {
1757    __ jmp(&done);
1758  }
1759
1760  __ bind(&maybe_undefined_first);
1761  __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
1762  __ j(not_equal, on_not_smis);
1763  __ xor_(first, first);
1764  __ jmp(&first_done);
1765
1766  __ bind(&maybe_undefined_second);
1767  __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
1768  __ j(not_equal, on_not_smis);
1769  __ xor_(second, second);
1770  if (on_success != NULL) {
1771    __ jmp(on_success);
1772  }
1773  // Else: fall through.
1774
1775  __ bind(&done);
1776}
1777
1778
1779void MathPowStub::Generate(MacroAssembler* masm) {
1780  const Register exponent = rdx;
1781  const Register base = rax;
1782  const Register scratch = rcx;
1783  const XMMRegister double_result = xmm3;
1784  const XMMRegister double_base = xmm2;
1785  const XMMRegister double_exponent = xmm1;
1786  const XMMRegister double_scratch = xmm4;
1787
1788  Label call_runtime, done, exponent_not_smi, int_exponent;
1789
1790  // Save 1 in double_result - we need this several times later on.
1791  __ movq(scratch, Immediate(1));
1792  __ cvtlsi2sd(double_result, scratch);
1793
1794  if (exponent_type_ == ON_STACK) {
1795    Label base_is_smi, unpack_exponent;
1796    // The exponent and base are supplied as arguments on the stack.
1797    // This can only happen if the stub is called from non-optimized code.
1798    // Load input parameters from stack.
1799    __ movq(base, Operand(rsp, 2 * kPointerSize));
1800    __ movq(exponent, Operand(rsp, 1 * kPointerSize));
1801    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
1802    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
1803                   Heap::kHeapNumberMapRootIndex);
1804    __ j(not_equal, &call_runtime);
1805
1806    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
1807    __ jmp(&unpack_exponent, Label::kNear);
1808
1809    __ bind(&base_is_smi);
1810    __ SmiToInteger32(base, base);
1811    __ cvtlsi2sd(double_base, base);
1812    __ bind(&unpack_exponent);
1813
1814    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
1815    __ SmiToInteger32(exponent, exponent);
1816    __ jmp(&int_exponent);
1817
1818    __ bind(&exponent_not_smi);
1819    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
1820                   Heap::kHeapNumberMapRootIndex);
1821    __ j(not_equal, &call_runtime);
1822    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
1823  } else if (exponent_type_ == TAGGED) {
1824    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
1825    __ SmiToInteger32(exponent, exponent);
1826    __ jmp(&int_exponent);
1827
1828    __ bind(&exponent_not_smi);
1829    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
1830  }
1831
1832  if (exponent_type_ != INTEGER) {
1833    Label fast_power;
1834    // Detect integer exponents stored as double.
1835    __ cvttsd2si(exponent, double_exponent);
1836    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
1837    __ cmpl(exponent, Immediate(0x80000000u));
1838    __ j(equal, &call_runtime);
1839    __ cvtlsi2sd(double_scratch, exponent);
1840    // Already ruled out NaNs for exponent.
1841    __ ucomisd(double_exponent, double_scratch);
1842    __ j(equal, &int_exponent);
1843
1844    if (exponent_type_ == ON_STACK) {
1845      // Detect square root case.  Crankshaft detects constant +/-0.5 at
1846      // compile time and uses DoMathPowHalf instead.  We then skip this check
1847      // for non-constant cases of +/-0.5 as these hardly occur.
1848      Label continue_sqrt, continue_rsqrt, not_plus_half;
1849      // Test for 0.5.
1850      // Load double_scratch with 0.5.
1851      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
1852      __ movq(double_scratch, scratch);
1853      // Already ruled out NaNs for exponent.
1854      __ ucomisd(double_scratch, double_exponent);
1855      __ j(not_equal, &not_plus_half, Label::kNear);
1856
1857      // Calculates square root of base.  Check for the special case of
1858      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1859      // According to IEEE-754, double-precision -Infinity has the highest
1860      // 12 bits set and the lowest 52 bits cleared.
1861      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
1862      __ movq(double_scratch, scratch);
1863      __ ucomisd(double_scratch, double_base);
1864      // Comparing -Infinity with NaN results in "unordered", which sets the
1865      // zero flag as if both were equal.  However, it also sets the carry flag.
1866      __ j(not_equal, &continue_sqrt, Label::kNear);
1867      __ j(carry, &continue_sqrt, Label::kNear);
1868
1869      // Set result to Infinity in the special case.
1870      __ xorps(double_result, double_result);
1871      __ subsd(double_result, double_scratch);
1872      __ jmp(&done);
1873
1874      __ bind(&continue_sqrt);
1875      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
1876      __ xorps(double_scratch, double_scratch);
1877      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
1878      __ sqrtsd(double_result, double_scratch);
1879      __ jmp(&done);
1880
1881      // Test for -0.5.
1882      __ bind(&not_plus_half);
1883      // Load double_scratch with -0.5 by substracting 1.
1884      __ subsd(double_scratch, double_result);
1885      // Already ruled out NaNs for exponent.
1886      __ ucomisd(double_scratch, double_exponent);
1887      __ j(not_equal, &fast_power, Label::kNear);
1888
1889      // Calculates reciprocal of square root of base.  Check for the special
1890      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1891      // According to IEEE-754, double-precision -Infinity has the highest
1892      // 12 bits set and the lowest 52 bits cleared.
1893      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
1894      __ movq(double_scratch, scratch);
1895      __ ucomisd(double_scratch, double_base);
1896      // Comparing -Infinity with NaN results in "unordered", which sets the
1897      // zero flag as if both were equal.  However, it also sets the carry flag.
1898      __ j(not_equal, &continue_rsqrt, Label::kNear);
1899      __ j(carry, &continue_rsqrt, Label::kNear);
1900
1901      // Set result to 0 in the special case.
1902      __ xorps(double_result, double_result);
1903      __ jmp(&done);
1904
1905      __ bind(&continue_rsqrt);
1906      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
1907      __ xorps(double_exponent, double_exponent);
1908      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
1909      __ sqrtsd(double_exponent, double_exponent);
1910      __ divsd(double_result, double_exponent);
1911      __ jmp(&done);
1912    }
1913
1914    // Using FPU instructions to calculate power.
1915    Label fast_power_failed;
1916    __ bind(&fast_power);
1917    __ fnclex();  // Clear flags to catch exceptions later.
1918    // Transfer (B)ase and (E)xponent onto the FPU register stack.
1919    __ subq(rsp, Immediate(kDoubleSize));
1920    __ movsd(Operand(rsp, 0), double_exponent);
1921    __ fld_d(Operand(rsp, 0));  // E
1922    __ movsd(Operand(rsp, 0), double_base);
1923    __ fld_d(Operand(rsp, 0));  // B, E
1924
1925    // Exponent is in st(1) and base is in st(0)
1926    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
1927    // FYL2X calculates st(1) * log2(st(0))
1928    __ fyl2x();    // X
1929    __ fld(0);     // X, X
1930    __ frndint();  // rnd(X), X
1931    __ fsub(1);    // rnd(X), X-rnd(X)
1932    __ fxch(1);    // X - rnd(X), rnd(X)
1933    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
1934    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
1935    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
1936    __ faddp(1);   // 2^(X-rnd(X)), rnd(X)
1937    // FSCALE calculates st(0) * 2^st(1)
1938    __ fscale();   // 2^X, rnd(X)
1939    __ fstp(1);
1940    // Bail out to runtime in case of exceptions in the status word.
1941    __ fnstsw_ax();
1942    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
1943    __ j(not_zero, &fast_power_failed, Label::kNear);
1944    __ fstp_d(Operand(rsp, 0));
1945    __ movsd(double_result, Operand(rsp, 0));
1946    __ addq(rsp, Immediate(kDoubleSize));
1947    __ jmp(&done);
1948
1949    __ bind(&fast_power_failed);
1950    __ fninit();
1951    __ addq(rsp, Immediate(kDoubleSize));
1952    __ jmp(&call_runtime);
1953  }
1954
1955  // Calculate power with integer exponent.
1956  __ bind(&int_exponent);
1957  const XMMRegister double_scratch2 = double_exponent;
1958  // Back up exponent as we need to check if exponent is negative later.
1959  __ movq(scratch, exponent);  // Back up exponent.
1960  __ movsd(double_scratch, double_base);  // Back up base.
1961  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.
1962
1963  // Get absolute value of exponent.
1964  Label no_neg, while_true, while_false;
1965  __ testl(scratch, scratch);
1966  __ j(positive, &no_neg, Label::kNear);
1967  __ negl(scratch);
1968  __ bind(&no_neg);
1969
1970  __ j(zero, &while_false, Label::kNear);
1971  __ shrl(scratch, Immediate(1));
1972  // Above condition means CF==0 && ZF==0.  This means that the
1973  // bit that has been shifted out is 0 and the result is not 0.
1974  __ j(above, &while_true, Label::kNear);
1975  __ movsd(double_result, double_scratch);
1976  __ j(zero, &while_false, Label::kNear);
1977
1978  __ bind(&while_true);
1979  __ shrl(scratch, Immediate(1));
1980  __ mulsd(double_scratch, double_scratch);
1981  __ j(above, &while_true, Label::kNear);
1982  __ mulsd(double_result, double_scratch);
1983  __ j(not_zero, &while_true);
1984
1985  __ bind(&while_false);
1986  // If the exponent is negative, return 1/result.
1987  __ testl(exponent, exponent);
1988  __ j(greater, &done);
1989  __ divsd(double_scratch2, double_result);
1990  __ movsd(double_result, double_scratch2);
1991  // Test whether result is zero.  Bail out to check for subnormal result.
1992  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1993  __ xorps(double_scratch2, double_scratch2);
1994  __ ucomisd(double_scratch2, double_result);
1995  // double_exponent aliased as double_scratch2 has already been overwritten
1996  // and may not have contained the exponent value in the first place when the
1997  // input was a smi.  We reset it with exponent value before bailing out.
1998  __ j(not_equal, &done);
1999  __ cvtlsi2sd(double_exponent, exponent);
2000
2001  // Returning or bailing out.
2002  Counters* counters = masm->isolate()->counters();
2003  if (exponent_type_ == ON_STACK) {
2004    // The arguments are still on the stack.
2005    __ bind(&call_runtime);
2006    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2007
2008    // The stub is called from non-optimized code, which expects the result
2009    // as heap number in rax.
2010    __ bind(&done);
2011    __ AllocateHeapNumber(rax, rcx, &call_runtime);
2012    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
2013    __ IncrementCounter(counters->math_pow(), 1);
2014    __ ret(2 * kPointerSize);
2015  } else {
2016    __ bind(&call_runtime);
2017    // Move base to the correct argument register.  Exponent is already in xmm1.
2018    __ movsd(xmm0, double_base);
2019    ASSERT(double_exponent.is(xmm1));
2020    {
2021      AllowExternalCallThatCantCauseGC scope(masm);
2022      __ PrepareCallCFunction(2);
2023      __ CallCFunction(
2024          ExternalReference::power_double_double_function(masm->isolate()), 2);
2025    }
2026    // Return value is in xmm0.
2027    __ movsd(double_result, xmm0);
2028    // Restore context register.
2029    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2030
2031    __ bind(&done);
2032    __ IncrementCounter(counters->math_pow(), 1);
2033    __ ret(0);
2034  }
2035}
2036
2037
2038void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
2039  Label miss;
2040  Register receiver;
2041  if (kind() == Code::KEYED_LOAD_IC) {
2042    // ----------- S t a t e -------------
2043    //  -- rax    : key
2044    //  -- rdx    : receiver
2045    //  -- rsp[0] : return address
2046    // -----------------------------------
2047    __ Cmp(rax, masm->isolate()->factory()->prototype_string());
2048    __ j(not_equal, &miss);
2049    receiver = rdx;
2050  } else {
2051    ASSERT(kind() == Code::LOAD_IC);
2052    // ----------- S t a t e -------------
2053    //  -- rax    : receiver
2054    //  -- rcx    : name
2055    //  -- rsp[0] : return address
2056    // -----------------------------------
2057    receiver = rax;
2058  }
2059
2060  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
2061  __ bind(&miss);
2062  StubCompiler::TailCallBuiltin(
2063      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2064}
2065
2066
2067void StringLengthStub::Generate(MacroAssembler* masm) {
2068  Label miss;
2069  Register receiver;
2070  if (kind() == Code::KEYED_LOAD_IC) {
2071    // ----------- S t a t e -------------
2072    //  -- rax    : key
2073    //  -- rdx    : receiver
2074    //  -- rsp[0] : return address
2075    // -----------------------------------
2076    __ Cmp(rax, masm->isolate()->factory()->length_string());
2077    __ j(not_equal, &miss);
2078    receiver = rdx;
2079  } else {
2080    ASSERT(kind() == Code::LOAD_IC);
2081    // ----------- S t a t e -------------
2082    //  -- rax    : receiver
2083    //  -- rcx    : name
2084    //  -- rsp[0] : return address
2085    // -----------------------------------
2086    receiver = rax;
2087  }
2088
2089  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
2090                                         support_wrapper_);
2091  __ bind(&miss);
2092  StubCompiler::TailCallBuiltin(
2093      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2094}
2095
2096
2097void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
2098  // ----------- S t a t e -------------
2099  //  -- rax    : value
2100  //  -- rcx    : key
2101  //  -- rdx    : receiver
2102  //  -- rsp[0] : return address
2103  // -----------------------------------
2104  //
2105  // This accepts as a receiver anything JSArray::SetElementsLength accepts
2106  // (currently anything except for external arrays which means anything with
2107  // elements of FixedArray type).  Value must be a number, but only smis are
2108  // accepted as the most common case.
2109
2110  Label miss;
2111
2112  Register receiver = rdx;
2113  Register value = rax;
2114  Register scratch = rbx;
2115  if (kind() == Code::KEYED_STORE_IC) {
2116    __ Cmp(rcx, masm->isolate()->factory()->length_string());
2117    __ j(not_equal, &miss);
2118  }
2119
2120  // Check that the receiver isn't a smi.
2121  __ JumpIfSmi(receiver, &miss);
2122
2123  // Check that the object is a JS array.
2124  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
2125  __ j(not_equal, &miss);
2126
2127  // Check that elements are FixedArray.
2128  // We rely on StoreIC_ArrayLength below to deal with all types of
2129  // fast elements (including COW).
2130  __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
2131  __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
2132  __ j(not_equal, &miss);
2133
2134  // Check that the array has fast properties, otherwise the length
2135  // property might have been redefined.
2136  __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
2137  __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
2138                 Heap::kHashTableMapRootIndex);
2139  __ j(equal, &miss);
2140
2141  // Check that value is a smi.
2142  __ JumpIfNotSmi(value, &miss);
2143
2144  // Prepare tail call to StoreIC_ArrayLength.
2145  __ PopReturnAddressTo(scratch);
2146  __ push(receiver);
2147  __ push(value);
2148  __ PushReturnAddressFrom(scratch);
2149
2150  ExternalReference ref =
2151      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
2152  __ TailCallExternalReference(ref, 2, 1);
2153
2154  __ bind(&miss);
2155
2156  StubCompiler::TailCallBuiltin(
2157      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2158}
2159
2160
2161void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2162  // The key is in rdx and the parameter count is in rax.
2163
2164  // The displacement is used for skipping the frame pointer on the
2165  // stack. It is the offset of the last parameter (if any) relative
2166  // to the frame pointer.
2167  static const int kDisplacement = 1 * kPointerSize;
2168
2169  // Check that the key is a smi.
2170  Label slow;
2171  __ JumpIfNotSmi(rdx, &slow);
2172
2173  // Check if the calling frame is an arguments adaptor frame.  We look at the
2174  // context offset, and if the frame is not a regular one, then we find a
2175  // Smi instead of the context.  We can't use SmiCompare here, because that
2176  // only works for comparing two smis.
2177  Label adaptor;
2178  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2179  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
2180         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2181  __ j(equal, &adaptor);
2182
2183  // Check index against formal parameters count limit passed in
2184  // through register rax. Use unsigned comparison to get negative
2185  // check for free.
2186  __ cmpq(rdx, rax);
2187  __ j(above_equal, &slow);
2188
2189  // Read the argument from the stack and return it.
2190  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
2191  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
2192  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2193  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
2194  __ Ret();
2195
2196  // Arguments adaptor case: Check index against actual arguments
2197  // limit found in the arguments adaptor frame. Use unsigned
2198  // comparison to get negative check for free.
2199  __ bind(&adaptor);
2200  __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2201  __ cmpq(rdx, rcx);
2202  __ j(above_equal, &slow);
2203
2204  // Read the argument from the stack and return it.
2205  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
2206  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
2207  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2208  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
2209  __ Ret();
2210
2211  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2212  // by calling the runtime system.
2213  __ bind(&slow);
2214  __ PopReturnAddressTo(rbx);
2215  __ push(rdx);
2216  __ PushReturnAddressFrom(rbx);
2217  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2218}
2219
2220
2221void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2222  // Stack layout:
2223  //  rsp[0]  : return address
2224  //  rsp[8]  : number of parameters (tagged)
2225  //  rsp[16] : receiver displacement
2226  //  rsp[24] : function
2227  // Registers used over the whole function:
2228  //  rbx: the mapped parameter count (untagged)
2229  //  rax: the allocated object (tagged).
2230
2231  Factory* factory = masm->isolate()->factory();
2232
2233  __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
2234  // rbx = parameter count (untagged)
2235
2236  // Check if the calling frame is an arguments adaptor frame.
2237  Label runtime;
2238  Label adaptor_frame, try_allocate;
2239  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2240  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
2241  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2242  __ j(equal, &adaptor_frame);
2243
2244  // No adaptor, parameter count = argument count.
2245  __ movq(rcx, rbx);
2246  __ jmp(&try_allocate, Label::kNear);
2247
2248  // We have an adaptor frame. Patch the parameters pointer.
2249  __ bind(&adaptor_frame);
2250  __ SmiToInteger64(rcx,
2251                    Operand(rdx,
2252                            ArgumentsAdaptorFrameConstants::kLengthOffset));
2253  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2254                      StandardFrameConstants::kCallerSPOffset));
2255  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2256
2257  // rbx = parameter count (untagged)
2258  // rcx = argument count (untagged)
2259  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
2260  __ cmpq(rbx, rcx);
2261  __ j(less_equal, &try_allocate, Label::kNear);
2262  __ movq(rbx, rcx);
2263
2264  __ bind(&try_allocate);
2265
2266  // Compute the sizes of backing store, parameter map, and arguments object.
2267  // 1. Parameter map, has 2 extra words containing context and backing store.
2268  const int kParameterMapHeaderSize =
2269      FixedArray::kHeaderSize + 2 * kPointerSize;
2270  Label no_parameter_map;
2271  __ xor_(r8, r8);
2272  __ testq(rbx, rbx);
2273  __ j(zero, &no_parameter_map, Label::kNear);
2274  __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
2275  __ bind(&no_parameter_map);
2276
2277  // 2. Backing store.
2278  __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
2279
2280  // 3. Arguments object.
2281  __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
2282
2283  // Do the allocation of all three objects in one go.
2284  __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
2285
2286  // rax = address of new object(s) (tagged)
2287  // rcx = argument count (untagged)
2288  // Get the arguments boilerplate from the current native context into rdi.
2289  Label has_mapped_parameters, copy;
2290  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2291  __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
2292  __ testq(rbx, rbx);
2293  __ j(not_zero, &has_mapped_parameters, Label::kNear);
2294
2295  const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
2296  __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
2297  __ jmp(&copy, Label::kNear);
2298
2299  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
2300  __ bind(&has_mapped_parameters);
2301  __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
2302  __ bind(&copy);
2303
2304  // rax = address of new object (tagged)
2305  // rbx = mapped parameter count (untagged)
2306  // rcx = argument count (untagged)
2307  // rdi = address of boilerplate object (tagged)
2308  // Copy the JS object part.
2309  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2310    __ movq(rdx, FieldOperand(rdi, i));
2311    __ movq(FieldOperand(rax, i), rdx);
2312  }
2313
2314  // Set up the callee in-object property.
2315  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2316  __ movq(rdx, Operand(rsp, 3 * kPointerSize));
2317  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2318                       Heap::kArgumentsCalleeIndex * kPointerSize),
2319          rdx);
2320
2321  // Use the length (smi tagged) and set that as an in-object property too.
2322  // Note: rcx is tagged from here on.
2323  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2324  __ Integer32ToSmi(rcx, rcx);
2325  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2326                       Heap::kArgumentsLengthIndex * kPointerSize),
2327          rcx);
2328
2329  // Set up the elements pointer in the allocated arguments object.
2330  // If we allocated a parameter map, edi will point there, otherwise to the
2331  // backing store.
2332  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
2333  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
2334
2335  // rax = address of new object (tagged)
2336  // rbx = mapped parameter count (untagged)
2337  // rcx = argument count (tagged)
2338  // rdi = address of parameter map or backing store (tagged)
2339
2340  // Initialize parameter map. If there are no mapped arguments, we're done.
2341  Label skip_parameter_map;
2342  __ testq(rbx, rbx);
2343  __ j(zero, &skip_parameter_map);
2344
2345  __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
2346  // rbx contains the untagged argument count. Add 2 and tag to write.
2347  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
2348  __ Integer64PlusConstantToSmi(r9, rbx, 2);
2349  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
2350  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
2351  __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
2352  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
2353
2354  // Copy the parameter slots and the holes in the arguments.
2355  // We need to fill in mapped_parameter_count slots. They index the context,
2356  // where parameters are stored in reverse order, at
2357  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
2358  // The mapped parameter thus need to get indices
2359  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
2360  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
2361  // We loop from right to left.
2362  Label parameters_loop, parameters_test;
2363
2364  // Load tagged parameter count into r9.
2365  __ Integer32ToSmi(r9, rbx);
2366  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
2367  __ addq(r8, Operand(rsp, 1 * kPointerSize));
2368  __ subq(r8, r9);
2369  __ Move(r11, factory->the_hole_value());
2370  __ movq(rdx, rdi);
2371  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
2372  // r9 = loop variable (tagged)
2373  // r8 = mapping index (tagged)
2374  // r11 = the hole value
2375  // rdx = address of parameter map (tagged)
2376  // rdi = address of backing store (tagged)
2377  __ jmp(&parameters_test, Label::kNear);
2378
2379  __ bind(&parameters_loop);
2380  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
2381  __ SmiToInteger64(kScratchRegister, r9);
2382  __ movq(FieldOperand(rdx, kScratchRegister,
2383                       times_pointer_size,
2384                       kParameterMapHeaderSize),
2385          r8);
2386  __ movq(FieldOperand(rdi, kScratchRegister,
2387                       times_pointer_size,
2388                       FixedArray::kHeaderSize),
2389          r11);
2390  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
2391  __ bind(&parameters_test);
2392  __ SmiTest(r9);
2393  __ j(not_zero, &parameters_loop, Label::kNear);
2394
2395  __ bind(&skip_parameter_map);
2396
2397  // rcx = argument count (tagged)
2398  // rdi = address of backing store (tagged)
2399  // Copy arguments header and remaining slots (if there are any).
2400  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
2401          factory->fixed_array_map());
2402  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
2403
2404  Label arguments_loop, arguments_test;
2405  __ movq(r8, rbx);
2406  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2407  // Untag rcx for the loop below.
2408  __ SmiToInteger64(rcx, rcx);
2409  __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
2410  __ subq(rdx, kScratchRegister);
2411  __ jmp(&arguments_test, Label::kNear);
2412
2413  __ bind(&arguments_loop);
2414  __ subq(rdx, Immediate(kPointerSize));
2415  __ movq(r9, Operand(rdx, 0));
2416  __ movq(FieldOperand(rdi, r8,
2417                       times_pointer_size,
2418                       FixedArray::kHeaderSize),
2419          r9);
2420  __ addq(r8, Immediate(1));
2421
2422  __ bind(&arguments_test);
2423  __ cmpq(r8, rcx);
2424  __ j(less, &arguments_loop, Label::kNear);
2425
2426  // Return and remove the on-stack parameters.
2427  __ ret(3 * kPointerSize);
2428
2429  // Do the runtime call to allocate the arguments object.
2430  // rcx = argument count (untagged)
2431  __ bind(&runtime);
2432  __ Integer32ToSmi(rcx, rcx);
2433  __ movq(Operand(rsp, 1 * kPointerSize), rcx);  // Patch argument count.
2434  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2435}
2436
2437
2438void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2439  // rsp[0]  : return address
2440  // rsp[8]  : number of parameters
2441  // rsp[16] : receiver displacement
2442  // rsp[24] : function
2443
2444  // Check if the calling frame is an arguments adaptor frame.
2445  Label runtime;
2446  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2447  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
2448  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2449  __ j(not_equal, &runtime);
2450
2451  // Patch the arguments.length and the parameters pointer.
2452  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2453  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
2454  __ SmiToInteger64(rcx, rcx);
2455  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2456              StandardFrameConstants::kCallerSPOffset));
2457  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2458
2459  __ bind(&runtime);
2460  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2461}
2462
2463
2464void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
2465  // rsp[0]  : return address
2466  // rsp[8]  : number of parameters
2467  // rsp[16] : receiver displacement
2468  // rsp[24] : function
2469
2470  // Check if the calling frame is an arguments adaptor frame.
2471  Label adaptor_frame, try_allocate, runtime;
2472  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2473  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
2474  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
2475  __ j(equal, &adaptor_frame);
2476
2477  // Get the length from the frame.
2478  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
2479  __ SmiToInteger64(rcx, rcx);
2480  __ jmp(&try_allocate);
2481
2482  // Patch the arguments.length and the parameters pointer.
2483  __ bind(&adaptor_frame);
2484  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2485  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
2486  __ SmiToInteger64(rcx, rcx);
2487  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
2488                      StandardFrameConstants::kCallerSPOffset));
2489  __ movq(Operand(rsp, 2 * kPointerSize), rdx);
2490
2491  // Try the new space allocation. Start out with computing the size of
2492  // the arguments object and the elements array.
2493  Label add_arguments_object;
2494  __ bind(&try_allocate);
2495  __ testq(rcx, rcx);
2496  __ j(zero, &add_arguments_object, Label::kNear);
2497  __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
2498  __ bind(&add_arguments_object);
2499  __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
2500
2501  // Do the allocation of both objects in one go.
2502  __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
2503
2504  // Get the arguments boilerplate from the current native context.
2505  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2506  __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
2507  const int offset =
2508      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
2509  __ movq(rdi, Operand(rdi, offset));
2510
2511  // Copy the JS object part.
2512  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2513    __ movq(rbx, FieldOperand(rdi, i));
2514    __ movq(FieldOperand(rax, i), rbx);
2515  }
2516
2517  // Get the length (smi tagged) and set that as an in-object property too.
2518  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2519  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
2520  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
2521                       Heap::kArgumentsLengthIndex * kPointerSize),
2522          rcx);
2523
2524  // If there are no actual arguments, we're done.
2525  Label done;
2526  __ testq(rcx, rcx);
2527  __ j(zero, &done);
2528
2529  // Get the parameters pointer from the stack.
2530  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
2531
2532  // Set up the elements pointer in the allocated arguments object and
2533  // initialize the header in the elements fixed array.
2534  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
2535  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
2536  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
2537  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
2538
2539
2540  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
2541  // Untag the length for the loop below.
2542  __ SmiToInteger64(rcx, rcx);
2543
2544  // Copy the fixed array slots.
2545  Label loop;
2546  __ bind(&loop);
2547  __ movq(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
2548  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
2549  __ addq(rdi, Immediate(kPointerSize));
2550  __ subq(rdx, Immediate(kPointerSize));
2551  __ decq(rcx);
2552  __ j(not_zero, &loop);
2553
2554  // Return and remove the on-stack parameters.
2555  __ bind(&done);
2556  __ ret(3 * kPointerSize);
2557
2558  // Do the runtime call to allocate the arguments object.
2559  __ bind(&runtime);
2560  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
2561}
2562
2563
2564void RegExpExecStub::Generate(MacroAssembler* masm) {
2565  // Just jump directly to runtime if native RegExp is not selected at compile
2566  // time or if regexp entry in generated code is turned off runtime switch or
2567  // at compilation.
2568#ifdef V8_INTERPRETED_REGEXP
2569  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2570#else  // V8_INTERPRETED_REGEXP
2571
2572  // Stack frame on entry.
2573  //  rsp[0]  : return address
2574  //  rsp[8]  : last_match_info (expected JSArray)
2575  //  rsp[16] : previous index
2576  //  rsp[24] : subject string
2577  //  rsp[32] : JSRegExp object
2578
2579  static const int kLastMatchInfoOffset = 1 * kPointerSize;
2580  static const int kPreviousIndexOffset = 2 * kPointerSize;
2581  static const int kSubjectOffset = 3 * kPointerSize;
2582  static const int kJSRegExpOffset = 4 * kPointerSize;
2583
2584  Label runtime;
2585  // Ensure that a RegExp stack is allocated.
2586  Isolate* isolate = masm->isolate();
2587  ExternalReference address_of_regexp_stack_memory_address =
2588      ExternalReference::address_of_regexp_stack_memory_address(isolate);
2589  ExternalReference address_of_regexp_stack_memory_size =
2590      ExternalReference::address_of_regexp_stack_memory_size(isolate);
2591  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
2592  __ testq(kScratchRegister, kScratchRegister);
2593  __ j(zero, &runtime);
2594
2595  // Check that the first argument is a JSRegExp object.
2596  __ movq(rax, Operand(rsp, kJSRegExpOffset));
2597  __ JumpIfSmi(rax, &runtime);
2598  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
2599  __ j(not_equal, &runtime);
2600
2601  // Check that the RegExp has been compiled (data contains a fixed array).
2602  __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
2603  if (FLAG_debug_code) {
2604    Condition is_smi = masm->CheckSmi(rax);
2605    __ Check(NegateCondition(is_smi),
2606        kUnexpectedTypeForRegExpDataFixedArrayExpected);
2607    __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
2608    __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2609  }
2610
2611  // rax: RegExp data (FixedArray)
2612  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2613  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
2614  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
2615  __ j(not_equal, &runtime);
2616
2617  // rax: RegExp data (FixedArray)
2618  // Check that the number of captures fit in the static offsets vector buffer.
2619  __ SmiToInteger32(rdx,
2620                    FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
2621  // Check (number_of_captures + 1) * 2 <= offsets vector size
2622  // Or              number_of_captures <= offsets vector size / 2 - 1
2623  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2624  __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
2625  __ j(above, &runtime);
2626
2627  // Reset offset for possibly sliced string.
2628  __ Set(r14, 0);
2629  __ movq(rdi, Operand(rsp, kSubjectOffset));
2630  __ JumpIfSmi(rdi, &runtime);
2631  __ movq(r15, rdi);  // Make a copy of the original subject string.
2632  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2633  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2634  // rax: RegExp data (FixedArray)
2635  // rdi: subject string
2636  // r15: subject string
2637  // Handle subject string according to its encoding and representation:
2638  // (1) Sequential two byte?  If yes, go to (9).
2639  // (2) Sequential one byte?  If yes, go to (6).
2640  // (3) Anything but sequential or cons?  If yes, go to (7).
2641  // (4) Cons string.  If the string is flat, replace subject with first string.
2642  //     Otherwise bailout.
2643  // (5a) Is subject sequential two byte?  If yes, go to (9).
2644  // (5b) Is subject external?  If yes, go to (8).
2645  // (6) One byte sequential.  Load regexp code for one byte.
2646  // (E) Carry on.
2647  /// [...]
2648
2649  // Deferred code at the end of the stub:
2650  // (7) Not a long external string?  If yes, go to (10).
2651  // (8) External string.  Make it, offset-wise, look like a sequential string.
2652  // (8a) Is the external string one byte?  If yes, go to (6).
2653  // (9) Two byte sequential.  Load regexp code for one byte. Go to (E).
2654  // (10) Short external string or not a string?  If yes, bail out to runtime.
2655  // (11) Sliced string.  Replace subject with parent. Go to (5a).
2656
2657  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
2658        external_string /* 8 */, check_underlying /* 5a */,
2659        not_seq_nor_cons /* 7 */, check_code /* E */,
2660        not_long_external /* 10 */;
2661
2662  // (1) Sequential two byte?  If yes, go to (9).
2663  __ andb(rbx, Immediate(kIsNotStringMask |
2664                         kStringRepresentationMask |
2665                         kStringEncodingMask |
2666                         kShortExternalStringMask));
2667  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
2668  __ j(zero, &seq_two_byte_string);  // Go to (9).
2669
2670  // (2) Sequential one byte?  If yes, go to (6).
2671  // Any other sequential string must be one byte.
2672  __ andb(rbx, Immediate(kIsNotStringMask |
2673                         kStringRepresentationMask |
2674                         kShortExternalStringMask));
2675  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).
2676
2677  // (3) Anything but sequential or cons?  If yes, go to (7).
2678  // We check whether the subject string is a cons, since sequential strings
2679  // have already been covered.
2680  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2681  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2682  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2683  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2684  __ cmpq(rbx, Immediate(kExternalStringTag));
2685  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).
2686
2687  // (4) Cons string.  Check that it's flat.
2688  // Replace subject with first string and reload instance type.
2689  __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
2690                 Heap::kempty_stringRootIndex);
2691  __ j(not_equal, &runtime);
2692  __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
2693  __ bind(&check_underlying);
2694  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2695  __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2696
2697  // (5a) Is subject sequential two byte?  If yes, go to (9).
2698  __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
2699  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
2700  __ j(zero, &seq_two_byte_string);  // Go to (9).
2701  // (5b) Is subject external?  If yes, go to (8).
2702  __ testb(rbx, Immediate(kStringRepresentationMask));
2703  // The underlying external string is never a short external string.
2704  STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
2705  STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2706  __ j(not_zero, &external_string);  // Go to (8)
2707
2708  // (6) One byte sequential.  Load regexp code for one byte.
2709  __ bind(&seq_one_byte_string);
2710  // rax: RegExp data (FixedArray)
2711  __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
2712  __ Set(rcx, 1);  // Type is one byte.
2713
2714  // (E) Carry on.  String handling is done.
2715  __ bind(&check_code);
2716  // r11: irregexp code
2717  // Check that the irregexp code has been generated for the actual string
2718  // encoding. If it has, the field contains a code object otherwise it contains
2719  // smi (code flushing support)
2720  __ JumpIfSmi(r11, &runtime);
2721
2722  // rdi: sequential subject string (or look-alike, external string)
2723  // r15: original subject string
2724  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
2725  // r11: code
2726  // Load used arguments before starting to push arguments for call to native
2727  // RegExp code to avoid handling changing stack height.
2728  // We have to use r15 instead of rdi to load the length because rdi might
2729  // have been only made to look like a sequential string when it actually
2730  // is an external string.
2731  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
2732  __ JumpIfNotSmi(rbx, &runtime);
2733  __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
2734  __ j(above_equal, &runtime);
2735  __ SmiToInteger64(rbx, rbx);
2736
2737  // rdi: subject string
2738  // rbx: previous index
2739  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
2740  // r11: code
2741  // All checks done. Now push arguments for native regexp code.
2742  Counters* counters = masm->isolate()->counters();
2743  __ IncrementCounter(counters->regexp_entry_native(), 1);
2744
2745  // Isolates: note we add an additional parameter here (isolate pointer).
2746  static const int kRegExpExecuteArguments = 9;
2747  int argument_slots_on_stack =
2748      masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
2749  __ EnterApiExitFrame(argument_slots_on_stack);
2750
2751  // Argument 9: Pass current isolate address.
2752  __ LoadAddress(kScratchRegister,
2753                 ExternalReference::isolate_address(masm->isolate()));
2754  __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2755          kScratchRegister);
2756
2757  // Argument 8: Indicate that this is a direct call from JavaScript.
2758  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
2759          Immediate(1));
2760
2761  // Argument 7: Start (high end) of backtracking stack memory area.
2762  __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
2763  __ movq(r9, Operand(kScratchRegister, 0));
2764  __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
2765  __ addq(r9, Operand(kScratchRegister, 0));
2766  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
2767
2768  // Argument 6: Set the number of capture registers to zero to force global
2769  // regexps to behave as non-global.  This does not affect non-global regexps.
2770  // Argument 6 is passed in r9 on Linux and on the stack on Windows.
2771#ifdef _WIN64
2772  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize),
2773          Immediate(0));
2774#else
2775  __ Set(r9, 0);
2776#endif
2777
2778  // Argument 5: static offsets vector buffer.
2779  __ LoadAddress(r8,
2780                 ExternalReference::address_of_static_offsets_vector(isolate));
2781  // Argument 5 passed in r8 on Linux and on the stack on Windows.
2782#ifdef _WIN64
2783  __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8);
2784#endif
2785
2786  // rdi: subject string
2787  // rbx: previous index
2788  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
2789  // r11: code
2790  // r14: slice offset
2791  // r15: original subject string
2792
2793  // Argument 2: Previous index.
2794  __ movq(arg_reg_2, rbx);
2795
2796  // Argument 4: End of string data
2797  // Argument 3: Start of string data
2798  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
2799  // Prepare start and end index of the input.
2800  // Load the length from the original sliced string if that is the case.
2801  __ addq(rbx, r14);
2802  __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
2803  __ addq(r14, arg_reg_3);  // Using arg3 as scratch.
2804
2805  // rbx: start index of the input
2806  // r14: end index of the input
2807  // r15: original subject string
2808  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
2809  __ j(zero, &setup_two_byte, Label::kNear);
2810  __ lea(arg_reg_4,
2811         FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
2812  __ lea(arg_reg_3,
2813         FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
2814  __ jmp(&setup_rest, Label::kNear);
2815  __ bind(&setup_two_byte);
2816  __ lea(arg_reg_4,
2817         FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
2818  __ lea(arg_reg_3,
2819         FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
2820  __ bind(&setup_rest);
2821
2822  // Argument 1: Original subject string.
2823  // The original subject is in the previous stack frame. Therefore we have to
2824  // use rbp, which points exactly to one pointer size below the previous rsp.
2825  // (Because creating a new stack frame pushes the previous rbp onto the stack
2826  // and thereby moves up rsp by one kPointerSize.)
2827  __ movq(arg_reg_1, r15);
2828
2829  // Locate the code entry and call it.
2830  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
2831  __ call(r11);
2832
2833  __ LeaveApiExitFrame();
2834
2835  // Check the result.
2836  Label success;
2837  Label exception;
2838  __ cmpl(rax, Immediate(1));
2839  // We expect exactly one result since we force the called regexp to behave
2840  // as non-global.
2841  __ j(equal, &success, Label::kNear);
2842  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
2843  __ j(equal, &exception);
2844  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
2845  // If none of the above, it can only be retry.
2846  // Handle that in the runtime system.
2847  __ j(not_equal, &runtime);
2848
2849  // For failure return null.
2850  __ LoadRoot(rax, Heap::kNullValueRootIndex);
2851  __ ret(4 * kPointerSize);
2852
2853  // Load RegExp data.
2854  __ bind(&success);
2855  __ movq(rax, Operand(rsp, kJSRegExpOffset));
2856  __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
2857  __ SmiToInteger32(rax,
2858                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
2859  // Calculate number of capture registers (number_of_captures + 1) * 2.
2860  __ leal(rdx, Operand(rax, rax, times_1, 2));
2861
2862  // rdx: Number of capture registers
2863  // Check that the fourth object is a JSArray object.
2864  __ movq(r15, Operand(rsp, kLastMatchInfoOffset));
2865  __ JumpIfSmi(r15, &runtime);
2866  __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
2867  __ j(not_equal, &runtime);
2868  // Check that the JSArray is in fast case.
2869  __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
2870  __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
2871  __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
2872  __ j(not_equal, &runtime);
2873  // Check that the last match info has space for the capture registers and the
2874  // additional information. Ensure no overflow in add.
2875  STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
2876  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
2877  __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
2878  __ cmpl(rdx, rax);
2879  __ j(greater, &runtime);
2880
2881  // rbx: last_match_info backing store (FixedArray)
2882  // rdx: number of capture registers
2883  // Store the capture count.
2884  __ Integer32ToSmi(kScratchRegister, rdx);
2885  __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
2886          kScratchRegister);
2887  // Store last subject and last input.
2888  __ movq(rax, Operand(rsp, kSubjectOffset));
2889  __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
2890  __ movq(rcx, rax);
2891  __ RecordWriteField(rbx,
2892                      RegExpImpl::kLastSubjectOffset,
2893                      rax,
2894                      rdi,
2895                      kDontSaveFPRegs);
2896  __ movq(rax, rcx);
2897  __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
2898  __ RecordWriteField(rbx,
2899                      RegExpImpl::kLastInputOffset,
2900                      rax,
2901                      rdi,
2902                      kDontSaveFPRegs);
2903
2904  // Get the static offsets vector filled by the native regexp code.
2905  __ LoadAddress(rcx,
2906                 ExternalReference::address_of_static_offsets_vector(isolate));
2907
2908  // rbx: last_match_info backing store (FixedArray)
2909  // rcx: offsets vector
2910  // rdx: number of capture registers
2911  Label next_capture, done;
2912  // Capture register counter starts from number of capture registers and
2913  // counts down until wraping after zero.
2914  __ bind(&next_capture);
2915  __ subq(rdx, Immediate(1));
2916  __ j(negative, &done, Label::kNear);
2917  // Read the value from the static offsets vector buffer and make it a smi.
2918  __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
2919  __ Integer32ToSmi(rdi, rdi);
2920  // Store the smi value in the last match info.
2921  __ movq(FieldOperand(rbx,
2922                       rdx,
2923                       times_pointer_size,
2924                       RegExpImpl::kFirstCaptureOffset),
2925          rdi);
2926  __ jmp(&next_capture);
2927  __ bind(&done);
2928
2929  // Return last match info.
2930  __ movq(rax, r15);
2931  __ ret(4 * kPointerSize);
2932
2933  __ bind(&exception);
2934  // Result must now be exception. If there is no pending exception already a
2935  // stack overflow (on the backtrack stack) was detected in RegExp code but
2936  // haven't created the exception yet. Handle that in the runtime system.
2937  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2938  ExternalReference pending_exception_address(
2939      Isolate::kPendingExceptionAddress, isolate);
2940  Operand pending_exception_operand =
2941      masm->ExternalOperand(pending_exception_address, rbx);
2942  __ movq(rax, pending_exception_operand);
2943  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
2944  __ cmpq(rax, rdx);
2945  __ j(equal, &runtime);
2946  __ movq(pending_exception_operand, rdx);
2947
2948  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
2949  Label termination_exception;
2950  __ j(equal, &termination_exception, Label::kNear);
2951  __ Throw(rax);
2952
2953  __ bind(&termination_exception);
2954  __ ThrowUncatchable(rax);
2955
2956  // Do the runtime call to execute the regexp.
2957  __ bind(&runtime);
2958  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2959
2960  // Deferred code for string handling.
2961  // (7) Not a long external string?  If yes, go to (10).
2962  __ bind(&not_seq_nor_cons);
2963  // Compare flags are still set from (3).
2964  __ j(greater, &not_long_external, Label::kNear);  // Go to (10).
2965
2966  // (8) External string.  Short external strings have been ruled out.
2967  __ bind(&external_string);
2968  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2969  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2970  if (FLAG_debug_code) {
2971    // Assert that we do not have a cons or slice (indirect strings) here.
2972    // Sequential strings have already been ruled out.
2973    __ testb(rbx, Immediate(kIsIndirectStringMask));
2974    __ Assert(zero, kExternalStringExpectedButNotFound);
2975  }
2976  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
2977  // Move the pointer so that offset-wise, it looks like a sequential string.
2978  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2979  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2980  STATIC_ASSERT(kTwoByteStringTag == 0);
2981  // (8a) Is the external string one byte?  If yes, go to (6).
2982  __ testb(rbx, Immediate(kStringEncodingMask));
2983  __ j(not_zero, &seq_one_byte_string);  // Goto (6).
2984
2985  // rdi: subject string (flat two-byte)
2986  // rax: RegExp data (FixedArray)
2987  // (9) Two byte sequential.  Load regexp code for one byte.  Go to (E).
2988  __ bind(&seq_two_byte_string);
2989  __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
2990  __ Set(rcx, 0);  // Type is two byte.
2991  __ jmp(&check_code);  // Go to (E).
2992
2993  // (10) Not a string or a short external string?  If yes, bail out to runtime.
2994  __ bind(&not_long_external);
2995  // Catch non-string subject or short external string.
2996  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
2997  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
2998  __ j(not_zero, &runtime);
2999
3000  // (11) Sliced string.  Replace subject with parent. Go to (5a).
3001  // Load offset into r14 and replace subject string with parent.
3002  __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
3003  __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
3004  __ jmp(&check_underlying);
3005#endif  // V8_INTERPRETED_REGEXP
3006}
3007
3008
3009void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3010  const int kMaxInlineLength = 100;
3011  Label slowcase;
3012  Label done;
3013  __ movq(r8, Operand(rsp, kPointerSize * 3));
3014  __ JumpIfNotSmi(r8, &slowcase);
3015  __ SmiToInteger32(rbx, r8);
3016  __ cmpl(rbx, Immediate(kMaxInlineLength));
3017  __ j(above, &slowcase);
3018  // Smi-tagging is equivalent to multiplying by 2.
3019  STATIC_ASSERT(kSmiTag == 0);
3020  STATIC_ASSERT(kSmiTagSize == 1);
3021  // Allocate RegExpResult followed by FixedArray with size in rbx.
3022  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
3023  // Elements:  [Map][Length][..elements..]
3024  __ Allocate(JSRegExpResult::kSize + FixedArray::kHeaderSize,
3025              times_pointer_size,
3026              rbx,  // In: Number of elements.
3027              rax,  // Out: Start of allocation (tagged).
3028              rcx,  // Out: End of allocation.
3029              rdx,  // Scratch register
3030              &slowcase,
3031              TAG_OBJECT);
3032  // rax: Start of allocated area, object-tagged.
3033  // rbx: Number of array elements as int32.
3034  // r8: Number of array elements as smi.
3035
3036  // Set JSArray map to global.regexp_result_map().
3037  __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
3038  __ movq(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
3039  __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
3040  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
3041
3042  // Set empty properties FixedArray.
3043  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
3044  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
3045
3046  // Set elements to point to FixedArray allocated right after the JSArray.
3047  __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
3048  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
3049
3050  // Set input, index and length fields from arguments.
3051  __ movq(r8, Operand(rsp, kPointerSize * 1));
3052  __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
3053  __ movq(r8, Operand(rsp, kPointerSize * 2));
3054  __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
3055  __ movq(r8, Operand(rsp, kPointerSize * 3));
3056  __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
3057
3058  // Fill out the elements FixedArray.
3059  // rax: JSArray.
3060  // rcx: FixedArray.
3061  // rbx: Number of elements in array as int32.
3062
3063  // Set map.
3064  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
3065  __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
3066  // Set length.
3067  __ Integer32ToSmi(rdx, rbx);
3068  __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
3069  // Fill contents of fixed-array with undefined.
3070  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
3071  __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
3072  // Fill fixed array elements with undefined.
3073  // rax: JSArray.
3074  // rbx: Number of elements in array that remains to be filled, as int32.
3075  // rcx: Start of elements in FixedArray.
3076  // rdx: undefined.
3077  Label loop;
3078  __ testl(rbx, rbx);
3079  __ bind(&loop);
3080  __ j(less_equal, &done);  // Jump if rcx is negative or zero.
3081  __ subl(rbx, Immediate(1));
3082  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
3083  __ jmp(&loop);
3084
3085  __ bind(&done);
3086  __ ret(3 * kPointerSize);
3087
3088  __ bind(&slowcase);
3089  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
3090}
3091
3092
3093void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
3094                                                         Register object,
3095                                                         Register result,
3096                                                         Register scratch1,
3097                                                         Register scratch2,
3098                                                         Label* not_found) {
3099  // Use of registers. Register result is used as a temporary.
3100  Register number_string_cache = result;
3101  Register mask = scratch1;
3102  Register scratch = scratch2;
3103
3104  // Load the number string cache.
3105  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
3106
3107  // Make the hash mask from the length of the number string cache. It
3108  // contains two elements (number and string) for each cache entry.
3109  __ SmiToInteger32(
3110      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
3111  __ shrl(mask, Immediate(1));
3112  __ subq(mask, Immediate(1));  // Make mask.
3113
3114  // Calculate the entry in the number string cache. The hash value in the
3115  // number string cache for smis is just the smi value, and the hash for
3116  // doubles is the xor of the upper and lower words. See
3117  // Heap::GetNumberStringCache.
3118  Label is_smi;
3119  Label load_result_from_cache;
3120  Factory* factory = masm->isolate()->factory();
3121  __ JumpIfSmi(object, &is_smi);
3122  __ CheckMap(object,
3123              factory->heap_number_map(),
3124              not_found,
3125              DONT_DO_SMI_CHECK);
3126
3127  STATIC_ASSERT(8 == kDoubleSize);
3128  __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
3129  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
3130  GenerateConvertHashCodeToIndex(masm, scratch, mask);
3131
3132  Register index = scratch;
3133  Register probe = mask;
3134  __ movq(probe,
3135          FieldOperand(number_string_cache,
3136                        index,
3137                        times_1,
3138                        FixedArray::kHeaderSize));
3139  __ JumpIfSmi(probe, not_found);
3140  __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
3141  __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
3142  __ ucomisd(xmm0, xmm1);
3143  __ j(parity_even, not_found);  // Bail out if NaN is involved.
3144  __ j(not_equal, not_found);  // The cache did not contain this value.
3145  __ jmp(&load_result_from_cache);
3146
3147  __ bind(&is_smi);
3148  __ SmiToInteger32(scratch, object);
3149  GenerateConvertHashCodeToIndex(masm, scratch, mask);
3150
3151  // Check if the entry is the smi we are looking for.
3152  __ cmpq(object,
3153          FieldOperand(number_string_cache,
3154                       index,
3155                       times_1,
3156                       FixedArray::kHeaderSize));
3157  __ j(not_equal, not_found);
3158
3159  // Get the result from the cache.
3160  __ bind(&load_result_from_cache);
3161  __ movq(result,
3162          FieldOperand(number_string_cache,
3163                       index,
3164                       times_1,
3165                       FixedArray::kHeaderSize + kPointerSize));
3166  Counters* counters = masm->isolate()->counters();
3167  __ IncrementCounter(counters->number_to_string_native(), 1);
3168}
3169
3170
3171void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
3172                                                        Register hash,
3173                                                        Register mask) {
3174  __ and_(hash, mask);
3175  // Each entry in string cache consists of two pointer sized fields,
3176  // but times_twice_pointer_size (multiplication by 16) scale factor
3177  // is not supported by addrmode on x64 platform.
3178  // So we have to premultiply entry index before lookup.
3179  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
3180}
3181
3182
3183void NumberToStringStub::Generate(MacroAssembler* masm) {
3184  Label runtime;
3185
3186  __ movq(rbx, Operand(rsp, kPointerSize));
3187
3188  // Generate code to lookup number in the number string cache.
3189  GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
3190  __ ret(1 * kPointerSize);
3191
3192  __ bind(&runtime);
3193  // Handle number to string in the runtime system if not found in the cache.
3194  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
3195}
3196
3197
3198static int NegativeComparisonResult(Condition cc) {
3199  ASSERT(cc != equal);
3200  ASSERT((cc == less) || (cc == less_equal)
3201      || (cc == greater) || (cc == greater_equal));
3202  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
3203}
3204
3205
3206static void CheckInputType(MacroAssembler* masm,
3207                           Register input,
3208                           CompareIC::State expected,
3209                           Label* fail) {
3210  Label ok;
3211  if (expected == CompareIC::SMI) {
3212    __ JumpIfNotSmi(input, fail);
3213  } else if (expected == CompareIC::NUMBER) {
3214    __ JumpIfSmi(input, &ok);
3215    __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
3216    __ j(not_equal, fail);
3217  }
3218  // We could be strict about internalized/non-internalized here, but as long as
3219  // hydrogen doesn't care, the stub doesn't have to care either.
3220  __ bind(&ok);
3221}
3222
3223
3224static void BranchIfNotInternalizedString(MacroAssembler* masm,
3225                                          Label* label,
3226                                          Register object,
3227                                          Register scratch) {
3228  __ JumpIfSmi(object, label);
3229  __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
3230  __ movzxbq(scratch,
3231             FieldOperand(scratch, Map::kInstanceTypeOffset));
3232  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3233  __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
3234  __ j(not_zero, label);
3235}
3236
3237
3238void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
3239  Label check_unequal_objects, done;
3240  Condition cc = GetCondition();
3241  Factory* factory = masm->isolate()->factory();
3242
3243  Label miss;
3244  CheckInputType(masm, rdx, left_, &miss);
3245  CheckInputType(masm, rax, right_, &miss);
3246
3247  // Compare two smis.
3248  Label non_smi, smi_done;
3249  __ JumpIfNotBothSmi(rax, rdx, &non_smi);
3250  __ subq(rdx, rax);
3251  __ j(no_overflow, &smi_done);
3252  __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
3253  __ bind(&smi_done);
3254  __ movq(rax, rdx);
3255  __ ret(0);
3256  __ bind(&non_smi);
3257
3258  // The compare stub returns a positive, negative, or zero 64-bit integer
3259  // value in rax, corresponding to result of comparing the two inputs.
3260  // NOTICE! This code is only reached after a smi-fast-case check, so
3261  // it is certain that at least one operand isn't a smi.
3262
3263  // Two identical objects are equal unless they are both NaN or undefined.
3264  {
3265    Label not_identical;
3266    __ cmpq(rax, rdx);
3267    __ j(not_equal, &not_identical, Label::kNear);
3268
3269    if (cc != equal) {
3270      // Check for undefined.  undefined OP undefined is false even though
3271      // undefined == undefined.
3272      Label check_for_nan;
3273      __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
3274      __ j(not_equal, &check_for_nan, Label::kNear);
3275      __ Set(rax, NegativeComparisonResult(cc));
3276      __ ret(0);
3277      __ bind(&check_for_nan);
3278    }
3279
3280    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
3281    // so we do the second best thing - test it ourselves.
3282    Label heap_number;
3283    // If it's not a heap number, then return equal for (in)equality operator.
3284    __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
3285           factory->heap_number_map());
3286    __ j(equal, &heap_number, Label::kNear);
3287    if (cc != equal) {
3288      // Call runtime on identical objects.  Otherwise return equal.
3289      __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
3290      __ j(above_equal, &not_identical, Label::kNear);
3291    }
3292    __ Set(rax, EQUAL);
3293    __ ret(0);
3294
3295    __ bind(&heap_number);
3296    // It is a heap number, so return  equal if it's not NaN.
3297    // For NaN, return 1 for every condition except greater and
3298    // greater-equal.  Return -1 for them, so the comparison yields
3299    // false for all conditions except not-equal.
3300    __ Set(rax, EQUAL);
3301    __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
3302    __ ucomisd(xmm0, xmm0);
3303    __ setcc(parity_even, rax);
3304    // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
3305    if (cc == greater_equal || cc == greater) {
3306      __ neg(rax);
3307    }
3308    __ ret(0);
3309
3310    __ bind(&not_identical);
3311  }
3312
3313  if (cc == equal) {  // Both strict and non-strict.
3314    Label slow;  // Fallthrough label.
3315
3316    // If we're doing a strict equality comparison, we don't have to do
3317    // type conversion, so we generate code to do fast comparison for objects
3318    // and oddballs. Non-smi numbers and strings still go through the usual
3319    // slow-case code.
3320    if (strict()) {
3321      // If either is a Smi (we know that not both are), then they can only
3322      // be equal if the other is a HeapNumber. If so, use the slow case.
3323      {
3324        Label not_smis;
3325        __ SelectNonSmi(rbx, rax, rdx, &not_smis);
3326
3327        // Check if the non-smi operand is a heap number.
3328        __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
3329               factory->heap_number_map());
3330        // If heap number, handle it in the slow case.
3331        __ j(equal, &slow);
3332        // Return non-equal.  ebx (the lower half of rbx) is not zero.
3333        __ movq(rax, rbx);
3334        __ ret(0);
3335
3336        __ bind(&not_smis);
3337      }
3338
3339      // If either operand is a JSObject or an oddball value, then they are not
3340      // equal since their pointers are different
3341      // There is no test for undetectability in strict equality.
3342
3343      // If the first object is a JS object, we have done pointer comparison.
3344      STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
3345      Label first_non_object;
3346      __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
3347      __ j(below, &first_non_object, Label::kNear);
3348      // Return non-zero (rax (not rax) is not zero)
3349      Label return_not_equal;
3350      STATIC_ASSERT(kHeapObjectTag != 0);
3351      __ bind(&return_not_equal);
3352      __ ret(0);
3353
3354      __ bind(&first_non_object);
3355      // Check for oddballs: true, false, null, undefined.
3356      __ CmpInstanceType(rcx, ODDBALL_TYPE);
3357      __ j(equal, &return_not_equal);
3358
3359      __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
3360      __ j(above_equal, &return_not_equal);
3361
3362      // Check for oddballs: true, false, null, undefined.
3363      __ CmpInstanceType(rcx, ODDBALL_TYPE);
3364      __ j(equal, &return_not_equal);
3365
3366      // Fall through to the general case.
3367    }
3368    __ bind(&slow);
3369  }
3370
3371  // Generate the number comparison code.
3372  Label non_number_comparison;
3373  Label unordered;
3374  FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
3375  __ xorl(rax, rax);
3376  __ xorl(rcx, rcx);
3377  __ ucomisd(xmm0, xmm1);
3378
3379  // Don't base result on EFLAGS when a NaN is involved.
3380  __ j(parity_even, &unordered, Label::kNear);
3381  // Return a result of -1, 0, or 1, based on EFLAGS.
3382  __ setcc(above, rax);
3383  __ setcc(below, rcx);
3384  __ subq(rax, rcx);
3385  __ ret(0);
3386
3387  // If one of the numbers was NaN, then the result is always false.
3388  // The cc is never not-equal.
3389  __ bind(&unordered);
3390  ASSERT(cc != not_equal);
3391  if (cc == less || cc == less_equal) {
3392    __ Set(rax, 1);
3393  } else {
3394    __ Set(rax, -1);
3395  }
3396  __ ret(0);
3397
3398  // The number comparison code did not provide a valid result.
3399  __ bind(&non_number_comparison);
3400
3401  // Fast negative check for internalized-to-internalized equality.
3402  Label check_for_strings;
3403  if (cc == equal) {
3404    BranchIfNotInternalizedString(
3405        masm, &check_for_strings, rax, kScratchRegister);
3406    BranchIfNotInternalizedString(
3407        masm, &check_for_strings, rdx, kScratchRegister);
3408
3409    // We've already checked for object identity, so if both operands are
3410    // internalized strings they aren't equal. Register rax (not rax) already
3411    // holds a non-zero value, which indicates not equal, so just return.
3412    __ ret(0);
3413  }
3414
3415  __ bind(&check_for_strings);
3416
3417  __ JumpIfNotBothSequentialAsciiStrings(
3418      rdx, rax, rcx, rbx, &check_unequal_objects);
3419
3420  // Inline comparison of ASCII strings.
3421  if (cc == equal) {
3422    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
3423                                                     rdx,
3424                                                     rax,
3425                                                     rcx,
3426                                                     rbx);
3427  } else {
3428    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
3429                                                       rdx,
3430                                                       rax,
3431                                                       rcx,
3432                                                       rbx,
3433                                                       rdi,
3434                                                       r8);
3435  }
3436
3437#ifdef DEBUG
3438  __ Abort(kUnexpectedFallThroughFromStringComparison);
3439#endif
3440
3441  __ bind(&check_unequal_objects);
3442  if (cc == equal && !strict()) {
3443    // Not strict equality.  Objects are unequal if
3444    // they are both JSObjects and not undetectable,
3445    // and their pointers are different.
3446    Label not_both_objects, return_unequal;
3447    // At most one is a smi, so we can test for smi by adding the two.
3448    // A smi plus a heap object has the low bit set, a heap object plus
3449    // a heap object has the low bit clear.
3450    STATIC_ASSERT(kSmiTag == 0);
3451    STATIC_ASSERT(kSmiTagMask == 1);
3452    __ lea(rcx, Operand(rax, rdx, times_1, 0));
3453    __ testb(rcx, Immediate(kSmiTagMask));
3454    __ j(not_zero, &not_both_objects, Label::kNear);
3455    __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
3456    __ j(below, &not_both_objects, Label::kNear);
3457    __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
3458    __ j(below, &not_both_objects, Label::kNear);
3459    __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
3460             Immediate(1 << Map::kIsUndetectable));
3461    __ j(zero, &return_unequal, Label::kNear);
3462    __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
3463             Immediate(1 << Map::kIsUndetectable));
3464    __ j(zero, &return_unequal, Label::kNear);
3465    // The objects are both undetectable, so they both compare as the value
3466    // undefined, and are equal.
3467    __ Set(rax, EQUAL);
3468    __ bind(&return_unequal);
3469    // Return non-equal by returning the non-zero object pointer in rax,
3470    // or return equal if we fell through to here.
3471    __ ret(0);
3472    __ bind(&not_both_objects);
3473  }
3474
3475  // Push arguments below the return address to prepare jump to builtin.
3476  __ PopReturnAddressTo(rcx);
3477  __ push(rdx);
3478  __ push(rax);
3479
3480  // Figure out which native to call and setup the arguments.
3481  Builtins::JavaScript builtin;
3482  if (cc == equal) {
3483    builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
3484  } else {
3485    builtin = Builtins::COMPARE;
3486    __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
3487  }
3488
3489  __ PushReturnAddressFrom(rcx);
3490
3491  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
3492  // tagged as a small integer.
3493  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
3494
3495  __ bind(&miss);
3496  GenerateMiss(masm);
3497}
3498
3499
3500void StackCheckStub::Generate(MacroAssembler* masm) {
3501  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3502}
3503
3504
3505void InterruptStub::Generate(MacroAssembler* masm) {
3506  __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3507}
3508
3509
3510static void GenerateRecordCallTarget(MacroAssembler* masm) {
3511  // Cache the called function in a global property cell.  Cache states
3512  // are uninitialized, monomorphic (indicated by a JSFunction), and
3513  // megamorphic.
3514  // rbx : cache cell for call target
3515  // rdi : the function to call
3516  Isolate* isolate = masm->isolate();
3517  Label initialize, done, miss, megamorphic, not_array_function;
3518
3519  // Load the cache state into rcx.
3520  __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
3521
3522  // A monomorphic cache hit or an already megamorphic state: invoke the
3523  // function without changing the state.
3524  __ cmpq(rcx, rdi);
3525  __ j(equal, &done);
3526  __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
3527  __ j(equal, &done);
3528
3529  // If we came here, we need to see if we are the array function.
3530  // If we didn't have a matching function, and we didn't find the megamorph
3531  // sentinel, then we have in the cell either some other function or an
3532  // AllocationSite. Do a map check on the object in rcx.
3533  Handle<Map> allocation_site_map(
3534      masm->isolate()->heap()->allocation_site_map(),
3535      masm->isolate());
3536  __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
3537  __ j(not_equal, &miss);
3538
3539  // Make sure the function is the Array() function
3540  __ LoadArrayFunction(rcx);
3541  __ cmpq(rdi, rcx);
3542  __ j(not_equal, &megamorphic);
3543  __ jmp(&done);
3544
3545  __ bind(&miss);
3546
3547  // A monomorphic miss (i.e, here the cache is not uninitialized) goes
3548  // megamorphic.
3549  __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
3550  __ j(equal, &initialize);
3551  // MegamorphicSentinel is an immortal immovable object (undefined) so no
3552  // write-barrier is needed.
3553  __ bind(&megamorphic);
3554  __ Move(FieldOperand(rbx, Cell::kValueOffset),
3555          TypeFeedbackCells::MegamorphicSentinel(isolate));
3556  __ jmp(&done);
3557
3558  // An uninitialized cache is patched with the function or sentinel to
3559  // indicate the ElementsKind if function is the Array constructor.
3560  __ bind(&initialize);
3561  // Make sure the function is the Array() function
3562  __ LoadArrayFunction(rcx);
3563  __ cmpq(rdi, rcx);
3564  __ j(not_equal, &not_array_function);
3565
3566  // The target function is the Array constructor,
3567  // Create an AllocationSite if we don't already have it, store it in the cell
3568  {
3569    FrameScope scope(masm, StackFrame::INTERNAL);
3570
3571    __ Integer32ToSmi(rax, rax);
3572    __ push(rax);
3573    __ push(rdi);
3574    __ push(rbx);
3575
3576    CreateAllocationSiteStub create_stub;
3577    __ CallStub(&create_stub);
3578
3579    __ pop(rbx);
3580    __ pop(rdi);
3581    __ pop(rax);
3582    __ SmiToInteger32(rax, rax);
3583  }
3584  __ jmp(&done);
3585
3586  __ bind(&not_array_function);
3587  __ movq(FieldOperand(rbx, Cell::kValueOffset), rdi);
3588  // No need for a write barrier here - cells are rescanned.
3589
3590  __ bind(&done);
3591}
3592
3593
3594void CallFunctionStub::Generate(MacroAssembler* masm) {
3595  // rbx : cache cell for call target
3596  // rdi : the function to call
3597  Isolate* isolate = masm->isolate();
3598  Label slow, non_function;
3599
3600  // The receiver might implicitly be the global object. This is
3601  // indicated by passing the hole as the receiver to the call
3602  // function stub.
3603  if (ReceiverMightBeImplicit()) {
3604    Label call;
3605    // Get the receiver from the stack.
3606    // +1 ~ return address
3607    __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
3608    // Call as function is indicated with the hole.
3609    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3610    __ j(not_equal, &call, Label::kNear);
3611    // Patch the receiver on the stack with the global receiver object.
3612    __ movq(rcx, GlobalObjectOperand());
3613    __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
3614    __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
3615    __ bind(&call);
3616  }
3617
3618  // Check that the function really is a JavaScript function.
3619  __ JumpIfSmi(rdi, &non_function);
3620  // Goto slow case if we do not have a function.
3621  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3622  __ j(not_equal, &slow);
3623
3624  if (RecordCallTarget()) {
3625    GenerateRecordCallTarget(masm);
3626  }
3627
3628  // Fast-case: Just invoke the function.
3629  ParameterCount actual(argc_);
3630
3631  if (ReceiverMightBeImplicit()) {
3632    Label call_as_function;
3633    __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3634    __ j(equal, &call_as_function);
3635    __ InvokeFunction(rdi,
3636                      actual,
3637                      JUMP_FUNCTION,
3638                      NullCallWrapper(),
3639                      CALL_AS_METHOD);
3640    __ bind(&call_as_function);
3641  }
3642  __ InvokeFunction(rdi,
3643                    actual,
3644                    JUMP_FUNCTION,
3645                    NullCallWrapper(),
3646                    CALL_AS_FUNCTION);
3647
3648  // Slow-case: Non-function called.
3649  __ bind(&slow);
3650  if (RecordCallTarget()) {
3651    // If there is a call target cache, mark it megamorphic in the
3652    // non-function case.  MegamorphicSentinel is an immortal immovable
3653    // object (undefined) so no write barrier is needed.
3654    __ Move(FieldOperand(rbx, Cell::kValueOffset),
3655            TypeFeedbackCells::MegamorphicSentinel(isolate));
3656  }
3657  // Check for function proxy.
3658  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3659  __ j(not_equal, &non_function);
3660  __ PopReturnAddressTo(rcx);
3661  __ push(rdi);  // put proxy as additional argument under return address
3662  __ PushReturnAddressFrom(rcx);
3663  __ Set(rax, argc_ + 1);
3664  __ Set(rbx, 0);
3665  __ SetCallKind(rcx, CALL_AS_METHOD);
3666  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
3667  {
3668    Handle<Code> adaptor =
3669      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3670    __ jmp(adaptor, RelocInfo::CODE_TARGET);
3671  }
3672
3673  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3674  // of the original receiver from the call site).
3675  __ bind(&non_function);
3676  __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
3677  __ Set(rax, argc_);
3678  __ Set(rbx, 0);
3679  __ SetCallKind(rcx, CALL_AS_METHOD);
3680  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
3681  Handle<Code> adaptor =
3682      Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
3683  __ Jump(adaptor, RelocInfo::CODE_TARGET);
3684}
3685
3686
3687void CallConstructStub::Generate(MacroAssembler* masm) {
3688  // rax : number of arguments
3689  // rbx : cache cell for call target
3690  // rdi : constructor function
3691  Label slow, non_function_call;
3692
3693  // Check that function is not a smi.
3694  __ JumpIfSmi(rdi, &non_function_call);
3695  // Check that function is a JSFunction.
3696  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3697  __ j(not_equal, &slow);
3698
3699  if (RecordCallTarget()) {
3700    GenerateRecordCallTarget(masm);
3701  }
3702
3703  // Jump to the function-specific construct stub.
3704  Register jmp_reg = rcx;
3705  __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
3706  __ movq(jmp_reg, FieldOperand(jmp_reg,
3707                                SharedFunctionInfo::kConstructStubOffset));
3708  __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
3709  __ jmp(jmp_reg);
3710
3711  // rdi: called object
3712  // rax: number of arguments
3713  // rcx: object map
3714  Label do_call;
3715  __ bind(&slow);
3716  __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3717  __ j(not_equal, &non_function_call);
3718  __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3719  __ jmp(&do_call);
3720
3721  __ bind(&non_function_call);
3722  __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3723  __ bind(&do_call);
3724  // Set expected number of arguments to zero (not changing rax).
3725  __ Set(rbx, 0);
3726  __ SetCallKind(rcx, CALL_AS_METHOD);
3727  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3728          RelocInfo::CODE_TARGET);
3729}
3730
3731
3732bool CEntryStub::NeedsImmovableCode() {
3733  return false;
3734}
3735
3736
3737bool CEntryStub::IsPregenerated() {
3738#ifdef _WIN64
3739  return result_size_ == 1;
3740#else
3741  return true;
3742#endif
3743}
3744
3745
3746void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
3747  CEntryStub::GenerateAheadOfTime(isolate);
3748  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
3749  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
3750  // It is important that the store buffer overflow stubs are generated first.
3751  RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
3752  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
3753  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
3754}
3755
3756
3757void CodeStub::GenerateFPStubs(Isolate* isolate) {
3758}
3759
3760
3761void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
3762  CEntryStub stub(1, kDontSaveFPRegs);
3763  stub.GetCode(isolate)->set_is_pregenerated(true);
3764  CEntryStub save_doubles(1, kSaveFPRegs);
3765  save_doubles.GetCode(isolate)->set_is_pregenerated(true);
3766}
3767
3768
3769static void JumpIfOOM(MacroAssembler* masm,
3770                      Register value,
3771                      Register scratch,
3772                      Label* oom_label) {
3773  __ movq(scratch, value);
3774  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
3775  STATIC_ASSERT(kFailureTag == 3);
3776  __ and_(scratch, Immediate(0xf));
3777  __ cmpq(scratch, Immediate(0xf));
3778  __ j(equal, oom_label);
3779}
3780
3781
3782void CEntryStub::GenerateCore(MacroAssembler* masm,
3783                              Label* throw_normal_exception,
3784                              Label* throw_termination_exception,
3785                              Label* throw_out_of_memory_exception,
3786                              bool do_gc,
3787                              bool always_allocate_scope) {
3788  // rax: result parameter for PerformGC, if any.
3789  // rbx: pointer to C function  (C callee-saved).
3790  // rbp: frame pointer  (restored after C call).
3791  // rsp: stack pointer  (restored after C call).
3792  // r14: number of arguments including receiver (C callee-saved).
3793  // r15: pointer to the first argument (C callee-saved).
3794  //      This pointer is reused in LeaveExitFrame(), so it is stored in a
3795  //      callee-saved register.
3796
3797  // Simple results returned in rax (both AMD64 and Win64 calling conventions).
3798  // Complex results must be written to address passed as first argument.
3799  // AMD64 calling convention: a struct of two pointers in rax+rdx
3800
3801  // Check stack alignment.
3802  if (FLAG_debug_code) {
3803    __ CheckStackAlignment();
3804  }
3805
3806  if (do_gc) {
3807    // Pass failure code returned from last attempt as first argument to
3808    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
3809    // stack is known to be aligned. This function takes one argument which is
3810    // passed in register.
3811    __ movq(arg_reg_1, rax);
3812    __ movq(kScratchRegister,
3813            ExternalReference::perform_gc_function(masm->isolate()));
3814    __ call(kScratchRegister);
3815  }
3816
3817  ExternalReference scope_depth =
3818      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3819  if (always_allocate_scope) {
3820    Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3821    __ incl(scope_depth_operand);
3822  }
3823
3824  // Call C function.
3825#ifdef _WIN64
3826  // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
3827  // Pass argv and argc as two parameters. The arguments object will
3828  // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
3829  if (result_size_ < 2) {
3830    // Pass a pointer to the Arguments object as the first argument.
3831    // Return result in single register (rax).
3832    __ movq(rcx, r14);  // argc.
3833    __ movq(rdx, r15);  // argv.
3834    __ movq(r8, ExternalReference::isolate_address(masm->isolate()));
3835  } else {
3836    ASSERT_EQ(2, result_size_);
3837    // Pass a pointer to the result location as the first argument.
3838    __ lea(rcx, StackSpaceOperand(2));
3839    // Pass a pointer to the Arguments object as the second argument.
3840    __ movq(rdx, r14);  // argc.
3841    __ movq(r8, r15);   // argv.
3842    __ movq(r9, ExternalReference::isolate_address(masm->isolate()));
3843  }
3844
3845#else  // _WIN64
3846  // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
3847  __ movq(rdi, r14);  // argc.
3848  __ movq(rsi, r15);  // argv.
3849  __ movq(rdx, ExternalReference::isolate_address(masm->isolate()));
3850#endif
3851  __ call(rbx);
3852  // Result is in rax - do not destroy this register!
3853
3854  if (always_allocate_scope) {
3855    Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3856    __ decl(scope_depth_operand);
3857  }
3858
3859  // Check for failure result.
3860  Label failure_returned;
3861  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3862#ifdef _WIN64
3863  // If return value is on the stack, pop it to registers.
3864  if (result_size_ > 1) {
3865    ASSERT_EQ(2, result_size_);
3866    // Read result values stored on stack. Result is stored
3867    // above the four argument mirror slots and the two
3868    // Arguments object slots.
3869    __ movq(rax, Operand(rsp, 6 * kPointerSize));
3870    __ movq(rdx, Operand(rsp, 7 * kPointerSize));
3871  }
3872#endif
3873  __ lea(rcx, Operand(rax, 1));
3874  // Lower 2 bits of rcx are 0 iff rax has failure tag.
3875  __ testl(rcx, Immediate(kFailureTagMask));
3876  __ j(zero, &failure_returned);
3877
3878  // Exit the JavaScript to C++ exit frame.
3879  __ LeaveExitFrame(save_doubles_);
3880  __ ret(0);
3881
3882  // Handling of failure.
3883  __ bind(&failure_returned);
3884
3885  Label retry;
3886  // If the returned exception is RETRY_AFTER_GC continue at retry label
3887  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3888  __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3889  __ j(zero, &retry, Label::kNear);
3890
3891  // Special handling of out of memory exceptions.
3892  JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
3893
3894  // Retrieve the pending exception.
3895  ExternalReference pending_exception_address(
3896      Isolate::kPendingExceptionAddress, masm->isolate());
3897  Operand pending_exception_operand =
3898      masm->ExternalOperand(pending_exception_address);
3899  __ movq(rax, pending_exception_operand);
3900
3901  // See if we just retrieved an OOM exception.
3902  JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
3903
3904  // Clear the pending exception.
3905  pending_exception_operand =
3906      masm->ExternalOperand(pending_exception_address);
3907  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
3908  __ movq(pending_exception_operand, rdx);
3909
3910  // Special handling of termination exceptions which are uncatchable
3911  // by javascript code.
3912  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
3913  __ j(equal, throw_termination_exception);
3914
3915  // Handle normal exception.
3916  __ jmp(throw_normal_exception);
3917
3918  // Retry.
3919  __ bind(&retry);
3920}
3921
3922
3923void CEntryStub::Generate(MacroAssembler* masm) {
3924  // rax: number of arguments including receiver
3925  // rbx: pointer to C function  (C callee-saved)
3926  // rbp: frame pointer of calling JS frame (restored after C call)
3927  // rsp: stack pointer  (restored after C call)
3928  // rsi: current context (restored)
3929
3930  // NOTE: Invocations of builtins may return failure objects
3931  // instead of a proper result. The builtin entry handles
3932  // this by performing a garbage collection and retrying the
3933  // builtin once.
3934
3935  ProfileEntryHookStub::MaybeCallEntryHook(masm);
3936
3937  // Enter the exit frame that transitions from JavaScript to C++.
3938#ifdef _WIN64
3939  int arg_stack_space = (result_size_ < 2 ? 2 : 4);
3940#else
3941  int arg_stack_space = 0;
3942#endif
3943  __ EnterExitFrame(arg_stack_space, save_doubles_);
3944
3945  // rax: Holds the context at this point, but should not be used.
3946  //      On entry to code generated by GenerateCore, it must hold
3947  //      a failure result if the collect_garbage argument to GenerateCore
3948  //      is true.  This failure result can be the result of code
3949  //      generated by a previous call to GenerateCore.  The value
3950  //      of rax is then passed to Runtime::PerformGC.
3951  // rbx: pointer to builtin function  (C callee-saved).
3952  // rbp: frame pointer of exit frame  (restored after C call).
3953  // rsp: stack pointer (restored after C call).
3954  // r14: number of arguments including receiver (C callee-saved).
3955  // r15: argv pointer (C callee-saved).
3956
3957  Label throw_normal_exception;
3958  Label throw_termination_exception;
3959  Label throw_out_of_memory_exception;
3960
3961  // Call into the runtime system.
3962  GenerateCore(masm,
3963               &throw_normal_exception,
3964               &throw_termination_exception,
3965               &throw_out_of_memory_exception,
3966               false,
3967               false);
3968
3969  // Do space-specific GC and retry runtime call.
3970  GenerateCore(masm,
3971               &throw_normal_exception,
3972               &throw_termination_exception,
3973               &throw_out_of_memory_exception,
3974               true,
3975               false);
3976
3977  // Do full GC and retry runtime call one final time.
3978  Failure* failure = Failure::InternalError();
3979  __ movq(rax, failure, RelocInfo::NONE64);
3980  GenerateCore(masm,
3981               &throw_normal_exception,
3982               &throw_termination_exception,
3983               &throw_out_of_memory_exception,
3984               true,
3985               true);
3986
3987  __ bind(&throw_out_of_memory_exception);
3988  // Set external caught exception to false.
3989  Isolate* isolate = masm->isolate();
3990  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
3991                                    isolate);
3992  __ Set(rax, static_cast<int64_t>(false));
3993  __ Store(external_caught, rax);
3994
3995  // Set pending exception and rax to out of memory exception.
3996  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
3997                                      isolate);
3998  Label already_have_failure;
3999  JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
4000  __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
4001  __ bind(&already_have_failure);
4002  __ Store(pending_exception, rax);
4003  // Fall through to the next label.
4004
4005  __ bind(&throw_termination_exception);
4006  __ ThrowUncatchable(rax);
4007
4008  __ bind(&throw_normal_exception);
4009  __ Throw(rax);
4010}
4011
4012
4013void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4014  Label invoke, handler_entry, exit;
4015  Label not_outermost_js, not_outermost_js_2;
4016
4017  ProfileEntryHookStub::MaybeCallEntryHook(masm);
4018
4019  {  // NOLINT. Scope block confuses linter.
4020    MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
4021    // Set up frame.
4022    __ push(rbp);
4023    __ movq(rbp, rsp);
4024
4025    // Push the stack frame type marker twice.
4026    int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4027    // Scratch register is neither callee-save, nor an argument register on any
4028    // platform. It's free to use at this point.
4029    // Cannot use smi-register for loading yet.
4030    __ movq(kScratchRegister,
4031            reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
4032            RelocInfo::NONE64);
4033    __ push(kScratchRegister);  // context slot
4034    __ push(kScratchRegister);  // function slot
4035    // Save callee-saved registers (X64/Win64 calling conventions).
4036    __ push(r12);
4037    __ push(r13);
4038    __ push(r14);
4039    __ push(r15);
4040#ifdef _WIN64
4041    __ push(rdi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
4042    __ push(rsi);  // Only callee save in Win64 ABI, argument in AMD64 ABI.
4043#endif
4044    __ push(rbx);
4045
4046#ifdef _WIN64
4047    // On Win64 XMM6-XMM15 are callee-save
4048    __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
4049    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
4050    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
4051    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
4052    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
4053    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
4054    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
4055    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
4056    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
4057    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
4058    __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
4059#endif
4060
4061    // Set up the roots and smi constant registers.
4062    // Needs to be done before any further smi loads.
4063    __ InitializeSmiConstantRegister();
4064    __ InitializeRootRegister();
4065  }
4066
4067  Isolate* isolate = masm->isolate();
4068
4069  // Save copies of the top frame descriptor on the stack.
4070  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
4071  {
4072    Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4073    __ push(c_entry_fp_operand);
4074  }
4075
4076  // If this is the outermost JS call, set js_entry_sp value.
4077  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4078  __ Load(rax, js_entry_sp);
4079  __ testq(rax, rax);
4080  __ j(not_zero, &not_outermost_js);
4081  __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4082  __ movq(rax, rbp);
4083  __ Store(js_entry_sp, rax);
4084  Label cont;
4085  __ jmp(&cont);
4086  __ bind(&not_outermost_js);
4087  __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
4088  __ bind(&cont);
4089
4090  // Jump to a faked try block that does the invoke, with a faked catch
4091  // block that sets the pending exception.
4092  __ jmp(&invoke);
4093  __ bind(&handler_entry);
4094  handler_offset_ = handler_entry.pos();
4095  // Caught exception: Store result (exception) in the pending exception
4096  // field in the JSEnv and return a failure sentinel.
4097  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4098                                      isolate);
4099  __ Store(pending_exception, rax);
4100  __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
4101  __ jmp(&exit);
4102
4103  // Invoke: Link this frame into the handler chain.  There's only one
4104  // handler block in this code object, so its index is 0.
4105  __ bind(&invoke);
4106  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4107
4108  // Clear any pending exceptions.
4109  __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
4110  __ Store(pending_exception, rax);
4111
4112  // Fake a receiver (NULL).
4113  __ push(Immediate(0));  // receiver
4114
4115  // Invoke the function by calling through JS entry trampoline builtin and
4116  // pop the faked function when we return. We load the address from an
4117  // external reference instead of inlining the call target address directly
4118  // in the code, because the builtin stubs may not have been generated yet
4119  // at the time this code is generated.
4120  if (is_construct) {
4121    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4122                                      isolate);
4123    __ Load(rax, construct_entry);
4124  } else {
4125    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
4126    __ Load(rax, entry);
4127  }
4128  __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
4129  __ call(kScratchRegister);
4130
4131  // Unlink this frame from the handler chain.
4132  __ PopTryHandler();
4133
4134  __ bind(&exit);
4135  // Check if the current stack frame is marked as the outermost JS frame.
4136  __ pop(rbx);
4137  __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4138  __ j(not_equal, &not_outermost_js_2);
4139  __ movq(kScratchRegister, js_entry_sp);
4140  __ movq(Operand(kScratchRegister, 0), Immediate(0));
4141  __ bind(&not_outermost_js_2);
4142
4143  // Restore the top frame descriptor from the stack.
4144  { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4145    __ pop(c_entry_fp_operand);
4146  }
4147
4148  // Restore callee-saved registers (X64 conventions).
4149#ifdef _WIN64
4150  // On Win64 XMM6-XMM15 are callee-save
4151  __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
4152  __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
4153  __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
4154  __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
4155  __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
4156  __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
4157  __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
4158  __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
4159  __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
4160  __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
4161  __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
4162#endif
4163
4164  __ pop(rbx);
4165#ifdef _WIN64
4166  // Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
4167  __ pop(rsi);
4168  __ pop(rdi);
4169#endif
4170  __ pop(r15);
4171  __ pop(r14);
4172  __ pop(r13);
4173  __ pop(r12);
4174  __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers
4175
4176  // Restore frame pointer and return.
4177  __ pop(rbp);
4178  __ ret(0);
4179}
4180
4181
4182void InstanceofStub::Generate(MacroAssembler* masm) {
4183  // Implements "value instanceof function" operator.
4184  // Expected input state with no inline cache:
4185  //   rsp[0]  : return address
4186  //   rsp[8]  : function pointer
4187  //   rsp[16] : value
4188  // Expected input state with an inline one-element cache:
4189  //   rsp[0]  : return address
4190  //   rsp[8]  : offset from return address to location of inline cache
4191  //   rsp[16] : function pointer
4192  //   rsp[24] : value
4193  // Returns a bitwise zero to indicate that the value
4194  // is and instance of the function and anything else to
4195  // indicate that the value is not an instance.
4196
4197  static const int kOffsetToMapCheckValue = 2;
4198  static const int kOffsetToResultValue = 18;
4199  // The last 4 bytes of the instruction sequence
4200  //   movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
4201  //   Move(kScratchRegister, Factory::the_hole_value())
4202  // in front of the hole value address.
4203  static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
4204  // The last 4 bytes of the instruction sequence
4205  //   __ j(not_equal, &cache_miss);
4206  //   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
4207  // before the offset of the hole value in the root array.
4208  static const unsigned int kWordBeforeResultValue = 0x458B4909;
4209  // Only the inline check flag is supported on X64.
4210  ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
4211  int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
4212
4213  // Get the object - go slow case if it's a smi.
4214  Label slow;
4215
4216  __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
4217  __ JumpIfSmi(rax, &slow);
4218
4219  // Check that the left hand is a JS object. Leave its map in rax.
4220  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
4221  __ j(below, &slow);
4222  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
4223  __ j(above, &slow);
4224
4225  // Get the prototype of the function.
4226  __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
4227  // rdx is function, rax is map.
4228
4229  // If there is a call site cache don't look in the global cache, but do the
4230  // real lookup and update the call site cache.
4231  if (!HasCallSiteInlineCheck()) {
4232    // Look up the function and the map in the instanceof cache.
4233    Label miss;
4234    __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4235    __ j(not_equal, &miss, Label::kNear);
4236    __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4237    __ j(not_equal, &miss, Label::kNear);
4238    __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4239    __ ret(2 * kPointerSize);
4240    __ bind(&miss);
4241  }
4242
4243  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
4244
4245  // Check that the function prototype is a JS object.
4246  __ JumpIfSmi(rbx, &slow);
4247  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
4248  __ j(below, &slow);
4249  __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
4250  __ j(above, &slow);
4251
4252  // Register mapping:
4253  //   rax is object map.
4254  //   rdx is function.
4255  //   rbx is function prototype.
4256  if (!HasCallSiteInlineCheck()) {
4257    __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4258    __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4259  } else {
4260    // Get return address and delta to inlined map check.
4261    __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4262    __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4263    if (FLAG_debug_code) {
4264      __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
4265      __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
4266      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
4267    }
4268    __ movq(kScratchRegister,
4269            Operand(kScratchRegister, kOffsetToMapCheckValue));
4270    __ movq(Operand(kScratchRegister, 0), rax);
4271  }
4272
4273  __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
4274
4275  // Loop through the prototype chain looking for the function prototype.
4276  Label loop, is_instance, is_not_instance;
4277  __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
4278  __ bind(&loop);
4279  __ cmpq(rcx, rbx);
4280  __ j(equal, &is_instance, Label::kNear);
4281  __ cmpq(rcx, kScratchRegister);
4282  // The code at is_not_instance assumes that kScratchRegister contains a
4283  // non-zero GCable value (the null object in this case).
4284  __ j(equal, &is_not_instance, Label::kNear);
4285  __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
4286  __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
4287  __ jmp(&loop);
4288
4289  __ bind(&is_instance);
4290  if (!HasCallSiteInlineCheck()) {
4291    __ xorl(rax, rax);
4292    // Store bitwise zero in the cache.  This is a Smi in GC terms.
4293    STATIC_ASSERT(kSmiTag == 0);
4294    __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4295  } else {
4296    // Store offset of true in the root array at the inline check site.
4297    int true_offset = 0x100 +
4298        (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4299    // Assert it is a 1-byte signed value.
4300    ASSERT(true_offset >= 0 && true_offset < 0x100);
4301    __ movl(rax, Immediate(true_offset));
4302    __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4303    __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4304    __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4305    if (FLAG_debug_code) {
4306      __ movl(rax, Immediate(kWordBeforeResultValue));
4307      __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4308      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
4309    }
4310    __ Set(rax, 0);
4311  }
4312  __ ret(2 * kPointerSize + extra_stack_space);
4313
4314  __ bind(&is_not_instance);
4315  if (!HasCallSiteInlineCheck()) {
4316    // We have to store a non-zero value in the cache.
4317    __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
4318  } else {
4319    // Store offset of false in the root array at the inline check site.
4320    int false_offset = 0x100 +
4321        (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4322    // Assert it is a 1-byte signed value.
4323    ASSERT(false_offset >= 0 && false_offset < 0x100);
4324    __ movl(rax, Immediate(false_offset));
4325    __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4326    __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4327    __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4328    if (FLAG_debug_code) {
4329      __ movl(rax, Immediate(kWordBeforeResultValue));
4330      __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4331      __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
4332    }
4333  }
4334  __ ret(2 * kPointerSize + extra_stack_space);
4335
4336  // Slow-case: Go through the JavaScript implementation.
4337  __ bind(&slow);
4338  if (HasCallSiteInlineCheck()) {
4339    // Remove extra value from the stack.
4340    __ PopReturnAddressTo(rcx);
4341    __ pop(rax);
4342    __ PushReturnAddressFrom(rcx);
4343  }
4344  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4345}
4346
4347
4348// Passing arguments in registers is not supported.
4349Register InstanceofStub::left() { return no_reg; }
4350
4351
4352Register InstanceofStub::right() { return no_reg; }
4353
4354
4355// -------------------------------------------------------------------------
4356// StringCharCodeAtGenerator
4357
4358void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
4359  Label flat_string;
4360  Label ascii_string;
4361  Label got_char_code;
4362  Label sliced_string;
4363
4364  // If the receiver is a smi trigger the non-string case.
4365  __ JumpIfSmi(object_, receiver_not_string_);
4366
4367  // Fetch the instance type of the receiver into result register.
4368  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4369  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4370  // If the receiver is not a string trigger the non-string case.
4371  __ testb(result_, Immediate(kIsNotStringMask));
4372  __ j(not_zero, receiver_not_string_);
4373
4374  // If the index is non-smi trigger the non-smi case.
4375  __ JumpIfNotSmi(index_, &index_not_smi_);
4376  __ bind(&got_smi_index_);
4377
4378  // Check for index out of range.
4379  __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
4380  __ j(above_equal, index_out_of_range_);
4381
4382  __ SmiToInteger32(index_, index_);
4383
4384  StringCharLoadGenerator::Generate(
4385      masm, object_, index_, result_, &call_runtime_);
4386
4387  __ Integer32ToSmi(result_, result_);
4388  __ bind(&exit_);
4389}
4390
4391
4392void StringCharCodeAtGenerator::GenerateSlow(
4393    MacroAssembler* masm,
4394    const RuntimeCallHelper& call_helper) {
4395  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
4396
4397  Factory* factory = masm->isolate()->factory();
4398  // Index is not a smi.
4399  __ bind(&index_not_smi_);
4400  // If index is a heap number, try converting it to an integer.
4401  __ CheckMap(index_,
4402              factory->heap_number_map(),
4403              index_not_number_,
4404              DONT_DO_SMI_CHECK);
4405  call_helper.BeforeCall(masm);
4406  __ push(object_);
4407  __ push(index_);  // Consumed by runtime conversion function.
4408  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
4409    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
4410  } else {
4411    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
4412    // NumberToSmi discards numbers that are not exact integers.
4413    __ CallRuntime(Runtime::kNumberToSmi, 1);
4414  }
4415  if (!index_.is(rax)) {
4416    // Save the conversion result before the pop instructions below
4417    // have a chance to overwrite it.
4418    __ movq(index_, rax);
4419  }
4420  __ pop(object_);
4421  // Reload the instance type.
4422  __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4423  __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4424  call_helper.AfterCall(masm);
4425  // If index is still not a smi, it must be out of range.
4426  __ JumpIfNotSmi(index_, index_out_of_range_);
4427  // Otherwise, return to the fast path.
4428  __ jmp(&got_smi_index_);
4429
4430  // Call runtime. We get here when the receiver is a string and the
4431  // index is a number, but the code of getting the actual character
4432  // is too complex (e.g., when the string needs to be flattened).
4433  __ bind(&call_runtime_);
4434  call_helper.BeforeCall(masm);
4435  __ push(object_);
4436  __ Integer32ToSmi(index_, index_);
4437  __ push(index_);
4438  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
4439  if (!result_.is(rax)) {
4440    __ movq(result_, rax);
4441  }
4442  call_helper.AfterCall(masm);
4443  __ jmp(&exit_);
4444
4445  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
4446}
4447
4448
4449// -------------------------------------------------------------------------
4450// StringCharFromCodeGenerator
4451
4452void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
4453  // Fast case of Heap::LookupSingleCharacterStringFromCode.
4454  __ JumpIfNotSmi(code_, &slow_case_);
4455  __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
4456  __ j(above, &slow_case_);
4457
4458  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4459  SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
4460  __ movq(result_, FieldOperand(result_, index.reg, index.scale,
4461                                FixedArray::kHeaderSize));
4462  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
4463  __ j(equal, &slow_case_);
4464  __ bind(&exit_);
4465}
4466
4467
4468void StringCharFromCodeGenerator::GenerateSlow(
4469    MacroAssembler* masm,
4470    const RuntimeCallHelper& call_helper) {
4471  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
4472
4473  __ bind(&slow_case_);
4474  call_helper.BeforeCall(masm);
4475  __ push(code_);
4476  __ CallRuntime(Runtime::kCharFromCode, 1);
4477  if (!result_.is(rax)) {
4478    __ movq(result_, rax);
4479  }
4480  call_helper.AfterCall(masm);
4481  __ jmp(&exit_);
4482
4483  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
4484}
4485
4486
4487void StringAddStub::Generate(MacroAssembler* masm) {
4488  Label call_runtime, call_builtin;
4489  Builtins::JavaScript builtin_id = Builtins::ADD;
4490
4491  // Load the two arguments.
4492  __ movq(rax, Operand(rsp, 2 * kPointerSize));  // First argument (left).
4493  __ movq(rdx, Operand(rsp, 1 * kPointerSize));  // Second argument (right).
4494
4495  // Make sure that both arguments are strings if not known in advance.
4496  // Otherwise, at least one of the arguments is definitely a string,
4497  // and we convert the one that is not known to be a string.
4498  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
4499    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
4500    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
4501    __ JumpIfSmi(rax, &call_runtime);
4502    __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
4503    __ j(above_equal, &call_runtime);
4504
4505    // First argument is a a string, test second.
4506    __ JumpIfSmi(rdx, &call_runtime);
4507    __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
4508    __ j(above_equal, &call_runtime);
4509  } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
4510    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
4511    GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
4512                            &call_builtin);
4513    builtin_id = Builtins::STRING_ADD_RIGHT;
4514  } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
4515    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
4516    GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
4517                            &call_builtin);
4518    builtin_id = Builtins::STRING_ADD_LEFT;
4519  }
4520
4521  // Both arguments are strings.
4522  // rax: first string
4523  // rdx: second string
4524  // Check if either of the strings are empty. In that case return the other.
4525  Label second_not_zero_length, both_not_zero_length;
4526  __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
4527  __ SmiTest(rcx);
4528  __ j(not_zero, &second_not_zero_length, Label::kNear);
4529  // Second string is empty, result is first string which is already in rax.
4530  Counters* counters = masm->isolate()->counters();
4531  __ IncrementCounter(counters->string_add_native(), 1);
4532  __ ret(2 * kPointerSize);
4533  __ bind(&second_not_zero_length);
4534  __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
4535  __ SmiTest(rbx);
4536  __ j(not_zero, &both_not_zero_length, Label::kNear);
4537  // First string is empty, result is second string which is in rdx.
4538  __ movq(rax, rdx);
4539  __ IncrementCounter(counters->string_add_native(), 1);
4540  __ ret(2 * kPointerSize);
4541
4542  // Both strings are non-empty.
4543  // rax: first string
4544  // rbx: length of first string
4545  // rcx: length of second string
4546  // rdx: second string
4547  // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
4548  // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
4549  Label string_add_flat_result, longer_than_two;
4550  __ bind(&both_not_zero_length);
4551
4552  // If arguments where known to be strings, maps are not loaded to r8 and r9
4553  // by the code above.
4554  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4555    __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
4556    __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
4557  }
4558  // Get the instance types of the two strings as they will be needed soon.
4559  __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
4560  __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
4561
4562  // Look at the length of the result of adding the two strings.
4563  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
4564  __ SmiAdd(rbx, rbx, rcx);
4565  // Use the string table when adding two one character strings, as it
4566  // helps later optimizations to return an internalized string here.
4567  __ SmiCompare(rbx, Smi::FromInt(2));
4568  __ j(not_equal, &longer_than_two);
4569
4570  // Check that both strings are non-external ASCII strings.
4571  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
4572                                                  &call_runtime);
4573
4574  // Get the two characters forming the sub string.
4575  __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
4576  __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
4577
4578  // Try to lookup two character string in string table. If it is not found
4579  // just allocate a new one.
4580  Label make_two_character_string, make_flat_ascii_string;
4581  StringHelper::GenerateTwoCharacterStringTableProbe(
4582      masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
4583  __ IncrementCounter(counters->string_add_native(), 1);
4584  __ ret(2 * kPointerSize);
4585
4586  __ bind(&make_two_character_string);
4587  __ Set(rdi, 2);
4588  __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
4589  // rbx - first byte: first character
4590  // rbx - second byte: *maybe* second character
4591  // Make sure that the second byte of rbx contains the second character.
4592  __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
4593  __ shll(rcx, Immediate(kBitsPerByte));
4594  __ orl(rbx, rcx);
4595  // Write both characters to the new string.
4596  __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
4597  __ IncrementCounter(counters->string_add_native(), 1);
4598  __ ret(2 * kPointerSize);
4599
4600  __ bind(&longer_than_two);
4601  // Check if resulting string will be flat.
4602  __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
4603  __ j(below, &string_add_flat_result);
4604  // Handle exceptionally long strings in the runtime system.
4605  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4606  __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
4607  __ j(above, &call_runtime);
4608
4609  // If result is not supposed to be flat, allocate a cons string object. If
4610  // both strings are ASCII the result is an ASCII cons string.
4611  // rax: first string
4612  // rbx: length of resulting flat string
4613  // rdx: second string
4614  // r8: instance type of first string
4615  // r9: instance type of second string
4616  Label non_ascii, allocated, ascii_data;
4617  __ movl(rcx, r8);
4618  __ and_(rcx, r9);
4619  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
4620  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4621  __ testl(rcx, Immediate(kStringEncodingMask));
4622  __ j(zero, &non_ascii);
4623  __ bind(&ascii_data);
4624  // Allocate an ASCII cons string.
4625  __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
4626  __ bind(&allocated);
4627  // Fill the fields of the cons string.
4628  __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
4629  __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
4630          Immediate(String::kEmptyHashField));
4631
4632  Label skip_write_barrier, after_writing;
4633  ExternalReference high_promotion_mode = ExternalReference::
4634      new_space_high_promotion_mode_active_address(masm->isolate());
4635  __ Load(rbx, high_promotion_mode);
4636  __ testb(rbx, Immediate(1));
4637  __ j(zero, &skip_write_barrier);
4638
4639  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
4640  __ RecordWriteField(rcx,
4641                      ConsString::kFirstOffset,
4642                      rax,
4643                      rbx,
4644                      kDontSaveFPRegs);
4645  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
4646  __ RecordWriteField(rcx,
4647                      ConsString::kSecondOffset,
4648                      rdx,
4649                      rbx,
4650                      kDontSaveFPRegs);
4651  __ jmp(&after_writing);
4652
4653  __ bind(&skip_write_barrier);
4654  __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
4655  __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
4656
4657  __ bind(&after_writing);
4658
4659  __ movq(rax, rcx);
4660  __ IncrementCounter(counters->string_add_native(), 1);
4661  __ ret(2 * kPointerSize);
4662  __ bind(&non_ascii);
4663  // At least one of the strings is two-byte. Check whether it happens
4664  // to contain only one byte characters.
4665  // rcx: first instance type AND second instance type.
4666  // r8: first instance type.
4667  // r9: second instance type.
4668  __ testb(rcx, Immediate(kOneByteDataHintMask));
4669  __ j(not_zero, &ascii_data);
4670  __ xor_(r8, r9);
4671  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
4672  __ andb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
4673  __ cmpb(r8, Immediate(kOneByteStringTag | kOneByteDataHintTag));
4674  __ j(equal, &ascii_data);
4675  // Allocate a two byte cons string.
4676  __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
4677  __ jmp(&allocated);
4678
4679  // We cannot encounter sliced strings or cons strings here since:
4680  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
4681  // Handle creating a flat result from either external or sequential strings.
4682  // Locate the first characters' locations.
4683  // rax: first string
4684  // rbx: length of resulting flat string as smi
4685  // rdx: second string
4686  // r8: instance type of first string
4687  // r9: instance type of first string
4688  Label first_prepared, second_prepared;
4689  Label first_is_sequential, second_is_sequential;
4690  __ bind(&string_add_flat_result);
4691
4692  __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
4693  // r14: length of first string
4694  STATIC_ASSERT(kSeqStringTag == 0);
4695  __ testb(r8, Immediate(kStringRepresentationMask));
4696  __ j(zero, &first_is_sequential, Label::kNear);
4697  // Rule out short external string and load string resource.
4698  STATIC_ASSERT(kShortExternalStringTag != 0);
4699  __ testb(r8, Immediate(kShortExternalStringMask));
4700  __ j(not_zero, &call_runtime);
4701  __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
4702  __ jmp(&first_prepared, Label::kNear);
4703  __ bind(&first_is_sequential);
4704  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4705  __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
4706  __ bind(&first_prepared);
4707
4708  // Check whether both strings have same encoding.
4709  __ xorl(r8, r9);
4710  __ testb(r8, Immediate(kStringEncodingMask));
4711  __ j(not_zero, &call_runtime);
4712
4713  __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
4714  // r15: length of second string
4715  STATIC_ASSERT(kSeqStringTag == 0);
4716  __ testb(r9, Immediate(kStringRepresentationMask));
4717  __ j(zero, &second_is_sequential, Label::kNear);
4718  // Rule out short external string and load string resource.
4719  STATIC_ASSERT(kShortExternalStringTag != 0);
4720  __ testb(r9, Immediate(kShortExternalStringMask));
4721  __ j(not_zero, &call_runtime);
4722  __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
4723  __ jmp(&second_prepared, Label::kNear);
4724  __ bind(&second_is_sequential);
4725  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4726  __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
4727  __ bind(&second_prepared);
4728
4729  Label non_ascii_string_add_flat_result;
4730  // r9: instance type of second string
4731  // First string and second string have the same encoding.
4732  STATIC_ASSERT(kTwoByteStringTag == 0);
4733  __ SmiToInteger32(rbx, rbx);
4734  __ testb(r9, Immediate(kStringEncodingMask));
4735  __ j(zero, &non_ascii_string_add_flat_result);
4736
4737  __ bind(&make_flat_ascii_string);
4738  // Both strings are ASCII strings. As they are short they are both flat.
4739  __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
4740  // rax: result string
4741  // Locate first character of result.
4742  __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
4743  // rcx: first char of first string
4744  // rbx: first character of result
4745  // r14: length of first string
4746  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
4747  // rbx: next character of result
4748  // rdx: first char of second string
4749  // r15: length of second string
4750  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
4751  __ IncrementCounter(counters->string_add_native(), 1);
4752  __ ret(2 * kPointerSize);
4753
4754  __ bind(&non_ascii_string_add_flat_result);
4755  // Both strings are ASCII strings. As they are short they are both flat.
4756  __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
4757  // rax: result string
4758  // Locate first character of result.
4759  __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
4760  // rcx: first char of first string
4761  // rbx: first character of result
4762  // r14: length of first string
4763  StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
4764  // rbx: next character of result
4765  // rdx: first char of second string
4766  // r15: length of second string
4767  StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
4768  __ IncrementCounter(counters->string_add_native(), 1);
4769  __ ret(2 * kPointerSize);
4770
4771  // Just jump to runtime to add the two strings.
4772  __ bind(&call_runtime);
4773
4774  if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
4775    GenerateRegisterArgsPop(masm, rcx);
4776    // Build a frame
4777    {
4778      FrameScope scope(masm, StackFrame::INTERNAL);
4779      GenerateRegisterArgsPush(masm);
4780      __ CallRuntime(Runtime::kStringAdd, 2);
4781    }
4782    __ Ret();
4783  } else {
4784    __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4785  }
4786
4787  if (call_builtin.is_linked()) {
4788    __ bind(&call_builtin);
4789    if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
4790      GenerateRegisterArgsPop(masm, rcx);
4791      // Build a frame
4792      {
4793        FrameScope scope(masm, StackFrame::INTERNAL);
4794        GenerateRegisterArgsPush(masm);
4795        __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
4796      }
4797      __ Ret();
4798    } else {
4799      __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4800    }
4801  }
4802}
4803
4804
4805void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
4806  __ push(rax);
4807  __ push(rdx);
4808}
4809
4810
4811void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm,
4812                                            Register temp) {
4813  __ PopReturnAddressTo(temp);
4814  __ pop(rdx);
4815  __ pop(rax);
4816  __ PushReturnAddressFrom(temp);
4817}
4818
4819
4820void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4821                                            int stack_offset,
4822                                            Register arg,
4823                                            Register scratch1,
4824                                            Register scratch2,
4825                                            Register scratch3,
4826                                            Label* slow) {
4827  // First check if the argument is already a string.
4828  Label not_string, done;
4829  __ JumpIfSmi(arg, &not_string);
4830  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
4831  __ j(below, &done);
4832
4833  // Check the number to string cache.
4834  Label not_cached;
4835  __ bind(&not_string);
4836  // Puts the cached result into scratch1.
4837  NumberToStringStub::GenerateLookupNumberStringCache(masm,
4838                                                      arg,
4839                                                      scratch1,
4840                                                      scratch2,
4841                                                      scratch3,
4842                                                      &not_cached);
4843  __ movq(arg, scratch1);
4844  __ movq(Operand(rsp, stack_offset), arg);
4845  __ jmp(&done);
4846
4847  // Check if the argument is a safe string wrapper.
4848  __ bind(&not_cached);
4849  __ JumpIfSmi(arg, slow);
4850  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
4851  __ j(not_equal, slow);
4852  __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
4853           Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
4854  __ j(zero, slow);
4855  __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
4856  __ movq(Operand(rsp, stack_offset), arg);
4857
4858  __ bind(&done);
4859}
4860
4861
4862void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
4863                                          Register dest,
4864                                          Register src,
4865                                          Register count,
4866                                          bool ascii) {
4867  Label loop;
4868  __ bind(&loop);
4869  // This loop just copies one character at a time, as it is only used for very
4870  // short strings.
4871  if (ascii) {
4872    __ movb(kScratchRegister, Operand(src, 0));
4873    __ movb(Operand(dest, 0), kScratchRegister);
4874    __ incq(src);
4875    __ incq(dest);
4876  } else {
4877    __ movzxwl(kScratchRegister, Operand(src, 0));
4878    __ movw(Operand(dest, 0), kScratchRegister);
4879    __ addq(src, Immediate(2));
4880    __ addq(dest, Immediate(2));
4881  }
4882  __ decl(count);
4883  __ j(not_zero, &loop);
4884}
4885
4886
4887void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
4888                                             Register dest,
4889                                             Register src,
4890                                             Register count,
4891                                             bool ascii) {
4892  // Copy characters using rep movs of doublewords. Align destination on 4 byte
4893  // boundary before starting rep movs. Copy remaining characters after running
4894  // rep movs.
4895  // Count is positive int32, dest and src are character pointers.
4896  ASSERT(dest.is(rdi));  // rep movs destination
4897  ASSERT(src.is(rsi));  // rep movs source
4898  ASSERT(count.is(rcx));  // rep movs count
4899
4900  // Nothing to do for zero characters.
4901  Label done;
4902  __ testl(count, count);
4903  __ j(zero, &done, Label::kNear);
4904
4905  // Make count the number of bytes to copy.
4906  if (!ascii) {
4907    STATIC_ASSERT(2 == sizeof(uc16));
4908    __ addl(count, count);
4909  }
4910
4911  // Don't enter the rep movs if there are less than 4 bytes to copy.
4912  Label last_bytes;
4913  __ testl(count, Immediate(~(kPointerSize - 1)));
4914  __ j(zero, &last_bytes, Label::kNear);
4915
4916  // Copy from edi to esi using rep movs instruction.
4917  __ movl(kScratchRegister, count);
4918  __ shr(count, Immediate(kPointerSizeLog2));  // Number of doublewords to copy.
4919  __ repmovsq();
4920
4921  // Find number of bytes left.
4922  __ movl(count, kScratchRegister);
4923  __ and_(count, Immediate(kPointerSize - 1));
4924
4925  // Check if there are more bytes to copy.
4926  __ bind(&last_bytes);
4927  __ testl(count, count);
4928  __ j(zero, &done, Label::kNear);
4929
4930  // Copy remaining characters.
4931  Label loop;
4932  __ bind(&loop);
4933  __ movb(kScratchRegister, Operand(src, 0));
4934  __ movb(Operand(dest, 0), kScratchRegister);
4935  __ incq(src);
4936  __ incq(dest);
4937  __ decl(count);
4938  __ j(not_zero, &loop);
4939
4940  __ bind(&done);
4941}
4942
4943void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
4944                                                        Register c1,
4945                                                        Register c2,
4946                                                        Register scratch1,
4947                                                        Register scratch2,
4948                                                        Register scratch3,
4949                                                        Register scratch4,
4950                                                        Label* not_found) {
4951  // Register scratch3 is the general scratch register in this function.
4952  Register scratch = scratch3;
4953
4954  // Make sure that both characters are not digits as such strings has a
4955  // different hash algorithm. Don't try to look for these in the string table.
4956  Label not_array_index;
4957  __ leal(scratch, Operand(c1, -'0'));
4958  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4959  __ j(above, &not_array_index, Label::kNear);
4960  __ leal(scratch, Operand(c2, -'0'));
4961  __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4962  __ j(below_equal, not_found);
4963
4964  __ bind(&not_array_index);
4965  // Calculate the two character string hash.
4966  Register hash = scratch1;
4967  GenerateHashInit(masm, hash, c1, scratch);
4968  GenerateHashAddCharacter(masm, hash, c2, scratch);
4969  GenerateHashGetHash(masm, hash, scratch);
4970
4971  // Collect the two characters in a register.
4972  Register chars = c1;
4973  __ shl(c2, Immediate(kBitsPerByte));
4974  __ orl(chars, c2);
4975
4976  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
4977  // hash:  hash of two character string.
4978
4979  // Load the string table.
4980  Register string_table = c2;
4981  __ LoadRoot(string_table, Heap::kStringTableRootIndex);
4982
4983  // Calculate capacity mask from the string table capacity.
4984  Register mask = scratch2;
4985  __ SmiToInteger32(mask,
4986                    FieldOperand(string_table, StringTable::kCapacityOffset));
4987  __ decl(mask);
4988
4989  Register map = scratch4;
4990
4991  // Registers
4992  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
4993  // hash:         hash of two character string (32-bit int)
4994  // string_table: string table
4995  // mask:         capacity mask (32-bit int)
4996  // map:          -
4997  // scratch:      -
4998
4999  // Perform a number of probes in the string table.
5000  static const int kProbes = 4;
5001  Label found_in_string_table;
5002  Label next_probe[kProbes];
5003  Register candidate = scratch;  // Scratch register contains candidate.
5004  for (int i = 0; i < kProbes; i++) {
5005    // Calculate entry in string table.
5006    __ movl(scratch, hash);
5007    if (i > 0) {
5008      __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
5009    }
5010    __ andl(scratch, mask);
5011
5012    // Load the entry from the string table.
5013    STATIC_ASSERT(StringTable::kEntrySize == 1);
5014    __ movq(candidate,
5015            FieldOperand(string_table,
5016                         scratch,
5017                         times_pointer_size,
5018                         StringTable::kElementsStartOffset));
5019
5020    // If entry is undefined no string with this hash can be found.
5021    Label is_string;
5022    __ CmpObjectType(candidate, ODDBALL_TYPE, map);
5023    __ j(not_equal, &is_string, Label::kNear);
5024
5025    __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
5026    __ j(equal, not_found);
5027    // Must be the hole (deleted entry).
5028    if (FLAG_debug_code) {
5029      __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
5030      __ cmpq(kScratchRegister, candidate);
5031      __ Assert(equal, kOddballInStringTableIsNotUndefinedOrTheHole);
5032    }
5033    __ jmp(&next_probe[i]);
5034
5035    __ bind(&is_string);
5036
5037    // If length is not 2 the string is not a candidate.
5038    __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
5039                  Smi::FromInt(2));
5040    __ j(not_equal, &next_probe[i]);
5041
5042    // We use kScratchRegister as a temporary register in assumption that
5043    // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly
5044    Register temp = kScratchRegister;
5045
5046    // Check that the candidate is a non-external ASCII string.
5047    __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
5048    __ JumpIfInstanceTypeIsNotSequentialAscii(
5049        temp, temp, &next_probe[i]);
5050
5051    // Check if the two characters match.
5052    __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
5053    __ andl(temp, Immediate(0x0000ffff));
5054    __ cmpl(chars, temp);
5055    __ j(equal, &found_in_string_table);
5056    __ bind(&next_probe[i]);
5057  }
5058
5059  // No matching 2 character string found by probing.
5060  __ jmp(not_found);
5061
5062  // Scratch register contains result when we fall through to here.
5063  Register result = candidate;
5064  __ bind(&found_in_string_table);
5065  if (!result.is(rax)) {
5066    __ movq(rax, result);
5067  }
5068}
5069
5070
5071void StringHelper::GenerateHashInit(MacroAssembler* masm,
5072                                    Register hash,
5073                                    Register character,
5074                                    Register scratch) {
5075  // hash = (seed + character) + ((seed + character) << 10);
5076  __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
5077  __ SmiToInteger32(scratch, scratch);
5078  __ addl(scratch, character);
5079  __ movl(hash, scratch);
5080  __ shll(scratch, Immediate(10));
5081  __ addl(hash, scratch);
5082  // hash ^= hash >> 6;
5083  __ movl(scratch, hash);
5084  __ shrl(scratch, Immediate(6));
5085  __ xorl(hash, scratch);
5086}
5087
5088
5089void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5090                                            Register hash,
5091                                            Register character,
5092                                            Register scratch) {
5093  // hash += character;
5094  __ addl(hash, character);
5095  // hash += hash << 10;
5096  __ movl(scratch, hash);
5097  __ shll(scratch, Immediate(10));
5098  __ addl(hash, scratch);
5099  // hash ^= hash >> 6;
5100  __ movl(scratch, hash);
5101  __ shrl(scratch, Immediate(6));
5102  __ xorl(hash, scratch);
5103}
5104
5105
5106void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5107                                       Register hash,
5108                                       Register scratch) {
5109  // hash += hash << 3;
5110  __ leal(hash, Operand(hash, hash, times_8, 0));
5111  // hash ^= hash >> 11;
5112  __ movl(scratch, hash);
5113  __ shrl(scratch, Immediate(11));
5114  __ xorl(hash, scratch);
5115  // hash += hash << 15;
5116  __ movl(scratch, hash);
5117  __ shll(scratch, Immediate(15));
5118  __ addl(hash, scratch);
5119
5120  __ andl(hash, Immediate(String::kHashBitMask));
5121
5122  // if (hash == 0) hash = 27;
5123  Label hash_not_zero;
5124  __ j(not_zero, &hash_not_zero);
5125  __ Set(hash, StringHasher::kZeroHash);
5126  __ bind(&hash_not_zero);
5127}
5128
5129
5130void SubStringStub::Generate(MacroAssembler* masm) {
5131  Label runtime;
5132
5133  // Stack frame on entry.
5134  //  rsp[0]  : return address
5135  //  rsp[8]  : to
5136  //  rsp[16] : from
5137  //  rsp[24] : string
5138
5139  const int kToOffset = 1 * kPointerSize;
5140  const int kFromOffset = kToOffset + kPointerSize;
5141  const int kStringOffset = kFromOffset + kPointerSize;
5142  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
5143
5144  // Make sure first argument is a string.
5145  __ movq(rax, Operand(rsp, kStringOffset));
5146  STATIC_ASSERT(kSmiTag == 0);
5147  __ testl(rax, Immediate(kSmiTagMask));
5148  __ j(zero, &runtime);
5149  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
5150  __ j(NegateCondition(is_string), &runtime);
5151
5152  // rax: string
5153  // rbx: instance type
5154  // Calculate length of sub string using the smi values.
5155  __ movq(rcx, Operand(rsp, kToOffset));
5156  __ movq(rdx, Operand(rsp, kFromOffset));
5157  __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
5158
5159  __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
5160  __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
5161  Label not_original_string;
5162  // Shorter than original string's length: an actual substring.
5163  __ j(below, &not_original_string, Label::kNear);
5164  // Longer than original string's length or negative: unsafe arguments.
5165  __ j(above, &runtime);
5166  // Return original string.
5167  Counters* counters = masm->isolate()->counters();
5168  __ IncrementCounter(counters->sub_string_native(), 1);
5169  __ ret(kArgumentsSize);
5170  __ bind(&not_original_string);
5171
5172  Label single_char;
5173  __ SmiCompare(rcx, Smi::FromInt(1));
5174  __ j(equal, &single_char);
5175
5176  __ SmiToInteger32(rcx, rcx);
5177
5178  // rax: string
5179  // rbx: instance type
5180  // rcx: sub string length
5181  // rdx: from index (smi)
5182  // Deal with different string types: update the index if necessary
5183  // and put the underlying string into edi.
5184  Label underlying_unpacked, sliced_string, seq_or_external_string;
5185  // If the string is not indirect, it can only be sequential or external.
5186  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
5187  STATIC_ASSERT(kIsIndirectStringMask != 0);
5188  __ testb(rbx, Immediate(kIsIndirectStringMask));
5189  __ j(zero, &seq_or_external_string, Label::kNear);
5190
5191  __ testb(rbx, Immediate(kSlicedNotConsMask));
5192  __ j(not_zero, &sliced_string, Label::kNear);
5193  // Cons string.  Check whether it is flat, then fetch first part.
5194  // Flat cons strings have an empty second part.
5195  __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
5196                 Heap::kempty_stringRootIndex);
5197  __ j(not_equal, &runtime);
5198  __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
5199  // Update instance type.
5200  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5201  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5202  __ jmp(&underlying_unpacked, Label::kNear);
5203
5204  __ bind(&sliced_string);
5205  // Sliced string.  Fetch parent and correct start index by offset.
5206  __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
5207  __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
5208  // Update instance type.
5209  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5210  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5211  __ jmp(&underlying_unpacked, Label::kNear);
5212
5213  __ bind(&seq_or_external_string);
5214  // Sequential or external string.  Just move string to the correct register.
5215  __ movq(rdi, rax);
5216
5217  __ bind(&underlying_unpacked);
5218
5219  if (FLAG_string_slices) {
5220    Label copy_routine;
5221    // rdi: underlying subject string
5222    // rbx: instance type of underlying subject string
5223    // rdx: adjusted start index (smi)
5224    // rcx: length
5225    // If coming from the make_two_character_string path, the string
5226    // is too short to be sliced anyways.
5227    __ cmpq(rcx, Immediate(SlicedString::kMinLength));
5228    // Short slice.  Copy instead of slicing.
5229    __ j(less, &copy_routine);
5230    // Allocate new sliced string.  At this point we do not reload the instance
5231    // type including the string encoding because we simply rely on the info
5232    // provided by the original string.  It does not matter if the original
5233    // string's encoding is wrong because we always have to recheck encoding of
5234    // the newly created string's parent anyways due to externalized strings.
5235    Label two_byte_slice, set_slice_header;
5236    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
5237    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5238    __ testb(rbx, Immediate(kStringEncodingMask));
5239    __ j(zero, &two_byte_slice, Label::kNear);
5240    __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
5241    __ jmp(&set_slice_header, Label::kNear);
5242    __ bind(&two_byte_slice);
5243    __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
5244    __ bind(&set_slice_header);
5245    __ Integer32ToSmi(rcx, rcx);
5246    __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
5247    __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
5248           Immediate(String::kEmptyHashField));
5249    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
5250    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
5251    __ IncrementCounter(counters->sub_string_native(), 1);
5252    __ ret(kArgumentsSize);
5253
5254    __ bind(&copy_routine);
5255  }
5256
5257  // rdi: underlying subject string
5258  // rbx: instance type of underlying subject string
5259  // rdx: adjusted start index (smi)
5260  // rcx: length
5261  // The subject string can only be external or sequential string of either
5262  // encoding at this point.
5263  Label two_byte_sequential, sequential_string;
5264  STATIC_ASSERT(kExternalStringTag != 0);
5265  STATIC_ASSERT(kSeqStringTag == 0);
5266  __ testb(rbx, Immediate(kExternalStringTag));
5267  __ j(zero, &sequential_string);
5268
5269  // Handle external string.
5270  // Rule out short external strings.
5271  STATIC_CHECK(kShortExternalStringTag != 0);
5272  __ testb(rbx, Immediate(kShortExternalStringMask));
5273  __ j(not_zero, &runtime);
5274  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
5275  // Move the pointer so that offset-wise, it looks like a sequential string.
5276  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
5277  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5278
5279  __ bind(&sequential_string);
5280  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
5281  __ testb(rbx, Immediate(kStringEncodingMask));
5282  __ j(zero, &two_byte_sequential);
5283
5284  // Allocate the result.
5285  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
5286
5287  // rax: result string
5288  // rcx: result string length
5289  __ movq(r14, rsi);  // esi used by following code.
5290  {  // Locate character of sub string start.
5291    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
5292    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5293                        SeqOneByteString::kHeaderSize - kHeapObjectTag));
5294  }
5295  // Locate first character of result.
5296  __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
5297
5298  // rax: result string
5299  // rcx: result length
5300  // rdi: first character of result
5301  // rsi: character of sub string start
5302  // r14: original value of rsi
5303  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
5304  __ movq(rsi, r14);  // Restore rsi.
5305  __ IncrementCounter(counters->sub_string_native(), 1);
5306  __ ret(kArgumentsSize);
5307
5308  __ bind(&two_byte_sequential);
5309  // Allocate the result.
5310  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
5311
5312  // rax: result string
5313  // rcx: result string length
5314  __ movq(r14, rsi);  // esi used by following code.
5315  {  // Locate character of sub string start.
5316    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
5317    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
5318                        SeqOneByteString::kHeaderSize - kHeapObjectTag));
5319  }
5320  // Locate first character of result.
5321  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
5322
5323  // rax: result string
5324  // rcx: result length
5325  // rdi: first character of result
5326  // rsi: character of sub string start
5327  // r14: original value of rsi
5328  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
5329  __ movq(rsi, r14);  // Restore esi.
5330  __ IncrementCounter(counters->sub_string_native(), 1);
5331  __ ret(kArgumentsSize);
5332
5333  // Just jump to runtime to create the sub string.
5334  __ bind(&runtime);
5335  __ TailCallRuntime(Runtime::kSubString, 3, 1);
5336
5337  __ bind(&single_char);
5338  // rax: string
5339  // rbx: instance type
5340  // rcx: sub string length (smi)
5341  // rdx: from index (smi)
5342  StringCharAtGenerator generator(
5343      rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
5344  generator.GenerateFast(masm);
5345  __ ret(kArgumentsSize);
5346  generator.SkipSlow(masm, &runtime);
5347}
5348
5349
5350void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
5351                                                      Register left,
5352                                                      Register right,
5353                                                      Register scratch1,
5354                                                      Register scratch2) {
5355  Register length = scratch1;
5356
5357  // Compare lengths.
5358  Label check_zero_length;
5359  __ movq(length, FieldOperand(left, String::kLengthOffset));
5360  __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
5361  __ j(equal, &check_zero_length, Label::kNear);
5362  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5363  __ ret(0);
5364
5365  // Check if the length is zero.
5366  Label compare_chars;
5367  __ bind(&check_zero_length);
5368  STATIC_ASSERT(kSmiTag == 0);
5369  __ SmiTest(length);
5370  __ j(not_zero, &compare_chars, Label::kNear);
5371  __ Move(rax, Smi::FromInt(EQUAL));
5372  __ ret(0);
5373
5374  // Compare characters.
5375  __ bind(&compare_chars);
5376  Label strings_not_equal;
5377  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
5378                                &strings_not_equal, Label::kNear);
5379
5380  // Characters are equal.
5381  __ Move(rax, Smi::FromInt(EQUAL));
5382  __ ret(0);
5383
5384  // Characters are not equal.
5385  __ bind(&strings_not_equal);
5386  __ Move(rax, Smi::FromInt(NOT_EQUAL));
5387  __ ret(0);
5388}
5389
5390
5391void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
5392                                                        Register left,
5393                                                        Register right,
5394                                                        Register scratch1,
5395                                                        Register scratch2,
5396                                                        Register scratch3,
5397                                                        Register scratch4) {
5398  // Ensure that you can always subtract a string length from a non-negative
5399  // number (e.g. another length).
5400  STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
5401
5402  // Find minimum length and length difference.
5403  __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
5404  __ movq(scratch4, scratch1);
5405  __ SmiSub(scratch4,
5406            scratch4,
5407            FieldOperand(right, String::kLengthOffset));
5408  // Register scratch4 now holds left.length - right.length.
5409  const Register length_difference = scratch4;
5410  Label left_shorter;
5411  __ j(less, &left_shorter, Label::kNear);
5412  // The right string isn't longer that the left one.
5413  // Get the right string's length by subtracting the (non-negative) difference
5414  // from the left string's length.
5415  __ SmiSub(scratch1, scratch1, length_difference);
5416  __ bind(&left_shorter);
5417  // Register scratch1 now holds Min(left.length, right.length).
5418  const Register min_length = scratch1;
5419
5420  Label compare_lengths;
5421  // If min-length is zero, go directly to comparing lengths.
5422  __ SmiTest(min_length);
5423  __ j(zero, &compare_lengths, Label::kNear);
5424
5425  // Compare loop.
5426  Label result_not_equal;
5427  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
5428                                &result_not_equal, Label::kNear);
5429
5430  // Completed loop without finding different characters.
5431  // Compare lengths (precomputed).
5432  __ bind(&compare_lengths);
5433  __ SmiTest(length_difference);
5434  Label length_not_equal;
5435  __ j(not_zero, &length_not_equal, Label::kNear);
5436
5437  // Result is EQUAL.
5438  __ Move(rax, Smi::FromInt(EQUAL));
5439  __ ret(0);
5440
5441  Label result_greater;
5442  Label result_less;
5443  __ bind(&length_not_equal);
5444  __ j(greater, &result_greater, Label::kNear);
5445  __ jmp(&result_less, Label::kNear);
5446  __ bind(&result_not_equal);
5447  // Unequal comparison of left to right, either character or length.
5448  __ j(above, &result_greater, Label::kNear);
5449  __ bind(&result_less);
5450
5451  // Result is LESS.
5452  __ Move(rax, Smi::FromInt(LESS));
5453  __ ret(0);
5454
5455  // Result is GREATER.
5456  __ bind(&result_greater);
5457  __ Move(rax, Smi::FromInt(GREATER));
5458  __ ret(0);
5459}
5460
5461
5462void StringCompareStub::GenerateAsciiCharsCompareLoop(
5463    MacroAssembler* masm,
5464    Register left,
5465    Register right,
5466    Register length,
5467    Register scratch,
5468    Label* chars_not_equal,
5469    Label::Distance near_jump) {
5470  // Change index to run from -length to -1 by adding length to string
5471  // start. This means that loop ends when index reaches zero, which
5472  // doesn't need an additional compare.
5473  __ SmiToInteger32(length, length);
5474  __ lea(left,
5475         FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
5476  __ lea(right,
5477         FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
5478  __ neg(length);
5479  Register index = length;  // index = -length;
5480
5481  // Compare loop.
5482  Label loop;
5483  __ bind(&loop);
5484  __ movb(scratch, Operand(left, index, times_1, 0));
5485  __ cmpb(scratch, Operand(right, index, times_1, 0));
5486  __ j(not_equal, chars_not_equal, near_jump);
5487  __ incq(index);
5488  __ j(not_zero, &loop);
5489}
5490
5491
5492void StringCompareStub::Generate(MacroAssembler* masm) {
5493  Label runtime;
5494
5495  // Stack frame on entry.
5496  //  rsp[0]  : return address
5497  //  rsp[8]  : right string
5498  //  rsp[16] : left string
5499
5500  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // left
5501  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right
5502
5503  // Check for identity.
5504  Label not_same;
5505  __ cmpq(rdx, rax);
5506  __ j(not_equal, &not_same, Label::kNear);
5507  __ Move(rax, Smi::FromInt(EQUAL));
5508  Counters* counters = masm->isolate()->counters();
5509  __ IncrementCounter(counters->string_compare_native(), 1);
5510  __ ret(2 * kPointerSize);
5511
5512  __ bind(&not_same);
5513
5514  // Check that both are sequential ASCII strings.
5515  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
5516
5517  // Inline comparison of ASCII strings.
5518  __ IncrementCounter(counters->string_compare_native(), 1);
5519  // Drop arguments from the stack
5520  __ PopReturnAddressTo(rcx);
5521  __ addq(rsp, Immediate(2 * kPointerSize));
5522  __ PushReturnAddressFrom(rcx);
5523  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
5524
5525  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
5526  // tagged as a small integer.
5527  __ bind(&runtime);
5528  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5529}
5530
5531
5532void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
5533  ASSERT(state_ == CompareIC::SMI);
5534  Label miss;
5535  __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
5536
5537  if (GetCondition() == equal) {
5538    // For equality we do not care about the sign of the result.
5539    __ subq(rax, rdx);
5540  } else {
5541    Label done;
5542    __ subq(rdx, rax);
5543    __ j(no_overflow, &done, Label::kNear);
5544    // Correct sign of result in case of overflow.
5545    __ not_(rdx);
5546    __ bind(&done);
5547    __ movq(rax, rdx);
5548  }
5549  __ ret(0);
5550
5551  __ bind(&miss);
5552  GenerateMiss(masm);
5553}
5554
5555
5556void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
5557  ASSERT(state_ == CompareIC::NUMBER);
5558
5559  Label generic_stub;
5560  Label unordered, maybe_undefined1, maybe_undefined2;
5561  Label miss;
5562
5563  if (left_ == CompareIC::SMI) {
5564    __ JumpIfNotSmi(rdx, &miss);
5565  }
5566  if (right_ == CompareIC::SMI) {
5567    __ JumpIfNotSmi(rax, &miss);
5568  }
5569
5570  // Load left and right operand.
5571  Label done, left, left_smi, right_smi;
5572  __ JumpIfSmi(rax, &right_smi, Label::kNear);
5573  __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
5574  __ j(not_equal, &maybe_undefined1, Label::kNear);
5575  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
5576  __ jmp(&left, Label::kNear);
5577  __ bind(&right_smi);
5578  __ SmiToInteger32(rcx, rax);  // Can't clobber rax yet.
5579  __ cvtlsi2sd(xmm1, rcx);
5580
5581  __ bind(&left);
5582  __ JumpIfSmi(rdx, &left_smi, Label::kNear);
5583  __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
5584  __ j(not_equal, &maybe_undefined2, Label::kNear);
5585  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
5586  __ jmp(&done);
5587  __ bind(&left_smi);
5588  __ SmiToInteger32(rcx, rdx);  // Can't clobber rdx yet.
5589  __ cvtlsi2sd(xmm0, rcx);
5590
5591  __ bind(&done);
5592  // Compare operands
5593  __ ucomisd(xmm0, xmm1);
5594
5595  // Don't base result on EFLAGS when a NaN is involved.
5596  __ j(parity_even, &unordered, Label::kNear);
5597
5598  // Return a result of -1, 0, or 1, based on EFLAGS.
5599  // Performing mov, because xor would destroy the flag register.
5600  __ movl(rax, Immediate(0));
5601  __ movl(rcx, Immediate(0));
5602  __ setcc(above, rax);  // Add one to zero if carry clear and not equal.
5603  __ sbbq(rax, rcx);  // Subtract one if below (aka. carry set).
5604  __ ret(0);
5605
5606  __ bind(&unordered);
5607  __ bind(&generic_stub);
5608  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
5609                     CompareIC::GENERIC);
5610  __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
5611
5612  __ bind(&maybe_undefined1);
5613  if (Token::IsOrderedRelationalCompareOp(op_)) {
5614    __ Cmp(rax, masm->isolate()->factory()->undefined_value());
5615    __ j(not_equal, &miss);
5616    __ JumpIfSmi(rdx, &unordered);
5617    __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
5618    __ j(not_equal, &maybe_undefined2, Label::kNear);
5619    __ jmp(&unordered);
5620  }
5621
5622  __ bind(&maybe_undefined2);
5623  if (Token::IsOrderedRelationalCompareOp(op_)) {
5624    __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
5625    __ j(equal, &unordered);
5626  }
5627
5628  __ bind(&miss);
5629  GenerateMiss(masm);
5630}
5631
5632
5633void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
5634  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
5635  ASSERT(GetCondition() == equal);
5636
5637  // Registers containing left and right operands respectively.
5638  Register left = rdx;
5639  Register right = rax;
5640  Register tmp1 = rcx;
5641  Register tmp2 = rbx;
5642
5643  // Check that both operands are heap objects.
5644  Label miss;
5645  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5646  __ j(cond, &miss, Label::kNear);
5647
5648  // Check that both operands are internalized strings.
5649  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5650  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5651  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5652  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5653  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
5654  __ or_(tmp1, tmp2);
5655  __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
5656  __ j(not_zero, &miss, Label::kNear);
5657
5658  // Internalized strings are compared by identity.
5659  Label done;
5660  __ cmpq(left, right);
5661  // Make sure rax is non-zero. At this point input operands are
5662  // guaranteed to be non-zero.
5663  ASSERT(right.is(rax));
5664  __ j(not_equal, &done, Label::kNear);
5665  STATIC_ASSERT(EQUAL == 0);
5666  STATIC_ASSERT(kSmiTag == 0);
5667  __ Move(rax, Smi::FromInt(EQUAL));
5668  __ bind(&done);
5669  __ ret(0);
5670
5671  __ bind(&miss);
5672  GenerateMiss(masm);
5673}
5674
5675
5676void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
5677  ASSERT(state_ == CompareIC::UNIQUE_NAME);
5678  ASSERT(GetCondition() == equal);
5679
5680  // Registers containing left and right operands respectively.
5681  Register left = rdx;
5682  Register right = rax;
5683  Register tmp1 = rcx;
5684  Register tmp2 = rbx;
5685
5686  // Check that both operands are heap objects.
5687  Label miss;
5688  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5689  __ j(cond, &miss, Label::kNear);
5690
5691  // Check that both operands are unique names. This leaves the instance
5692  // types loaded in tmp1 and tmp2.
5693  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5694  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5695  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5696  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5697
5698  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
5699  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
5700
5701  // Unique names are compared by identity.
5702  Label done;
5703  __ cmpq(left, right);
5704  // Make sure rax is non-zero. At this point input operands are
5705  // guaranteed to be non-zero.
5706  ASSERT(right.is(rax));
5707  __ j(not_equal, &done, Label::kNear);
5708  STATIC_ASSERT(EQUAL == 0);
5709  STATIC_ASSERT(kSmiTag == 0);
5710  __ Move(rax, Smi::FromInt(EQUAL));
5711  __ bind(&done);
5712  __ ret(0);
5713
5714  __ bind(&miss);
5715  GenerateMiss(masm);
5716}
5717
5718
5719void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
5720  ASSERT(state_ == CompareIC::STRING);
5721  Label miss;
5722
5723  bool equality = Token::IsEqualityOp(op_);
5724
5725  // Registers containing left and right operands respectively.
5726  Register left = rdx;
5727  Register right = rax;
5728  Register tmp1 = rcx;
5729  Register tmp2 = rbx;
5730  Register tmp3 = rdi;
5731
5732  // Check that both operands are heap objects.
5733  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
5734  __ j(cond, &miss);
5735
5736  // Check that both operands are strings. This leaves the instance
5737  // types loaded in tmp1 and tmp2.
5738  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
5739  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
5740  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
5741  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
5742  __ movq(tmp3, tmp1);
5743  STATIC_ASSERT(kNotStringTag != 0);
5744  __ or_(tmp3, tmp2);
5745  __ testb(tmp3, Immediate(kIsNotStringMask));
5746  __ j(not_zero, &miss);
5747
5748  // Fast check for identical strings.
5749  Label not_same;
5750  __ cmpq(left, right);
5751  __ j(not_equal, &not_same, Label::kNear);
5752  STATIC_ASSERT(EQUAL == 0);
5753  STATIC_ASSERT(kSmiTag == 0);
5754  __ Move(rax, Smi::FromInt(EQUAL));
5755  __ ret(0);
5756
5757  // Handle not identical strings.
5758  __ bind(&not_same);
5759
5760  // Check that both strings are internalized strings. If they are, we're done
5761  // because we already know they are not identical. We also know they are both
5762  // strings.
5763  if (equality) {
5764    Label do_compare;
5765    STATIC_ASSERT(kInternalizedTag == 0);
5766    __ or_(tmp1, tmp2);
5767    __ testb(tmp1, Immediate(kIsNotInternalizedMask));
5768    __ j(not_zero, &do_compare, Label::kNear);
5769    // Make sure rax is non-zero. At this point input operands are
5770    // guaranteed to be non-zero.
5771    ASSERT(right.is(rax));
5772    __ ret(0);
5773    __ bind(&do_compare);
5774  }
5775
5776  // Check that both strings are sequential ASCII.
5777  Label runtime;
5778  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
5779
5780  // Compare flat ASCII strings. Returns when done.
5781  if (equality) {
5782    StringCompareStub::GenerateFlatAsciiStringEquals(
5783        masm, left, right, tmp1, tmp2);
5784  } else {
5785    StringCompareStub::GenerateCompareFlatAsciiStrings(
5786        masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
5787  }
5788
5789  // Handle more complex cases in runtime.
5790  __ bind(&runtime);
5791  __ PopReturnAddressTo(tmp1);
5792  __ push(left);
5793  __ push(right);
5794  __ PushReturnAddressFrom(tmp1);
5795  if (equality) {
5796    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
5797  } else {
5798    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5799  }
5800
5801  __ bind(&miss);
5802  GenerateMiss(masm);
5803}
5804
5805
5806void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
5807  ASSERT(state_ == CompareIC::OBJECT);
5808  Label miss;
5809  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
5810  __ j(either_smi, &miss, Label::kNear);
5811
5812  __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
5813  __ j(not_equal, &miss, Label::kNear);
5814  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
5815  __ j(not_equal, &miss, Label::kNear);
5816
5817  ASSERT(GetCondition() == equal);
5818  __ subq(rax, rdx);
5819  __ ret(0);
5820
5821  __ bind(&miss);
5822  GenerateMiss(masm);
5823}
5824
5825
5826void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
5827  Label miss;
5828  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
5829  __ j(either_smi, &miss, Label::kNear);
5830
5831  __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
5832  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
5833  __ Cmp(rcx, known_map_);
5834  __ j(not_equal, &miss, Label::kNear);
5835  __ Cmp(rbx, known_map_);
5836  __ j(not_equal, &miss, Label::kNear);
5837
5838  __ subq(rax, rdx);
5839  __ ret(0);
5840
5841  __ bind(&miss);
5842  GenerateMiss(masm);
5843}
5844
5845
5846void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
5847  {
5848    // Call the runtime system in a fresh internal frame.
5849    ExternalReference miss =
5850        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
5851
5852    FrameScope scope(masm, StackFrame::INTERNAL);
5853    __ push(rdx);
5854    __ push(rax);
5855    __ push(rdx);
5856    __ push(rax);
5857    __ Push(Smi::FromInt(op_));
5858    __ CallExternalReference(miss, 3);
5859
5860    // Compute the entry point of the rewritten stub.
5861    __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
5862    __ pop(rax);
5863    __ pop(rdx);
5864  }
5865
5866  // Do a tail call to the rewritten stub.
5867  __ jmp(rdi);
5868}
5869
5870
5871void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5872                                                      Label* miss,
5873                                                      Label* done,
5874                                                      Register properties,
5875                                                      Handle<Name> name,
5876                                                      Register r0) {
5877  ASSERT(name->IsUniqueName());
5878  // If names of slots in range from 1 to kProbes - 1 for the hash value are
5879  // not equal to the name and kProbes-th slot is not used (its name is the
5880  // undefined value), it guarantees the hash table doesn't contain the
5881  // property. It's true even if some slots represent deleted properties
5882  // (their names are the hole value).
5883  for (int i = 0; i < kInlinedProbes; i++) {
5884    // r0 points to properties hash.
5885    // Compute the masked index: (hash + i + i * i) & mask.
5886    Register index = r0;
5887    // Capacity is smi 2^n.
5888    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
5889    __ decl(index);
5890    __ and_(index,
5891            Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
5892
5893    // Scale the index by multiplying by the entry size.
5894    ASSERT(NameDictionary::kEntrySize == 3);
5895    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
5896
5897    Register entity_name = r0;
5898    // Having undefined at this place means the name is not contained.
5899    ASSERT_EQ(kSmiTagSize, 1);
5900    __ movq(entity_name, Operand(properties,
5901                                 index,
5902                                 times_pointer_size,
5903                                 kElementsStartOffset - kHeapObjectTag));
5904    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
5905    __ j(equal, done);
5906
5907    // Stop if found the property.
5908    __ Cmp(entity_name, Handle<Name>(name));
5909    __ j(equal, miss);
5910
5911    Label good;
5912    // Check for the hole and skip.
5913    __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
5914    __ j(equal, &good, Label::kNear);
5915
5916    // Check if the entry name is not a unique name.
5917    __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
5918    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
5919                           miss);
5920    __ bind(&good);
5921  }
5922
5923  NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
5924  __ Push(Handle<Object>(name));
5925  __ push(Immediate(name->Hash()));
5926  __ CallStub(&stub);
5927  __ testq(r0, r0);
5928  __ j(not_zero, miss);
5929  __ jmp(done);
5930}
5931
5932
5933// Probe the name dictionary in the |elements| register. Jump to the
5934// |done| label if a property with the given name is found leaving the
5935// index into the dictionary in |r1|. Jump to the |miss| label
5936// otherwise.
5937void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
5938                                                      Label* miss,
5939                                                      Label* done,
5940                                                      Register elements,
5941                                                      Register name,
5942                                                      Register r0,
5943                                                      Register r1) {
5944  ASSERT(!elements.is(r0));
5945  ASSERT(!elements.is(r1));
5946  ASSERT(!name.is(r0));
5947  ASSERT(!name.is(r1));
5948
5949  __ AssertName(name);
5950
5951  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
5952  __ decl(r0);
5953
5954  for (int i = 0; i < kInlinedProbes; i++) {
5955    // Compute the masked index: (hash + i + i * i) & mask.
5956    __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
5957    __ shrl(r1, Immediate(Name::kHashShift));
5958    if (i > 0) {
5959      __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
5960    }
5961    __ and_(r1, r0);
5962
5963    // Scale the index by multiplying by the entry size.
5964    ASSERT(NameDictionary::kEntrySize == 3);
5965    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
5966
5967    // Check if the key is identical to the name.
5968    __ cmpq(name, Operand(elements, r1, times_pointer_size,
5969                          kElementsStartOffset - kHeapObjectTag));
5970    __ j(equal, done);
5971  }
5972
5973  NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
5974  __ push(name);
5975  __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
5976  __ shrl(r0, Immediate(Name::kHashShift));
5977  __ push(r0);
5978  __ CallStub(&stub);
5979
5980  __ testq(r0, r0);
5981  __ j(zero, miss);
5982  __ jmp(done);
5983}
5984
5985
5986void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
5987  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
5988  // we cannot call anything that could cause a GC from this stub.
5989  // Stack frame on entry:
5990  //  rsp[0 * kPointerSize] : return address.
5991  //  rsp[1 * kPointerSize] : key's hash.
5992  //  rsp[2 * kPointerSize] : key.
5993  // Registers:
5994  //  dictionary_: NameDictionary to probe.
5995  //  result_: used as scratch.
5996  //  index_: will hold an index of entry if lookup is successful.
5997  //          might alias with result_.
5998  // Returns:
5999  //  result_ is zero if lookup failed, non zero otherwise.
6000
6001  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
6002
6003  Register scratch = result_;
6004
6005  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
6006  __ decl(scratch);
6007  __ push(scratch);
6008
6009  // If names of slots in range from 1 to kProbes - 1 for the hash value are
6010  // not equal to the name and kProbes-th slot is not used (its name is the
6011  // undefined value), it guarantees the hash table doesn't contain the
6012  // property. It's true even if some slots represent deleted properties
6013  // (their names are the null value).
6014  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
6015    // Compute the masked index: (hash + i + i * i) & mask.
6016    __ movq(scratch, Operand(rsp, 2 * kPointerSize));
6017    if (i > 0) {
6018      __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
6019    }
6020    __ and_(scratch, Operand(rsp, 0));
6021
6022    // Scale the index by multiplying by the entry size.
6023    ASSERT(NameDictionary::kEntrySize == 3);
6024    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
6025
6026    // Having undefined at this place means the name is not contained.
6027    __ movq(scratch, Operand(dictionary_,
6028                             index_,
6029                             times_pointer_size,
6030                             kElementsStartOffset - kHeapObjectTag));
6031
6032    __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
6033    __ j(equal, &not_in_dictionary);
6034
6035    // Stop if found the property.
6036    __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
6037    __ j(equal, &in_dictionary);
6038
6039    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
6040      // If we hit a key that is not a unique name during negative
6041      // lookup we have to bailout as this key might be equal to the
6042      // key we are looking for.
6043
6044      // Check if the entry name is not a unique name.
6045      __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
6046      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
6047                             &maybe_in_dictionary);
6048    }
6049  }
6050
6051  __ bind(&maybe_in_dictionary);
6052  // If we are doing negative lookup then probing failure should be
6053  // treated as a lookup success. For positive lookup probing failure
6054  // should be treated as lookup failure.
6055  if (mode_ == POSITIVE_LOOKUP) {
6056    __ movq(scratch, Immediate(0));
6057    __ Drop(1);
6058    __ ret(2 * kPointerSize);
6059  }
6060
6061  __ bind(&in_dictionary);
6062  __ movq(scratch, Immediate(1));
6063  __ Drop(1);
6064  __ ret(2 * kPointerSize);
6065
6066  __ bind(&not_in_dictionary);
6067  __ movq(scratch, Immediate(0));
6068  __ Drop(1);
6069  __ ret(2 * kPointerSize);
6070}
6071
6072
6073struct AheadOfTimeWriteBarrierStubList {
6074  Register object, value, address;
6075  RememberedSetAction action;
6076};
6077
6078
6079#define REG(Name) { kRegister_ ## Name ## _Code }
6080
6081struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
6082  // Used in RegExpExecStub.
6083  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
6084  // Used in CompileArrayPushCall.
6085  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
6086  // Used in CompileStoreGlobal.
6087  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
6088  // Used in StoreStubCompiler::CompileStoreField and
6089  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
6090  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
6091  // GenerateStoreField calls the stub with two different permutations of
6092  // registers.  This is the second.
6093  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
6094  // StoreIC::GenerateNormal via GenerateDictionaryStore.
6095  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
6096  // KeyedStoreIC::GenerateGeneric.
6097  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET},
6098  // KeyedStoreStubCompiler::GenerateStoreFastElement.
6099  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET},
6100  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET},
6101  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
6102  // and ElementsTransitionGenerator::GenerateSmiToDouble
6103  // and ElementsTransitionGenerator::GenerateDoubleToObject
6104  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET},
6105  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET},
6106  // ElementsTransitionGenerator::GenerateSmiToDouble
6107  // and ElementsTransitionGenerator::GenerateDoubleToObject
6108  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET},
6109  // ElementsTransitionGenerator::GenerateDoubleToObject
6110  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET},
6111  // StoreArrayLiteralElementStub::Generate
6112  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET},
6113  // FastNewClosureStub::Generate and
6114  // StringAddStub::Generate
6115  { REG(rcx), REG(rdx), REG(rbx), EMIT_REMEMBERED_SET},
6116  // StringAddStub::Generate
6117  { REG(rcx), REG(rax), REG(rbx), EMIT_REMEMBERED_SET},
6118  // Null termination.
6119  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
6120};
6121
6122#undef REG
6123
6124bool RecordWriteStub::IsPregenerated() {
6125  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6126       !entry->object.is(no_reg);
6127       entry++) {
6128    if (object_.is(entry->object) &&
6129        value_.is(entry->value) &&
6130        address_.is(entry->address) &&
6131        remembered_set_action_ == entry->action &&
6132        save_fp_regs_mode_ == kDontSaveFPRegs) {
6133      return true;
6134    }
6135  }
6136  return false;
6137}
6138
6139
6140void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
6141    Isolate* isolate) {
6142  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
6143  stub1.GetCode(isolate)->set_is_pregenerated(true);
6144  StoreBufferOverflowStub stub2(kSaveFPRegs);
6145  stub2.GetCode(isolate)->set_is_pregenerated(true);
6146}
6147
6148
6149void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
6150  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
6151       !entry->object.is(no_reg);
6152       entry++) {
6153    RecordWriteStub stub(entry->object,
6154                         entry->value,
6155                         entry->address,
6156                         entry->action,
6157                         kDontSaveFPRegs);
6158    stub.GetCode(isolate)->set_is_pregenerated(true);
6159  }
6160}
6161
6162
6163bool CodeStub::CanUseFPRegisters() {
6164  return true;  // Always have SSE2 on x64.
6165}
6166
6167
6168// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
6169// the value has just been written into the object, now this stub makes sure
6170// we keep the GC informed.  The word in the object where the value has been
6171// written is in the address register.
6172void RecordWriteStub::Generate(MacroAssembler* masm) {
6173  Label skip_to_incremental_noncompacting;
6174  Label skip_to_incremental_compacting;
6175
6176  // The first two instructions are generated with labels so as to get the
6177  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
6178  // forth between a compare instructions (a nop in this position) and the
6179  // real branch when we start and stop incremental heap marking.
6180  // See RecordWriteStub::Patch for details.
6181  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
6182  __ jmp(&skip_to_incremental_compacting, Label::kFar);
6183
6184  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
6185    __ RememberedSetHelper(object_,
6186                           address_,
6187                           value_,
6188                           save_fp_regs_mode_,
6189                           MacroAssembler::kReturnAtEnd);
6190  } else {
6191    __ ret(0);
6192  }
6193
6194  __ bind(&skip_to_incremental_noncompacting);
6195  GenerateIncremental(masm, INCREMENTAL);
6196
6197  __ bind(&skip_to_incremental_compacting);
6198  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
6199
6200  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
6201  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
6202  masm->set_byte_at(0, kTwoByteNopInstruction);
6203  masm->set_byte_at(2, kFiveByteNopInstruction);
6204}
6205
6206
6207void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
6208  regs_.Save(masm);
6209
6210  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
6211    Label dont_need_remembered_set;
6212
6213    __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
6214    __ JumpIfNotInNewSpace(regs_.scratch0(),
6215                           regs_.scratch0(),
6216                           &dont_need_remembered_set);
6217
6218    __ CheckPageFlag(regs_.object(),
6219                     regs_.scratch0(),
6220                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
6221                     not_zero,
6222                     &dont_need_remembered_set);
6223
6224    // First notify the incremental marker if necessary, then update the
6225    // remembered set.
6226    CheckNeedsToInformIncrementalMarker(
6227        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
6228    InformIncrementalMarker(masm, mode);
6229    regs_.Restore(masm);
6230    __ RememberedSetHelper(object_,
6231                           address_,
6232                           value_,
6233                           save_fp_regs_mode_,
6234                           MacroAssembler::kReturnAtEnd);
6235
6236    __ bind(&dont_need_remembered_set);
6237  }
6238
6239  CheckNeedsToInformIncrementalMarker(
6240      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
6241  InformIncrementalMarker(masm, mode);
6242  regs_.Restore(masm);
6243  __ ret(0);
6244}
6245
6246
6247void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
6248  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
6249  Register address =
6250      arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
6251  ASSERT(!address.is(regs_.object()));
6252  ASSERT(!address.is(arg_reg_1));
6253  __ Move(address, regs_.address());
6254  __ Move(arg_reg_1, regs_.object());
6255  // TODO(gc) Can we just set address arg2 in the beginning?
6256  __ Move(arg_reg_2, address);
6257  __ LoadAddress(arg_reg_3,
6258                 ExternalReference::isolate_address(masm->isolate()));
6259  int argument_count = 3;
6260
6261  AllowExternalCallThatCantCauseGC scope(masm);
6262  __ PrepareCallCFunction(argument_count);
6263  if (mode == INCREMENTAL_COMPACTION) {
6264    __ CallCFunction(
6265        ExternalReference::incremental_evacuation_record_write_function(
6266            masm->isolate()),
6267        argument_count);
6268  } else {
6269    ASSERT(mode == INCREMENTAL);
6270    __ CallCFunction(
6271        ExternalReference::incremental_marking_record_write_function(
6272            masm->isolate()),
6273        argument_count);
6274  }
6275  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
6276}
6277
6278
6279void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
6280    MacroAssembler* masm,
6281    OnNoNeedToInformIncrementalMarker on_no_need,
6282    Mode mode) {
6283  Label on_black;
6284  Label need_incremental;
6285  Label need_incremental_pop_object;
6286
6287  __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
6288  __ and_(regs_.scratch0(), regs_.object());
6289  __ movq(regs_.scratch1(),
6290         Operand(regs_.scratch0(),
6291                 MemoryChunk::kWriteBarrierCounterOffset));
6292  __ subq(regs_.scratch1(), Immediate(1));
6293  __ movq(Operand(regs_.scratch0(),
6294                 MemoryChunk::kWriteBarrierCounterOffset),
6295         regs_.scratch1());
6296  __ j(negative, &need_incremental);
6297
6298  // Let's look at the color of the object:  If it is not black we don't have
6299  // to inform the incremental marker.
6300  __ JumpIfBlack(regs_.object(),
6301                 regs_.scratch0(),
6302                 regs_.scratch1(),
6303                 &on_black,
6304                 Label::kNear);
6305
6306  regs_.Restore(masm);
6307  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
6308    __ RememberedSetHelper(object_,
6309                           address_,
6310                           value_,
6311                           save_fp_regs_mode_,
6312                           MacroAssembler::kReturnAtEnd);
6313  } else {
6314    __ ret(0);
6315  }
6316
6317  __ bind(&on_black);
6318
6319  // Get the value from the slot.
6320  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
6321
6322  if (mode == INCREMENTAL_COMPACTION) {
6323    Label ensure_not_white;
6324
6325    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
6326                     regs_.scratch1(),  // Scratch.
6327                     MemoryChunk::kEvacuationCandidateMask,
6328                     zero,
6329                     &ensure_not_white,
6330                     Label::kNear);
6331
6332    __ CheckPageFlag(regs_.object(),
6333                     regs_.scratch1(),  // Scratch.
6334                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
6335                     zero,
6336                     &need_incremental);
6337
6338    __ bind(&ensure_not_white);
6339  }
6340
6341  // We need an extra register for this, so we push the object register
6342  // temporarily.
6343  __ push(regs_.object());
6344  __ EnsureNotWhite(regs_.scratch0(),  // The value.
6345                    regs_.scratch1(),  // Scratch.
6346                    regs_.object(),  // Scratch.
6347                    &need_incremental_pop_object,
6348                    Label::kNear);
6349  __ pop(regs_.object());
6350
6351  regs_.Restore(masm);
6352  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
6353    __ RememberedSetHelper(object_,
6354                           address_,
6355                           value_,
6356                           save_fp_regs_mode_,
6357                           MacroAssembler::kReturnAtEnd);
6358  } else {
6359    __ ret(0);
6360  }
6361
6362  __ bind(&need_incremental_pop_object);
6363  __ pop(regs_.object());
6364
6365  __ bind(&need_incremental);
6366
6367  // Fall through when we need to inform the incremental marker.
6368}
6369
6370
6371void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
6372  // ----------- S t a t e -------------
6373  //  -- rax     : element value to store
6374  //  -- rcx     : element index as smi
6375  //  -- rsp[0]  : return address
6376  //  -- rsp[8]  : array literal index in function
6377  //  -- rsp[16] : array literal
6378  // clobbers rbx, rdx, rdi
6379  // -----------------------------------
6380
6381  Label element_done;
6382  Label double_elements;
6383  Label smi_element;
6384  Label slow_elements;
6385  Label fast_elements;
6386
6387  // Get array literal index, array literal and its map.
6388  __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6389  __ movq(rbx, Operand(rsp, 2 * kPointerSize));
6390  __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset));
6391
6392  __ CheckFastElements(rdi, &double_elements);
6393
6394  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
6395  __ JumpIfSmi(rax, &smi_element);
6396  __ CheckFastSmiElements(rdi, &fast_elements);
6397
6398  // Store into the array literal requires a elements transition. Call into
6399  // the runtime.
6400
6401  __ bind(&slow_elements);
6402  __ PopReturnAddressTo(rdi);
6403  __ push(rbx);
6404  __ push(rcx);
6405  __ push(rax);
6406  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
6407  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
6408  __ push(rdx);
6409  __ PushReturnAddressFrom(rdi);
6410  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
6411
6412  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
6413  __ bind(&fast_elements);
6414  __ SmiToInteger32(kScratchRegister, rcx);
6415  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
6416  __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
6417                           FixedArrayBase::kHeaderSize));
6418  __ movq(Operand(rcx, 0), rax);
6419  // Update the write barrier for the array store.
6420  __ RecordWrite(rbx, rcx, rax,
6421                 kDontSaveFPRegs,
6422                 EMIT_REMEMBERED_SET,
6423                 OMIT_SMI_CHECK);
6424  __ ret(0);
6425
6426  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or
6427  // FAST_*_ELEMENTS, and value is Smi.
6428  __ bind(&smi_element);
6429  __ SmiToInteger32(kScratchRegister, rcx);
6430  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
6431  __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
6432                       FixedArrayBase::kHeaderSize), rax);
6433  __ ret(0);
6434
6435  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
6436  __ bind(&double_elements);
6437
6438  __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
6439  __ SmiToInteger32(r11, rcx);
6440  __ StoreNumberToDoubleElements(rax,
6441                                 r9,
6442                                 r11,
6443                                 xmm0,
6444                                 &slow_elements);
6445  __ ret(0);
6446}
6447
6448
6449void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
6450  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
6451  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
6452  int parameter_count_offset =
6453      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
6454  __ movq(rbx, MemOperand(rbp, parameter_count_offset));
6455  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
6456  __ PopReturnAddressTo(rcx);
6457  int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
6458      ? kPointerSize
6459      : 0;
6460  __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
6461  __ jmp(rcx);  // Return to IC Miss stub, continuation still on stack.
6462}
6463
6464
6465void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
6466  if (masm->isolate()->function_entry_hook() != NULL) {
6467    // It's always safe to call the entry hook stub, as the hook itself
6468    // is not allowed to call back to V8.
6469    AllowStubCallsScope allow_stub_calls(masm, true);
6470
6471    ProfileEntryHookStub stub;
6472    masm->CallStub(&stub);
6473  }
6474}
6475
6476
6477void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
6478  // This stub can be called from essentially anywhere, so it needs to save
6479  // all volatile and callee-save registers.
6480  const size_t kNumSavedRegisters = 2;
6481  __ push(arg_reg_1);
6482  __ push(arg_reg_2);
6483
6484  // Calculate the original stack pointer and store it in the second arg.
6485  __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize));
6486
6487  // Calculate the function address to the first arg.
6488  __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize));
6489  __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
6490
6491  // Save the remainder of the volatile registers.
6492  masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
6493
6494  // Call the entry hook function.
6495  __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()),
6496          RelocInfo::NONE64);
6497
6498  AllowExternalCallThatCantCauseGC scope(masm);
6499
6500  const int kArgumentCount = 2;
6501  __ PrepareCallCFunction(kArgumentCount);
6502  __ CallCFunction(rax, kArgumentCount);
6503
6504  // Restore volatile regs.
6505  masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
6506  __ pop(arg_reg_2);
6507  __ pop(arg_reg_1);
6508
6509  __ Ret();
6510}
6511
6512
6513template<class T>
6514static void CreateArrayDispatch(MacroAssembler* masm) {
6515  int last_index = GetSequenceIndexFromFastElementsKind(
6516      TERMINAL_FAST_ELEMENTS_KIND);
6517  for (int i = 0; i <= last_index; ++i) {
6518    Label next;
6519    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
6520    __ cmpl(rdx, Immediate(kind));
6521    __ j(not_equal, &next);
6522    T stub(kind);
6523    __ TailCallStub(&stub);
6524    __ bind(&next);
6525  }
6526
6527  // If we reached this point there is a problem.
6528  __ Abort(kUnexpectedElementsKindInArrayConstructor);
6529}
6530
6531
6532static void CreateArrayDispatchOneArgument(MacroAssembler* masm) {
6533  // rbx - type info cell
6534  // rdx - kind
6535  // rax - number of arguments
6536  // rdi - constructor?
6537  // rsp[0] - return address
6538  // rsp[8] - last argument
6539  ASSERT(FAST_SMI_ELEMENTS == 0);
6540  ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
6541  ASSERT(FAST_ELEMENTS == 2);
6542  ASSERT(FAST_HOLEY_ELEMENTS == 3);
6543  ASSERT(FAST_DOUBLE_ELEMENTS == 4);
6544  ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
6545
6546  Handle<Object> undefined_sentinel(
6547      masm->isolate()->heap()->undefined_value(),
6548      masm->isolate());
6549
6550  // is the low bit set? If so, we are holey and that is good.
6551  __ testb(rdx, Immediate(1));
6552  Label normal_sequence;
6553  __ j(not_zero, &normal_sequence);
6554
6555  // look at the first argument
6556  __ movq(rcx, Operand(rsp, kPointerSize));
6557  __ testq(rcx, rcx);
6558  __ j(zero, &normal_sequence);
6559
6560  // We are going to create a holey array, but our kind is non-holey.
6561  // Fix kind and retry (only if we have an allocation site in the cell).
6562  __ incl(rdx);
6563  __ Cmp(rbx, undefined_sentinel);
6564  __ j(equal, &normal_sequence);
6565  __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
6566  Handle<Map> allocation_site_map(
6567      masm->isolate()->heap()->allocation_site_map(),
6568      masm->isolate());
6569  __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
6570  __ j(not_equal, &normal_sequence);
6571
6572  // Save the resulting elements kind in type info
6573  __ Integer32ToSmi(rdx, rdx);
6574  __ movq(FieldOperand(rcx, AllocationSite::kTransitionInfoOffset), rdx);
6575  __ SmiToInteger32(rdx, rdx);
6576
6577  __ bind(&normal_sequence);
6578  int last_index = GetSequenceIndexFromFastElementsKind(
6579      TERMINAL_FAST_ELEMENTS_KIND);
6580  for (int i = 0; i <= last_index; ++i) {
6581    Label next;
6582    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
6583    __ cmpl(rdx, Immediate(kind));
6584    __ j(not_equal, &next);
6585    ArraySingleArgumentConstructorStub stub(kind);
6586    __ TailCallStub(&stub);
6587    __ bind(&next);
6588  }
6589
6590  // If we reached this point there is a problem.
6591  __ Abort(kUnexpectedElementsKindInArrayConstructor);
6592}
6593
6594
6595template<class T>
6596static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
6597  int to_index = GetSequenceIndexFromFastElementsKind(
6598      TERMINAL_FAST_ELEMENTS_KIND);
6599  for (int i = 0; i <= to_index; ++i) {
6600    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
6601    T stub(kind);
6602    stub.GetCode(isolate)->set_is_pregenerated(true);
6603    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
6604      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
6605      stub1.GetCode(isolate)->set_is_pregenerated(true);
6606    }
6607  }
6608}
6609
6610
6611void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
6612  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
6613      isolate);
6614  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
6615      isolate);
6616  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
6617      isolate);
6618}
6619
6620
6621void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
6622    Isolate* isolate) {
6623  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
6624  for (int i = 0; i < 2; i++) {
6625    // For internal arrays we only need a few things
6626    InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
6627    stubh1.GetCode(isolate)->set_is_pregenerated(true);
6628    InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
6629    stubh2.GetCode(isolate)->set_is_pregenerated(true);
6630    InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
6631    stubh3.GetCode(isolate)->set_is_pregenerated(true);
6632  }
6633}
6634
6635
6636void ArrayConstructorStub::Generate(MacroAssembler* masm) {
6637  // ----------- S t a t e -------------
6638  //  -- rax    : argc
6639  //  -- rbx    : type info cell
6640  //  -- rdi    : constructor
6641  //  -- rsp[0] : return address
6642  //  -- rsp[8] : last argument
6643  // -----------------------------------
6644  Handle<Object> undefined_sentinel(
6645      masm->isolate()->heap()->undefined_value(),
6646      masm->isolate());
6647
6648  if (FLAG_debug_code) {
6649    // The array construct code is only set for the global and natives
6650    // builtin Array functions which always have maps.
6651
6652    // Initial map for the builtin Array function should be a map.
6653    __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
6654    // Will both indicate a NULL and a Smi.
6655    STATIC_ASSERT(kSmiTag == 0);
6656    Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
6657    __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
6658    __ CmpObjectType(rcx, MAP_TYPE, rcx);
6659    __ Check(equal, kUnexpectedInitialMapForArrayFunction);
6660
6661    // We should either have undefined in rbx or a valid cell
6662    Label okay_here;
6663    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
6664    __ Cmp(rbx, undefined_sentinel);
6665    __ j(equal, &okay_here);
6666    __ Cmp(FieldOperand(rbx, 0), cell_map);
6667    __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
6668    __ bind(&okay_here);
6669  }
6670
6671  Label no_info, switch_ready;
6672  // Get the elements kind and case on that.
6673  __ Cmp(rbx, undefined_sentinel);
6674  __ j(equal, &no_info);
6675  __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
6676
6677  // The type cell may have undefined in its value.
6678  __ Cmp(rdx, undefined_sentinel);
6679  __ j(equal, &no_info);
6680
6681  // The type cell has either an AllocationSite or a JSFunction
6682  __ Cmp(FieldOperand(rdx, 0),
6683         Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
6684  __ j(not_equal, &no_info);
6685
6686  __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
6687  __ SmiToInteger32(rdx, rdx);
6688  __ jmp(&switch_ready);
6689  __ bind(&no_info);
6690  __ movq(rdx, Immediate(GetInitialFastElementsKind()));
6691  __ bind(&switch_ready);
6692
6693  if (argument_count_ == ANY) {
6694    Label not_zero_case, not_one_case;
6695    __ testq(rax, rax);
6696    __ j(not_zero, &not_zero_case);
6697    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
6698
6699    __ bind(&not_zero_case);
6700    __ cmpl(rax, Immediate(1));
6701    __ j(greater, &not_one_case);
6702    CreateArrayDispatchOneArgument(masm);
6703
6704    __ bind(&not_one_case);
6705    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
6706  } else if (argument_count_ == NONE) {
6707    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm);
6708  } else if (argument_count_ == ONE) {
6709    CreateArrayDispatchOneArgument(masm);
6710  } else if (argument_count_ == MORE_THAN_ONE) {
6711    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm);
6712  } else {
6713    UNREACHABLE();
6714  }
6715}
6716
6717
6718void InternalArrayConstructorStub::GenerateCase(
6719    MacroAssembler* masm, ElementsKind kind) {
6720  Label not_zero_case, not_one_case;
6721  Label normal_sequence;
6722
6723  __ testq(rax, rax);
6724  __ j(not_zero, &not_zero_case);
6725  InternalArrayNoArgumentConstructorStub stub0(kind);
6726  __ TailCallStub(&stub0);
6727
6728  __ bind(&not_zero_case);
6729  __ cmpl(rax, Immediate(1));
6730  __ j(greater, &not_one_case);
6731
6732  if (IsFastPackedElementsKind(kind)) {
6733    // We might need to create a holey array
6734    // look at the first argument
6735    __ movq(rcx, Operand(rsp, kPointerSize));
6736    __ testq(rcx, rcx);
6737    __ j(zero, &normal_sequence);
6738
6739    InternalArraySingleArgumentConstructorStub
6740        stub1_holey(GetHoleyElementsKind(kind));
6741    __ TailCallStub(&stub1_holey);
6742  }
6743
6744  __ bind(&normal_sequence);
6745  InternalArraySingleArgumentConstructorStub stub1(kind);
6746  __ TailCallStub(&stub1);
6747
6748  __ bind(&not_one_case);
6749  InternalArrayNArgumentsConstructorStub stubN(kind);
6750  __ TailCallStub(&stubN);
6751}
6752
6753
6754void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
6755  // ----------- S t a t e -------------
6756  //  -- rax    : argc
6757  //  -- rbx    : type info cell
6758  //  -- rdi    : constructor
6759  //  -- rsp[0] : return address
6760  //  -- rsp[8] : last argument
6761  // -----------------------------------
6762
6763  if (FLAG_debug_code) {
6764    // The array construct code is only set for the global and natives
6765    // builtin Array functions which always have maps.
6766
6767    // Initial map for the builtin Array function should be a map.
6768    __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
6769    // Will both indicate a NULL and a Smi.
6770    STATIC_ASSERT(kSmiTag == 0);
6771    Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
6772    __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
6773    __ CmpObjectType(rcx, MAP_TYPE, rcx);
6774    __ Check(equal, kUnexpectedInitialMapForArrayFunction);
6775  }
6776
6777  // Figure out the right elements kind
6778  __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
6779
6780  // Load the map's "bit field 2" into |result|. We only need the first byte,
6781  // but the following masking takes care of that anyway.
6782  __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
6783  // Retrieve elements_kind from bit field 2.
6784  __ and_(rcx, Immediate(Map::kElementsKindMask));
6785  __ shr(rcx, Immediate(Map::kElementsKindShift));
6786
6787  if (FLAG_debug_code) {
6788    Label done;
6789    __ cmpl(rcx, Immediate(FAST_ELEMENTS));
6790    __ j(equal, &done);
6791    __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
6792    __ Assert(equal,
6793              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
6794    __ bind(&done);
6795  }
6796
6797  Label fast_elements_case;
6798  __ cmpl(rcx, Immediate(FAST_ELEMENTS));
6799  __ j(equal, &fast_elements_case);
6800  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
6801
6802  __ bind(&fast_elements_case);
6803  GenerateCase(masm, FAST_ELEMENTS);
6804}
6805
6806
6807#undef __
6808
6809} }  // namespace v8::internal
6810
6811#endif  // V8_TARGET_ARCH_X64
6812