// code-stubs-ia32.cc revision e0cee9b3ed82e2391fd85d118aeaa4ea361c687d
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "code-stubs.h"
#include "bootstrapper.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
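
// Note: ACCESS_MASM(masm) expands (essentially) to "masm->", so each "__"
// line below emits a single instruction through the MacroAssembler.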

void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in eax.
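  // A smi is a 31-bit integer stored shifted left by one: its low (tag)
  // bit is 0, while heap object pointers have the low bit set, so the
  // kSmiTagMask test below distinguishes the two cases.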
  NearLabel check_heap_number, call_builtin;
  __ test(eax, Immediate(kSmiTagMask));
  __ j(not_zero, &check_heap_number);
  __ ret(0);

  __ bind(&check_heap_number);
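  // FieldOperand(obj, offset) addresses 'offset' bytes into 'obj' while
  // compensating for the heap object tag bit, so fields can be read
  // directly through a tagged pointer.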
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
  __ j(not_equal, &call_builtin);
  __ ret(0);

  __ bind(&call_builtin);
  __ pop(ecx);  // Pop return address.
  __ push(eax);
  __ push(ecx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in esi.
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);
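  // On success eax holds the tagged allocation (ebx and ecx are scratch);
  // on failure control continues at the gc label below.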

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ mov(ebx, Immediate(Factory::empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(Factory::the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(Factory::undefined_value()));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(ecx);  // Temporarily remove return address.
  __ pop(edx);
  __ push(esi);
  __ push(edx);
  __ push(Immediate(Factory::false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        eax, ebx, ecx, &gc, TAG_OBJECT);
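  // A context is laid out like a FixedArray: a header followed by
  // 'length' pointer-sized slots, which is the size requested above.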

  // Get the function from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Setup the object header.
  __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // Setup the fixed slots.
  __ Set(ebx, Immediate(0));  // Set to NULL.
  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
  __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

  // Copy the global object from the surrounding context. We go through the
  // context in the function (ecx) to match the allocation behavior we have
  // in the runtime system (see Heap::AllocateFunctionContext).
  __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
  __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);

  // Initialize the rest of the slots to undefined.
  __ mov(ebx, Factory::undefined_value());
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
  }

  // Return and remove the on-stack parameter.
  __ mov(esi, Operand(eax));
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [esp + kPointerSize]: constant elements.
  // [esp + (2 * kPointerSize)]: literal index.
  // [esp + (3 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into ecx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ mov(ecx, Operand(esp, 3 * kPointerSize));
  __ mov(eax, Operand(esp, 2 * kPointerSize));
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
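  // The literal index in eax is a smi (the index shifted left by one), so
  // scaling it by half a pointer size (a factor of two) yields exactly
  // index * kPointerSize for the element access below.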
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  __ cmp(ecx, Factory::undefined_value());
  __ j(equal, &slow_case);

  if (FLAG_debug_code) {
    const char* message;
    Handle<Map> expected_map;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map = Factory::fixed_array_map();
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map = Factory::fixed_cow_array_map();
    }
    __ push(ecx);
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
    __ Assert(equal, message);
    __ pop(ecx);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(eax, i), ebx);
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and setup the
    // elements pointer in the resulting object.
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ lea(edx, Operand(eax, JSArray::kSize));
    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);

    // Copy the elements array.
    for (int i = 0; i < elements_size; i += kPointerSize) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(edx, i), ebx);
    }
  }

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) {
  NearLabel false_result, true_result, not_string;
  __ mov(eax, Operand(esp, 1 * kPointerSize));

  // 'null' => false.
  __ cmp(eax, Factory::null_value());
  __ j(equal, &false_result);

  // Get the map and type of the heap object.
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));

  // Undetectable => false.
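  // (Undetectable objects, e.g. document.all in browser embeddings,
  // masquerade as undefined and must be treated as falsy.)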
  __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, &false_result);

  // JavaScript object => true.
  __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
  __ j(above_equal, &true_result);

  // String value => false iff empty.
  __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
  __ j(above_equal, &not_string);
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
  __ j(zero, &false_result);
  __ jmp(&true_result);

  __ bind(&not_string);
  // HeapNumber => false iff +0, -0, or NaN.
  __ cmp(edx, Factory::heap_number_map());
  __ j(not_equal, &true_result);
  __ fldz();
  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ FCmp();
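  // FCmp compares the two values on the FPU stack and pops them. An
  // unordered result (NaN) also sets the zero flag, so NaN falls into
  // the false case together with +0 and -0.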
  __ j(zero, &false_result);
  // Fall through to |true_result|.

  // Return 1/0 for true/false in eax.
  __ bind(&true_result);
  __ mov(eax, 1);
  __ ret(1 * kPointerSize);
  __ bind(&false_result);
  __ mov(eax, 0);
  __ ret(1 * kPointerSize);
}


const char* GenericBinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
               op_name,
               overwrite_name,
               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
               args_in_registers_ ? "RegArgs" : "StackArgs",
               args_reversed_ ? "_R" : "",
               static_operands_type_.ToString(),
               BinaryOpIC::GetName(runtime_operands_type_));
  return name_;
}


void GenericBinaryOpStub::GenerateCall(
    MacroAssembler* masm,
    Register left,
    Register right) {
  if (!ArgsInRegistersSupported()) {
    // Pass arguments on the stack.
    __ push(left);
    __ push(right);
  } else {
    // The calling convention with registers is left in edx and right in eax.
    Register left_arg = edx;
    Register right_arg = eax;
    if (!(left.is(left_arg) && right.is(right_arg))) {
      if (left.is(right_arg) && right.is(left_arg)) {
        if (IsOperationCommutative()) {
          SetArgsReversed();
        } else {
          __ xchg(left, right);
        }
      } else if (left.is(left_arg)) {
        __ mov(right_arg, right);
      } else if (right.is(right_arg)) {
        __ mov(left_arg, left);
      } else if (left.is(right_arg)) {
        if (IsOperationCommutative()) {
          __ mov(left_arg, right);
          SetArgsReversed();
        } else {
          // Order of moves important to avoid destroying left argument.
          __ mov(left_arg, left);
          __ mov(right_arg, right);
        }
      } else if (right.is(left_arg)) {
        if (IsOperationCommutative()) {
          __ mov(right_arg, left);
          SetArgsReversed();
        } else {
          // Order of moves important to avoid destroying right argument.
          __ mov(right_arg, right);
          __ mov(left_arg, left);
        }
      } else {
        // Order of moves is not important.
        __ mov(left_arg, left);
        __ mov(right_arg, right);
      }
    }

    // Update flags to indicate that arguments are in registers.
    SetArgsInRegisters();
    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
  }

  // Call the stub.
  __ CallStub(this);
}


void GenericBinaryOpStub::GenerateCall(
    MacroAssembler* masm,
    Register left,
    Smi* right) {
  if (!ArgsInRegistersSupported()) {
    // Pass arguments on the stack.
    __ push(left);
    __ push(Immediate(right));
  } else {
    // The calling convention with registers is left in edx and right in eax.
    Register left_arg = edx;
    Register right_arg = eax;
    if (left.is(left_arg)) {
      __ mov(right_arg, Immediate(right));
    } else if (left.is(right_arg) && IsOperationCommutative()) {
      __ mov(left_arg, Immediate(right));
      SetArgsReversed();
    } else {
      // For non-commutative operations, left and right_arg might be
      // the same register.  Therefore, the order of the moves is
      // important here in order to not overwrite left before moving
      // it to left_arg.
      __ mov(left_arg, left);
      __ mov(right_arg, Immediate(right));
    }

    // Update flags to indicate that arguments are in registers.
    SetArgsInRegisters();
    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
  }

  // Call the stub.
  __ CallStub(this);
}


void GenericBinaryOpStub::GenerateCall(
    MacroAssembler* masm,
    Smi* left,
    Register right) {
  if (!ArgsInRegistersSupported()) {
    // Pass arguments on the stack.
    __ push(Immediate(left));
    __ push(right);
  } else {
    // The calling convention with registers is left in edx and right in eax.
    Register left_arg = edx;
    Register right_arg = eax;
    if (right.is(right_arg)) {
      __ mov(left_arg, Immediate(left));
    } else if (right.is(left_arg) && IsOperationCommutative()) {
      __ mov(right_arg, Immediate(left));
      SetArgsReversed();
    } else {
      // For non-commutative operations, right and left_arg might be
      // the same register.  Therefore, the order of the moves is
      // important here in order to not overwrite right before moving
      // it to right_arg.
      __ mov(right_arg, right);
      __ mov(left_arg, Immediate(left));
    }
    // Update flags to indicate that arguments are in registers.
    SetArgsInRegisters();
    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
  }

  // Call the stub.
  __ CallStub(this);
}


class FloatingPointHelper : public AllStatic {
 public:
  enum ArgLocation {
    ARGS_ON_STACK,
    ARGS_IN_REGISTERS
  };

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in register number. Returns operand as floating point number
  // on FPU stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
  // Returns operands as floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register scratch,
                                ArgLocation arg_location = ARGS_ON_STACK);
  // Similar to LoadFloatOperands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);

  // Checks that the two floating point numbers on top of the FPU stack
  // have int32 values.
  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
                                         Label* non_int32);

  // Takes the operands in edx and eax and loads them as integers in eax
  // and ecx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             TypeInfo type_info,
                             bool use_sse3,
                             Label* operand_conversion_failure);
  static void LoadNumbersAsIntegers(MacroAssembler* masm,
                                    TypeInfo type_info,
                                    bool use_sse3,
                                    Label* operand_conversion_failure);
  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                     bool use_sse3,
                                     Label* operand_conversion_failure);

  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
  // operands are pushed on the stack, and that their conversions to int32
  // are in eax and ecx.  Checks that the original numbers were in the int32
  // range.
  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                           bool use_sse3,
                                           Label* not_int32);

  // Assumes that operands are smis or heap numbers and loads them
  // into xmm0 and xmm1. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm);

  // Test if operands are numbers (smi or HeapNumber objects), and load
  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
  // either operand is not a number.  Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);

  // Similar to LoadSSE2Operands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);

  // Checks that the two floating point numbers loaded into xmm0 and xmm1
  // have int32 values.
  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                        Label* non_int32,
                                        Register scratch);
};


void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division.  Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op_ == Token::DIV || op_ == Token::MOD) {
    left = eax;
    right = ebx;
    if (HasArgsInRegisters()) {
      __ mov(ebx, eax);
      __ mov(eax, edx);
    }
  }
  if (!HasArgsInRegisters()) {
    __ mov(right, Operand(esp, 1 * kPointerSize));
    __ mov(left, Operand(esp, 2 * kPointerSize));
  }

  if (static_operands_type_.IsSmi()) {
    if (FLAG_debug_code) {
      __ AbortIfNotSmi(left);
      __ AbortIfNotSmi(right);
    }
    if (op_ == Token::BIT_OR) {
      __ or_(right, Operand(left));
      GenerateReturn(masm);
      return;
    } else if (op_ == Token::BIT_AND) {
      __ and_(right, Operand(left));
      GenerateReturn(masm);
      return;
    } else if (op_ == Token::BIT_XOR) {
      __ xor_(right, Operand(left));
      GenerateReturn(masm);
      return;
    }
  }

  // 2. Prepare the smi check of both operands by oring them together.
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op_) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result.  Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, Operand(left));
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));
      combined = right;
      break;

    default:
      break;
  }

  // 3. Perform the smi check of the operands.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
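  // Because the smi tag is the low bit, (left | right) has a zero low
  // bit iff both operands are smis, so one test covers both operands.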
  __ test(combined, Immediate(kSmiTagMask));
  __ j(not_zero, &not_smis, not_taken);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op_) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, Operand(left));  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
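      // (left - 0xc0000000 equals left + 2^30 modulo 2^32; its sign bit
      // is set exactly when left is outside the untagged smi range
      // [-2^30, 2^30 - 1], which the sign-flag jump below detects.)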
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis, not_taken);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      // - 0x80000000: high bit would be lost when smi tagging.
      // - 0x40000000: this number would convert to negative when smi
      //   tagging.
      // These two cases can only happen with shifts by 0 or 1 when
      // handed a valid smi.
      __ test(left, Immediate(0xc0000000));
      __ j(not_zero, slow, not_taken);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::ADD:
      ASSERT(right.is(eax));
      __ add(right, Operand(left));  // Addition is commutative.
      __ j(overflow, &use_fp_on_smis, not_taken);
      break;

    case Token::SUB:
      __ sub(left, Operand(right));
      __ j(overflow, &use_fp_on_smis, not_taken);
      __ mov(eax, left);
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
      // We can't revert the multiplication if the result is not a smi
      // so save the right operand.
      __ mov(ebx, right);
      // Remove tag from one of the operands (but keep sign).
      __ SmiUntag(right);
      // Do multiplication.
      __ imul(right, Operand(left));  // Multiplication is commutative.
      __ j(overflow, &use_fp_on_smis, not_taken);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
      break;

    case Token::DIV:
      // We can't revert the division if the result is not a smi so
      // save the left operand.
      __ mov(edi, left);
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &use_fp_on_smis, not_taken);
      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for the corner case of dividing the most negative smi by
      // -1. We cannot use the overflow flag, since it is not set by idiv
      // instruction.
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
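      // (Smi(-2^30) is tagged as -2^31 and Smi(-1) as -2; their quotient,
      // 2^30 == 0x40000000, is one past the largest untagged smi value.)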
      __ cmp(eax, 0x40000000);
      __ j(equal, &use_fp_on_smis);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
      // Check that the remainder is zero.
      __ test(edx, Operand(edx));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(eax);
      break;

    case Token::MOD:
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &not_smis, not_taken);

      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(edx, combined, slow);
      // Move remainder to register eax.
      __ mov(eax, edx);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in eax.
  GenerateReturn(masm);

  // 6. For some operations emit inline code to perform floating point
  // operations on known smis (e.g., if the result of the operation
  // overflowed the smi range).
  switch (op_) {
    case Token::SHL: {
      Comment perform_float(masm, "-- Perform float operation on smis");
      __ bind(&use_fp_on_smis);
      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
        // Result we want is in left == edx, so we can put the allocated heap
        // number in eax.
        __ AllocateHeapNumber(eax, ecx, ebx, slow);
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          __ cvtsi2sd(xmm0, Operand(left));
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          // It's OK to overwrite the right argument on the stack because we
          // are about to return.
          __ mov(Operand(esp, 1 * kPointerSize), left);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        GenerateReturn(masm);
      } else {
        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
        __ jmp(slow);
      }
      break;
    }

    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      Comment perform_float(masm, "-- Perform float operation on smis");
      __ bind(&use_fp_on_smis);
      // Restore arguments to edx, eax.
      switch (op_) {
        case Token::ADD:
          // Revert right = right + left.
          __ sub(right, Operand(left));
          break;
        case Token::SUB:
          // Revert left = left - right.
          __ add(left, Operand(right));
          break;
        case Token::MUL:
          // Right was clobbered but a copy is in ebx.
          __ mov(right, ebx);
          break;
        case Token::DIV:
          // Left was clobbered but a copy is in edi.  Right is in ebx for
          // division.
          __ mov(edx, edi);
          __ mov(eax, right);
          break;
        default: UNREACHABLE();
          break;
      }
      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
        } else {  // SSE2 not available, use FPU.
          FloatingPointHelper::LoadFloatSmis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
        }
        __ mov(eax, ecx);
        GenerateReturn(masm);
      } else {
        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
        __ jmp(slow);
      }
      break;
    }

    default:
      break;
  }

  // 7. Non-smi operands, fall out to the non-smi code with the operands in
  // edx and eax.
  Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
  switch (op_) {
    case Token::BIT_OR:
    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Right operand is saved in ecx and eax was destroyed by the smi
      // check.
      __ mov(eax, ecx);
      break;

    case Token::DIV:
    case Token::MOD:
      // Operands are in eax, ebx at this point.
      __ mov(edx, eax);
      __ mov(eax, ebx);
      break;

    default:
      break;
  }
}


void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
  Label call_runtime;

  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);

  if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
    Label slow;
    if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
    __ bind(&slow);
    GenerateTypeTransition(masm);
  }

  // Generate fast case smi code if requested. This flag is set when the fast
  // case smi code is not generated by the caller. Generating it here will speed
  // up common operations.
  if (ShouldGenerateSmiCode()) {
    GenerateSmiCode(masm, &call_runtime);
  } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
    if (!HasArgsInRegisters()) {
      GenerateLoadArguments(masm);
    }
  }

  // Floating point case.
  if (ShouldGenerateFPCode()) {
    switch (op_) {
      case Token::ADD:
      case Token::SUB:
      case Token::MUL:
      case Token::DIV: {
        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
            HasSmiCodeInStub()) {
          // Execution reaches this point when the first non-smi argument occurs
          // (and only if smi code is generated). This is the right moment to
          // patch to HEAP_NUMBERS state. The transition is attempted only for
          // the four basic operations. The stub stays in the DEFAULT state
          // forever for all other operations (also if smi code is skipped).
          GenerateTypeTransition(masm);
          break;
        }

        Label not_floats;
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          if (static_operands_type_.IsNumber()) {
            if (FLAG_debug_code) {
              // Assert at runtime that inputs are only numbers.
              __ AbortIfNotNumber(edx);
              __ AbortIfNotNumber(eax);
            }
            if (static_operands_type_.IsSmi()) {
              if (FLAG_debug_code) {
                __ AbortIfNotSmi(edx);
                __ AbortIfNotSmi(eax);
              }
              FloatingPointHelper::LoadSSE2Smis(masm, ecx);
            } else {
              FloatingPointHelper::LoadSSE2Operands(masm);
            }
          } else {
            FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
          }

          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          GenerateHeapResultAllocation(masm, &call_runtime);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          GenerateReturn(masm);
        } else {  // SSE2 not available, use FPU.
          if (static_operands_type_.IsNumber()) {
            if (FLAG_debug_code) {
              // Assert at runtime that inputs are only numbers.
              __ AbortIfNotNumber(edx);
              __ AbortIfNotNumber(eax);
            }
          } else {
            FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
          }
          FloatingPointHelper::LoadFloatOperands(
              masm,
              ecx,
              FloatingPointHelper::ARGS_IN_REGISTERS);
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          Label after_alloc_failure;
          GenerateHeapResultAllocation(masm, &after_alloc_failure);
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          GenerateReturn(masm);
          __ bind(&after_alloc_failure);
          __ ffree();
          __ jmp(&call_runtime);
        }
        __ bind(&not_floats);
        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
            !HasSmiCodeInStub()) {
          // Execution reaches this point when the first non-number argument
          // occurs (and only if smi code is skipped from the stub, otherwise
          // the patching has already been done earlier in this case branch).
          // Try patching to STRINGS for ADD operation.
          if (op_ == Token::ADD) {
            GenerateTypeTransition(masm);
          }
        }
        break;
      }
      case Token::MOD: {
        // For MOD we go directly to runtime in the non-smi case.
        break;
      }
      case Token::BIT_OR:
      case Token::BIT_AND:
      case Token::BIT_XOR:
      case Token::SAR:
      case Token::SHL:
      case Token::SHR: {
        Label non_smi_result;
        FloatingPointHelper::LoadAsIntegers(masm,
                                            static_operands_type_,
                                            use_sse3_,
                                            &call_runtime);
        switch (op_) {
          case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
          case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
          case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
          case Token::SAR: __ sar_cl(eax); break;
          case Token::SHL: __ shl_cl(eax); break;
          case Token::SHR: __ shr_cl(eax); break;
          default: UNREACHABLE();
        }
        if (op_ == Token::SHR) {
          // Check if result is non-negative and fits in a smi.
          __ test(eax, Immediate(0xc0000000));
          __ j(not_zero, &call_runtime);
        } else {
          // Check if result fits in a smi.
          __ cmp(eax, 0xc0000000);
          __ j(negative, &non_smi_result);
        }
        // Tag smi result and return.
        __ SmiTag(eax);
        GenerateReturn(masm);

        // All ops except SHR return a signed int32 that we load in
        // a HeapNumber.
        if (op_ != Token::SHR) {
          __ bind(&non_smi_result);
          // Allocate a heap number if needed.
          __ mov(ebx, Operand(eax));  // ebx: result
          NearLabel skip_allocation;
          switch (mode_) {
            case OVERWRITE_LEFT:
            case OVERWRITE_RIGHT:
              // If the operand was an object, we skip the
              // allocation of a heap number.
              __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                  1 * kPointerSize : 2 * kPointerSize));
              __ test(eax, Immediate(kSmiTagMask));
              __ j(not_zero, &skip_allocation, not_taken);
              // Fall through!
            case NO_OVERWRITE:
              __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
              __ bind(&skip_allocation);
              break;
            default: UNREACHABLE();
          }
          // Store the result in the HeapNumber and return.
          if (CpuFeatures::IsSupported(SSE2)) {
            CpuFeatures::Scope use_sse2(SSE2);
            __ cvtsi2sd(xmm0, Operand(ebx));
            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          } else {
            __ mov(Operand(esp, 1 * kPointerSize), ebx);
            __ fild_s(Operand(esp, 1 * kPointerSize));
            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          }
          GenerateReturn(masm);
        }
        break;
      }
      default: UNREACHABLE(); break;
    }
  }

  // If all else fails, use the runtime system to get the correct
  // result. If the arguments were passed in registers, place them on
  // the stack in the correct order below the return address.

  // Avoid hitting the string ADD code below when allocation fails in
  // the floating point code above.
  if (op_ != Token::ADD) {
    __ bind(&call_runtime);
  }

  if (HasArgsInRegisters()) {
    GenerateRegisterArgsPush(masm);
  }

  switch (op_) {
    case Token::ADD: {
      // Test for string arguments before calling runtime.

      // If this stub has already generated FP-specific code then the
      // arguments are already in edx and eax.
      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
        GenerateLoadArguments(masm);
      }

      // Registers containing left and right operands respectively.
      Register lhs, rhs;
      if (HasArgsReversed()) {
        lhs = eax;
        rhs = edx;
      } else {
        lhs = edx;
        rhs = eax;
      }

      // Test if left operand is a string.
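      // (CmpObjectType loads the map of lhs into ecx and compares its
      // instance type with FIRST_NONSTRING_TYPE; string types sort below
      // all non-string types, hence the above_equal branches.)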
      NearLabel lhs_not_string;
      __ test(lhs, Immediate(kSmiTagMask));
      __ j(zero, &lhs_not_string);
      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
      __ j(above_equal, &lhs_not_string);

      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
      __ TailCallStub(&string_add_left_stub);

      NearLabel call_runtime_with_args;
      // Left operand is not a string, test right.
      __ bind(&lhs_not_string);
      __ test(rhs, Immediate(kSmiTagMask));
      __ j(zero, &call_runtime_with_args);
      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
      __ j(above_equal, &call_runtime_with_args);

      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
      __ TailCallStub(&string_add_right_stub);

      // Neither argument is a string.
      __ bind(&call_runtime);
      if (HasArgsInRegisters()) {
        GenerateRegisterArgsPush(masm);
      }
      __ bind(&call_runtime_with_args);
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    }
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
                                                       Label* alloc_failure) {
  Label skip_allocation;
  OverwriteMode mode = mode_;
  if (HasArgsReversed()) {
    if (mode == OVERWRITE_RIGHT) {
      mode = OVERWRITE_LEFT;
    } else if (mode == OVERWRITE_LEFT) {
      mode = OVERWRITE_RIGHT;
    }
  }
  switch (mode) {
    case OVERWRITE_LEFT: {
      // If the argument in edx is already an object, we skip the
      // allocation of a heap number.
      __ test(edx, Immediate(kSmiTagMask));
      __ j(not_zero, &skip_allocation, not_taken);
      // Allocate a heap number for the result. Keep eax and edx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now edx can be overwritten, losing one of the arguments, since we
      // are done with it and will not need it any more.
      __ mov(edx, Operand(ebx));
      __ bind(&skip_allocation);
      // Use the object in edx as the result holder.
      __ mov(eax, Operand(edx));
      break;
    }
    case OVERWRITE_RIGHT:
      // If the argument in eax is already an object, we skip the
      // allocation of a heap number.
      __ test(eax, Immediate(kSmiTagMask));
      __ j(not_zero, &skip_allocation, not_taken);
      // Fall through!
    case NO_OVERWRITE:
      // Allocate a heap number for the result. Keep eax and edx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now eax can be overwritten, losing one of the arguments, since we
      // are done with it and will not need it any more.
      __ mov(eax, ebx);
      __ bind(&skip_allocation);
      break;
    default: UNREACHABLE();
  }
}


void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
  // If arguments are not passed in registers read them from the stack.
  ASSERT(!HasArgsInRegisters());
  __ mov(eax, Operand(esp, 1 * kPointerSize));
  __ mov(edx, Operand(esp, 2 * kPointerSize));
}


void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
  // If arguments are not passed in registers remove them from the stack before
  // returning.
  if (!HasArgsInRegisters()) {
    __ ret(2 * kPointerSize);  // Remove both operands.
  } else {
    __ ret(0);
  }
}


void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  ASSERT(HasArgsInRegisters());
  __ pop(ecx);
  if (HasArgsReversed()) {
    __ push(eax);
    __ push(edx);
  } else {
    __ push(edx);
    __ push(eax);
  }
  __ push(ecx);
}


void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  // Ensure the operands are on the stack.
  if (HasArgsInRegisters()) {
    GenerateRegisterArgsPush(masm);
  }

  __ pop(ecx);  // Save return address.

  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
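  // (The runtime function receives five stack arguments: the two operands
  // plus the three smis pushed above; the trailing 1 is the result size.)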
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
      5,
      1);
}


Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
  GenericBinaryOpStub stub(key, type_info);
  return stub.GetCode();
}


Handle<Code> GetTypeRecordingBinaryOpStub(int key,
    TRBinaryOpIC::TypeInfo type_info,
    TRBinaryOpIC::TypeInfo result_type_info) {
  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
  return stub.GetCode();
}


void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
      5,
      1);
}


// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address.
void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
    MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  // Left and right arguments are already on top of the stack.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
      5,
      1);
}


void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
  switch (operands_type_) {
    case TRBinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case TRBinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case TRBinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case TRBinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case TRBinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case TRBinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


const char* TypeRecordingBinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "TypeRecordingBinaryOpStub_%s_%s_%s",
               op_name,
               overwrite_name,
               TRBinaryOpIC::GetName(operands_type_));
  return name_;
}


void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division.  Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op_ == Token::DIV || op_ == Token::MOD) {
    left = eax;
    right = ebx;
    __ mov(ebx, eax);
    __ mov(eax, edx);
  }

  // 2. Prepare the smi check of both operands by oring them together.
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op_) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result.  Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, Operand(left));
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));
      combined = right;
      break;

    default:
      break;
  }

  // 3. Perform the smi check of the operands.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
  __ test(combined, Immediate(kSmiTagMask));
  __ j(not_zero, &not_smis, not_taken);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op_) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, Operand(left));  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis, not_taken);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      // - 0x80000000: high bit would be lost when smi tagging.
      // - 0x40000000: this number would convert to negative when smi
      //   tagging.
      // These two cases can only happen with shifts by 0 or 1 when
      // handed a valid smi.
1498      __ test(left, Immediate(0xc0000000));
1499      __ j(not_zero, slow, not_taken);
1500      // Tag the result and store it in register eax.
1501      __ SmiTag(left);
1502      __ mov(eax, left);
1503      break;
1504
1505    case Token::ADD:
1506      ASSERT(right.is(eax));
1507      __ add(right, Operand(left));  // Addition is commutative.
1508      __ j(overflow, &use_fp_on_smis, not_taken);
1509      break;
1510
1511    case Token::SUB:
1512      __ sub(left, Operand(right));
1513      __ j(overflow, &use_fp_on_smis, not_taken);
1514      __ mov(eax, left);
1515      break;
1516
1517    case Token::MUL:
1518      // If the smi tag is 0 we can just leave the tag on one operand.
1519      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
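      // A tagged smi is value * 2, so (2 * left) * (right / 2) equals
      // 2 * (left * right), i.e., the correctly tagged product.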
1520      // We can't revert the multiplication if the result is not a smi,
1521      // so save the right operand.
1522      __ mov(ebx, right);
1523      // Remove tag from one of the operands (but keep sign).
1524      __ SmiUntag(right);
1525      // Do multiplication.
1526      __ imul(right, Operand(left));  // Multiplication is commutative.
1527      __ j(overflow, &use_fp_on_smis, not_taken);
1528      // Check for negative zero result.  Use combined = left | right.
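      // If the product is zero and combined (= left | right) is negative,
      // the exact result is -0, which cannot be represented as a smi.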
1529      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
1530      break;
1531
1532    case Token::DIV:
1533      // We can't revert the division if the result is not a smi, so
1534      // save the left operand.
1535      __ mov(edi, left);
1536      // Check for 0 divisor.
1537      __ test(right, Operand(right));
1538      __ j(zero, &use_fp_on_smis, not_taken);
1539      // Sign extend left into edx:eax.
1540      ASSERT(left.is(eax));
1541      __ cdq();
1542      // Divide edx:eax by right.
1543      __ idiv(right);
1544      // Check for the corner case of dividing the most negative smi by
1545      // -1. We cannot use the overflow flag, since it is not set by the
1546      // idiv instruction.
1547      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
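      // 0x40000000 (2^30) is the untagged quotient of Smi::kMinValue / -1,
      // one past the largest positive smi.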
1548      __ cmp(eax, 0x40000000);
1549      __ j(equal, &use_fp_on_smis);
1550      // Check for negative zero result.  Use combined = left | right.
1551      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
1552      // Check that the remainder is zero.
1553      __ test(edx, Operand(edx));
1554      __ j(not_zero, &use_fp_on_smis);
1555      // Tag the result and store it in register eax.
1556      __ SmiTag(eax);
1557      break;
1558
1559    case Token::MOD:
1560      // Check for 0 divisor.
1561      __ test(right, Operand(right));
1562      __ j(zero, &not_smis, not_taken);
1563
1564      // Sign extend left into edx:eax.
1565      ASSERT(left.is(eax));
1566      __ cdq();
1567      // Divide edx:eax by right.
1568      __ idiv(right);
1569      // Check for negative zero result.  Use combined = left | right.
1570      __ NegativeZeroTest(edx, combined, slow);
1571      // Move remainder to register eax.
1572      __ mov(eax, edx);
1573      break;
1574
1575    default:
1576      UNREACHABLE();
1577  }
1578
1579  // 5. Emit return of result in eax.  Some operations must drop pushed arguments.
1580  switch (op_) {
1581    case Token::ADD:
1582    case Token::SUB:
1583    case Token::MUL:
1584    case Token::DIV:
1585      __ ret(0);
1586      break;
1587    case Token::MOD:
1588    case Token::BIT_OR:
1589    case Token::BIT_AND:
1590    case Token::BIT_XOR:
1591    case Token::SAR:
1592    case Token::SHL:
1593    case Token::SHR:
1594      __ ret(2 * kPointerSize);
1595      break;
1596    default:
1597      UNREACHABLE();
1598  }
1599
1600  // 6. For some operations emit inline code to perform floating point
1601  // operations on known smis (e.g., if the result of the operation
1602  // overflowed the smi range).
1603  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
1604    __ bind(&use_fp_on_smis);
1605    switch (op_) {
1606      // Undo the effects of some operations, and some register moves.
1607      case Token::SHL:
1608        // The arguments are saved on the stack, and only used from there.
1609        break;
1610      case Token::ADD:
1611        // Revert right = right + left.
1612        __ sub(right, Operand(left));
1613        break;
1614      case Token::SUB:
1615        // Revert left = left - right.
1616        __ add(left, Operand(right));
1617        break;
1618      case Token::MUL:
1619        // Right was clobbered but a copy is in ebx.
1620        __ mov(right, ebx);
1621        break;
1622      case Token::DIV:
1623        // Left was clobbered but a copy is in edi.  Right is in ebx for
1624        // division.  They should be in eax, ebx for the jump to not_smis.
1625        __ mov(eax, edi);
1626        break;
1627      default:
1628        // No other operators jump to use_fp_on_smis.
1629        break;
1630    }
1631    __ jmp(&not_smis);
1632  } else {
1633    ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
1634    switch (op_) {
1635      case Token::SHL: {
1636        Comment perform_float(masm, "-- Perform float operation on smis");
1637        __ bind(&use_fp_on_smis);
1638        // The result we want is in left (edx), so we can put the allocated
1639        // heap number in eax.
1640        __ AllocateHeapNumber(eax, ecx, ebx, slow);
1641        // Store the result in the HeapNumber and return.
1642        if (CpuFeatures::IsSupported(SSE2)) {
1643          CpuFeatures::Scope use_sse2(SSE2);
1644          __ cvtsi2sd(xmm0, Operand(left));
1645          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1646        } else {
1647          // It's OK to overwrite the right argument on the stack because we
1648          // are about to return.
1649          __ mov(Operand(esp, 1 * kPointerSize), left);
1650          __ fild_s(Operand(esp, 1 * kPointerSize));
1651          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1652        }
1653        __ ret(2 * kPointerSize);
1654        break;
1655      }
1656
1657      case Token::ADD:
1658      case Token::SUB:
1659      case Token::MUL:
1660      case Token::DIV: {
1661        Comment perform_float(masm, "-- Perform float operation on smis");
1662        __ bind(&use_fp_on_smis);
1663        // Restore arguments to edx, eax.
1664        switch (op_) {
1665          case Token::ADD:
1666            // Revert right = right + left.
1667            __ sub(right, Operand(left));
1668            break;
1669          case Token::SUB:
1670            // Revert left = left - right.
1671            __ add(left, Operand(right));
1672            break;
1673          case Token::MUL:
1674            // Right was clobbered but a copy is in ebx.
1675            __ mov(right, ebx);
1676            break;
1677          case Token::DIV:
1678            // Left was clobbered but a copy is in edi.  Right is in ebx for
1679            // division.
1680            __ mov(edx, edi);
1681            __ mov(eax, right);
1682            break;
1683          default: UNREACHABLE();
1684            break;
1685        }
1686        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1687        if (CpuFeatures::IsSupported(SSE2)) {
1688          CpuFeatures::Scope use_sse2(SSE2);
1689          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1690          switch (op_) {
1691            case Token::ADD: __ addsd(xmm0, xmm1); break;
1692            case Token::SUB: __ subsd(xmm0, xmm1); break;
1693            case Token::MUL: __ mulsd(xmm0, xmm1); break;
1694            case Token::DIV: __ divsd(xmm0, xmm1); break;
1695            default: UNREACHABLE();
1696          }
1697          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1698        } else {  // SSE2 not available, use FPU.
1699          FloatingPointHelper::LoadFloatSmis(masm, ebx);
1700          switch (op_) {
1701            case Token::ADD: __ faddp(1); break;
1702            case Token::SUB: __ fsubp(1); break;
1703            case Token::MUL: __ fmulp(1); break;
1704            case Token::DIV: __ fdivp(1); break;
1705            default: UNREACHABLE();
1706          }
1707          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1708        }
1709        __ mov(eax, ecx);
1710        __ ret(0);
1711        break;
1712      }
1713
1714      default:
1715        break;
1716    }
1717  }
1718
1719  // 7. Non-smi operands: fall out to the non-smi code with the operands in
1720  // edx and eax.
1721  Comment done_comment(masm, "-- Enter non-smi code");
1722  __ bind(&not_smis);
1723  switch (op_) {
1724    case Token::BIT_OR:
1725    case Token::SHL:
1726    case Token::SAR:
1727    case Token::SHR:
1728      // Right operand is saved in ecx and eax was destroyed by the smi
1729      // check.
1730      __ mov(eax, ecx);
1731      break;
1732
1733    case Token::DIV:
1734    case Token::MOD:
1735      // Operands are in eax, ebx at this point.
1736      __ mov(edx, eax);
1737      __ mov(eax, ebx);
1738      break;
1739
1740    default:
1741      break;
1742  }
1743}
1744
1745
1746void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1747  Label call_runtime;
1748
1749  switch (op_) {
1750    case Token::ADD:
1751    case Token::SUB:
1752    case Token::MUL:
1753    case Token::DIV:
1754      break;
1755    case Token::MOD:
1756    case Token::BIT_OR:
1757    case Token::BIT_AND:
1758    case Token::BIT_XOR:
1759    case Token::SAR:
1760    case Token::SHL:
1761    case Token::SHR:
1762      GenerateRegisterArgsPush(masm);
1763      break;
1764    default:
1765      UNREACHABLE();
1766  }
1767
1768  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
1769      result_type_ == TRBinaryOpIC::SMI) {
1770    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
1771  } else {
1772    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1773  }
1774  __ bind(&call_runtime);
1775  switch (op_) {
1776    case Token::ADD:
1777    case Token::SUB:
1778    case Token::MUL:
1779    case Token::DIV:
1780      GenerateTypeTransition(masm);
1781      break;
1782    case Token::MOD:
1783    case Token::BIT_OR:
1784    case Token::BIT_AND:
1785    case Token::BIT_XOR:
1786    case Token::SAR:
1787    case Token::SHL:
1788    case Token::SHR:
1789      GenerateTypeTransitionWithSavedArgs(masm);
1790      break;
1791    default:
1792      UNREACHABLE();
1793  }
1794}
1795
1796
1797void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1798  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
1799  ASSERT(op_ == Token::ADD);
1800  // Try to add the arguments as strings, otherwise transition to the generic
1801  // TRBinaryOpIC type.
1802  GenerateAddStrings(masm);
1803  GenerateTypeTransition(masm);
1804}
1805
1806
1807void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1808  Label call_runtime;
1809  ASSERT(operands_type_ == TRBinaryOpIC::INT32);
1810
1811  // Floating point case.
1812  switch (op_) {
1813    case Token::ADD:
1814    case Token::SUB:
1815    case Token::MUL:
1816    case Token::DIV: {
1817      Label not_floats;
1818      Label not_int32;
1819      if (CpuFeatures::IsSupported(SSE2)) {
1820        CpuFeatures::Scope use_sse2(SSE2);
1821        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1822        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1823        switch (op_) {
1824          case Token::ADD: __ addsd(xmm0, xmm1); break;
1825          case Token::SUB: __ subsd(xmm0, xmm1); break;
1826          case Token::MUL: __ mulsd(xmm0, xmm1); break;
1827          case Token::DIV: __ divsd(xmm0, xmm1); break;
1828          default: UNREACHABLE();
1829        }
1830        // Check that the result fits in an int32 if the result type is Int32 or smaller.
1831        if (result_type_ <= TRBinaryOpIC::INT32) {
1832          __ cvttsd2si(ecx, Operand(xmm0));
1833          __ cvtsi2sd(xmm2, Operand(ecx));
1834          __ ucomisd(xmm0, xmm2);
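          // ucomisd clears ZF for ordered, unequal values and sets CF for
          // unordered (NaN) results; either jump rejects the int32 case.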
1835          __ j(not_zero, &not_int32);
1836          __ j(carry, &not_int32);
1837        }
1838        GenerateHeapResultAllocation(masm, &call_runtime);
1839        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1840        __ ret(0);
1841      } else {  // SSE2 not available, use FPU.
1842        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1843        FloatingPointHelper::LoadFloatOperands(
1844            masm,
1845            ecx,
1846            FloatingPointHelper::ARGS_IN_REGISTERS);
1847        FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
1848        switch (op_) {
1849          case Token::ADD: __ faddp(1); break;
1850          case Token::SUB: __ fsubp(1); break;
1851          case Token::MUL: __ fmulp(1); break;
1852          case Token::DIV: __ fdivp(1); break;
1853          default: UNREACHABLE();
1854        }
1855        Label after_alloc_failure;
1856        GenerateHeapResultAllocation(masm, &after_alloc_failure);
1857        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1858        __ ret(0);
1859        __ bind(&after_alloc_failure);
1860        __ ffree();
1861        __ jmp(&call_runtime);
1862      }
1863
1864      __ bind(&not_floats);
1865      __ bind(&not_int32);
1866      GenerateTypeTransition(masm);
1867      break;
1868    }
1869
1870    case Token::MOD: {
1871      // For MOD we go directly to runtime in the non-smi case.
1872      break;
1873    }
1874    case Token::BIT_OR:
1875    case Token::BIT_AND:
1876    case Token::BIT_XOR:
1877    case Token::SAR:
1878    case Token::SHL:
1879    case Token::SHR: {
1880      GenerateRegisterArgsPush(masm);
1881      Label not_floats;
1882      Label not_int32;
1883      Label non_smi_result;
1889      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1890                                                  use_sse3_,
1891                                                  &not_floats);
1892      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
1893                                                        &not_int32);
1894      switch (op_) {
1895        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
1896        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
1897        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
1898        case Token::SAR: __ sar_cl(eax); break;
1899        case Token::SHL: __ shl_cl(eax); break;
1900        case Token::SHR: __ shr_cl(eax); break;
1901        default: UNREACHABLE();
1902      }
1903      if (op_ == Token::SHR) {
1904        // Check if result is non-negative and fits in a smi.
1905        __ test(eax, Immediate(0xc0000000));
1906        __ j(not_zero, &call_runtime);
1907      } else {
1908        // Check if result fits in a smi.
1909        __ cmp(eax, 0xc0000000);
1910        __ j(negative, &non_smi_result);
1911      }
1912      // Tag smi result and return.
1913      __ SmiTag(eax);
1914      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
1915
1916      // All ops except SHR return a signed int32 that we store in
1917      // a HeapNumber.
1918      if (op_ != Token::SHR) {
1919        __ bind(&non_smi_result);
1920        // Allocate a heap number if needed.
1921        __ mov(ebx, Operand(eax));  // ebx: result
1922        NearLabel skip_allocation;
1923        switch (mode_) {
1924          case OVERWRITE_LEFT:
1925          case OVERWRITE_RIGHT:
1926            // If the operand was an object, we skip the
1927            // allocation of a heap number.
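            // GenerateRegisterArgsPush left the return address at esp[0],
            // the right operand at esp[4] and the left operand at esp[8].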
1928            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1929                                1 * kPointerSize : 2 * kPointerSize));
1930            __ test(eax, Immediate(kSmiTagMask));
1931            __ j(not_zero, &skip_allocation, not_taken);
1932            // Fall through!
1933          case NO_OVERWRITE:
1934            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1935            __ bind(&skip_allocation);
1936            break;
1937          default: UNREACHABLE();
1938        }
1939        // Store the result in the HeapNumber and return.
1940        if (CpuFeatures::IsSupported(SSE2)) {
1941          CpuFeatures::Scope use_sse2(SSE2);
1942          __ cvtsi2sd(xmm0, Operand(ebx));
1943          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1944        } else {
1945          __ mov(Operand(esp, 1 * kPointerSize), ebx);
1946          __ fild_s(Operand(esp, 1 * kPointerSize));
1947          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1948        }
1949        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
1950      }
1951
1952      __ bind(&not_floats);
1953      __ bind(&not_int32);
1954      GenerateTypeTransitionWithSavedArgs(masm);
1955      break;
1956    }
1957    default: UNREACHABLE(); break;
1958  }
1959
1960  // If an allocation fails, or SHR or MOD hit a hard case,
1961  // use the runtime system to get the correct result.
1962  __ bind(&call_runtime);
1963
1964  switch (op_) {
1965    case Token::ADD:
1966      GenerateRegisterArgsPush(masm);
1967      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1968      break;
1969    case Token::SUB:
1970      GenerateRegisterArgsPush(masm);
1971      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1972      break;
1973    case Token::MUL:
1974      GenerateRegisterArgsPush(masm);
1975      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1976      break;
1977    case Token::DIV:
1978      GenerateRegisterArgsPush(masm);
1979      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1980      break;
1981    case Token::MOD:
1982      GenerateRegisterArgsPush(masm);
1983      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1984      break;
1985    case Token::BIT_OR:
1986      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1987      break;
1988    case Token::BIT_AND:
1989      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1990      break;
1991    case Token::BIT_XOR:
1992      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1993      break;
1994    case Token::SAR:
1995      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1996      break;
1997    case Token::SHL:
1998      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1999      break;
2000    case Token::SHR:
2001      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2002      break;
2003    default:
2004      UNREACHABLE();
2005  }
2006}
2007
2008
2009void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2010  Label call_runtime;
2011  ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
2012
2013  // Floating point case.
2014  switch (op_) {
2015    case Token::ADD:
2016    case Token::SUB:
2017    case Token::MUL:
2018    case Token::DIV: {
2019      Label not_floats;
2020      if (CpuFeatures::IsSupported(SSE2)) {
2021        CpuFeatures::Scope use_sse2(SSE2);
2022        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2023
2024        switch (op_) {
2025          case Token::ADD: __ addsd(xmm0, xmm1); break;
2026          case Token::SUB: __ subsd(xmm0, xmm1); break;
2027          case Token::MUL: __ mulsd(xmm0, xmm1); break;
2028          case Token::DIV: __ divsd(xmm0, xmm1); break;
2029          default: UNREACHABLE();
2030        }
2031        GenerateHeapResultAllocation(masm, &call_runtime);
2032        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2033        __ ret(0);
2034      } else {  // SSE2 not available, use FPU.
2035        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2036        FloatingPointHelper::LoadFloatOperands(
2037            masm,
2038            ecx,
2039            FloatingPointHelper::ARGS_IN_REGISTERS);
2040        switch (op_) {
2041          case Token::ADD: __ faddp(1); break;
2042          case Token::SUB: __ fsubp(1); break;
2043          case Token::MUL: __ fmulp(1); break;
2044          case Token::DIV: __ fdivp(1); break;
2045          default: UNREACHABLE();
2046        }
2047        Label after_alloc_failure;
2048        GenerateHeapResultAllocation(masm, &after_alloc_failure);
2049        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2050        __ ret(0);
2051        __ bind(&after_alloc_failure);
2052        __ ffree();
2053        __ jmp(&call_runtime);
2054      }
2055
2056      __ bind(&not_floats);
2057      GenerateTypeTransition(masm);
2058      break;
2059    }
2060
2061    case Token::MOD: {
2062      // For MOD we go directly to runtime in the non-smi case.
2063      break;
2064    }
2065    case Token::BIT_OR:
2066    case Token::BIT_AND:
2067    case Token::BIT_XOR:
2068    case Token::SAR:
2069    case Token::SHL:
2070    case Token::SHR: {
2071      GenerateRegisterArgsPush(masm);
2072      Label not_floats;
2073      Label non_smi_result;
2074      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2075                                                  use_sse3_,
2076                                                  &not_floats);
2077      switch (op_) {
2078        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
2079        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
2080        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
2081        case Token::SAR: __ sar_cl(eax); break;
2082        case Token::SHL: __ shl_cl(eax); break;
2083        case Token::SHR: __ shr_cl(eax); break;
2084        default: UNREACHABLE();
2085      }
2086      if (op_ == Token::SHR) {
2087        // Check if result is non-negative and fits in a smi.
2088        __ test(eax, Immediate(0xc0000000));
2089        __ j(not_zero, &call_runtime);
2090      } else {
2091        // Check if result fits in a smi.
2092        __ cmp(eax, 0xc0000000);
2093        __ j(negative, &non_smi_result);
2094      }
2095      // Tag smi result and return.
2096      __ SmiTag(eax);
2097      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
2098
2099      // All ops except SHR return a signed int32 that we store in
2100      // a HeapNumber.
2101      if (op_ != Token::SHR) {
2102        __ bind(&non_smi_result);
2103        // Allocate a heap number if needed.
2104        __ mov(ebx, Operand(eax));  // ebx: result
2105        NearLabel skip_allocation;
2106        switch (mode_) {
2107          case OVERWRITE_LEFT:
2108          case OVERWRITE_RIGHT:
2109            // If the operand was an object, we skip the
2110            // allocation of a heap number.
2111            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2112                                1 * kPointerSize : 2 * kPointerSize));
2113            __ test(eax, Immediate(kSmiTagMask));
2114            __ j(not_zero, &skip_allocation, not_taken);
2115            // Fall through!
2116          case NO_OVERWRITE:
2117            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2118            __ bind(&skip_allocation);
2119            break;
2120          default: UNREACHABLE();
2121        }
2122        // Store the result in the HeapNumber and return.
2123        if (CpuFeatures::IsSupported(SSE2)) {
2124          CpuFeatures::Scope use_sse2(SSE2);
2125          __ cvtsi2sd(xmm0, Operand(ebx));
2126          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2127        } else {
2128          __ mov(Operand(esp, 1 * kPointerSize), ebx);
2129          __ fild_s(Operand(esp, 1 * kPointerSize));
2130          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2131        }
2132        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
2133      }
2134
2135      __ bind(&not_floats);
2136      GenerateTypeTransitionWithSavedArgs(masm);
2137      break;
2138    }
2139    default: UNREACHABLE(); break;
2140  }
2141
2142  // If an allocation fails, or SHR or MOD hit a hard case,
2143  // use the runtime system to get the correct result.
2144  __ bind(&call_runtime);
2145
2146  switch (op_) {
2147    case Token::ADD:
2148      GenerateRegisterArgsPush(masm);
2149      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2150      break;
2151    case Token::SUB:
2152      GenerateRegisterArgsPush(masm);
2153      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2154      break;
2155    case Token::MUL:
2156      GenerateRegisterArgsPush(masm);
2157      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2158      break;
2159    case Token::DIV:
2160      GenerateRegisterArgsPush(masm);
2161      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2162      break;
2163    case Token::MOD:
2164      GenerateRegisterArgsPush(masm);
2165      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2166      break;
2167    case Token::BIT_OR:
2168      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2169      break;
2170    case Token::BIT_AND:
2171      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2172      break;
2173    case Token::BIT_XOR:
2174      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2175      break;
2176    case Token::SAR:
2177      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2178      break;
2179    case Token::SHL:
2180      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2181      break;
2182    case Token::SHR:
2183      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2184      break;
2185    default:
2186      UNREACHABLE();
2187  }
2188}
2189
2190
2191void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2192  Label call_runtime;
2193
2194  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
2195
2196  switch (op_) {
2197    case Token::ADD:
2198    case Token::SUB:
2199    case Token::MUL:
2200    case Token::DIV:
2201      break;
2202    case Token::MOD:
2203    case Token::BIT_OR:
2204    case Token::BIT_AND:
2205    case Token::BIT_XOR:
2206    case Token::SAR:
2207    case Token::SHL:
2208    case Token::SHR:
2209      GenerateRegisterArgsPush(masm);
2210      break;
2211    default:
2212      UNREACHABLE();
2213  }
2214
2215  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2216
2217  // Floating point case.
2218  switch (op_) {
2219    case Token::ADD:
2220    case Token::SUB:
2221    case Token::MUL:
2222    case Token::DIV: {
2223      Label not_floats;
2224      if (CpuFeatures::IsSupported(SSE2)) {
2225        CpuFeatures::Scope use_sse2(SSE2);
2226        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2227
2228        switch (op_) {
2229          case Token::ADD: __ addsd(xmm0, xmm1); break;
2230          case Token::SUB: __ subsd(xmm0, xmm1); break;
2231          case Token::MUL: __ mulsd(xmm0, xmm1); break;
2232          case Token::DIV: __ divsd(xmm0, xmm1); break;
2233          default: UNREACHABLE();
2234        }
2235        GenerateHeapResultAllocation(masm, &call_runtime);
2236        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2237        __ ret(0);
2238      } else {  // SSE2 not available, use FPU.
2239        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2240        FloatingPointHelper::LoadFloatOperands(
2241            masm,
2242            ecx,
2243            FloatingPointHelper::ARGS_IN_REGISTERS);
2244        switch (op_) {
2245          case Token::ADD: __ faddp(1); break;
2246          case Token::SUB: __ fsubp(1); break;
2247          case Token::MUL: __ fmulp(1); break;
2248          case Token::DIV: __ fdivp(1); break;
2249          default: UNREACHABLE();
2250        }
2251        Label after_alloc_failure;
2252        GenerateHeapResultAllocation(masm, &after_alloc_failure);
2253        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2254        __ ret(0);
2255        __ bind(&after_alloc_failure);
2256        __ ffree();
2257        __ jmp(&call_runtime);
2258      }
2259      __ bind(&not_floats);
2260      break;
2261    }
2262    case Token::MOD: {
2263      // For MOD we go directly to runtime in the non-smi case.
2264      break;
2265    }
2266    case Token::BIT_OR:
2267    case Token::BIT_AND:
2268    case Token::BIT_XOR:
2269    case Token::SAR:
2270    case Token::SHL:
2271    case Token::SHR: {
2272      Label non_smi_result;
2273      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2274                                                  use_sse3_,
2275                                                  &call_runtime);
2276      switch (op_) {
2277        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
2278        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
2279        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
2280        case Token::SAR: __ sar_cl(eax); break;
2281        case Token::SHL: __ shl_cl(eax); break;
2282        case Token::SHR: __ shr_cl(eax); break;
2283        default: UNREACHABLE();
2284      }
2285      if (op_ == Token::SHR) {
2286        // Check if result is non-negative and fits in a smi.
2287        __ test(eax, Immediate(0xc0000000));
2288        __ j(not_zero, &call_runtime);
2289      } else {
2290        // Check if result fits in a smi.
2291        __ cmp(eax, 0xc0000000);
2292        __ j(negative, &non_smi_result);
2293      }
2294      // Tag smi result and return.
2295      __ SmiTag(eax);
2296      __ ret(2 * kPointerSize);  // Drop the arguments from the stack.
2297
2298      // All ops except SHR return a signed int32 that we store in
2299      // a HeapNumber.
2300      if (op_ != Token::SHR) {
2301        __ bind(&non_smi_result);
2302        // Allocate a heap number if needed.
2303        __ mov(ebx, Operand(eax));  // ebx: result
2304        NearLabel skip_allocation;
2305        switch (mode_) {
2306          case OVERWRITE_LEFT:
2307          case OVERWRITE_RIGHT:
2308            // If the operand was an object, we skip the
2309            // allocation of a heap number.
2310            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2311                                1 * kPointerSize : 2 * kPointerSize));
2312            __ test(eax, Immediate(kSmiTagMask));
2313            __ j(not_zero, &skip_allocation, not_taken);
2314            // Fall through!
2315          case NO_OVERWRITE:
2316            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2317            __ bind(&skip_allocation);
2318            break;
2319          default: UNREACHABLE();
2320        }
2321        // Store the result in the HeapNumber and return.
2322        if (CpuFeatures::IsSupported(SSE2)) {
2323          CpuFeatures::Scope use_sse2(SSE2);
2324          __ cvtsi2sd(xmm0, Operand(ebx));
2325          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2326        } else {
2327          __ mov(Operand(esp, 1 * kPointerSize), ebx);
2328          __ fild_s(Operand(esp, 1 * kPointerSize));
2329          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2330        }
2331        __ ret(2 * kPointerSize);
2332      }
2333      break;
2334    }
2335    default: UNREACHABLE(); break;
2336  }
2337
2338  // If all else fails, use the runtime system to get the correct
2339  // result.
2340  __ bind(&call_runtime);
2341  switch (op_) {
2342    case Token::ADD: {
2343      GenerateAddStrings(masm);
2344      GenerateRegisterArgsPush(masm);
2345      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2346      break;
2347    }
2348    case Token::SUB:
2349      GenerateRegisterArgsPush(masm);
2350      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2351      break;
2352    case Token::MUL:
2353      GenerateRegisterArgsPush(masm);
2354      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2355      break;
2356    case Token::DIV:
2357      GenerateRegisterArgsPush(masm);
2358      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2359      break;
2360    case Token::MOD:
2361      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2362      break;
2363    case Token::BIT_OR:
2364      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2365      break;
2366    case Token::BIT_AND:
2367      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2368      break;
2369    case Token::BIT_XOR:
2370      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2371      break;
2372    case Token::SAR:
2373      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2374      break;
2375    case Token::SHL:
2376      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2377      break;
2378    case Token::SHR:
2379      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2380      break;
2381    default:
2382      UNREACHABLE();
2383  }
2384}
2385
2386
2387void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2388  ASSERT(op_ == Token::ADD);
2389  NearLabel left_not_string, call_runtime;
2390
2391  // Registers containing left and right operands respectively.
2392  Register left = edx;
2393  Register right = eax;
2394
2395  // Test if left operand is a string.
2396  __ test(left, Immediate(kSmiTagMask));
2397  __ j(zero, &left_not_string);
2398  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
2399  __ j(above_equal, &left_not_string);
2400
2401  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2402  GenerateRegisterArgsPush(masm);
2403  __ TailCallStub(&string_add_left_stub);
2404
2405  // Left operand is not a string, test right.
2406  __ bind(&left_not_string);
2407  __ test(right, Immediate(kSmiTagMask));
2408  __ j(zero, &call_runtime);
2409  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
2410  __ j(above_equal, &call_runtime);
2411
2412  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2413  GenerateRegisterArgsPush(masm);
2414  __ TailCallStub(&string_add_right_stub);
2415
2416  // Neither argument is a string.
2417  __ bind(&call_runtime);
2418}
2419
2420
2421void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
2422    MacroAssembler* masm,
2423    Label* alloc_failure) {
2424  Label skip_allocation;
2425  OverwriteMode mode = mode_;
2426  switch (mode) {
2427    case OVERWRITE_LEFT: {
2428      // If the argument in edx is already an object, we skip the
2429      // allocation of a heap number.
2430      __ test(edx, Immediate(kSmiTagMask));
2431      __ j(not_zero, &skip_allocation, not_taken);
2432      // Allocate a heap number for the result. Keep eax and edx intact
2433      // for the possible runtime call.
2434      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2435      // Now edx can be overwritten, losing one of the arguments, as we
2436      // are done and will not need it any more.
2437      __ mov(edx, Operand(ebx));
2438      __ bind(&skip_allocation);
2439      // Use the object in edx as the result holder.
2440      __ mov(eax, Operand(edx));
2441      break;
2442    }
2443    case OVERWRITE_RIGHT:
2444      // If the argument in eax is already an object, we skip the
2445      // allocation of a heap number.
2446      __ test(eax, Immediate(kSmiTagMask));
2447      __ j(not_zero, &skip_allocation, not_taken);
2448      // Fall through!
2449    case NO_OVERWRITE:
2450      // Allocate a heap number for the result. Keep eax and edx intact
2451      // for the possible runtime call.
2452      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2453      // Now eax can be overwritten, losing one of the arguments, as we
2454      // are done and will not need it any more.
2455      __ mov(eax, ebx);
2456      __ bind(&skip_allocation);
2457      break;
2458    default: UNREACHABLE();
2459  }
2460}
2461
2462
2463void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
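  // Rewrite the stack from (ret) to (left, right, ret) by popping the
  // return address, pushing edx and eax, and pushing the address back.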
2464  __ pop(ecx);
2465  __ push(edx);
2466  __ push(eax);
2467  __ push(ecx);
2468}
2469
2470
2471void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2472  // TAGGED case:
2473  //   Input:
2474  //     esp[4]: tagged number input argument (should be number).
2475  //     esp[0]: return address.
2476  //   Output:
2477  //     eax: tagged double result.
2478  // UNTAGGED case:
2479  //   Input:
2480  //     esp[0]: return address.
2481  //     xmm1: untagged double input argument.
2482  //   Output:
2483  //     xmm1: untagged double result.
2484
2485  Label runtime_call;
2486  Label runtime_call_clear_stack;
2487  Label skip_cache;
2488  const bool tagged = (argument_type_ == TAGGED);
2489  if (tagged) {
2490    // Test that eax is a number.
2491    NearLabel input_not_smi;
2492    NearLabel loaded;
2493    __ mov(eax, Operand(esp, kPointerSize));
2494    __ test(eax, Immediate(kSmiTagMask));
2495    __ j(not_zero, &input_not_smi);
2496    // Input is a smi. Untag and load it onto the FPU stack.
2497    // Then load the low and high words of the double into ebx, edx.
2498    STATIC_ASSERT(kSmiTagSize == 1);
2499    __ sar(eax, 1);
2500    __ sub(Operand(esp), Immediate(2 * kPointerSize));
2501    __ mov(Operand(esp, 0), eax);
2502    __ fild_s(Operand(esp, 0));
2503    __ fst_d(Operand(esp, 0));
2504    __ pop(ebx);  // Low word of the double.
2505    __ pop(edx);  // High word of the double.
2506    __ jmp(&loaded);
2507    __ bind(&input_not_smi);
2508    // Check if input is a HeapNumber.
2509    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2510    __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
2511    __ j(not_equal, &runtime_call);
2512    // Input is a HeapNumber. Push it on the FPU stack and load its
2513    // low and high words into ebx, edx.
2514    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2515    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2516    __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2517
2518    __ bind(&loaded);
2519  } else {  // UNTAGGED.
2520    if (CpuFeatures::IsSupported(SSE4_1)) {
2521      CpuFeatures::Scope sse4_scope(SSE4_1);
2522      __ pextrd(Operand(edx), xmm1, 0x1);  // Copy xmm1[63..32] to edx.
2523    } else {
2524      __ pshufd(xmm0, xmm1, 0x1);
2525      __ movd(Operand(edx), xmm0);
2526    }
2527    __ movd(Operand(ebx), xmm1);
2528  }
2529
2530  // ST[0] or xmm1 == double value.
2531  // ebx = low 32 bits of double value
2532  // edx = high 32 bits of double value
2533  // Compute hash (the shifts are arithmetic):
2534  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2535  __ mov(ecx, ebx);
2536  __ xor_(ecx, Operand(edx));
2537  __ mov(eax, ecx);
2538  __ sar(eax, 16);
2539  __ xor_(ecx, Operand(eax));
2540  __ mov(eax, ecx);
2541  __ sar(eax, 8);
2542  __ xor_(ecx, Operand(eax));
2543  ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
2544  __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
2545
2546  // ST[0] or xmm1 == double value.
2547  // ebx = low 32 bits of double value.
2548  // edx = high 32 bits of double value.
2549  // ecx = TranscendentalCache::hash(double value).
2550  __ mov(eax,
2551         Immediate(ExternalReference::transcendental_cache_array_address()));
2552  // eax points to the cache array.
2553  __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
2554  // eax points to the cache for the type type_.
2555  // If NULL, the cache hasn't been initialized yet, so go through runtime.
2556  __ test(eax, Operand(eax));
2557  __ j(zero, &runtime_call_clear_stack);
2558#ifdef DEBUG
2559  // Check that the layout of cache elements matches expectations.
2560  { TranscendentalCache::Element test_elem[2];
2561    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2562    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2563    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2564    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2565    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
2566    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
2567    CHECK_EQ(0, elem_in0 - elem_start);
2568    CHECK_EQ(kIntSize, elem_in1 - elem_start);
2569    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2570  }
2571#endif
2572  // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
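  // Two lea instructions compute ecx * 12 as (ecx + ecx * 2) * 4.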
2573  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2574  __ lea(ecx, Operand(eax, ecx, times_4, 0));
2575  // Check if the cache matches: the double value is stored in a uint32_t[2].
2576  NearLabel cache_miss;
2577  __ cmp(ebx, Operand(ecx, 0));
2578  __ j(not_equal, &cache_miss);
2579  __ cmp(edx, Operand(ecx, kIntSize));
2580  __ j(not_equal, &cache_miss);
2581  // Cache hit!
2582  __ mov(eax, Operand(ecx, 2 * kIntSize));
2583  if (tagged) {
2584    __ fstp(0);
2585    __ ret(kPointerSize);
2586  } else {  // UNTAGGED.
2587    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2588    __ Ret();
2589  }
2590
2591  __ bind(&cache_miss);
2592  // Update cache with new value.
2593  // We are short on registers, so use no_reg as scratch.
2594  // This gives slightly larger code.
2595  if (tagged) {
2596    __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2597  } else {  // UNTAGGED.
2598    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2599    __ sub(Operand(esp), Immediate(kDoubleSize));
2600    __ movdbl(Operand(esp, 0), xmm1);
2601    __ fld_d(Operand(esp, 0));
2602    __ add(Operand(esp), Immediate(kDoubleSize));
2603  }
2604  GenerateOperation(masm);
2605  __ mov(Operand(ecx, 0), ebx);
2606  __ mov(Operand(ecx, kIntSize), edx);
2607  __ mov(Operand(ecx, 2 * kIntSize), eax);
2608  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2609  if (tagged) {
2610    __ ret(kPointerSize);
2611  } else {  // UNTAGGED.
2612    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2613    __ Ret();
2614
2615    // Skip cache and return answer directly, only in untagged case.
2616    __ bind(&skip_cache);
2617    __ sub(Operand(esp), Immediate(kDoubleSize));
2618    __ movdbl(Operand(esp, 0), xmm1);
2619    __ fld_d(Operand(esp, 0));
2620    GenerateOperation(masm);
2621    __ fstp_d(Operand(esp, 0));
2622    __ movdbl(xmm1, Operand(esp, 0));
2623    __ add(Operand(esp), Immediate(kDoubleSize));
2624    // We return the value in xmm1 without adding it to the cache, but
2625    // we cause a scavenging GC so that future allocations will succeed.
2626    __ EnterInternalFrame();
2627    // Allocate an unused object bigger than a HeapNumber.
2628    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2629    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2630    __ LeaveInternalFrame();
2631    __ Ret();
2632  }
2633
2634  // Call runtime, doing whatever allocation and cleanup is necessary.
2635  if (tagged) {
2636    __ bind(&runtime_call_clear_stack);
2637    __ fstp(0);
2638    __ bind(&runtime_call);
2639    __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
2640  } else {  // UNTAGGED.
2641    __ bind(&runtime_call_clear_stack);
2642    __ bind(&runtime_call);
2643    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2644    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2645    __ EnterInternalFrame();
2646    __ push(eax);
2647    __ CallRuntime(RuntimeFunction(), 1);
2648    __ LeaveInternalFrame();
2649    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2650    __ Ret();
2651  }
2652}
2653
2654
2655Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2656  switch (type_) {
2657    case TranscendentalCache::SIN: return Runtime::kMath_sin;
2658    case TranscendentalCache::COS: return Runtime::kMath_cos;
2659    case TranscendentalCache::LOG: return Runtime::kMath_log;
2660    default:
2661      UNIMPLEMENTED();
2662      return Runtime::kAbort;
2663  }
2664}
2665
2666
2667void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
2668  // The only free register is edi.
2669  // The input value is on the FPU stack and also in ebx/edx; in the
2670  // untagged case it is also in xmm1.
2671  // The address of the result (a newly allocated HeapNumber) may be in eax.
2672  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
2673    // Both fsin and fcos require arguments in the range +/-2^63 and
2674    // return NaN for infinities and NaN. They can share all code except
2675    // the actual fsin/fcos operation.
2676    NearLabel in_range, done;
2677    // If the argument is outside the range -2^63..2^63, fsin/fcos don't
2678    // work. We must reduce it to the appropriate range.
2679    __ mov(edi, edx);
2680    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
2681    int supported_exponent_limit =
2682        (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2683    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
2684    __ j(below, &in_range, taken);
2685    // Check for infinity and NaN; sin and cos return NaN for both.
2686    __ cmp(Operand(edi), Immediate(0x7ff00000));
2687    NearLabel non_nan_result;
2688    __ j(not_equal, &non_nan_result, taken);
2689    // Input is +/-Infinity or NaN. Result is NaN.
2690    __ fstp(0);
2691    // NaN is represented by 0x7ff8000000000000.
2692    __ push(Immediate(0x7ff80000));
2693    __ push(Immediate(0));
2694    __ fld_d(Operand(esp, 0));
2695    __ add(Operand(esp), Immediate(2 * kPointerSize));
2696    __ jmp(&done);
2697
2698    __ bind(&non_nan_result);
2699
2700    // Use fprem1 to restrict the argument to the range +/-2*PI.
2701    __ mov(edi, eax);  // Save eax before using fnstsw_ax.
2702    __ fldpi();
2703    __ fadd(0);  // st(0) = pi + pi = 2*pi.
2704    __ fld(1);
2705    // FPU Stack: input, 2*pi, input.
2706    {
2707      NearLabel no_exceptions;
2708      __ fwait();
2709      __ fnstsw_ax();
2710      // Clear the exceptions if Invalid Operation or Zero Divide are set.
2711      __ test(Operand(eax), Immediate(5));
2712      __ j(zero, &no_exceptions);
2713      __ fnclex();
2714      __ bind(&no_exceptions);
2715    }
2716
2717    // Compute st(0) % st(1).
2718    {
2719      NearLabel partial_remainder_loop;
2720      __ bind(&partial_remainder_loop);
2721      __ fprem1();
2722      __ fwait();
2723      __ fnstsw_ax();
2724      __ test(Operand(eax), Immediate(0x400 /* C2 */));
2725      // If C2 is set, computation only has partial result. Loop to
2726      // continue computation.
2727      __ j(not_zero, &partial_remainder_loop);
2728    }
2729    // FPU Stack: input, 2*pi, input % 2*pi
2730    __ fstp(2);
2731    __ fstp(0);
2732    __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
2733
2734    // FPU Stack: input % 2*pi
2735    __ bind(&in_range);
2736    switch (type_) {
2737      case TranscendentalCache::SIN:
2738        __ fsin();
2739        break;
2740      case TranscendentalCache::COS:
2741        __ fcos();
2742        break;
2743      default:
2744        UNREACHABLE();
2745    }
2746    __ bind(&done);
2747  } else {
2748    ASSERT(type_ == TranscendentalCache::LOG);
2749    __ fldln2();
2750    __ fxch();
2751    __ fyl2x();
2752  }
2753}
2754
2755
2756// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
2757// is faster than using the built-in instructions on floating point registers.
2758// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
2759// trashed registers.
2760void IntegerConvert(MacroAssembler* masm,
2761                    Register source,
2762                    TypeInfo type_info,
2763                    bool use_sse3,
2764                    Label* conversion_failure) {
2765  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
2766  Label done, right_exponent, normal_exponent;
2767  Register scratch = ebx;
2768  Register scratch2 = edi;
2769  if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
2770    CpuFeatures::Scope scope(SSE2);
2771    __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
2772    return;
2773  }
2774  if (!type_info.IsInteger32() || !use_sse3) {
2775    // Get exponent word.
2776    __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
2777    // Get exponent alone in scratch2.
2778    __ mov(scratch2, scratch);
2779    __ and_(scratch2, HeapNumber::kExponentMask);
2780  }
2781  if (use_sse3) {
2782    CpuFeatures::Scope scope(SSE3);
2783    if (!type_info.IsInteger32()) {
2784      // Check whether the exponent is too big for a 64 bit signed integer.
2785      static const uint32_t kTooBigExponent =
2786          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2787      __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
2788      __ j(greater_equal, conversion_failure);
2789    }
2790    // Load x87 register with heap number.
2791    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
2792    // Reserve space for 64 bit answer.
2793    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
2794    // Do conversion, which cannot fail because we checked the exponent.
2795    __ fisttp_d(Operand(esp, 0));
2796    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
2797    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
2798  } else {
2799    // Load ecx with zero.  We use this either for the final shift or
2800    // for the answer.
2801    __ xor_(ecx, Operand(ecx));
2802    // Check whether the exponent matches a 32 bit signed int that cannot be
2803    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
2804    // exponent is 30 (biased).  This is the exponent that we are fastest at and
2805    // also the highest exponent we can handle here.
2806    const uint32_t non_smi_exponent =
2807        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
2808    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
2809    // If we have a match of the int32-but-not-Smi exponent then skip some
2810    // logic.
2811    __ j(equal, &right_exponent);
2812    // If the exponent is higher than that then go to the slow case.  This
2813    // catches numbers that don't fit in a signed int32, infinities and NaNs.
2814    __ j(less, &normal_exponent);
2815
2816    {
2817      // Handle a big exponent.  The only reason we have this code is that the
2818      // >>> operator has a tendency to generate numbers with an exponent of 31.
2819      const uint32_t big_non_smi_exponent =
2820          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
2821      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
2822      __ j(not_equal, conversion_failure);
2823      // We have the big exponent, typically from >>>.  This means the number is
2824      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
2825      __ mov(scratch2, scratch);
2826      __ and_(scratch2, HeapNumber::kMantissaMask);
2827      // Put back the implicit 1.
2828      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
2829      // Shift up the mantissa bits to take up the space the exponent used to
2830      // take. We just or'ed in the implicit bit, which took care of one, and
2831      // we want to use the full unsigned range, so we subtract 1 bit from the
2832      // shift distance.
2833      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
2834      __ shl(scratch2, big_shift_distance);
2835      // Get the second half of the double.
2836      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
2837      // Shift down 21 bits to get the most significant 11 bits of the low
2838      // mantissa word.
2839      __ shr(ecx, 32 - big_shift_distance);
2840      __ or_(ecx, Operand(scratch2));
2841      // We have the answer in ecx, but we may need to negate it.
2842      __ test(scratch, Operand(scratch));
2843      __ j(positive, &done);
2844      __ neg(ecx);
2845      __ jmp(&done);
2846    }
2847
2848    __ bind(&normal_exponent);
2849    // Exponent word in scratch, exponent part of exponent word in scratch2.
2850    // Zero in ecx.
2851    // We know the exponent is smaller than 30 (biased).  If it is less than
2852    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0,
2853    // i.e., it rounds to zero.
2854    const uint32_t zero_exponent =
2855        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
2856    __ sub(Operand(scratch2), Immediate(zero_exponent));
2857    // ecx already has a Smi zero.
2858    __ j(less, &done);
2859
2860    // We have a shifted exponent between 0 and 30 in scratch2.
2861    __ shr(scratch2, HeapNumber::kExponentShift);
2862    __ mov(ecx, Immediate(30));
2863    __ sub(ecx, Operand(scratch2));
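    // ecx = 30 - exponent: the mantissa below is assembled as if the
    // exponent were 30, so smaller exponents shift the result further down.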
2864
2865    __ bind(&right_exponent);
2866    // Here ecx is the shift, scratch is the exponent word.
2867    // Get the top bits of the mantissa.
2868    __ and_(scratch, HeapNumber::kMantissaMask);
2869    // Put back the implicit 1.
2870    __ or_(scratch, 1 << HeapNumber::kExponentShift);
2871    // Shift up the mantissa bits to take up the space the exponent used to
2872    // take. We have kExponentShift + 1 significant bits in the low end of the
2873    // word.  Shift them to the top bits.
2874    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
2875    __ shl(scratch, shift_distance);
2876    // Get the second half of the double. For some exponents we don't
2877    // actually need this because the bits get shifted out again, but
2878    // it's probably slower to test than just to do it.
2879    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
2880    // Shift down 22 bits to get the most significant 10 bits of the low
2881    // mantissa word.
2882    __ shr(scratch2, 32 - shift_distance);
2883    __ or_(scratch2, Operand(scratch));
2884    // Move down according to the exponent.
2885    __ shr_cl(scratch2);
2886    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
2887    // we may need to fix the sign.
2888    NearLabel negative;
2889    __ xor_(ecx, Operand(ecx));
2890    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
2891    __ j(greater, &negative);
2892    __ mov(ecx, scratch2);
2893    __ jmp(&done);
2894    __ bind(&negative);
2895    __ sub(ecx, Operand(scratch2));
2896    __ bind(&done);
2897  }
2898}
2899
2900
2901// Input: edx, eax are the left and right objects of a bit op.
2902// Output: eax, ecx are left and right integers for a bit op.
2903void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
2904                                                TypeInfo type_info,
2905                                                bool use_sse3,
2906                                                Label* conversion_failure) {
2907  // Check float operands.
2908  Label arg1_is_object, arg2_is_object;
2910  Label load_arg2, done;
2911
2912  if (!type_info.IsDouble()) {
2913    if (!type_info.IsSmi()) {
2914      __ test(edx, Immediate(kSmiTagMask));
2915      __ j(not_zero, &arg1_is_object);
2916    } else {
2917      if (FLAG_debug_code) __ AbortIfNotSmi(edx);
2918    }
2919    __ SmiUntag(edx);
2920    __ jmp(&load_arg2);
2921  }
2922
2923  __ bind(&arg1_is_object);
2924
2925  // Get the untagged integer version of the edx heap number in ecx.
2926  IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
2927  __ mov(edx, ecx);
2928
2929  // Here edx has the untagged integer, eax has a Smi or a heap number.
2930  __ bind(&load_arg2);
2931  if (!type_info.IsDouble()) {
2932    // Test if arg2 is a Smi.
2933    if (!type_info.IsSmi()) {
2934      __ test(eax, Immediate(kSmiTagMask));
2935      __ j(not_zero, &arg2_is_object);
2936    } else {
2937      if (FLAG_debug_code) __ AbortIfNotSmi(eax);
2938    }
2939    __ SmiUntag(eax);
2940    __ mov(ecx, eax);
2941    __ jmp(&done);
2942  }
2943
2944  __ bind(&arg2_is_object);
2945
2946  // Get the untagged integer version of the eax heap number in ecx.
2947  IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
2948  __ bind(&done);
2949  __ mov(eax, edx);
2950}
2951
2952
2953// Input: edx, eax are the left and right objects of a bit op.
2954// Output: eax, ecx are left and right integers for a bit op.
2955void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
2956                                                 bool use_sse3,
2957                                                 Label* conversion_failure) {
2958  // Check float operands.
2959  Label arg1_is_object, check_undefined_arg1;
2960  Label arg2_is_object, check_undefined_arg2;
2961  Label load_arg2, done;
2962
2963  // Test if arg1 is a Smi.
2964  __ test(edx, Immediate(kSmiTagMask));
2965  __ j(not_zero, &arg1_is_object);
2966
2967  __ SmiUntag(edx);
2968  __ jmp(&load_arg2);
2969
2970  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2971  __ bind(&check_undefined_arg1);
2972  __ cmp(edx, Factory::undefined_value());
2973  __ j(not_equal, conversion_failure);
2974  __ mov(edx, Immediate(0));
2975  __ jmp(&load_arg2);
2976
2977  __ bind(&arg1_is_object);
2978  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2979  __ cmp(ebx, Factory::heap_number_map());
2980  __ j(not_equal, &check_undefined_arg1);
2981
2982  // Get the untagged integer version of the edx heap number in ecx.
2983  IntegerConvert(masm,
2984                 edx,
2985                 TypeInfo::Unknown(),
2986                 use_sse3,
2987                 conversion_failure);
2988  __ mov(edx, ecx);
2989
2990  // Here edx has the untagged integer, eax has a Smi or a heap number.
2991  __ bind(&load_arg2);
2992
2993  // Test if arg2 is a Smi.
2994  __ test(eax, Immediate(kSmiTagMask));
2995  __ j(not_zero, &arg2_is_object);
2996
2997  __ SmiUntag(eax);
2998  __ mov(ecx, eax);
2999  __ jmp(&done);
3000
3001  // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
3002  __ bind(&check_undefined_arg2);
3003  __ cmp(eax, Factory::undefined_value());
3004  __ j(not_equal, conversion_failure);
3005  __ mov(ecx, Immediate(0));
3006  __ jmp(&done);
3007
3008  __ bind(&arg2_is_object);
3009  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3010  __ cmp(ebx, Factory::heap_number_map());
3011  __ j(not_equal, &check_undefined_arg2);
3012
3013  // Get the untagged integer version of the eax heap number in ecx.
3014  IntegerConvert(masm,
3015                 eax,
3016                 TypeInfo::Unknown(),
3017                 use_sse3,
3018                 conversion_failure);
3019  __ bind(&done);
3020  __ mov(eax, edx);
3021}
3022
3023
3024void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
3025                                         TypeInfo type_info,
3026                                         bool use_sse3,
3027                                         Label* conversion_failure) {
3028  if (type_info.IsNumber()) {
3029    LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
3030  } else {
3031    LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
3032  }
3033}
3034
3035
3036void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
3037                                                       bool use_sse3,
3038                                                       Label* not_int32) {
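      // Intentionally a no-op: no additional int32 checking is done on this
      // path; the unused parameters keep the call sites uniform.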
3039  return;
3040}
3041
3042
3043void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
3044                                           Register number) {
3045  NearLabel load_smi, done;
3046
3047  __ test(number, Immediate(kSmiTagMask));
3048  __ j(zero, &load_smi, not_taken);
3049  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
3050  __ jmp(&done);
3051
3052  __ bind(&load_smi);
3053  __ SmiUntag(number);
3054  __ push(number);
3055  __ fild_s(Operand(esp, 0));
3056  __ pop(number);
3057
3058  __ bind(&done);
3059}
3060
3061
3062void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
3063  NearLabel load_smi_edx, load_eax, load_smi_eax, done;
3064  // Load operand in edx into xmm0.
3065  __ test(edx, Immediate(kSmiTagMask));
3066  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
3067  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3068
3069  __ bind(&load_eax);
3070  // Load operand in eax into xmm1.
3071  __ test(eax, Immediate(kSmiTagMask));
3072  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
3073  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
3074  __ jmp(&done);
3075
3076  __ bind(&load_smi_edx);
3077  __ SmiUntag(edx);  // Untag smi before converting to float.
3078  __ cvtsi2sd(xmm0, Operand(edx));
3079  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
3080  __ jmp(&load_eax);
3081
3082  __ bind(&load_smi_eax);
3083  __ SmiUntag(eax);  // Untag smi before converting to float.
3084  __ cvtsi2sd(xmm1, Operand(eax));
3085  __ SmiTag(eax);  // Retag smi for heap number overwriting test.
3086
3087  __ bind(&done);
3088}
3089
3090
3091void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
3092                                           Label* not_numbers) {
3093  NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
3094  // Load operand in edx into xmm0, or branch to not_numbers.
3095  __ test(edx, Immediate(kSmiTagMask));
3096  __ j(zero, &load_smi_edx, not_taken);  // Argument in edx is a smi.
3097  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
3098  __ j(not_equal, not_numbers);  // Argument in edx is not a number.
3099  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3100  __ bind(&load_eax);
3101  // Load operand in eax into xmm1, or branch to not_numbers.
3102  __ test(eax, Immediate(kSmiTagMask));
3103  __ j(zero, &load_smi_eax, not_taken);  // Argument in eax is a smi.
3104  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
3105  __ j(equal, &load_float_eax);
3106  __ jmp(not_numbers);  // Argument in eax is not a number.
3107  __ bind(&load_smi_edx);
3108  __ SmiUntag(edx);  // Untag smi before converting to float.
3109  __ cvtsi2sd(xmm0, Operand(edx));
3110  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
3111  __ jmp(&load_eax);
3112  __ bind(&load_smi_eax);
3113  __ SmiUntag(eax);  // Untag smi before converting to float.
3114  __ cvtsi2sd(xmm1, Operand(eax));
3115  __ SmiTag(eax);  // Retag smi for heap number overwriting test.
3116  __ jmp(&done);
3117  __ bind(&load_float_eax);
3118  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
3119  __ bind(&done);
3120}
3121
3122
3123void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
3124                                       Register scratch) {
3125  const Register left = edx;
3126  const Register right = eax;
3127  __ mov(scratch, left);
3128  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
3129  __ SmiUntag(scratch);
3130  __ cvtsi2sd(xmm0, Operand(scratch));
3131
3132  __ mov(scratch, right);
3133  __ SmiUntag(scratch);
3134  __ cvtsi2sd(xmm1, Operand(scratch));
3135}
3136
3137
3138void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
3139                                                    Label* non_int32,
3140                                                    Register scratch) {
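      // Round-trip check: truncate each double to an int32 and convert back;
      // the double was an exact int32 iff the values compare equal. After
      // ucomisd, not_zero catches differing values (including the 0x80000000
      // "indefinite" that cvttsd2si returns for out-of-range inputs) and
      // carry catches the unordered (NaN) case.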
3141  __ cvttsd2si(scratch, Operand(xmm0));
3142  __ cvtsi2sd(xmm2, Operand(scratch));
3143  __ ucomisd(xmm0, xmm2);
3144  __ j(not_zero, non_int32);
3145  __ j(carry, non_int32);
3146  __ cvttsd2si(scratch, Operand(xmm1));
3147  __ cvtsi2sd(xmm2, Operand(scratch));
3148  __ ucomisd(xmm1, xmm2);
3149  __ j(not_zero, non_int32);
3150  __ j(carry, non_int32);
3151}
3152
3153
3154void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
3155                                            Register scratch,
3156                                            ArgLocation arg_location) {
3157  NearLabel load_smi_1, load_smi_2, done_load_1, done;
3158  if (arg_location == ARGS_IN_REGISTERS) {
3159    __ mov(scratch, edx);
3160  } else {
3161    __ mov(scratch, Operand(esp, 2 * kPointerSize));
3162  }
3163  __ test(scratch, Immediate(kSmiTagMask));
3164  __ j(zero, &load_smi_1, not_taken);
3165  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
3166  __ bind(&done_load_1);
3167
3168  if (arg_location == ARGS_IN_REGISTERS) {
3169    __ mov(scratch, eax);
3170  } else {
3171    __ mov(scratch, Operand(esp, 1 * kPointerSize));
3172  }
3173  __ test(scratch, Immediate(kSmiTagMask));
3174  __ j(zero, &load_smi_2, not_taken);
3175  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
3176  __ jmp(&done);
3177
3178  __ bind(&load_smi_1);
3179  __ SmiUntag(scratch);
3180  __ push(scratch);
3181  __ fild_s(Operand(esp, 0));
3182  __ pop(scratch);
3183  __ jmp(&done_load_1);
3184
3185  __ bind(&load_smi_2);
3186  __ SmiUntag(scratch);
3187  __ push(scratch);
3188  __ fild_s(Operand(esp, 0));
3189  __ pop(scratch);
3190
3191  __ bind(&done);
3192}
3193
3194
3195void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
3196                                        Register scratch) {
3197  const Register left = edx;
3198  const Register right = eax;
3199  __ mov(scratch, left);
3200  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
3201  __ SmiUntag(scratch);
3202  __ push(scratch);
3203  __ fild_s(Operand(esp, 0));
3204
3205  __ mov(scratch, right);
3206  __ SmiUntag(scratch);
3207  __ mov(Operand(esp, 0), scratch);
3208  __ fild_s(Operand(esp, 0));
3209  __ pop(scratch);
3210}
3211
3212
3213void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
3214                                             Label* non_float,
3215                                             Register scratch) {
3216  NearLabel test_other, done;
3217  // Check that both operands are numbers (heap numbers or smis); jump to
3218  // non_float if either is not. Clobbers scratch.
3219  __ test(edx, Immediate(kSmiTagMask));
3220  __ j(zero, &test_other, not_taken);  // argument in edx is OK
3221  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
3222  __ cmp(scratch, Factory::heap_number_map());
3223  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN
3224
3225  __ bind(&test_other);
3226  __ test(eax, Immediate(kSmiTagMask));
3227  __ j(zero, &done);  // argument in eax is OK
3228  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
3229  __ cmp(scratch, Factory::heap_number_map());
3230  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN
3231
3232  // Fall-through: Both operands are numbers.
3233  __ bind(&done);
3234}
3235
3236
3237void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
3238                                                     Label* non_int32) {
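      // Intentionally a no-op: the x87 path does no additional int32
      // checking.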
3239  return;
3240}
3241
3242
3243void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
3244  Label slow, done, undo;
3245
3246  if (op_ == Token::SUB) {
3247    if (include_smi_code_) {
3248      // Check whether the value is a smi.
3249      NearLabel try_float;
3250      __ test(eax, Immediate(kSmiTagMask));
3251      __ j(not_zero, &try_float, not_taken);
3252
3253      if (negative_zero_ == kStrictNegativeZero) {
3254        // Go slow case if the value of the expression is zero
3255        // to make sure that we switch between 0 and -0.
3256        __ test(eax, Operand(eax));
3257        __ j(zero, &slow, not_taken);
3258      }
3259
3260      // The value of the expression is a smi that is not zero.  Try
3261      // optimistic subtraction '0 - value'.
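          // The only smi whose negation overflows is the minimum one,
          // -2^30 (tagged 0x80000000); &undo restores eax for the slow case.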
3262      __ mov(edx, Operand(eax));
3263      __ Set(eax, Immediate(0));
3264      __ sub(eax, Operand(edx));
3265      __ j(overflow, &undo, not_taken);
3266      __ StubReturn(1);
3267
3268      // Try floating point case.
3269      __ bind(&try_float);
3270    } else if (FLAG_debug_code) {
3271      __ AbortIfSmi(eax);
3272    }
3273
3274    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
3275    __ cmp(edx, Factory::heap_number_map());
3276    __ j(not_equal, &slow);
3277    if (overwrite_ == UNARY_OVERWRITE) {
3278      __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
3279      __ xor_(edx, HeapNumber::kSignMask);  // Flip sign.
3280      __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
3281    } else {
3282      __ mov(edx, Operand(eax));
3283      // edx: operand
3284      __ AllocateHeapNumber(eax, ebx, ecx, &undo);
3285      // eax: allocated 'empty' number
3286      __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
3287      __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
3288      __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
3289      __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
3290      __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
3291    }
3292  } else if (op_ == Token::BIT_NOT) {
3293    if (include_smi_code_) {
3294      Label non_smi;
3295      __ test(eax, Immediate(kSmiTagMask));
3296      __ j(not_zero, &non_smi);
3297      __ not_(eax);
3298      __ and_(eax, ~kSmiTagMask);  // Remove inverted smi-tag.
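          // For a tagged smi 2*v, ~(2*v) == (~v << 1) | 1, so clearing the
          // low (tag) bit leaves the correctly tagged smi ~v.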
3299      __ ret(0);
3300      __ bind(&non_smi);
3301    } else if (FLAG_debug_code) {
3302      __ AbortIfSmi(eax);
3303    }
3304
3305    // Check if the operand is a heap number.
3306    __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
3307    __ cmp(edx, Factory::heap_number_map());
3308    __ j(not_equal, &slow, not_taken);
3309
3310    // Convert the heap number in eax to an untagged integer in ecx.
3311    IntegerConvert(masm,
3312                   eax,
3313                   TypeInfo::Unknown(),
3314                   CpuFeatures::IsSupported(SSE3),
3315                   &slow);
3316
3317    // Do the bitwise operation and check if the result fits in a smi.
3318    NearLabel try_float;
3319    __ not_(ecx);
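        // The result fits in a smi iff it lies in [-2^30, 2^30), i.e. iff
        // bits 30 and 31 agree; ecx - 0xc0000000 has the sign bit set exactly
        // for the values outside that range.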
3320    __ cmp(ecx, 0xc0000000);
3321    __ j(sign, &try_float, not_taken);
3322
3323    // Tag the result as a smi and we're done.
3324    STATIC_ASSERT(kSmiTagSize == 1);
3325    __ lea(eax, Operand(ecx, times_2, kSmiTag));
3326    __ jmp(&done);
3327
3328    // Try to store the result in a heap number.
3329    __ bind(&try_float);
3330    if (overwrite_ == UNARY_NO_OVERWRITE) {
3331      // Allocate a fresh heap number, but don't overwrite eax until
3332      // we're sure we can do it without going through the slow case
3333      // that needs the value in eax.
3334      __ AllocateHeapNumber(ebx, edx, edi, &slow);
3335      __ mov(eax, Operand(ebx));
3336    }
3337    if (CpuFeatures::IsSupported(SSE2)) {
3338      CpuFeatures::Scope use_sse2(SSE2);
3339      __ cvtsi2sd(xmm0, Operand(ecx));
3340      __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
3341    } else {
3342      __ push(ecx);
3343      __ fild_s(Operand(esp, 0));
3344      __ pop(ecx);
3345      __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
3346    }
3347  } else {
3348    UNIMPLEMENTED();
3349  }
3350
3351  // Return from the stub.
3352  __ bind(&done);
3353  __ StubReturn(1);
3354
3355  // Restore eax and go slow case.
3356  __ bind(&undo);
3357  __ mov(eax, Operand(edx));
3358
3359  // Handle the slow case by jumping to the JavaScript builtin.
3360  __ bind(&slow);
3361  __ pop(ecx);  // pop return address.
3362  __ push(eax);
3363  __ push(ecx);  // push return address
3364  switch (op_) {
3365    case Token::SUB:
3366      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
3367      break;
3368    case Token::BIT_NOT:
3369      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
3370      break;
3371    default:
3372      UNREACHABLE();
3373  }
3374}
3375
3376
3377void MathPowStub::Generate(MacroAssembler* masm) {
3378  // Registers are used as follows:
3379  // edx = base
3380  // eax = exponent
3381  // ecx = temporary, result
3382
3383  CpuFeatures::Scope use_sse2(SSE2);
3384  Label allocate_return, call_runtime;
3385
3386  // Load input parameters.
3387  __ mov(edx, Operand(esp, 2 * kPointerSize));
3388  __ mov(eax, Operand(esp, 1 * kPointerSize));
3389
3390  // Save 1 in xmm3 - we need this several times later on.
3391  __ mov(ecx, Immediate(1));
3392  __ cvtsi2sd(xmm3, Operand(ecx));
3393
3394  Label exponent_nonsmi;
3395  Label base_nonsmi;
3396  // If the exponent is a heap number go to that specific case.
3397  __ test(eax, Immediate(kSmiTagMask));
3398  __ j(not_zero, &exponent_nonsmi);
3399  __ test(edx, Immediate(kSmiTagMask));
3400  __ j(not_zero, &base_nonsmi);
3401
3402  // Optimized version when both exponent and base are smis.
3403  Label powi;
3404  __ SmiUntag(edx);
3405  __ cvtsi2sd(xmm0, Operand(edx));
3406  __ jmp(&powi);
3407  // Exponent is a smi and base is a heap number.
3408  __ bind(&base_nonsmi);
3409  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3410         Factory::heap_number_map());
3411  __ j(not_equal, &call_runtime);
3412
3413  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3414
3415  // Optimized version of pow if exponent is a smi.
3416  // xmm0 contains the base.
3417  __ bind(&powi);
3418  __ SmiUntag(eax);
3419
3420  // Save exponent in base as we need to check if exponent is negative later.
3421  // We know that base and exponent are in different registers.
3422  __ mov(edx, eax);
3423
3424  // Get absolute value of exponent.
3425  NearLabel no_neg;
3426  __ cmp(eax, 0);
3427  __ j(greater_equal, &no_neg);
3428  __ neg(eax);
3429  __ bind(&no_neg);
3430
3431  // Load xmm1 with 1.
3432  __ movsd(xmm1, xmm3);
3433  NearLabel while_true;
3434  NearLabel no_multiply;
3435
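      // Binary exponentiation (square-and-multiply). In C terms, the loop
      // below computes, for the untagged non-negative exponent in eax:
      //   do {
      //     int lsb = exp & 1; exp >>= 1;  // shr eax, 1: CF = lsb
      //     if (lsb) result *= base;       // mulsd xmm1, xmm0
      //     base *= base;                  // mulsd xmm0, xmm0
      //   } while (exp != 0);
      // mulsd leaves EFLAGS untouched, so j(not_zero) still tests the shr.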
3436  __ bind(&while_true);
3437  __ shr(eax, 1);
3438  __ j(not_carry, &no_multiply);
3439  __ mulsd(xmm1, xmm0);
3440  __ bind(&no_multiply);
3441  __ mulsd(xmm0, xmm0);
3442  __ j(not_zero, &while_true);
3443
3444  // edx (the base register) still holds the original exponent - if it is
3445  // negative, return 1/result.
3446  __ test(edx, Operand(edx));
3447  __ j(positive, &allocate_return);
3448  // Special case if xmm1 has reached infinity.
3449  __ mov(ecx, Immediate(0x7FB00000));
3450  __ movd(xmm0, Operand(ecx));
3451  __ cvtss2sd(xmm0, xmm0);
3452  __ ucomisd(xmm0, xmm1);
3453  __ j(equal, &call_runtime);
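      // xmm3 still holds 1.0 (loaded on entry), so the divide computes
      // 1/result, which is then moved back into xmm1.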
3454  __ divsd(xmm3, xmm1);
3455  __ movsd(xmm1, xmm3);
3456  __ jmp(&allocate_return);
3457
3458  // The exponent (and possibly the base) is a heap number - from here on we
3459  // work on doubles.
3460  __ bind(&exponent_nonsmi);
3461  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
3462         Factory::heap_number_map());
3463  __ j(not_equal, &call_runtime);
3464  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
3465  // Test if exponent is nan.
3466  __ ucomisd(xmm1, xmm1);
3467  __ j(parity_even, &call_runtime);
3468
3469  NearLabel base_not_smi;
3470  NearLabel handle_special_cases;
3471  __ test(edx, Immediate(kSmiTagMask));
3472  __ j(not_zero, &base_not_smi);
3473  __ SmiUntag(edx);
3474  __ cvtsi2sd(xmm0, Operand(edx));
3475  __ jmp(&handle_special_cases);
3476
3477  __ bind(&base_not_smi);
3478  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3479         Factory::heap_number_map());
3480  __ j(not_equal, &call_runtime);
3481  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
3482  __ and_(ecx, HeapNumber::kExponentMask);
3483  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
3484  // base is NaN or +/-Infinity
3485  __ j(greater_equal, &call_runtime);
3486  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3487
3488  // base is in xmm0 and exponent is in xmm1.
3489  __ bind(&handle_special_cases);
3490  NearLabel not_minus_half;
3491  // Test for -0.5.
3492  // Load xmm2 with -0.5.
3493  __ mov(ecx, Immediate(0xBF000000));
3494  __ movd(xmm2, Operand(ecx));
3495  __ cvtss2sd(xmm2, xmm2);
3496  // xmm2 now has -0.5.
3497  __ ucomisd(xmm2, xmm1);
3498  __ j(not_equal, &not_minus_half);
3499
3500  // Calculates reciprocal of square root.
3501  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
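      // Adding +0 first rewrites a -0 base as +0 (-0 + +0 == +0 in
      // round-to-nearest), so the square root below comes out as +0.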
3502  __ xorpd(xmm1, xmm1);
3503  __ addsd(xmm1, xmm0);
3504  __ sqrtsd(xmm1, xmm1);
3505  __ divsd(xmm3, xmm1);
3506  __ movsd(xmm1, xmm3);
3507  __ jmp(&allocate_return);
3508
3509  // Test for 0.5.
3510  __ bind(&not_minus_half);
3511  // Load xmm2 with 0.5.
3512  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
3513  __ addsd(xmm2, xmm3);
3514  // xmm2 now has 0.5.
3515  __ ucomisd(xmm2, xmm1);
3516  __ j(not_equal, &call_runtime);
3517  // Calculates square root.
3518  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
3519  __ xorpd(xmm1, xmm1);
3520  __ addsd(xmm1, xmm0);
3521  __ sqrtsd(xmm1, xmm1);
3522
3523  __ bind(&allocate_return);
3524  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
3525  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
3526  __ mov(eax, ecx);
3527  __ ret(2 * kPointerSize);
3528
3529  __ bind(&call_runtime);
3530  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3531}
3532
3533
3534void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
3535  // The key is in edx and the parameter count is in eax.
3536
3537  // The displacement is used for skipping the frame pointer on the
3538  // stack. It is the offset of the last parameter (if any) relative
3539  // to the frame pointer.
3540  static const int kDisplacement = 1 * kPointerSize;
3541
3542  // Check that the key is a smi.
3543  Label slow;
3544  __ test(edx, Immediate(kSmiTagMask));
3545  __ j(not_zero, &slow, not_taken);
3546
3547  // Check if the calling frame is an arguments adaptor frame.
3548  NearLabel adaptor;
3549  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3550  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
3551  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3552  __ j(equal, &adaptor);
3553
3554  // Check index against formal parameters count limit passed in
3555  // through register eax. Use unsigned comparison to get negative
3556  // check for free.
3557  __ cmp(edx, Operand(eax));
3558  __ j(above_equal, &slow, not_taken);
3559
3560  // Read the argument from the stack and return it.
3561  STATIC_ASSERT(kSmiTagSize == 1);
3562  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
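      // eax holds the count as a smi (2 * count), so scaling it by times_2
      // yields count * kPointerSize; edx (the key) is negated below so the
      // final mov indexes backwards from the end of the parameter area.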
3563  __ lea(ebx, Operand(ebp, eax, times_2, 0));
3564  __ neg(edx);
3565  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3566  __ ret(0);
3567
3568  // Arguments adaptor case: Check index against actual arguments
3569  // limit found in the arguments adaptor frame. Use unsigned
3570  // comparison to get negative check for free.
3571  __ bind(&adaptor);
3572  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3573  __ cmp(edx, Operand(ecx));
3574  __ j(above_equal, &slow, not_taken);
3575
3576  // Read the argument from the stack and return it.
3577  STATIC_ASSERT(kSmiTagSize == 1);
3578  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
3579  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
3580  __ neg(edx);
3581  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3582  __ ret(0);
3583
3584  // Slow-case: Handle non-smi or out-of-bounds access to arguments
3585  // by calling the runtime system.
3586  __ bind(&slow);
3587  __ pop(ebx);  // Return address.
3588  __ push(edx);
3589  __ push(ebx);
3590  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
3591}
3592
3593
3594void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
3595  // esp[0] : return address
3596  // esp[4] : number of parameters
3597  // esp[8] : receiver displacement
3598  // esp[12] : function
3599
3600  // The displacement is used for skipping the return address and the
3601  // frame pointer on the stack. It is the offset of the last
3602  // parameter (if any) relative to the frame pointer.
3603  static const int kDisplacement = 2 * kPointerSize;
3604
3605  // Check if the calling frame is an arguments adaptor frame.
3606  Label adaptor_frame, try_allocate, runtime;
3607  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3608  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3609  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3610  __ j(equal, &adaptor_frame);
3611
3612  // Get the length from the frame.
3613  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3614  __ jmp(&try_allocate);
3615
3616  // Patch the arguments.length and the parameters pointer.
3617  __ bind(&adaptor_frame);
3618  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3619  __ mov(Operand(esp, 1 * kPointerSize), ecx);
3620  __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
3621  __ mov(Operand(esp, 2 * kPointerSize), edx);
3622
3623  // Try the new space allocation. Start out with computing the size of
3624  // the arguments object and the elements array.
3625  NearLabel add_arguments_object;
3626  __ bind(&try_allocate);
3627  __ test(ecx, Operand(ecx));
3628  __ j(zero, &add_arguments_object);
3629  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
3630  __ bind(&add_arguments_object);
3631  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
3632
3633  // Do the allocation of both objects in one go.
3634  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
3635
3636  // Get the arguments boilerplate from the current (global) context.
3637  int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
3638  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3639  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3640  __ mov(edi, Operand(edi, offset));
3641
3642  // Copy the JS object part.
3643  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3644    __ mov(ebx, FieldOperand(edi, i));
3645    __ mov(FieldOperand(eax, i), ebx);
3646  }
3647
3648  // Setup the callee in-object property.
3649  STATIC_ASSERT(Heap::arguments_callee_index == 0);
3650  __ mov(ebx, Operand(esp, 3 * kPointerSize));
3651  __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
3652
3653  // Get the length (smi tagged) and set that as an in-object property too.
3654  STATIC_ASSERT(Heap::arguments_length_index == 1);
3655  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3656  __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
3657
3658  // If there are no actual arguments, we're done.
3659  Label done;
3660  __ test(ecx, Operand(ecx));
3661  __ j(zero, &done);
3662
3663  // Get the parameters pointer from the stack.
3664  __ mov(edx, Operand(esp, 2 * kPointerSize));
3665
3666  // Setup the elements pointer in the allocated arguments object and
3667  // initialize the header in the elements fixed array.
3668  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
3669  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3670  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3671         Immediate(Factory::fixed_array_map()));
3672  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3673  // Untag the length for the loop below.
3674  __ SmiUntag(ecx);
3675
3676  // Copy the fixed array slots.
3677  NearLabel loop;
3678  __ bind(&loop);
3679  __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
3680  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
3681  __ add(Operand(edi), Immediate(kPointerSize));
3682  __ sub(Operand(edx), Immediate(kPointerSize));
3683  __ dec(ecx);
3684  __ j(not_zero, &loop);
3685
3686  // Return and remove the on-stack parameters.
3687  __ bind(&done);
3688  __ ret(3 * kPointerSize);
3689
3690  // Do the runtime call to allocate the arguments object.
3691  __ bind(&runtime);
3692  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3693}
3694
3695
3696void RegExpExecStub::Generate(MacroAssembler* masm) {
3697  // Jump straight to the runtime if native RegExp support was not selected
3698  // at compile time, or if entering generated regexp code has been disabled
3699  // by the runtime flag.
3700#ifdef V8_INTERPRETED_REGEXP
3701  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3702#else  // V8_INTERPRETED_REGEXP
3703  if (!FLAG_regexp_entry_native) {
3704    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3705    return;
3706  }
3707
3708  // Stack frame on entry.
3709  //  esp[0]: return address
3710  //  esp[4]: last_match_info (expected JSArray)
3711  //  esp[8]: previous index
3712  //  esp[12]: subject string
3713  //  esp[16]: JSRegExp object
3714
3715  static const int kLastMatchInfoOffset = 1 * kPointerSize;
3716  static const int kPreviousIndexOffset = 2 * kPointerSize;
3717  static const int kSubjectOffset = 3 * kPointerSize;
3718  static const int kJSRegExpOffset = 4 * kPointerSize;
3719
3720  Label runtime, invoke_regexp;
3721
3722  // Ensure that a RegExp stack is allocated.
3723  ExternalReference address_of_regexp_stack_memory_address =
3724      ExternalReference::address_of_regexp_stack_memory_address();
3725  ExternalReference address_of_regexp_stack_memory_size =
3726      ExternalReference::address_of_regexp_stack_memory_size();
3727  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3728  __ test(ebx, Operand(ebx));
3729  __ j(zero, &runtime, not_taken);
3730
3731  // Check that the first argument is a JSRegExp object.
3732  __ mov(eax, Operand(esp, kJSRegExpOffset));
3733  STATIC_ASSERT(kSmiTag == 0);
3734  __ test(eax, Immediate(kSmiTagMask));
3735  __ j(zero, &runtime);
3736  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
3737  __ j(not_equal, &runtime);
3738  // Check that the RegExp has been compiled (data contains a fixed array).
3739  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3740  if (FLAG_debug_code) {
3741    __ test(ecx, Immediate(kSmiTagMask));
3742    __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
3743    __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
3744    __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
3745  }
3746
3747  // ecx: RegExp data (FixedArray)
3748  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3749  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
3750  __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
3751  __ j(not_equal, &runtime);
3752
3753  // ecx: RegExp data (FixedArray)
3754  // Check that the number of captures fit in the static offsets vector buffer.
3755  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3756  // Calculate number of capture registers (number_of_captures + 1) * 2. This
3757  // uses the assumption that smis are 2 * their untagged value.
3758  STATIC_ASSERT(kSmiTag == 0);
3759  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3760  __ add(Operand(edx), Immediate(2));  // edx was a smi.
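      // Since a smi is 2 * value, (number_of_captures + 1) * 2 is simply the
      // smi-tagged capture count plus 2.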
3761  // Check that the static offsets vector buffer is large enough.
3762  __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
3763  __ j(above, &runtime);
3764
3765  // ecx: RegExp data (FixedArray)
3766  // edx: Number of capture registers
3767  // Check that the second argument is a string.
3768  __ mov(eax, Operand(esp, kSubjectOffset));
3769  __ test(eax, Immediate(kSmiTagMask));
3770  __ j(zero, &runtime);
3771  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
3772  __ j(NegateCondition(is_string), &runtime);
3773  // Get the length of the string to ebx.
3774  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
3775
3776  // ebx: Length of subject string as a smi
3777  // ecx: RegExp data (FixedArray)
3778  // edx: Number of capture registers
3779  // Check that the third argument is a non-negative smi less than the subject
3780  // string length. A negative value will be greater (unsigned comparison).
3781  __ mov(eax, Operand(esp, kPreviousIndexOffset));
3782  __ test(eax, Immediate(kSmiTagMask));
3783  __ j(not_zero, &runtime);
3784  __ cmp(eax, Operand(ebx));
3785  __ j(above_equal, &runtime);
3786
3787  // ecx: RegExp data (FixedArray)
3788  // edx: Number of capture registers
3789  // Check that the fourth object is a JSArray object.
3790  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3791  __ test(eax, Immediate(kSmiTagMask));
3792  __ j(zero, &runtime);
3793  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
3794  __ j(not_equal, &runtime);
3795  // Check that the JSArray is in fast case.
3796  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3797  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
3798  __ cmp(eax, Factory::fixed_array_map());
3799  __ j(not_equal, &runtime);
3800  // Check that the last match info has space for the capture registers and the
3801  // additional information.
3802  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
3803  __ SmiUntag(eax);
3804  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
3805  __ cmp(edx, Operand(eax));
3806  __ j(greater, &runtime);
3807
3808  // ecx: RegExp data (FixedArray)
3809  // Check the representation and encoding of the subject string.
3810  Label seq_ascii_string, seq_two_byte_string, check_code;
3811  __ mov(eax, Operand(esp, kSubjectOffset));
3812  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3813  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
3814  // First check for flat two byte string.
3815  __ and_(ebx,
3816          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
3817  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
3818  __ j(zero, &seq_two_byte_string);
3819  // Any other flat string must be a flat ascii string.
3820  __ test(Operand(ebx),
3821          Immediate(kIsNotStringMask | kStringRepresentationMask));
3822  __ j(zero, &seq_ascii_string);
3823
3824  // Check for flat cons string.
3825  // A flat cons string is a cons string where the second part is the empty
3826  // string. In that case the subject string is just the first part of the cons
3827  // string. Also in this case the first part of the cons string is known to be
3828  // a sequential string or an external string.
3829  STATIC_ASSERT(kExternalStringTag != 0);
3830  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
3831  __ test(Operand(ebx),
3832          Immediate(kIsNotStringMask | kExternalStringTag));
3833  __ j(not_zero, &runtime);
3834  // String is a cons string.
3835  __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
3836  __ cmp(Operand(edx), Factory::empty_string());
3837  __ j(not_equal, &runtime);
3838  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
3839  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3840  // String is a cons string with empty second part.
3841  // eax: first part of cons string.
3842  // ebx: map of first part of cons string.
3843  // Is first part a flat two byte string?
3844  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3845            kStringRepresentationMask | kStringEncodingMask);
3846  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
3847  __ j(zero, &seq_two_byte_string);
3848  // Any other flat string must be ascii.
3849  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3850            kStringRepresentationMask);
3851  __ j(not_zero, &runtime);
3852
3853  __ bind(&seq_ascii_string);
3854  // eax: subject string (flat ascii)
3855  // ecx: RegExp data (FixedArray)
3856  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
3857  __ Set(edi, Immediate(1));  // Type is ascii.
3858  __ jmp(&check_code);
3859
3860  __ bind(&seq_two_byte_string);
3861  // eax: subject string (flat two byte)
3862  // ecx: RegExp data (FixedArray)
3863  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
3864  __ Set(edi, Immediate(0));  // Type is two byte.
3865
3866  __ bind(&check_code);
3867  // Check that the irregexp code has been generated for the actual string
3868  // encoding. If it has, the field contains a code object; otherwise it
3869  // contains the hole.
3870  __ CmpObjectType(edx, CODE_TYPE, ebx);
3871  __ j(not_equal, &runtime);
3872
3873  // eax: subject string
3874  // edx: code
3875  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
3876  // Load used arguments before starting to push arguments for call to native
3877  // RegExp code to avoid handling changing stack height.
3878  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
3879  __ SmiUntag(ebx);  // Previous index from smi.
3880
3881  // eax: subject string
3882  // ebx: previous index
3883  // edx: code
3884  // edi: encoding of subject string (1 if ascii 0 if two_byte);
3885  // All checks done. Now push arguments for native regexp code.
3886  __ IncrementCounter(&Counters::regexp_entry_native, 1);
3887
3888  static const int kRegExpExecuteArguments = 7;
3889  __ EnterApiExitFrame(kRegExpExecuteArguments);
3890
3891  // Argument 7: Indicate that this is a direct call from JavaScript.
3892  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
3893
3894  // Argument 6: Start (high end) of backtracking stack memory area.
3895  __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
3896  __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3897  __ mov(Operand(esp, 5 * kPointerSize), ecx);
3898
3899  // Argument 5: static offsets vector buffer.
3900  __ mov(Operand(esp, 4 * kPointerSize),
3901         Immediate(ExternalReference::address_of_static_offsets_vector()));
3902
3903  // Argument 4: End of string data
3904  // Argument 3: Start of string data
3905  NearLabel setup_two_byte, setup_rest;
3906  __ test(edi, Operand(edi));
3907  __ mov(edi, FieldOperand(eax, String::kLengthOffset));
3908  __ j(zero, &setup_two_byte);
3909  __ SmiUntag(edi);
3910  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
3911  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
3912  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
3913  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
3914  __ jmp(&setup_rest);
3915
3916  __ bind(&setup_two_byte);
3917  STATIC_ASSERT(kSmiTag == 0);
3918  STATIC_ASSERT(kSmiTagSize == 1);  // edi is a smi (value scaled by 2).
3919  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
3920  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
3921  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
3922  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
3923
3924  __ bind(&setup_rest);
3925
3926  // Argument 2: Previous index.
3927  __ mov(Operand(esp, 1 * kPointerSize), ebx);
3928
3929  // Argument 1: Subject string.
3930  __ mov(Operand(esp, 0 * kPointerSize), eax);
3931
3932  // Locate the code entry and call it.
3933  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
3934  __ call(Operand(edx));
3935
3936  // Drop arguments and come back to JS mode.
3937  __ LeaveApiExitFrame();
3938
3939  // Check the result.
3940  Label success;
3941  __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
3942  __ j(equal, &success, taken);
3943  Label failure;
3944  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
3945  __ j(equal, &failure, taken);
3946  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
3947  // If not exception, it can only be retry. Handle that in the runtime system.
3948  __ j(not_equal, &runtime);
3949  // Result must now be exception. If there is no pending exception already,
3950  // a stack overflow (on the backtrack stack) was detected in RegExp code but
3951  // the exception has not been created yet. Handle that in the runtime system.
3952  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3953  ExternalReference pending_exception(Top::k_pending_exception_address);
3954  __ mov(edx,
3955         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
3956  __ mov(eax, Operand::StaticVariable(pending_exception));
3957  __ cmp(edx, Operand(eax));
3958  __ j(equal, &runtime);
3959  // For exception, throw the exception again.
3960
3961  // Clear the pending exception variable.
3962  __ mov(Operand::StaticVariable(pending_exception), edx);
3963
3964  // Special handling of termination exceptions which are uncatchable
3965  // by javascript code.
3966  __ cmp(eax, Factory::termination_exception());
3967  Label throw_termination_exception;
3968  __ j(equal, &throw_termination_exception);
3969
3970  // Handle normal exception by following handler chain.
3971  __ Throw(eax);
3972
3973  __ bind(&throw_termination_exception);
3974  __ ThrowUncatchable(TERMINATION, eax);
3975
3976  __ bind(&failure);
3977  // For failure to match, return null.
3978  __ mov(Operand(eax), Factory::null_value());
3979  __ ret(4 * kPointerSize);
3980
3981  // Load RegExp data.
3982  __ bind(&success);
3983  __ mov(eax, Operand(esp, kJSRegExpOffset));
3984  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3985  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3986  // Calculate number of capture registers (number_of_captures + 1) * 2.
3987  STATIC_ASSERT(kSmiTag == 0);
3988  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3989  __ add(Operand(edx), Immediate(2));  // edx was a smi.
3990
3991  // edx: Number of capture registers
3992  // Load last_match_info which is still known to be a fast case JSArray.
3993  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3994  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3995
3996  // ebx: last_match_info backing store (FixedArray)
3997  // edx: number of capture registers
3998  // Store the capture count.
3999  __ SmiTag(edx);  // Number of capture registers to smi.
4000  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
4001  __ SmiUntag(edx);  // Number of capture registers back from smi.
4002  // Store last subject and last input.
4003  __ mov(eax, Operand(esp, kSubjectOffset));
4004  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
4005  __ mov(ecx, ebx);
4006  __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
4007  __ mov(eax, Operand(esp, kSubjectOffset));
4008  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
4009  __ mov(ecx, ebx);
4010  __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
4011
4012  // Get the static offsets vector filled by the native regexp code.
4013  ExternalReference address_of_static_offsets_vector =
4014      ExternalReference::address_of_static_offsets_vector();
4015  __ mov(ecx, Immediate(address_of_static_offsets_vector));
4016
4017  // ebx: last_match_info backing store (FixedArray)
4018  // ecx: offsets vector
4019  // edx: number of capture registers
4020  NearLabel next_capture, done;
4021  // Capture register counter starts from number of capture registers and
4022  // counts down until wrapping after zero.
4023  __ bind(&next_capture);
4024  __ sub(Operand(edx), Immediate(1));
4025  __ j(negative, &done);
4026  // Read the value from the static offsets vector buffer.
4027  __ mov(edi, Operand(ecx, edx, times_int_size, 0));
4028  __ SmiTag(edi);
4029  // Store the smi value in the last match info.
4030  __ mov(FieldOperand(ebx,
4031                      edx,
4032                      times_pointer_size,
4033                      RegExpImpl::kFirstCaptureOffset),
4034                      edi);
4035  __ jmp(&next_capture);
4036  __ bind(&done);
4037
4038  // Return last match info.
4039  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
4040  __ ret(4 * kPointerSize);
4041
4042  // Do the runtime call to execute the regexp.
4043  __ bind(&runtime);
4044  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4045#endif  // V8_INTERPRETED_REGEXP
4046}
4047
4048
4049void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4050  const int kMaxInlineLength = 100;
4051  Label slowcase;
4052  NearLabel done;
4053  __ mov(ebx, Operand(esp, kPointerSize * 3));
4054  __ test(ebx, Immediate(kSmiTagMask));
4055  __ j(not_zero, &slowcase);
4056  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
4057  __ j(above, &slowcase);
4058  // Smi-tagging is equivalent to multiplying by 2.
4059  STATIC_ASSERT(kSmiTag == 0);
4060  STATIC_ASSERT(kSmiTagSize == 1);
4061  // Allocate RegExpResult followed by FixedArray with size in ebx.
4062  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
4063  // Elements:  [Map][Length][..elements..]
4064  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
4065                        times_half_pointer_size,
4066                        ebx,  // In: Number of elements (times 2, being a smi)
4067                        eax,  // Out: Start of allocation (tagged).
4068                        ecx,  // Out: End of allocation.
4069                        edx,  // Scratch register
4070                        &slowcase,
4071                        TAG_OBJECT);
4072  // eax: Start of allocated area, object-tagged.
4073
4074  // Set JSArray map to global.regexp_result_map().
4075  // Set empty properties FixedArray.
4076  // Set elements to point to FixedArray allocated right after the JSArray.
4077  // Interleave operations for better latency.
4078  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
4079  __ mov(ecx, Immediate(Factory::empty_fixed_array()));
4080  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
4081  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
4082  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
4083  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
4084  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
4085  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
4086
4087  // Set input, index and length fields from arguments.
4088  __ mov(ecx, Operand(esp, kPointerSize * 1));
4089  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
4090  __ mov(ecx, Operand(esp, kPointerSize * 2));
4091  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
4092  __ mov(ecx, Operand(esp, kPointerSize * 3));
4093  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
4094
4095  // Fill out the elements FixedArray.
4096  // eax: JSArray.
4097  // ebx: FixedArray.
4098  // ecx: Number of elements in array, as smi.
4099
4100  // Set map.
4101  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
4102         Immediate(Factory::fixed_array_map()));
4103  // Set length.
4104  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
4105  // Fill contents of fixed-array with the-hole.
4106  __ SmiUntag(ecx);
4107  __ mov(edx, Immediate(Factory::the_hole_value()));
4108  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
4109  // Fill fixed array elements with hole.
4110  // eax: JSArray.
4111  // ecx: Number of elements to fill.
4112  // ebx: Start of elements in FixedArray.
4113  // edx: the hole.
4114  Label loop;
4115  __ test(ecx, Operand(ecx));
4116  __ bind(&loop);
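      // On the first iteration the flags come from the test above; on later
      // iterations they come from the sub at the bottom of the loop.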
4117  __ j(less_equal, &done);  // Jump if ecx is negative or zero.
4118  __ sub(Operand(ecx), Immediate(1));
4119  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
4120  __ jmp(&loop);
4121
4122  __ bind(&done);
4123  __ ret(3 * kPointerSize);
4124
4125  __ bind(&slowcase);
4126  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4127}
4128
4129
4130void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
4131                                                         Register object,
4132                                                         Register result,
4133                                                         Register scratch1,
4134                                                         Register scratch2,
4135                                                         bool object_is_smi,
4136                                                         Label* not_found) {
4137  // Use of registers. Register result is used as a temporary.
4138  Register number_string_cache = result;
4139  Register mask = scratch1;
4140  Register scratch = scratch2;
4141
4142  // Load the number string cache.
4143  ExternalReference roots_address = ExternalReference::roots_address();
4144  __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
4145  __ mov(number_string_cache,
4146         Operand::StaticArray(scratch, times_pointer_size, roots_address));
4147  // Make the hash mask from the length of the number string cache. It
4148  // contains two elements (number and string) for each cache entry.
4149  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
4150  __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
4151  __ sub(Operand(mask), Immediate(1));  // Make mask.
4152
4153  // Calculate the entry in the number string cache. The hash value in the
4154  // number string cache for smis is just the smi value, and the hash for
4155  // doubles is the xor of the upper and lower words. See
4156  // Heap::GetNumberStringCache.
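      // In C terms: index = hash & mask, with entry i occupying elements
      // 2 * i (the number) and 2 * i + 1 (the cached string) - hence the
      // times_twice_pointer_size addressing below.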
4157  NearLabel smi_hash_calculated;
4158  NearLabel load_result_from_cache;
4159  if (object_is_smi) {
4160    __ mov(scratch, object);
4161    __ SmiUntag(scratch);
4162  } else {
4163    NearLabel not_smi, hash_calculated;
4164    STATIC_ASSERT(kSmiTag == 0);
4165    __ test(object, Immediate(kSmiTagMask));
4166    __ j(not_zero, &not_smi);
4167    __ mov(scratch, object);
4168    __ SmiUntag(scratch);
4169    __ jmp(&smi_hash_calculated);
4170    __ bind(&not_smi);
4171    __ cmp(FieldOperand(object, HeapObject::kMapOffset),
4172           Factory::heap_number_map());
4173    __ j(not_equal, not_found);
4174    STATIC_ASSERT(8 == kDoubleSize);
4175    __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
4176    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
4177    // Object is heap number and hash is now in scratch. Calculate cache index.
4178    __ and_(scratch, Operand(mask));
4179    Register index = scratch;
4180    Register probe = mask;
4181    __ mov(probe,
4182           FieldOperand(number_string_cache,
4183                        index,
4184                        times_twice_pointer_size,
4185                        FixedArray::kHeaderSize));
4186    __ test(probe, Immediate(kSmiTagMask));
4187    __ j(zero, not_found);
4188    if (CpuFeatures::IsSupported(SSE2)) {
4189      CpuFeatures::Scope fscope(SSE2);
4190      __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
4191      __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
4192      __ ucomisd(xmm0, xmm1);
4193    } else {
4194      __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
4195      __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
4196      __ FCmp();
4197    }
4198    __ j(parity_even, not_found);  // Bail out if NaN is involved.
4199    __ j(not_equal, not_found);  // The cache did not contain this value.
4200    __ jmp(&load_result_from_cache);
4201  }
4202
4203  __ bind(&smi_hash_calculated);
4204  // Object is smi and hash is now in scratch. Calculate cache index.
4205  __ and_(scratch, Operand(mask));
4206  Register index = scratch;
4207  // Check if the entry is the smi we are looking for.
4208  __ cmp(object,
4209         FieldOperand(number_string_cache,
4210                      index,
4211                      times_twice_pointer_size,
4212                      FixedArray::kHeaderSize));
4213  __ j(not_equal, not_found);
4214
4215  // Get the result from the cache.
4216  __ bind(&load_result_from_cache);
4217  __ mov(result,
4218         FieldOperand(number_string_cache,
4219                      index,
4220                      times_twice_pointer_size,
4221                      FixedArray::kHeaderSize + kPointerSize));
4222  __ IncrementCounter(&Counters::number_to_string_native, 1);
4223}
4224
4225
4226void NumberToStringStub::Generate(MacroAssembler* masm) {
4227  Label runtime;
4228
4229  __ mov(ebx, Operand(esp, kPointerSize));
4230
4231  // Generate code to lookup number in the number string cache.
4232  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
4233  __ ret(1 * kPointerSize);
4234
4235  __ bind(&runtime);
4236  // Handle number to string in the runtime system if not found in the cache.
4237  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
4238}
4239
4240
4241static int NegativeComparisonResult(Condition cc) {
4242  ASSERT(cc != equal);
4243  ASSERT((cc == less) || (cc == less_equal)
4244      || (cc == greater) || (cc == greater_equal));
4245  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
4246}
4247
4248void CompareStub::Generate(MacroAssembler* masm) {
4249  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4250
4251  Label check_unequal_objects, done;
4252
4253  // Compare two smis if required.
4254  if (include_smi_compare_) {
4255    Label non_smi, smi_done;
4256    __ mov(ecx, Operand(edx));
4257    __ or_(ecx, Operand(eax));
4258    __ test(ecx, Immediate(kSmiTagMask));
4259    __ j(not_zero, &non_smi, not_taken);
4260    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
4261    __ j(no_overflow, &smi_done);
4262    __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
4263    __ bind(&smi_done);
4264    __ mov(eax, edx);
4265    __ ret(0);
4266    __ bind(&non_smi);
4267  } else if (FLAG_debug_code) {
4268    __ mov(ecx, Operand(edx));
4269    __ or_(ecx, Operand(eax));
4270    __ test(ecx, Immediate(kSmiTagMask));
4271    __ Assert(not_zero, "Unexpected smi operands.");
4272  }
4273
4274  // NOTICE! This code is only reached after a smi-fast-case check, so
4275  // it is certain that at least one operand isn't a smi.
4276
4277  // Identical objects can be compared fast, but there are some tricky cases
4278  // for NaN and undefined.
4279  {
4280    Label not_identical;
4281    __ cmp(eax, Operand(edx));
4282    __ j(not_equal, &not_identical);
4283
4284    if (cc_ != equal) {
4285      // Check for undefined.  undefined OP undefined is false even though
4286      // undefined == undefined.
4287      NearLabel check_for_nan;
4288      __ cmp(edx, Factory::undefined_value());
4289      __ j(not_equal, &check_for_nan);
4290      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4291      __ ret(0);
4292      __ bind(&check_for_nan);
4293    }
4294
4295    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
4296    // so we do the second best thing - test it ourselves.
4297    // Note: if cc_ != equal, never_nan_nan_ is not used.
4298    if (never_nan_nan_ && (cc_ == equal)) {
4299      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4300      __ ret(0);
4301    } else {
4302      NearLabel heap_number;
4303      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
4304             Immediate(Factory::heap_number_map()));
4305      __ j(equal, &heap_number);
4306      if (cc_ != equal) {
4307        // Call runtime on identical JSObjects.  Otherwise return equal.
4308        __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
4309        __ j(above_equal, &not_identical);
4310      }
4311      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4312      __ ret(0);
4313
4314      __ bind(&heap_number);
4315      // It is a heap number, so return non-equal if it's NaN and equal if
4316      // it's not NaN.
4317      // The representation of NaN values has all exponent bits (52..62) set,
4318      // and not all mantissa bits (0..51) clear.
4319      // We only accept QNaNs, which have bit 51 set.
4320      // Read top bits of double representation (second word of value).
4321
4322      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
4323      // all bits in the mask are set. We only need to check the word
4324      // that contains the exponent and high bit of the mantissa.
4325      STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
4326      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
4327      __ Set(eax, Immediate(0));
4328      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
4329      // bits.
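          // Example: a canonical quiet NaN has top word 0x7FF80000; doubling
          // drops the sign bit and yields 0xFFF00000, which is above_equal
          // the doubled mask, whereas +/-Infinity (top word 0x7FF00000)
          // doubles to 0xFFE00000 and stays below it.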
4330      __ add(edx, Operand(edx));
4331      __ cmp(edx, kQuietNaNHighBitsMask << 1);
4332      if (cc_ == equal) {
4333        STATIC_ASSERT(EQUAL != 1);
4334        __ setcc(above_equal, eax);
4335        __ ret(0);
4336      } else {
4337        NearLabel nan;
4338        __ j(above_equal, &nan);
4339        __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4340        __ ret(0);
4341        __ bind(&nan);
4342        __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4343        __ ret(0);
4344      }
4345    }
4346
4347    __ bind(&not_identical);
4348  }
4349
4350  // Strict equality can quickly decide whether objects are equal.
4351  // Non-strict object equality is slower, so it is handled later in the stub.
4352  if (cc_ == equal && strict_) {
4353    Label slow;  // Fallthrough label.
4354    NearLabel not_smis;
4355    // If we're doing a strict equality comparison, we don't have to do
4356    // type conversion, so we generate code to do fast comparison for objects
4357    // and oddballs. Non-smi numbers and strings still go through the usual
4358    // slow-case code.
4359    // If either is a Smi (we know that not both are), then they can only
4360    // be equal if the other is a HeapNumber. If so, use the slow case.
4361    STATIC_ASSERT(kSmiTag == 0);
4362    ASSERT_EQ(0, Smi::FromInt(0));
4363    __ mov(ecx, Immediate(kSmiTagMask));
4364    __ and_(ecx, Operand(eax));
4365    __ test(ecx, Operand(edx));
4366    __ j(not_zero, &not_smis);
4367    // One operand is a smi.
4368
4369    // Check whether the non-smi is a heap number.
4370    STATIC_ASSERT(kSmiTagMask == 1);
4371    // ecx still holds eax & kSmiTagMask, which is either zero or one.
4372    __ sub(Operand(ecx), Immediate(0x01));
4373    __ mov(ebx, edx);
4374    __ xor_(ebx, Operand(eax));
4375    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
4376    __ xor_(ebx, Operand(eax));
4377    // if eax was smi, ebx is now edx, else eax.
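        // (ecx is 0 or -1 here, so the and/xor sequence is a branch-free
        // select of whichever operand is not a smi.)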
4378
4379    // Check if the non-smi operand is a heap number.
4380    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
4381           Immediate(Factory::heap_number_map()));
4382    // If heap number, handle it in the slow case.
4383    __ j(equal, &slow);
4384    // Return non-equal (ebx is not zero)
4385    __ mov(eax, ebx);
4386    __ ret(0);
4387
4388    __ bind(&not_smis);
4389    // If either operand is a JSObject or an oddball value, then they are not
4390    // equal since their pointers are different
4391    // There is no test for undetectability in strict equality.
4392
4393    // Get the type of the first operand.
4394    // If the first object is a JS object, we have done pointer comparison.
4395    NearLabel first_non_object;
4396    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4397    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
4398    __ j(below, &first_non_object);
4399
4400    // Return non-zero (eax is not zero).
4401    NearLabel return_not_equal;
4402    STATIC_ASSERT(kHeapObjectTag != 0);
4403    __ bind(&return_not_equal);
4404    __ ret(0);
4405
4406    __ bind(&first_non_object);
4407    // Check for oddballs: true, false, null, undefined.
4408    __ CmpInstanceType(ecx, ODDBALL_TYPE);
4409    __ j(equal, &return_not_equal);
4410
4411    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
4412    __ j(above_equal, &return_not_equal);
4413
4414    // Check for oddballs: true, false, null, undefined.
4415    __ CmpInstanceType(ecx, ODDBALL_TYPE);
4416    __ j(equal, &return_not_equal);
4417
4418    // Fall through to the general case.
4419    __ bind(&slow);
4420  }
4421
4422  // Generate the number comparison code.
4423  if (include_number_compare_) {
4424    Label non_number_comparison;
4425    Label unordered;
4426    if (CpuFeatures::IsSupported(SSE2)) {
4427      CpuFeatures::Scope use_sse2(SSE2);
4428      CpuFeatures::Scope use_cmov(CMOV);
4429
4430      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
4431      __ ucomisd(xmm0, xmm1);
4432
4433      // Don't base result on EFLAGS when a NaN is involved.
4434      __ j(parity_even, &unordered, not_taken);
4435      // Return a result of -1, 0, or 1, based on EFLAGS.
4436      __ mov(eax, 0);  // equal
4437      __ mov(ecx, Immediate(Smi::FromInt(1)));
4438      __ cmov(above, eax, Operand(ecx));
4439      __ mov(ecx, Immediate(Smi::FromInt(-1)));
4440      __ cmov(below, eax, Operand(ecx));
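      // Illustrative only: eax starts as the untagged 0, which is also
      // Smi::FromInt(0), so the equal case needs no tagging; the
      // conditional moves overwrite it with Smi::FromInt(1) or
      // Smi::FromInt(-1) in the ordered cases.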
4441      __ ret(0);
4442    } else {
4443      FloatingPointHelper::CheckFloatOperands(
4444          masm, &non_number_comparison, ebx);
4445      FloatingPointHelper::LoadFloatOperand(masm, eax);
4446      FloatingPointHelper::LoadFloatOperand(masm, edx);
4447      __ FCmp();
4448
4449      // Don't base result on EFLAGS when a NaN is involved.
4450      __ j(parity_even, &unordered, not_taken);
4451
4452      NearLabel below_label, above_label;
4453      // Return a result of -1, 0, or 1, based on EFLAGS.
4454      __ j(below, &below_label, not_taken);
4455      __ j(above, &above_label, not_taken);
4456
4457      __ Set(eax, Immediate(0));
4458      __ ret(0);
4459
4460      __ bind(&below_label);
4461      __ mov(eax, Immediate(Smi::FromInt(-1)));
4462      __ ret(0);
4463
4464      __ bind(&above_label);
4465      __ mov(eax, Immediate(Smi::FromInt(1)));
4466      __ ret(0);
4467    }
4468
4469    // If one of the numbers was NaN, then the result is always false.
4470    // The cc is never not-equal.
4471    __ bind(&unordered);
4472    ASSERT(cc_ != not_equal);
4473    if (cc_ == less || cc_ == less_equal) {
4474      __ mov(eax, Immediate(Smi::FromInt(1)));
4475    } else {
4476      __ mov(eax, Immediate(Smi::FromInt(-1)));
4477    }
4478    __ ret(0);
4479
4480    // The number comparison code did not provide a valid result.
4481    __ bind(&non_number_comparison);
4482  }
4483
4484  // Fast negative check for symbol-to-symbol equality.
4485  Label check_for_strings;
4486  if (cc_ == equal) {
4487    BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
4488    BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
4489
4490    // We've already checked for object identity, so if both operands
4491    // are symbols they aren't equal. Register eax already holds a
4492    // non-zero value, which indicates not equal, so just return.
4493    __ ret(0);
4494  }
4495
4496  __ bind(&check_for_strings);
4497
4498  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4499                                         &check_unequal_objects);
4500
4501  // Inline comparison of ascii strings.
4502  StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4503                                                     edx,
4504                                                     eax,
4505                                                     ecx,
4506                                                     ebx,
4507                                                     edi);
4508#ifdef DEBUG
4509  __ Abort("Unexpected fall-through from string comparison");
4510#endif
4511
4512  __ bind(&check_unequal_objects);
4513  if (cc_ == equal && !strict_) {
4514    // Non-strict equality.  Objects are unequal if
4515    // they are both JSObjects and not undetectable,
4516    // and their pointers are different.
4517    NearLabel not_both_objects;
4518    NearLabel return_unequal;
4519    // At most one is a smi, so we can test for smi by adding the two.
4520    // A smi plus a heap object has the low bit set, a heap object plus
4521    // a heap object has the low bit clear.
4522    STATIC_ASSERT(kSmiTag == 0);
4523    STATIC_ASSERT(kSmiTagMask == 1);
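    // Illustrative arithmetic for the lea below (kSmiTag == 0,
    // kHeapObjectTag == 1):
    //   smi  + heap -> low bit 1
    //   heap + heap -> 1 + 1 = 2, low bit 0
    // so a set low bit means exactly one operand is a smi.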
4524    __ lea(ecx, Operand(eax, edx, times_1, 0));
4525    __ test(ecx, Immediate(kSmiTagMask));
4526    __ j(not_zero, &not_both_objects);
4527    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
4528    __ j(below, &not_both_objects);
4529    __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
4530    __ j(below, &not_both_objects);
4531    // We do not bail out after this point.  Both are JSObjects, and
4532    // they are equal if and only if both are undetectable.
4533    // That is, the AND of the undetectable bits is 1 iff they are equal.
4534    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4535              1 << Map::kIsUndetectable);
4536    __ j(zero, &return_unequal);
4537    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4538              1 << Map::kIsUndetectable);
4539    __ j(zero, &return_unequal);
4540    // The objects are both undetectable, so they both compare as the value
4541    // undefined, and are equal.
4542    __ Set(eax, Immediate(EQUAL));
4543    __ bind(&return_unequal);
4544    // Return non-equal by returning the non-zero object pointer in eax,
4545    // or return equal if we fell through to here.
4546    __ ret(0);
4547    __ bind(&not_both_objects);
4548  }
4549
4550  // Push arguments below the return address.
4551  __ pop(ecx);
4552  __ push(edx);
4553  __ push(eax);
4554
4555  // Figure out which native to call and set up the arguments.
4556  Builtins::JavaScript builtin;
4557  if (cc_ == equal) {
4558    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4559  } else {
4560    builtin = Builtins::COMPARE;
4561    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4562  }
4563
4564  // Restore return address on the stack.
4565  __ push(ecx);
4566
4567  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4568  // tagged as a small integer.
4569  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4570}
4571
4572
4573void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
4574                                    Label* label,
4575                                    Register object,
4576                                    Register scratch) {
4577  __ test(object, Immediate(kSmiTagMask));
4578  __ j(zero, label);
4579  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
4580  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
4581  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
4582  __ cmp(scratch, kSymbolTag | kStringTag);
4583  __ j(not_equal, label);
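  // Illustrative only: after the and/cmp pair above, the branch is not
  // taken exactly when the not-string bit is clear (the object is a
  // string) and the symbol bit is set, i.e. the object is a symbol.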
4584}
4585
4586
4587void StackCheckStub::Generate(MacroAssembler* masm) {
4588  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
4589}
4590
4591
4592void CallFunctionStub::Generate(MacroAssembler* masm) {
4593  Label slow;
4594
4595  // If the receiver might be a value (string, number or boolean), check for
4596  // this and box it if it is.
4597  if (ReceiverMightBeValue()) {
4598    // Get the receiver from the stack.
4599    // +1 ~ return address
4600    Label receiver_is_value, receiver_is_js_object;
4601    __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
4602
4603    // Check if receiver is a smi (which is a number value).
4604    __ test(eax, Immediate(kSmiTagMask));
4605    __ j(zero, &receiver_is_value, not_taken);
4606
4607    // Check if the receiver is a valid JS object.
4608    __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
4609    __ j(above_equal, &receiver_is_js_object);
4610
4611    // Call the runtime to box the value.
4612    __ bind(&receiver_is_value);
4613    __ EnterInternalFrame();
4614    __ push(eax);
4615    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
4616    __ LeaveInternalFrame();
4617    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
4618
4619    __ bind(&receiver_is_js_object);
4620  }
4621
4622  // Get the function to call from the stack.
4623  // +2 ~ receiver, return address
4624  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
4625
4626  // Check that the function really is a JavaScript function.
4627  __ test(edi, Immediate(kSmiTagMask));
4628  __ j(zero, &slow, not_taken);
4629  // Go to the slow case if we do not have a function.
4630  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4631  __ j(not_equal, &slow, not_taken);
4632
4633  // Fast-case: Just invoke the function.
4634  ParameterCount actual(argc_);
4635  __ InvokeFunction(edi, actual, JUMP_FUNCTION);
4636
4637  // Slow-case: Non-function called.
4638  __ bind(&slow);
4639  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4640  // of the original receiver from the call site).
4641  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
4642  __ Set(eax, Immediate(argc_));
4643  __ Set(ebx, Immediate(0));
4644  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
4645  Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
4646  __ jmp(adaptor, RelocInfo::CODE_TARGET);
4647}
4648
4649
4650void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
4651  __ Throw(eax);
4652}
4653
4654
4655void CEntryStub::GenerateCore(MacroAssembler* masm,
4656                              Label* throw_normal_exception,
4657                              Label* throw_termination_exception,
4658                              Label* throw_out_of_memory_exception,
4659                              bool do_gc,
4660                              bool always_allocate_scope) {
4661  // eax: result parameter for PerformGC, if any
4662  // ebx: pointer to C function  (C callee-saved)
4663  // ebp: frame pointer  (restored after C call)
4664  // esp: stack pointer  (restored after C call)
4665  // edi: number of arguments including receiver  (C callee-saved)
4666  // esi: pointer to the first argument (C callee-saved)
4667
4668  // Result returned in eax, or eax+edx if result_size_ is 2.
4669
4670  // Check stack alignment.
4671  if (FLAG_debug_code) {
4672    __ CheckStackAlignment();
4673  }
4674
4675  if (do_gc) {
4676    // Pass failure code returned from last attempt as first argument to
4677    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
4678    // stack alignment is known to be correct. This function takes one argument
4679    // which is passed on the stack, and we know that the stack has been
4680    // prepared to pass at least one argument.
4681    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
4682    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
4683  }
4684
4685  ExternalReference scope_depth =
4686      ExternalReference::heap_always_allocate_scope_depth();
4687  if (always_allocate_scope) {
4688    __ inc(Operand::StaticVariable(scope_depth));
4689  }
4690
4691  // Call C function.
4692  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
4693  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
4694  __ call(Operand(ebx));
4695  // Result is in eax or edx:eax - do not destroy these registers!
4696
4697  if (always_allocate_scope) {
4698    __ dec(Operand::StaticVariable(scope_depth));
4699  }
4700
4701  // Make sure we're not trying to return 'the hole' from the runtime
4702  // call, as this may lead to crashes in the IC code later.
4703  if (FLAG_debug_code) {
4704    NearLabel okay;
4705    __ cmp(eax, Factory::the_hole_value());
4706    __ j(not_equal, &okay);
4707    __ int3();
4708    __ bind(&okay);
4709  }
4710
4711  // Check for failure result.
4712  Label failure_returned;
4713  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4714  __ lea(ecx, Operand(eax, 1));
4715  // Lower 2 bits of ecx are 0 iff eax has failure tag.
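  // Illustrative only: per the STATIC_ASSERT above, adding 1 to a
  // failure-tagged value (low bits 0b11) clears exactly the tag bits:
  //   ...11 + 1 -> ...00.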
4716  __ test(ecx, Immediate(kFailureTagMask));
4717  __ j(zero, &failure_returned, not_taken);
4718
4719  ExternalReference pending_exception_address(Top::k_pending_exception_address);
4720
4721  // Check that there is no pending exception, otherwise we
4722  // should have returned some failure value.
4723  if (FLAG_debug_code) {
4724    __ push(edx);
4725    __ mov(edx, Operand::StaticVariable(
4726           ExternalReference::the_hole_value_location()));
4727    NearLabel okay;
4728    __ cmp(edx, Operand::StaticVariable(pending_exception_address));
4729    // Cannot use check here, as it attempts to generate a call into the runtime.
4730    __ j(equal, &okay);
4731    __ int3();
4732    __ bind(&okay);
4733    __ pop(edx);
4734  }
4735
4736  // Exit the JavaScript to C++ exit frame.
4737  __ LeaveExitFrame(save_doubles_);
4738  __ ret(0);
4739
4740  // Handling of failure.
4741  __ bind(&failure_returned);
4742
4743  Label retry;
4744  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
4745  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4746  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4747  __ j(zero, &retry, taken);
4748
4749  // Special handling of out of memory exceptions.
4750  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4751  __ j(equal, throw_out_of_memory_exception);
4752
4753  // Retrieve the pending exception and clear the variable.
4754  __ mov(eax, Operand::StaticVariable(pending_exception_address));
4755  __ mov(edx,
4756         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
4757  __ mov(Operand::StaticVariable(pending_exception_address), edx);
4758
4759  // Special handling of termination exceptions, which are uncatchable
4760  // by JavaScript code.
4761  __ cmp(eax, Factory::termination_exception());
4762  __ j(equal, throw_termination_exception);
4763
4764  // Handle normal exception.
4765  __ jmp(throw_normal_exception);
4766
4767  // Retry.
4768  __ bind(&retry);
4769}
4770
4771
4772void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
4773                                          UncatchableExceptionType type) {
4774  __ ThrowUncatchable(type, eax);
4775}
4776
4777
4778void CEntryStub::Generate(MacroAssembler* masm) {
4779  // eax: number of arguments including receiver
4780  // ebx: pointer to C function  (C callee-saved)
4781  // ebp: frame pointer  (restored after C call)
4782  // esp: stack pointer  (restored after C call)
4783  // esi: current context (C callee-saved)
4784  // edi: JS function of the caller (C callee-saved)
4785
4786  // NOTE: Invocations of builtins may return failure objects instead
4787  // of a proper result. The builtin entry handles this by performing
4788  // a garbage collection and retrying the builtin (twice).
4789
4790  // Enter the exit frame that transitions from JavaScript to C++.
4791  __ EnterExitFrame(save_doubles_);
4792
4793  // eax: result parameter for PerformGC, if any (setup below)
4794  // ebx: pointer to builtin function  (C callee-saved)
4795  // ebp: frame pointer  (restored after C call)
4796  // esp: stack pointer  (restored after C call)
4797  // edi: number of arguments including receiver (C callee-saved)
4798  // esi: argv pointer (C callee-saved)
4799
4800  Label throw_normal_exception;
4801  Label throw_termination_exception;
4802  Label throw_out_of_memory_exception;
4803
4804  // Call into the runtime system.
4805  GenerateCore(masm,
4806               &throw_normal_exception,
4807               &throw_termination_exception,
4808               &throw_out_of_memory_exception,
4809               false,
4810               false);
4811
4812  // Do space-specific GC and retry runtime call.
4813  GenerateCore(masm,
4814               &throw_normal_exception,
4815               &throw_termination_exception,
4816               &throw_out_of_memory_exception,
4817               true,
4818               false);
4819
4820  // Do full GC and retry runtime call one final time.
4821  Failure* failure = Failure::InternalError();
4822  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
4823  GenerateCore(masm,
4824               &throw_normal_exception,
4825               &throw_termination_exception,
4826               &throw_out_of_memory_exception,
4827               true,
4828               true);
4829
4830  __ bind(&throw_out_of_memory_exception);
4831  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
4832
4833  __ bind(&throw_termination_exception);
4834  GenerateThrowUncatchable(masm, TERMINATION);
4835
4836  __ bind(&throw_normal_exception);
4837  GenerateThrowTOS(masm);
4838}
4839
4840
4841void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4842  Label invoke, exit;
4843#ifdef ENABLE_LOGGING_AND_PROFILING
4844  Label not_outermost_js, not_outermost_js_2;
4845#endif
4846
4847  // Set up the frame.
4848  __ push(ebp);
4849  __ mov(ebp, Operand(esp));
4850
4851  // Push marker in two places.
4852  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4853  __ push(Immediate(Smi::FromInt(marker)));  // context slot
4854  __ push(Immediate(Smi::FromInt(marker)));  // function slot
4855  // Save callee-saved registers (C calling conventions).
4856  __ push(edi);
4857  __ push(esi);
4858  __ push(ebx);
4859
4860  // Save copies of the top frame descriptor on the stack.
4861  ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
4862  __ push(Operand::StaticVariable(c_entry_fp));
4863
4864#ifdef ENABLE_LOGGING_AND_PROFILING
4865  // If this is the outermost JS call, set js_entry_sp value.
4866  ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
4867  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
4868  __ j(not_equal, &not_outermost_js);
4869  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
4870  __ bind(&not_outermost_js);
4871#endif
4872
4873  // Call a faked try-block that does the invoke.
4874  __ call(&invoke);
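  // Illustrative note: the call pushes the address of the code that
  // follows, which the try handler pushed at &invoke below presumably
  // records as the point to resume at when an exception is caught.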
4875
4876  // Caught exception: Store result (exception) in the pending
4877  // exception field in the JSEnv and return a failure sentinel.
4878  ExternalReference pending_exception(Top::k_pending_exception_address);
4879  __ mov(Operand::StaticVariable(pending_exception), eax);
4880  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
4881  __ jmp(&exit);
4882
4883  // Invoke: Link this frame into the handler chain.
4884  __ bind(&invoke);
4885  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
4886
4887  // Clear any pending exceptions.
4888  __ mov(edx,
4889         Operand::StaticVariable(ExternalReference::the_hole_value_location()));
4890  __ mov(Operand::StaticVariable(pending_exception), edx);
4891
4892  // Fake a receiver (NULL).
4893  __ push(Immediate(0));  // receiver
4894
4895  // Invoke the function by calling through JS entry trampoline
4896  // builtin and pop the faked function when we return. Notice that we
4897  // cannot store a reference to the trampoline code directly in this
4898  // stub, because the builtin stubs may not have been generated yet.
4899  if (is_construct) {
4900    ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
4901    __ mov(edx, Immediate(construct_entry));
4902  } else {
4903    ExternalReference entry(Builtins::JSEntryTrampoline);
4904    __ mov(edx, Immediate(entry));
4905  }
4906  __ mov(edx, Operand(edx, 0));  // deref address
4907  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
4908  __ call(Operand(edx));
4909
4910  // Unlink this frame from the handler chain.
4911  __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
4912  // Pop next_sp.
4913  __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
4914
4915#ifdef ENABLE_LOGGING_AND_PROFILING
4916  // If current EBP value is the same as js_entry_sp value, it means that
4917  // the current function is the outermost.
4918  __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
4919  __ j(not_equal, &not_outermost_js_2);
4920  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
4921  __ bind(&not_outermost_js_2);
4922#endif
4923
4924  // Restore the top frame descriptor from the stack.
4925  __ bind(&exit);
4926  __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
4927
4928  // Restore callee-saved registers (C calling conventions).
4929  __ pop(ebx);
4930  __ pop(esi);
4931  __ pop(edi);
4932  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers
4933
4934  // Restore frame pointer and return.
4935  __ pop(ebp);
4936  __ ret(0);
4937}
4938
4939
4940// Generate stub code for instanceof.
4941// This code can patch the call site's inlined cache of the instanceof
4942// check, which looks like this:
4943//
4944//   81 ff XX XX XX XX   cmp    edi, <the hole, patched to a map>
4945//   75 0a               jne    <some near label>
4946//   b8 XX XX XX XX      mov    eax, <the hole, patched to either true or false>
4947//
4948// If call site patching is requested the stack will have the delta from the
4949// return address to the cmp instruction just below the return address. This
4950// also means that call site patching can only take place with arguments in
4951// registers. TOS looks like this when call site patching is requested:
4952//
4953//   esp[0] : return address
4954//   esp[4] : delta from return address to cmp instruction
4955//
4956void InstanceofStub::Generate(MacroAssembler* masm) {
4957  // Call site inlining and patching implies arguments in registers.
4958  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4959
4960  // Fixed register usage throughout the stub.
4961  Register object = eax;  // Object (lhs).
4962  Register map = ebx;  // Map of the object.
4963  Register function = edx;  // Function (rhs).
4964  Register prototype = edi;  // Prototype of the function.
4965  Register scratch = ecx;
4966
4967  // Constants describing the call site code to patch.
4968  static const int kDeltaToCmpImmediate = 2;
4969  static const int kDeltaToMov = 8;
4970  static const int kDeltaToMovImmediate = 9;
4971  static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
4972  static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
4973  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
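  // Illustrative mapping onto the byte sequence in the function comment:
  // the two cmp opcode bytes (81 ff) put the 32-bit map immediate at
  // offset 2 (kDeltaToCmpImmediate); the 2-byte jne ends at offset 7, so
  // the mov opcode (b8) sits at offset 8 (kDeltaToMov) and its 32-bit
  // immediate at offset 9 (kDeltaToMovImmediate).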
4974
4975  ExternalReference roots_address = ExternalReference::roots_address();
4976
4977  ASSERT_EQ(object.code(), InstanceofStub::left().code());
4978  ASSERT_EQ(function.code(), InstanceofStub::right().code());
4979
4980  // Get the object and function - they are always both needed.
4981  Label slow, not_js_object;
4982  if (!HasArgsInRegisters()) {
4983    __ mov(object, Operand(esp, 2 * kPointerSize));
4984    __ mov(function, Operand(esp, 1 * kPointerSize));
4985  }
4986
4987  // Check that the left hand is a JS object.
4988  __ test(object, Immediate(kSmiTagMask));
4989  __ j(zero, &not_js_object, not_taken);
4990  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4991
4992  // If there is a call site cache, don't look in the global cache, but do the
4993  // real lookup and update the call site cache.
4994  if (!HasCallSiteInlineCheck()) {
4995    // Look up the function and the map in the instanceof cache.
4996    NearLabel miss;
4997    __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
4998    __ cmp(function,
4999           Operand::StaticArray(scratch, times_pointer_size, roots_address));
5000    __ j(not_equal, &miss);
5001    __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5002    __ cmp(map, Operand::StaticArray(
5003        scratch, times_pointer_size, roots_address));
5004    __ j(not_equal, &miss);
5005    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5006    __ mov(eax, Operand::StaticArray(
5007        scratch, times_pointer_size, roots_address));
5008    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5009    __ bind(&miss);
5010  }
5011
5012  // Get the prototype of the function.
5013  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
5014
5015  // Check that the function prototype is a JS object.
5016  __ test(prototype, Immediate(kSmiTagMask));
5017  __ j(zero, &slow, not_taken);
5018  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
5019
5020  // Update the global instanceof or call site inlined cache with the current
5021  // map and function. The cached answer will be set when it is known below.
5022  if (!HasCallSiteInlineCheck()) {
5023    __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5024    __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
5025    __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5026    __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
5027           function);
5028  } else {
5029    // The constants for the code patching are based on no push instructions
5030    // at the call site.
5031    ASSERT(HasArgsInRegisters());
5032    // Get return address and delta to inlined map check.
5033    __ mov(scratch, Operand(esp, 0 * kPointerSize));
5034    __ sub(scratch, Operand(esp, 1 * kPointerSize));
5035    if (FLAG_debug_code) {
5036      __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
5037      __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
5038      __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
5039      __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
5040    }
5041    __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
5042  }
5043
5044  // Loop through the prototype chain of the object looking for the function
5045  // prototype.
5046  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
5047  NearLabel loop, is_instance, is_not_instance;
5048  __ bind(&loop);
5049  __ cmp(scratch, Operand(prototype));
5050  __ j(equal, &is_instance);
5051  __ cmp(Operand(scratch), Immediate(Factory::null_value()));
5052  __ j(equal, &is_not_instance);
5053  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
5054  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
5055  __ jmp(&loop);
5056
5057  __ bind(&is_instance);
5058  if (!HasCallSiteInlineCheck()) {
5059    __ Set(eax, Immediate(0));
5060    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5061    __ mov(Operand::StaticArray(scratch,
5062                                times_pointer_size, roots_address), eax);
5063  } else {
5064    // Get return address and delta to inlined map check.
5065    __ mov(eax, Factory::true_value());
5066    __ mov(scratch, Operand(esp, 0 * kPointerSize));
5067    __ sub(scratch, Operand(esp, 1 * kPointerSize));
5068    if (FLAG_debug_code) {
5069      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5070      __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5071    }
5072    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5073    if (!ReturnTrueFalseObject()) {
5074      __ Set(eax, Immediate(0));
5075    }
5076  }
5077  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5078
5079  __ bind(&is_not_instance);
5080  if (!HasCallSiteInlineCheck()) {
5081    __ Set(eax, Immediate(Smi::FromInt(1)));
5082    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5083    __ mov(Operand::StaticArray(
5084        scratch, times_pointer_size, roots_address), eax);
5085  } else {
5086    // Get return address and delta to inlined map check.
5087    __ mov(eax, Factory::false_value());
5088    __ mov(scratch, Operand(esp, 0 * kPointerSize));
5089    __ sub(scratch, Operand(esp, 1 * kPointerSize));
5090    if (FLAG_debug_code) {
5091      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5092      __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5093    }
5094    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5095    if (!ReturnTrueFalseObject()) {
5096      __ Set(eax, Immediate(Smi::FromInt(1)));
5097    }
5098  }
5099  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5100
5101  Label object_not_null, object_not_null_or_smi;
5102  __ bind(&not_js_object);
5103  // Before the null, smi and string value checks, check that the rhs is a
5104  // function, as an exception needs to be thrown for a non-function rhs.
5105  __ test(function, Immediate(kSmiTagMask));
5106  __ j(zero, &slow, not_taken);
5107  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5108  __ j(not_equal, &slow, not_taken);
5109
5110  // Null is not an instance of anything.
5111  __ cmp(object, Factory::null_value());
5112  __ j(not_equal, &object_not_null);
5113  __ Set(eax, Immediate(Smi::FromInt(1)));
5114  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5115
5116  __ bind(&object_not_null);
5117  // Smi values are not instances of anything.
5118  __ test(object, Immediate(kSmiTagMask));
5119  __ j(not_zero, &object_not_null_or_smi, not_taken);
5120  __ Set(eax, Immediate(Smi::FromInt(1)));
5121  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5122
5123  __ bind(&object_not_null_or_smi);
5124  // String values are not instances of anything.
5125  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5126  __ j(NegateCondition(is_string), &slow);
5127  __ Set(eax, Immediate(Smi::FromInt(1)));
5128  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5129
5130  // Slow-case: Go through the JavaScript implementation.
5131  __ bind(&slow);
5132  if (!ReturnTrueFalseObject()) {
5133    // Tail call the builtin which returns 0 or 1.
5134    if (HasArgsInRegisters()) {
5135      // Push arguments below return address.
5136      __ pop(scratch);
5137      __ push(object);
5138      __ push(function);
5139      __ push(scratch);
5140    }
5141    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
5142  } else {
5143    // Call the builtin and convert 0/1 to true/false.
5144    __ EnterInternalFrame();
5145    __ push(object);
5146    __ push(function);
5147    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
5148    __ LeaveInternalFrame();
5149    NearLabel true_value, done;
5150    __ test(eax, Operand(eax));
5151    __ j(zero, &true_value);
5152    __ mov(eax, Factory::false_value());
5153    __ jmp(&done);
5154    __ bind(&true_value);
5155    __ mov(eax, Factory::true_value());
5156    __ bind(&done);
5157    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5158  }
5159}
5160
5161
5162Register InstanceofStub::left() { return eax; }
5163
5164
5165Register InstanceofStub::right() { return edx; }
5166
5167
5168int CompareStub::MinorKey() {
5169  // Encode the parameters in a unique 16 bit value. To avoid duplicate
5170  // stubs, the never-NaN-NaN condition is only taken into account if the
5171  // condition is equals.
5172  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
5173  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5174  return ConditionField::encode(static_cast<unsigned>(cc_))
5175         | RegisterField::encode(false)   // lhs_ and rhs_ are not used
5176         | StrictField::encode(strict_)
5177         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
5178         | IncludeNumberCompareField::encode(include_number_compare_)
5179         | IncludeSmiCompareField::encode(include_smi_compare_);
5180}
5181
5182
5183// Unfortunately you have to run without snapshots to see most of these
5184// names in the profile since most compare stubs end up in the snapshot.
5185const char* CompareStub::GetName() {
5186  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5187
5188  if (name_ != NULL) return name_;
5189  const int kMaxNameLength = 100;
5190  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
5191  if (name_ == NULL) return "OOM";
5192
5193  const char* cc_name;
5194  switch (cc_) {
5195    case less: cc_name = "LT"; break;
5196    case greater: cc_name = "GT"; break;
5197    case less_equal: cc_name = "LE"; break;
5198    case greater_equal: cc_name = "GE"; break;
5199    case equal: cc_name = "EQ"; break;
5200    case not_equal: cc_name = "NE"; break;
5201    default: cc_name = "UnknownCondition"; break;
5202  }
5203
5204  const char* strict_name = "";
5205  if (strict_ && (cc_ == equal || cc_ == not_equal)) {
5206    strict_name = "_STRICT";
5207  }
5208
5209  const char* never_nan_nan_name = "";
5210  if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
5211    never_nan_nan_name = "_NO_NAN";
5212  }
5213
5214  const char* include_number_compare_name = "";
5215  if (!include_number_compare_) {
5216    include_number_compare_name = "_NO_NUMBER";
5217  }
5218
5219  const char* include_smi_compare_name = "";
5220  if (!include_smi_compare_) {
5221    include_smi_compare_name = "_NO_SMI";
5222  }
5223
5224  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
5225               "CompareStub_%s%s%s%s%s",
5226               cc_name,
5227               strict_name,
5228               never_nan_nan_name,
5229               include_number_compare_name,
5230               include_smi_compare_name);
5231  return name_;
5232}
5233
5234
5235// -------------------------------------------------------------------------
5236// StringCharCodeAtGenerator
5237
5238void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5239  Label flat_string;
5240  Label ascii_string;
5241  Label got_char_code;
5242
5243  // If the receiver is a smi, trigger the non-string case.
5244  STATIC_ASSERT(kSmiTag == 0);
5245  __ test(object_, Immediate(kSmiTagMask));
5246  __ j(zero, receiver_not_string_);
5247
5248  // Fetch the instance type of the receiver into result register.
5249  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5250  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5251  // If the receiver is not a string, trigger the non-string case.
5252  __ test(result_, Immediate(kIsNotStringMask));
5253  __ j(not_zero, receiver_not_string_);
5254
5255  // If the index is not a smi, trigger the non-smi case.
5256  STATIC_ASSERT(kSmiTag == 0);
5257  __ test(index_, Immediate(kSmiTagMask));
5258  __ j(not_zero, &index_not_smi_);
5259
5260  // Put smi-tagged index into scratch register.
5261  __ mov(scratch_, index_);
5262  __ bind(&got_smi_index_);
5263
5264  // Check for index out of range.
5265  __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
5266  __ j(above_equal, index_out_of_range_);
5267
5268  // We need special handling for non-flat strings.
5269  STATIC_ASSERT(kSeqStringTag == 0);
5270  __ test(result_, Immediate(kStringRepresentationMask));
5271  __ j(zero, &flat_string);
5272
5273  // Handle non-flat strings.
5274  __ test(result_, Immediate(kIsConsStringMask));
5275  __ j(zero, &call_runtime_);
5276
5277  // ConsString.
5278  // Check whether the right hand side is the empty string (i.e. if
5279  // this is really a flat string in a cons string). If that is not
5280  // the case we would rather go to the runtime system now to flatten
5281  // the string.
5282  __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
5283         Immediate(Factory::empty_string()));
5284  __ j(not_equal, &call_runtime_);
5285  // Get the first of the two strings and load its instance type.
5286  __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
5287  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5288  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5289  // If the first cons component is also non-flat, then go to runtime.
5290  STATIC_ASSERT(kSeqStringTag == 0);
5291  __ test(result_, Immediate(kStringRepresentationMask));
5292  __ j(not_zero, &call_runtime_);
5293
5294  // Check for 1-byte or 2-byte string.
5295  __ bind(&flat_string);
5296  STATIC_ASSERT(kAsciiStringTag != 0);
5297  __ test(result_, Immediate(kStringEncodingMask));
5298  __ j(not_zero, &ascii_string);
5299
5300  // 2-byte string.
5301  // Load the 2-byte character code into the result register.
5302  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5303  __ movzx_w(result_, FieldOperand(object_,
5304                                   scratch_, times_1,  // Scratch is smi-tagged.
5305                                   SeqTwoByteString::kHeaderSize));
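  // Illustrative only: scratch_ is still smi-tagged, i.e. index << 1,
  // which is already the byte offset of a 2-byte character, hence the
  // times_1 scaling above.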
5306  __ jmp(&got_char_code);
5307
5308  // ASCII string.
5309  // Load the byte into the result register.
5310  __ bind(&ascii_string);
5311  __ SmiUntag(scratch_);
5312  __ movzx_b(result_, FieldOperand(object_,
5313                                   scratch_, times_1,
5314                                   SeqAsciiString::kHeaderSize));
5315  __ bind(&got_char_code);
5316  __ SmiTag(result_);
5317  __ bind(&exit_);
5318}
5319
5320
5321void StringCharCodeAtGenerator::GenerateSlow(
5322    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5323  __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5324
5325  // Index is not a smi.
5326  __ bind(&index_not_smi_);
5327  // If index is a heap number, try converting it to an integer.
5328  __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
5329  call_helper.BeforeCall(masm);
5330  __ push(object_);
5331  __ push(index_);
5332  __ push(index_);  // Consumed by runtime conversion function.
5333  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5334    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5335  } else {
5336    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5337    // NumberToSmi discards numbers that are not exact integers.
5338    __ CallRuntime(Runtime::kNumberToSmi, 1);
5339  }
5340  if (!scratch_.is(eax)) {
5341    // Save the conversion result before the pop instructions below
5342    // have a chance to overwrite it.
5343    __ mov(scratch_, eax);
5344  }
5345  __ pop(index_);
5346  __ pop(object_);
5347  // Reload the instance type.
5348  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5349  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5350  call_helper.AfterCall(masm);
5351  // If index is still not a smi, it must be out of range.
5352  STATIC_ASSERT(kSmiTag == 0);
5353  __ test(scratch_, Immediate(kSmiTagMask));
5354  __ j(not_zero, index_out_of_range_);
5355  // Otherwise, return to the fast path.
5356  __ jmp(&got_smi_index_);
5357
5358  // Call runtime. We get here when the receiver is a string and the
5359  // index is a number, but the code for getting the actual character
5360  // is too complex (e.g., when the string needs to be flattened).
5361  __ bind(&call_runtime_);
5362  call_helper.BeforeCall(masm);
5363  __ push(object_);
5364  __ push(index_);
5365  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5366  if (!result_.is(eax)) {
5367    __ mov(result_, eax);
5368  }
5369  call_helper.AfterCall(masm);
5370  __ jmp(&exit_);
5371
5372  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5373}
5374
5375
5376// -------------------------------------------------------------------------
5377// StringCharFromCodeGenerator
5378
5379void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5380  // Fast case of Heap::LookupSingleCharacterStringFromCode.
5381  STATIC_ASSERT(kSmiTag == 0);
5382  STATIC_ASSERT(kSmiShiftSize == 0);
5383  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5384  __ test(code_,
5385          Immediate(kSmiTagMask |
5386                    ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5387  __ j(not_zero, &slow_case_, not_taken);
5388
5389  __ Set(result_, Immediate(Factory::single_character_string_cache()));
5390  STATIC_ASSERT(kSmiTag == 0);
5391  STATIC_ASSERT(kSmiTagSize == 1);
5392  STATIC_ASSERT(kSmiShiftSize == 0);
5393  // At this point the code register contains a smi-tagged ascii char code.
5394  __ mov(result_, FieldOperand(result_,
5395                               code_, times_half_pointer_size,
5396                               FixedArray::kHeaderSize));
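  // Illustrative only: code_ holds value << 1 (smi-tagged), and the
  // times_half_pointer_size scaling doubles that to value * 4, the byte
  // offset of element `value` in the cache's fixed array.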
5397  __ cmp(result_, Factory::undefined_value());
5398  __ j(equal, &slow_case_, not_taken);
5399  __ bind(&exit_);
5400}
5401
5402
5403void StringCharFromCodeGenerator::GenerateSlow(
5404    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5405  __ Abort("Unexpected fallthrough to CharFromCode slow case");
5406
5407  __ bind(&slow_case_);
5408  call_helper.BeforeCall(masm);
5409  __ push(code_);
5410  __ CallRuntime(Runtime::kCharFromCode, 1);
5411  if (!result_.is(eax)) {
5412    __ mov(result_, eax);
5413  }
5414  call_helper.AfterCall(masm);
5415  __ jmp(&exit_);
5416
5417  __ Abort("Unexpected fallthrough from CharFromCode slow case");
5418}
5419
5420
5421// -------------------------------------------------------------------------
5422// StringCharAtGenerator
5423
5424void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5425  char_code_at_generator_.GenerateFast(masm);
5426  char_from_code_generator_.GenerateFast(masm);
5427}
5428
5429
5430void StringCharAtGenerator::GenerateSlow(
5431    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5432  char_code_at_generator_.GenerateSlow(masm, call_helper);
5433  char_from_code_generator_.GenerateSlow(masm, call_helper);
5434}
5435
5436
5437void StringAddStub::Generate(MacroAssembler* masm) {
5438  Label string_add_runtime, call_builtin;
5439  Builtins::JavaScript builtin_id = Builtins::ADD;
5440
5441  // Load the two arguments.
5442  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
5443  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
5444
5445  // Make sure that both arguments are strings if not known in advance.
5446  if (flags_ == NO_STRING_ADD_FLAGS) {
5447    __ test(eax, Immediate(kSmiTagMask));
5448    __ j(zero, &string_add_runtime);
5449    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
5450    __ j(above_equal, &string_add_runtime);
5451
5452    // First argument is a string, test the second.
5453    __ test(edx, Immediate(kSmiTagMask));
5454    __ j(zero, &string_add_runtime);
5455    __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
5456    __ j(above_equal, &string_add_runtime);
5457  } else {
5458    // Here at least one of the arguments is definitely a string.
5459    // We convert the one that is not known to be a string.
5460    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
5461      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
5462      GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
5463                              &call_builtin);
5464      builtin_id = Builtins::STRING_ADD_RIGHT;
5465    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
5466      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
5467      GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
5468                              &call_builtin);
5469      builtin_id = Builtins::STRING_ADD_LEFT;
5470    }
5471  }
5472
5473  // Both arguments are strings.
5474  // eax: first string
5475  // edx: second string
5476  // Check if either of the strings is empty. In that case, return the other.
5477  NearLabel second_not_zero_length, both_not_zero_length;
5478  __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
5479  STATIC_ASSERT(kSmiTag == 0);
5480  __ test(ecx, Operand(ecx));
5481  __ j(not_zero, &second_not_zero_length);
5482  // Second string is empty, result is first string which is already in eax.
5483  __ IncrementCounter(&Counters::string_add_native, 1);
5484  __ ret(2 * kPointerSize);
5485  __ bind(&second_not_zero_length);
5486  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
5487  STATIC_ASSERT(kSmiTag == 0);
5488  __ test(ebx, Operand(ebx));
5489  __ j(not_zero, &both_not_zero_length);
5490  // First string is empty, result is second string which is in edx.
5491  __ mov(eax, edx);
5492  __ IncrementCounter(&Counters::string_add_native, 1);
5493  __ ret(2 * kPointerSize);
5494
5495  // Both strings are non-empty.
5496  // eax: first string
5497  // ebx: length of first string as a smi
5498  // ecx: length of second string as a smi
5499  // edx: second string
5500  // Look at the length of the result of adding the two strings.
5501  Label string_add_flat_result, longer_than_two;
5502  __ bind(&both_not_zero_length);
5503  __ add(ebx, Operand(ecx));
5504  STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
5505  // Handle exceptionally long strings in the runtime system.
5506  __ j(overflow, &string_add_runtime);
5507  // Use the runtime system when adding two one-character strings, as it
5508  // contains optimizations for this specific case using the symbol table.
5509  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
5510  __ j(not_equal, &longer_than_two);
5511
5512  // Check that both strings are non-external ascii strings.
5513  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
5514                                         &string_add_runtime);
5515
5516  // Get the two characters forming the new string.
5517  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5518  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5519
5520  // Try to look up the two-character string in the symbol table. If it is
5521  // not found, just allocate a new one.
5522  Label make_two_character_string, make_two_character_string_no_reload;
5523  StringHelper::GenerateTwoCharacterSymbolTableProbe(
5524      masm, ebx, ecx, eax, edx, edi,
5525      &make_two_character_string_no_reload, &make_two_character_string);
5526  __ IncrementCounter(&Counters::string_add_native, 1);
5527  __ ret(2 * kPointerSize);
5528
5529  // Allocate a two character string.
5530  __ bind(&make_two_character_string);
5531  // Reload the arguments.
5532  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
5533  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
5534  // Get the two characters forming the new string.
5535  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5536  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5537  __ bind(&make_two_character_string_no_reload);
5538  __ IncrementCounter(&Counters::string_add_make_two_char, 1);
5539  __ AllocateAsciiString(eax,  // Result.
5540                         2,    // Length.
5541                         edi,  // Scratch 1.
5542                         edx,  // Scratch 2.
5543                         &string_add_runtime);
5544  // Pack both characters in ebx.
5545  __ shl(ecx, kBitsPerByte);
5546  __ or_(ebx, Operand(ecx));
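  // Illustrative only: ebx now holds (c2 << 8) | c1, which matches the
  // little-endian layout of two consecutive ascii characters, so a
  // single 16-bit store writes both.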
5547  // Set the characters in the new string.
5548  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
5549  __ IncrementCounter(&Counters::string_add_native, 1);
5550  __ ret(2 * kPointerSize);
5551
5552  __ bind(&longer_than_two);
5553  // Check if the resulting string will be flat.
5554  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
5555  __ j(below, &string_add_flat_result);
5556
5557  // If the result is not supposed to be flat, allocate a cons string object.
5558  // If both strings are ascii, the result is an ascii cons string.
5559  Label non_ascii, allocated, ascii_data;
5560  __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
5561  __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
5562  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5563  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5564  __ and_(ecx, Operand(edi));
5565  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
5566  __ test(ecx, Immediate(kAsciiStringTag));
5567  __ j(zero, &non_ascii);
5568  __ bind(&ascii_data);
5569  // Allocate an ascii cons string.
5570  __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
5571  __ bind(&allocated);
5572  // Fill the fields of the cons string.
5573  if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
5574  __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
5575  __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
5576         Immediate(String::kEmptyHashField));
5577  __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
5578  __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
5579  __ mov(eax, ecx);
5580  __ IncrementCounter(&Counters::string_add_native, 1);
5581  __ ret(2 * kPointerSize);
5582  __ bind(&non_ascii);
5583  // At least one of the strings is two-byte. Check whether it happens
5584  // to contain only ascii characters.
5585  // ecx: first instance type AND second instance type.
5586  // edi: second instance type.
5587  __ test(ecx, Immediate(kAsciiDataHintMask));
5588  __ j(not_zero, &ascii_data);
5589  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5590  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5591  __ xor_(edi, Operand(ecx));
5592  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
5593  __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
5594  __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
5595  __ j(equal, &ascii_data);
5596  // Allocate a two byte cons string.
5597  __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
5598  __ jmp(&allocated);
5599
5600  // Handle creating a flat result. First check that both strings are not
5601  // external strings.
5602  // eax: first string
5603  // ebx: length of resulting flat string as a smi
5604  // edx: second string
5605  __ bind(&string_add_flat_result);
5606  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5607  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5608  __ and_(ecx, kStringRepresentationMask);
5609  __ cmp(ecx, kExternalStringTag);
5610  __ j(equal, &string_add_runtime);
5611  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
5612  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5613  __ and_(ecx, kStringRepresentationMask);
5614  __ cmp(ecx, kExternalStringTag);
5615  __ j(equal, &string_add_runtime);
5616  // Now check if both strings are ascii strings.
5617  // eax: first string
5618  // ebx: length of resulting flat string as a smi
5619  // edx: second string
5620  Label non_ascii_string_add_flat_result;
5621  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
5622  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5623  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
5624  __ j(zero, &non_ascii_string_add_flat_result);
5625  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
5626  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
5627  __ j(zero, &string_add_runtime);
5628
5629  // Both strings are ascii strings.  As they are short they are both flat.
5630  // ebx: length of resulting flat string as a smi
5631  __ SmiUntag(ebx);
5632  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
5633  // eax: result string
5634  __ mov(ecx, eax);
5635  // Locate first character of result.
5636  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5637  // Load first argument and locate first character.
5638  __ mov(edx, Operand(esp, 2 * kPointerSize));
5639  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5640  __ SmiUntag(edi);
5641  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5642  // eax: result string
5643  // ecx: first character of result
5644  // edx: first char of first argument
5645  // edi: length of first argument
5646  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5647  // Load second argument and locate first character.
5648  __ mov(edx, Operand(esp, 1 * kPointerSize));
5649  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5650  __ SmiUntag(edi);
5651  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5652  // eax: result string
5653  // ecx: next character of result
5654  // edx: first char of second argument
5655  // edi: length of second argument
5656  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5657  __ IncrementCounter(&Counters::string_add_native, 1);
5658  __ ret(2 * kPointerSize);
5659
5660  // Handle creating a flat two byte result.
5661  // eax: first string - known to be two byte
5662  // ebx: length of resulting flat string as a smi
5663  // edx: second string
5664  __ bind(&non_ascii_string_add_flat_result);
5665  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
5666  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
5667  __ j(not_zero, &string_add_runtime);
5668  // Both strings are two byte strings. As they are short they are both
5669  // flat.
5670  __ SmiUntag(ebx);
5671  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
5672  // eax: result string
5673  __ mov(ecx, eax);
5674  // Locate first character of result.
5675  __ add(Operand(ecx),
5676         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5677  // Load first argument and locate first character.
5678  __ mov(edx, Operand(esp, 2 * kPointerSize));
5679  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5680  __ SmiUntag(edi);
5681  __ add(Operand(edx),
5682         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5683  // eax: result string
5684  // ecx: first character of result
5685  // edx: first char of first argument
5686  // edi: length of first argument
5687  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5688  // Load second argument and locate first character.
5689  __ mov(edx, Operand(esp, 1 * kPointerSize));
5690  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5691  __ SmiUntag(edi);
5692  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5693  // eax: result string
5694  // ecx: next character of result
5695  // edx: first char of second argument
5696  // edi: length of second argument
5697  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5698  __ IncrementCounter(&Counters::string_add_native, 1);
5699  __ ret(2 * kPointerSize);
5700
5701  // Just jump to runtime to add the two strings.
5702  __ bind(&string_add_runtime);
5703  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
5704
5705  if (call_builtin.is_linked()) {
5706    __ bind(&call_builtin);
5707    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
5708  }
5709}
5710
5711
5712void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
5713                                            int stack_offset,
5714                                            Register arg,
5715                                            Register scratch1,
5716                                            Register scratch2,
5717                                            Register scratch3,
5718                                            Label* slow) {
5719  // First check if the argument is already a string.
5720  Label not_string, done;
5721  __ test(arg, Immediate(kSmiTagMask));
5722  __ j(zero, &not_string);
5723  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
5724  __ j(below, &done);
5725
5726  // Check the number to string cache.
5727  Label not_cached;
5728  __ bind(&not_string);
5729  // Puts the cached result into scratch1.
5730  NumberToStringStub::GenerateLookupNumberStringCache(masm,
5731                                                      arg,
5732                                                      scratch1,
5733                                                      scratch2,
5734                                                      scratch3,
5735                                                      false,
5736                                                      &not_cached);
5737  __ mov(arg, scratch1);
5738  __ mov(Operand(esp, stack_offset), arg);
5739  __ jmp(&done);
5740
5741  // Check if the argument is a safe string wrapper.
5742  __ bind(&not_cached);
5743  __ test(arg, Immediate(kSmiTagMask));
5744  __ j(zero, slow);
5745  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
5746  __ j(not_equal, slow);
5747  __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
5748            1 << Map::kStringWrapperSafeForDefaultValueOf);
5749  __ j(zero, slow);
5750  __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
5751  __ mov(Operand(esp, stack_offset), arg);
5752
5753  __ bind(&done);
5754}


void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          bool ascii) {
  NearLabel loop;
  __ bind(&loop);
  // This loop just copies one character at a time, as it is only used for very
  // short strings.
  if (ascii) {
    __ mov_b(scratch, Operand(src, 0));
    __ mov_b(Operand(dest, 0), scratch);
    __ add(Operand(src), Immediate(1));
    __ add(Operand(dest), Immediate(1));
  } else {
    __ mov_w(scratch, Operand(src, 0));
    __ mov_w(Operand(dest, 0), scratch);
    __ add(Operand(src), Immediate(2));
    __ add(Operand(dest), Immediate(2));
  }
  __ sub(Operand(count), Immediate(1));
  __ j(not_zero, &loop);
}
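

// In plain C++ the loop above is just the naive per-character copy below
// (a sketch; `ascii` selects byte vs. 16-bit characters exactly as the stub
// does). Note the stub tests the counter at the bottom of the loop, so a
// zero count is not supported; callers must guarantee count > 0.
static inline void CopyCharsSketch(void* dest, const void* src,
                                   int count, bool ascii) {
  if (ascii) {
    const char* from = static_cast<const char*>(src);
    char* to = static_cast<char*>(dest);
    do { *to++ = *from++; } while (--count > 0);
  } else {
    const uint16_t* from = static_cast<const uint16_t*>(src);
    uint16_t* to = static_cast<uint16_t*>(dest);
    do { *to++ = *from++; } while (--count > 0);
  }
}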


void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
                                             Register dest,
                                             Register src,
                                             Register count,
                                             Register scratch,
                                             bool ascii) {
  // Copy characters using rep movs of doublewords.
  // The destination is aligned on a 4 byte boundary because we are
  // copying to the beginning of a newly allocated string.
  ASSERT(dest.is(edi));  // rep movs destination
  ASSERT(src.is(esi));  // rep movs source
  ASSERT(count.is(ecx));  // rep movs count
  ASSERT(!scratch.is(dest));
  ASSERT(!scratch.is(src));
  ASSERT(!scratch.is(count));

  // Nothing to do for zero characters.
  Label done;
  __ test(count, Operand(count));
  __ j(zero, &done);

  // Make count the number of bytes to copy.
  if (!ascii) {
    __ shl(count, 1);
  }

  // Don't enter the rep movs if there are fewer than 4 bytes to copy.
  NearLabel last_bytes;
  __ test(count, Immediate(~3));
  __ j(zero, &last_bytes);

  // Copy from esi to edi using the rep movs instruction.
  __ mov(scratch, count);
  __ sar(count, 2);  // Number of doublewords to copy.
  __ cld();
  __ rep_movs();

  // Find number of bytes left.
  __ mov(count, scratch);
  __ and_(count, 3);

  // Check if there are more bytes to copy.
  __ bind(&last_bytes);
  __ test(count, Operand(count));
  __ j(zero, &done);

  // Copy remaining characters.
  NearLabel loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(src, 0));
  __ mov_b(Operand(dest, 0), scratch);
  __ add(Operand(src), Immediate(1));
  __ add(Operand(dest), Immediate(1));
  __ sub(Operand(count), Immediate(1));
  __ j(not_zero, &loop);

  __ bind(&done);
}
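

// The same two-phase copy strategy in portable C++ (a sketch): move the bulk
// of the data in doubleword-sized chunks -- what `rep movs` does above --
// then finish the sub-doubleword tail byte by byte.
static inline void CopyBytesRepSketch(char* dest, const char* src,
                                      int byte_count) {
  if (byte_count == 0) return;  // Nothing to do for zero characters.
  // Bulk phase: byte_count >> 2 doubleword-sized moves.
  for (int i = byte_count >> 2; i > 0; i--) {
    dest[0] = src[0]; dest[1] = src[1]; dest[2] = src[2]; dest[3] = src[3];
    dest += 4;
    src += 4;
  }
  // Tail phase: the remaining byte_count & 3 bytes, one at a time.
  for (int i = byte_count & 3; i > 0; i--) {
    *dest++ = *src++;
  }
}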


void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
                                                        Register c1,
                                                        Register c2,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Label* not_probed,
                                                        Label* not_found) {
  // Register scratch3 is the general scratch register in this function.
  Register scratch = scratch3;

  // Make sure that both characters are not digits, as such strings have a
  // different hash algorithm. Don't try to look for these in the symbol table.
  NearLabel not_array_index;
  __ mov(scratch, c1);
  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
  __ j(above, &not_array_index);
  __ mov(scratch, c2);
  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
  __ j(below_equal, not_probed);

  __ bind(&not_array_index);
  // Calculate the two character string hash.
  Register hash = scratch1;
  GenerateHashInit(masm, hash, c1, scratch);
  GenerateHashAddCharacter(masm, hash, c2, scratch);
  GenerateHashGetHash(masm, hash, scratch);

  // Collect the two characters in a register.
  Register chars = c1;
  __ shl(c2, kBitsPerByte);
  __ or_(chars, Operand(c2));

  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:  hash of two character string.

  // Load the symbol table.
  Register symbol_table = c2;
  ExternalReference roots_address = ExternalReference::roots_address();
  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
  __ mov(symbol_table,
         Operand::StaticArray(scratch, times_pointer_size, roots_address));

  // Calculate capacity mask from the symbol table capacity.
  Register mask = scratch2;
  __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
  __ SmiUntag(mask);
  __ sub(Operand(mask), Immediate(1));

  // Registers
  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:         hash of two character string
  // symbol_table: symbol table
  // mask:         capacity mask
  // scratch:      -

  // Perform a number of probes in the symbol table.
  static const int kProbes = 4;
  Label found_in_symbol_table;
  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
  for (int i = 0; i < kProbes; i++) {
    // Calculate entry in symbol table.
    __ mov(scratch, hash);
    if (i > 0) {
      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(mask));

    // Load the entry from the symbol table.
    Register candidate = scratch;  // Scratch register contains candidate.
    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
    __ mov(candidate,
           FieldOperand(symbol_table,
                        scratch,
                        times_pointer_size,
                        SymbolTable::kElementsStartOffset));

    // If entry is undefined no string with this hash can be found.
    __ cmp(candidate, Factory::undefined_value());
    __ j(equal, not_found);

    // If length is not 2 the string is not a candidate.
    __ cmp(FieldOperand(candidate, String::kLengthOffset),
           Immediate(Smi::FromInt(2)));
    __ j(not_equal, &next_probe[i]);

    // As we are out of registers save the mask on the stack and use that
    // register as a temporary.
    __ push(mask);
    Register temp = mask;

    // Check that the candidate is a non-external ascii string.
    __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ JumpIfInstanceTypeIsNotSequentialAscii(
        temp, temp, &next_probe_pop_mask[i]);

    // Check if the two characters match.
    __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
    __ and_(temp, 0x0000ffff);
    __ cmp(chars, Operand(temp));
    __ j(equal, &found_in_symbol_table);
    __ bind(&next_probe_pop_mask[i]);
    __ pop(mask);
    __ bind(&next_probe[i]);
  }

  // No matching 2 character string found by probing.
  __ jmp(not_found);

  // Scratch register contains result when we fall through to here.
  Register result = scratch;
  __ bind(&found_in_symbol_table);
  __ pop(mask);  // Pop saved mask from the stack.
  if (!result.is(eax)) {
    __ mov(eax, result);
  }
}
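

// The probe sequence above is ordinary open addressing: up to kProbes slots
// are inspected at attempt-dependent offsets, stopping early on an undefined
// slot. A self-contained C++ rendering (a sketch: the table here is just
// uint16 character pairs with 0 marking an empty slot, and the probe offset
// is simplified; the real SymbolTable layout and
// SymbolTable::GetProbeOffset() are richer):
static inline int TwoCharProbeSketch(const uint16_t* table,
                                     uint32_t capacity,  // A power of two.
                                     uint32_t hash,
                                     uint16_t chars) {
  static const int kProbes = 4;
  uint32_t mask = capacity - 1;
  for (int i = 0; i < kProbes; i++) {
    uint32_t entry = (hash + static_cast<uint32_t>(i)) & mask;  // Probe i.
    if (table[entry] == 0) return -1;  // Undefined slot: not in the table.
    if (table[entry] == chars) return static_cast<int>(entry);  // Found.
  }
  return -1;  // Give up after kProbes attempts (jump to not_found).
}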


void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character,
                                    Register scratch) {
  // hash = character + (character << 10);
  __ mov(hash, character);
  __ shl(hash, 10);
  __ add(hash, Operand(character));
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ sar(scratch, 6);
  __ xor_(hash, Operand(scratch));
}


void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character,
                                            Register scratch) {
  // hash += character;
  __ add(hash, Operand(character));
  // hash += hash << 10;
  __ mov(scratch, hash);
  __ shl(scratch, 10);
  __ add(hash, Operand(scratch));
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ sar(scratch, 6);
  __ xor_(hash, Operand(scratch));
}


void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash,
                                       Register scratch) {
  // hash += hash << 3;
  __ mov(scratch, hash);
  __ shl(scratch, 3);
  __ add(hash, Operand(scratch));
  // hash ^= hash >> 11;
  __ mov(scratch, hash);
  __ sar(scratch, 11);
  __ xor_(hash, Operand(scratch));
  // hash += hash << 15;
  __ mov(scratch, hash);
  __ shl(scratch, 15);
  __ add(hash, Operand(scratch));

  // if (hash == 0) hash = 27;
  NearLabel hash_not_zero;
  __ test(hash, Operand(hash));
  __ j(not_zero, &hash_not_zero);
  __ mov(hash, Immediate(27));
  __ bind(&hash_not_zero);
}
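

// Taken together, the three helpers above compute a classic shift-add-xor
// string hash. The same computation in plain C++ (a sketch assuming 32-bit
// unsigned arithmetic; note the stubs emit an arithmetic right shift where
// this sketch uses an unsigned one):
static inline uint32_t StringHashSketch(const char* chars, int length) {
  uint32_t hash = 0;
  for (int i = 0; i < length; i++) {
    hash += static_cast<unsigned char>(chars[i]);  // GenerateHashAddCharacter
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;                               // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  // Avoid zero, presumably reserved to mean "hash not yet computed".
  if (hash == 0) hash = 27;
  return hash;
}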


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: to
  //  esp[8]: from
  //  esp[12]: string

  // Make sure first argument is a string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  STATIC_ASSERT(kSmiTag == 0);
  __ test(eax, Immediate(kSmiTagMask));
  __ j(zero, &runtime);
  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
  __ j(NegateCondition(is_string), &runtime);

  // eax: string
  // ebx: instance type

  // Calculate length of sub string using the smi values.
  Label result_longer_than_two;
  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
  __ test(ecx, Immediate(kSmiTagMask));
  __ j(not_zero, &runtime);
  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
  __ test(edx, Immediate(kSmiTagMask));
  __ j(not_zero, &runtime);
  __ sub(ecx, Operand(edx));
  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
  Label return_eax;
  __ j(equal, &return_eax);
  // Special handling of sub-strings of length 1 and 2. One character strings
  // are handled in the runtime system (looked up in the single character
  // cache). Two character strings are looked up in the symbol table.
  __ SmiUntag(ecx);  // Result length is no longer smi.
  __ cmp(ecx, 2);
  __ j(greater, &result_longer_than_two);
  __ j(less, &runtime);

  // Sub string of length 2 requested.
  // eax: string
  // ebx: instance type
  // ecx: sub string length (value is 2)
  // edx: from index (smi)
  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);

  // Get the two characters forming the sub string.
  __ SmiUntag(edx);  // From index is no longer smi.
  __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
  __ movzx_b(ecx,
             FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));

  // Try to lookup two character string in symbol table.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, ebx, ecx, eax, edx, edi,
      &make_two_character_string, &make_two_character_string);
  __ ret(3 * kPointerSize);

  __ bind(&make_two_character_string);
  // Set up registers for allocating the two character string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ Set(ecx, Immediate(2));

  __ bind(&result_longer_than_two);
  // eax: string
  // ebx: instance type
  // ecx: result string length
  // Check for flat ascii string.
  Label non_ascii_flat;
  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);

  // Allocate the result.
  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  __ SmiUntag(ebx);
  __ add(esi, Operand(ebx));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
  __ mov(esi, edx);  // Restore esi.
  __ IncrementCounter(&Counters::sub_string_native, 1);
  __ ret(3 * kPointerSize);

  __ bind(&non_ascii_flat);
  // eax: string
  // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
  // ecx: result string length
  // Check for flat two byte string.
  __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
  __ j(not_equal, &runtime);

  // Allocate the result.
  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  // Since the from index is a smi it is already multiplied by two, which
  // matches the size of a two byte character.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(esi, Operand(ebx));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
  __ mov(esi, edx);  // Restore esi.

  __ bind(&return_eax);
  __ IncrementCounter(&Counters::sub_string_native, 1);
  __ ret(3 * kPointerSize);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
}
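

// The length-based dispatch at the top of the stub, restated as C++ (a
// sketch; the return codes are illustrative labels for the four paths taken
// above, and the indices are assumed already untagged):
static inline int SubStringPathSketch(int from, int to, int string_length) {
  int result_length = to - from;
  if (result_length == string_length) return 0;  // Return input unchanged.
  if (result_length < 2) return 1;   // Runtime (single character cache).
  if (result_length == 2) return 2;  // Probe the two character symbol table.
  return 3;                          // Allocate a flat result and copy.
}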


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3) {
  Label result_not_equal;
  Label result_greater;
  Label compare_lengths;

  __ IncrementCounter(&Counters::string_compare_native, 1);

  // Find minimum length.
  NearLabel left_shorter;
  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
  __ mov(scratch3, scratch1);
  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));

  Register length_delta = scratch3;

  __ j(less_equal, &left_shorter);
  // Right string is shorter. Change scratch1 to be length of right string.
  __ sub(scratch1, Operand(length_delta));
  __ bind(&left_shorter);

  Register min_length = scratch1;

  // If either length is zero, just compare lengths.
  __ test(min_length, Operand(min_length));
  __ j(zero, &compare_lengths);

  // Change index to run from -min_length to -1 by adding min_length
  // to string start. This means that the loop ends when the index reaches
  // zero, which doesn't need an additional compare.
  __ SmiUntag(min_length);
  __ lea(left,
         FieldOperand(left,
                      min_length, times_1,
                      SeqAsciiString::kHeaderSize));
  __ lea(right,
         FieldOperand(right,
                      min_length, times_1,
                      SeqAsciiString::kHeaderSize));
  __ neg(min_length);

  Register index = min_length;  // index = -min_length;

  {
    // Compare loop.
    NearLabel loop;
    __ bind(&loop);
    // Compare characters.
    __ mov_b(scratch2, Operand(left, index, times_1, 0));
    __ cmpb(scratch2, Operand(right, index, times_1, 0));
    __ j(not_equal, &result_not_equal);
    __ add(Operand(index), Immediate(1));
    __ j(not_zero, &loop);
  }

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  __ test(length_delta, Operand(length_delta));
  __ j(not_zero, &result_not_equal);

  // Result is EQUAL.
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  __ bind(&result_not_equal);
  __ j(greater, &result_greater);

  // Result is LESS.
  __ Set(eax, Immediate(Smi::FromInt(LESS)));
  __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
  __ ret(0);
}
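

// The loop above uses end-relative indexing: both string pointers are
// advanced past their first min_length characters and then indexed with a
// negative offset that counts up to zero, so reaching the end of the loop
// needs no separate bounds compare. A C++ rendering (a sketch; for ascii
// data the signedness of the character compare does not matter):
static inline int CompareFlatAsciiSketch(const char* left, int left_len,
                                         const char* right, int right_len) {
  int min_length = left_len < right_len ? left_len : right_len;
  const char* left_end = left + min_length;
  const char* right_end = right + min_length;
  for (int index = -min_length; index != 0; index++) {  // Ends at zero.
    char lc = left_end[index];
    char rc = right_end[index];
    if (lc != rc) return lc < rc ? -1 : 1;  // LESS / GREATER
  }
  // Strings agree up to min_length; the length difference decides.
  int length_delta = left_len - right_len;
  if (length_delta == 0) return 0;  // EQUAL
  return length_delta < 0 ? -1 : 1;
}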


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: right string
  //  esp[8]: left string

  __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
  __ mov(eax, Operand(esp, 1 * kPointerSize));  // right

  NearLabel not_same;
  __ cmp(edx, Operand(eax));
  __ j(not_equal, &not_same);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ IncrementCounter(&Counters::string_compare_native, 1);
  __ ret(2 * kPointerSize);

  __ bind(&not_same);

  // Check that both objects are sequential ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);

  // Compare flat ascii strings.
  // Drop arguments from the stack.
  __ pop(ecx);
  __ add(Operand(esp), Immediate(2 * kPointerSize));
  __ push(ecx);
  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void StringCharAtStub::Generate(MacroAssembler* masm) {
  // Expects two arguments (object, index) on the stack:

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: index
  //  esp[8]: object

  Register object = ebx;
  Register index = eax;
  Register scratch1 = ecx;
  Register scratch2 = edx;
  Register result = eax;

  __ pop(scratch1);  // Return address.
  __ pop(index);
  __ pop(object);
  __ push(scratch1);

  Label need_conversion;
  Label index_out_of_range;
  Label done;
  StringCharAtGenerator generator(object,
                                  index,
                                  scratch1,
                                  scratch2,
                                  result,
                                  &need_conversion,
                                  &need_conversion,
                                  &index_out_of_range,
                                  STRING_INDEX_IS_NUMBER);
  generator.GenerateFast(masm);
  __ jmp(&done);

  __ bind(&index_out_of_range);
  // When the index is out of range, the spec requires us to return
  // the empty string.
  __ Set(result, Immediate(Factory::empty_string()));
  __ jmp(&done);

  __ bind(&need_conversion);
  // Move smi zero into the result register, which will trigger
  // conversion.
  __ Set(result, Immediate(Smi::FromInt(0)));
  __ jmp(&done);

  StubRuntimeCallHelper call_helper;
  generator.GenerateSlow(masm, call_helper);

  __ bind(&done);
  __ ret(0);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  NearLabel miss;
  __ mov(ecx, Operand(edx));
  __ or_(ecx, Operand(eax));
  __ test(ecx, Immediate(kSmiTagMask));
  __ j(not_zero, &miss, not_taken);

  if (GetCondition() == equal) {
    // For equality we do not care about the sign of the result.
    __ sub(eax, Operand(edx));
  } else {
    NearLabel done;
    __ sub(edx, Operand(eax));
    __ j(no_overflow, &done);
    // Correct sign of result in case of overflow.
    __ not_(edx);
    __ bind(&done);
    __ mov(eax, edx);
  }
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}
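

// The `not_(edx)` above fixes the sign after an overflowing subtraction:
// when the 32-bit difference wraps, its sign is the opposite of the true
// sign, and flipping all bits (~d == -d - 1) restores a correctly-signed
// (if inexact) result. A C++ rendering using the GCC/Clang checked-subtract
// builtin (a sketch; x and y stand for the tagged smi operands):
static inline int32_t SmiCompareSketch(int32_t x, int32_t y) {
  int32_t diff;
  if (!__builtin_sub_overflow(x, y, &diff)) return diff;
  // Tagged smis have a zero low bit, so diff is even and can never be -1;
  // hence ~diff can never collapse to 0 ("equal") here.
  return ~diff;
}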


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  NearLabel generic_stub;
  NearLabel unordered;
  NearLabel miss;
  __ mov(ecx, Operand(edx));
  __ and_(ecx, Operand(eax));
  __ test(ecx, Immediate(kSmiTagMask));
  __ j(zero, &generic_stub, not_taken);

  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
  __ j(not_equal, &miss, not_taken);
  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
  __ j(not_equal, &miss, not_taken);

  // Inline the double comparison, falling back to the generic compare stub
  // if NaN is involved or SSE2 or CMOV is unsupported.
  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
    CpuFeatures::Scope scope1(SSE2);
    CpuFeatures::Scope scope2(CMOV);

    // Load left and right operands.
    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));

    // Compare operands.
    __ ucomisd(xmm0, xmm1);

    // Don't base result on EFLAGS when a NaN is involved.
    __ j(parity_even, &unordered, not_taken);

    // Return a result of -1, 0, or 1, based on EFLAGS.
    // Use mov, because xor would destroy the flag register.
    __ mov(eax, 0);  // equal
    __ mov(ecx, Immediate(Smi::FromInt(1)));
    __ cmov(above, eax, Operand(ecx));
    __ mov(ecx, Immediate(Smi::FromInt(-1)));
    __ cmov(below, eax, Operand(ecx));
    __ ret(0);

    __ bind(&unordered);
  }

  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
  __ bind(&generic_stub);
  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&miss);
  GenerateMiss(masm);
}
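

// What the SSE2 block above computes, in C++ (a sketch): ucomisd raises the
// parity flag exactly when the comparison is unordered (at least one NaN),
// which is the case the stub punts to the generic CompareStub; otherwise the
// two cmovs build -1/0/+1 from the below/above outcomes.
static inline int CompareDoublesSketch(double left, double right,
                                       bool* unordered) {
  *unordered = (left != left) || (right != right);  // Either operand is NaN.
  if (*unordered) return 0;  // Caller falls back to the generic stub.
  if (left < right) return -1;
  if (left > right) return 1;
  return 0;
}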


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  NearLabel miss;
  __ mov(ecx, Operand(edx));
  __ and_(ecx, Operand(eax));
  __ test(ecx, Immediate(kSmiTagMask));
  __ j(zero, &miss, not_taken);

  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, not_taken);
  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, not_taken);

  ASSERT(GetCondition() == equal);
  __ sub(eax, Operand(edx));
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  // Save the registers.
  __ pop(ecx);
  __ push(edx);
  __ push(eax);
  __ push(ecx);

  // Call the runtime system in a fresh internal frame.
  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
  __ EnterInternalFrame();
  __ push(edx);
  __ push(eax);
  __ push(Immediate(Smi::FromInt(op_)));
  __ CallExternalReference(miss, 3);
  __ LeaveInternalFrame();

  // Compute the entry point of the rewritten stub.
  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));

  // Restore registers.
  __ pop(ecx);
  __ pop(eax);
  __ pop(edx);
  __ push(ecx);

  // Do a tail call to the rewritten stub.
  __ jmp(Operand(edi));
}


// Loads an indexed element from a pixel array.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
                                Register receiver,
                                Register key,
                                Register elements,
                                Register untagged_key,
                                Register result,
                                Label* not_pixel_array,
                                Label* key_not_smi,
                                Label* out_of_range) {
  // Register use:
  //   receiver - holds the receiver and is unchanged.
  //   key - holds the key and is unchanged (must be a smi).
  //   elements - is set to the receiver's elements if
  //       the receiver doesn't have a pixel array or the
  //       key is not a smi, otherwise it's the elements'
  //       external pointer.
  //   untagged_key - is set to the untagged key.

  // Some callers already have verified that the key is a smi.  key_not_smi is
  // set to NULL as a sentinel for that case.  Otherwise, an explicit check is
  // added to ensure the key is a smi.
  if (key_not_smi != NULL) {
    __ JumpIfNotSmi(key, key_not_smi);
  } else {
    if (FLAG_debug_code) {
      __ AbortIfNotSmi(key);
    }
  }
  __ mov(untagged_key, key);
  __ SmiUntag(untagged_key);

  __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
  // By passing NULL as not_pixel_array, callers signal that they have already
  // verified that the receiver has pixel array elements.
  if (not_pixel_array != NULL) {
    __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
  } else {
    if (FLAG_debug_code) {
      // Map check should have already made sure that elements is a pixel array.
      __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
             Immediate(Factory::pixel_array_map()));
      __ Assert(equal, "Elements isn't a pixel array");
    }
  }

  // Key must be in range.
  __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
  __ j(above_equal, out_of_range);  // unsigned check handles negative keys.

  // Perform the indexed load and tag the result as a smi.
  __ mov(elements, FieldOperand(elements, PixelArray::kExternalPointerOffset));
  __ movzx_b(result, Operand(elements, untagged_key, times_1, 0));
  __ SmiTag(result);
  __ ret(0);
}
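

// The range check above is the standard unsigned-compare trick: once the key
// is untagged, a single unsigned comparison (above_equal) rejects both
// negative keys and keys >= length. In C++ (a sketch):
static inline bool PixelKeyInRangeSketch(int32_t untagged_key,
                                         uint32_t length) {
  // Negative keys wrap to huge unsigned values, so one compare handles both.
  return static_cast<uint32_t>(untagged_key) < length;
}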


// Stores an indexed element into a pixel array, clamping the stored value.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
                                 Register receiver,
                                 Register key,
                                 Register value,
                                 Register elements,
                                 Register scratch1,
                                 bool load_elements_from_receiver,
                                 Label* key_not_smi,
                                 Label* value_not_smi,
                                 Label* not_pixel_array,
                                 Label* out_of_range) {
  // Register use:
  //   receiver - holds the receiver and is unchanged unless the
  //              store succeeds.
  //   key - holds the key (must be a smi) and is unchanged.
  //   value - holds the value (must be a smi) and is unchanged.
  //   elements - holds the elements object of the receiver on entry if
  //              load_elements_from_receiver is false, otherwise used
  //              internally to hold the pixel array's elements and
  //              external array pointer.
  //
  // receiver, key and value remain unmodified until it's guaranteed that the
  // store will succeed.
  Register external_pointer = elements;
  Register untagged_key = scratch1;
  Register untagged_value = receiver;  // Only set once success guaranteed.

  // Fetch the receiver's elements if the caller hasn't already done so.
  if (load_elements_from_receiver) {
    __ mov(elements, FieldOperand(receiver, JSObject::kElementsOffset));
  }

  // By passing NULL as not_pixel_array, callers signal that they have already
  // verified that the receiver has pixel array elements.
  if (not_pixel_array != NULL) {
    __ CheckMap(elements, Factory::pixel_array_map(), not_pixel_array, true);
  } else {
    if (FLAG_debug_code) {
      // Map check should have already made sure that elements is a pixel array.
      __ cmp(FieldOperand(elements, HeapObject::kMapOffset),
             Immediate(Factory::pixel_array_map()));
      __ Assert(equal, "Elements isn't a pixel array");
    }
  }

  // Some callers already have verified that the key is a smi.  key_not_smi is
  // set to NULL as a sentinel for that case.  Otherwise, an explicit check is
  // added to ensure the key is a smi.
  if (key_not_smi != NULL) {
    __ JumpIfNotSmi(key, key_not_smi);
  } else {
    if (FLAG_debug_code) {
      __ AbortIfNotSmi(key);
    }
  }

  // Key must be a smi and it must be in range.
  __ mov(untagged_key, key);
  __ SmiUntag(untagged_key);
  __ cmp(untagged_key, FieldOperand(elements, PixelArray::kLengthOffset));
  __ j(above_equal, out_of_range);  // unsigned check handles negative keys.

  // Value must be a smi.
  __ JumpIfNotSmi(value, value_not_smi);
  __ mov(untagged_value, value);
  __ SmiUntag(untagged_value);

  {  // Clamp the value to [0..255].
    NearLabel done;
    __ test(untagged_value, Immediate(0xFFFFFF00));
    __ j(zero, &done);
    __ setcc(negative, untagged_value);  // 1 if negative, 0 if positive.
    __ dec_b(untagged_value);  // 0 if negative, 255 if positive.
    __ bind(&done);
  }

  __ mov(external_pointer,
         FieldOperand(elements, PixelArray::kExternalPointerOffset));
  __ mov_b(Operand(external_pointer, untagged_key, times_1, 0), untagged_value);
  __ ret(0);  // Return value in eax.
}
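

// The setcc/dec_b pair above is a branchless clamp: for values outside
// [0..255], setcc(negative) yields 1 for negatives and 0 for large positives,
// and the byte-wide decrement turns those into 0 and 255 respectively. In
// C++ (a sketch):
static inline uint8_t ClampToByteSketch(int32_t value) {
  if ((value & ~0xFF) == 0) return static_cast<uint8_t>(value);  // In range.
  uint8_t is_negative = static_cast<uint8_t>(value < 0);  // setcc(negative)
  return static_cast<uint8_t>(is_negative - 1);            // dec_b (wraps)
}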


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32