// code-stubs-ia32.cc, revision 3fb3ca8c7ca439d408449a395897395c0faae8d1
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in eax.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
  __ ret(0);

  __ bind(&check_heap_number);
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  Factory* factory = masm->isolate()->factory();
  __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
  __ j(not_equal, &call_builtin, Label::kNear);
  __ ret(0);

  __ bind(&call_builtin);
  __ pop(ecx);  // Pop return address.
  __ push(eax);
  __ push(ecx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}

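// A host-side sketch (not part of the stub) of the tag test JumpIfNotSmi
// performs above, assuming the usual ia32 smi encoding (kSmiTag == 0,
// kSmiTagSize == 1): a word holds a smi iff its least significant bit is
// clear, which is why a smi is already a valid number and can be returned
// unchanged.
static inline bool IsSmiWordSketch(uint32_t word) {
  return (word & 1) == 0;  // Tag bit clear -> 31-bit signed payload above it.
}
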
void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in esi.
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  int map_index = strict_mode_ == kStrictMode
      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
      : Context::FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  Factory* factory = masm->isolate()->factory();
  __ mov(ebx, Immediate(factory->empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(factory->the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(factory->undefined_value()));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(ecx);  // Temporarily remove return address.
  __ pop(edx);
  __ push(esi);
  __ push(edx);
  __ push(Immediate(factory->false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Set up the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->function_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // Set up the fixed slots.
  __ Set(ebx, Immediate(0));  // Set to NULL.
  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);

  // Initialize the rest of the slots to undefined.
  __ mov(ebx, factory->undefined_value());
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
  }

  // Return and remove the on-stack parameter.
  __ mov(esi, Operand(eax));
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}

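// Rough host-side sketch of the slot arithmetic behind Context::SlotOffset
// as used above. The layout constants are assumptions (contexts are
// FixedArray-shaped: a map word and a length word precede the slots, and
// offsets fold in the -1 heap-object tag); only the shape of the formula
// is the point here.
static inline int ContextSlotOffsetSketch(int index) {
  const int kPtr = 4;                // kPointerSize on ia32.
  const int kHeaderSize = 2 * kPtr;  // Map + length words (assumption).
  const int kTag = 1;                // Tagged pointers carry a +1 tag.
  return kHeaderSize + index * kPtr - kTag;
}
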
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [esp + kPointerSize]: constant elements.
  // [esp + (2 * kPointerSize)]: literal index.
  // [esp + (3 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into ecx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ mov(ecx, Operand(esp, 3 * kPointerSize));
  __ mov(eax, Operand(esp, 2 * kPointerSize));
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  Factory* factory = masm->isolate()->factory();
  __ cmp(ecx, factory->undefined_value());
  __ j(equal, &slow_case);

  if (FLAG_debug_code) {
    const char* message;
    Handle<Map> expected_map;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map = factory->fixed_array_map();
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map = factory->fixed_cow_array_map();
    }
    __ push(ecx);
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
    __ Assert(equal, message);
    __ pop(ecx);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(eax, i), ebx);
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ lea(edx, Operand(eax, JSArray::kSize));
    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);

    // Copy the elements array.
    for (int i = 0; i < elements_size; i += kPointerSize) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(edx, i), ebx);
    }
  }

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}

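// Sizing sketch for the single allocation above, assuming the standard
// FixedArray layout (map word + length word followed by length element
// words); this mirrors what FixedArray::SizeFor computes without depending
// on it.
static inline int CloneAllocationSizeSketch(int length, int js_array_size) {
  const int kPtr = 4;  // kPointerSize on ia32.
  int elements_size = (length > 0) ? (2 * kPtr + length * kPtr) : 0;
  return js_array_size + elements_size;  // JSArray header + elements array.
}
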
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  Label false_result, true_result, not_string;
  Factory* factory = masm->isolate()->factory();
  const Register map = edx;

  __ mov(eax, Operand(esp, 1 * kPointerSize));

  // undefined -> false
  __ cmp(eax, factory->undefined_value());
  __ j(equal, &false_result);

  // Boolean -> its value
  __ cmp(eax, factory->false_value());
  __ j(equal, &false_result);
  __ cmp(eax, factory->true_value());
  __ j(equal, &true_result);

  // Smis: 0 -> false, all others -> true
  __ test(eax, Operand(eax));
  __ j(zero, &false_result);
  __ JumpIfSmi(eax, &true_result);

  // 'null' -> false.
  __ cmp(eax, factory->null_value());
  __ j(equal, &false_result, Label::kNear);

  // Get the map of the heap object.
  __ mov(map, FieldOperand(eax, HeapObject::kMapOffset));

  // Undetectable -> false.
  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, &false_result, Label::kNear);

  // JavaScript object -> true.
  __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
  __ j(above_equal, &true_result, Label::kNear);

  // String value -> false iff empty.
  __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
  __ j(above_equal, &not_string, Label::kNear);
  __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
  __ j(zero, &false_result, Label::kNear);
  __ jmp(&true_result, Label::kNear);

  __ bind(&not_string);
  // HeapNumber -> false iff +0, -0, or NaN.
  __ cmp(map, factory->heap_number_map());
  __ j(not_equal, &true_result, Label::kNear);
  __ fldz();
  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ FCmp();
  __ j(zero, &false_result, Label::kNear);
  // Fall through to |true_result|.

  // Return 1/0 for true/false in tos_.
  __ bind(&true_result);
  __ mov(tos_, 1);
  __ ret(1 * kPointerSize);
  __ bind(&false_result);
  __ mov(tos_, 0);
  __ ret(1 * kPointerSize);
}

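// Host-side sketch of the HeapNumber branch above: among doubles, exactly
// +0.0, -0.0 and NaN are false-y. The fldz/fld_d/FCmp sequence ends up with
// the zero flag set both for equality with zero and for the unordered (NaN)
// case, so both fall into false_result. Equivalent C++:
static inline bool DoubleToBooleanSketch(double value) {
  // value == 0.0 also holds for -0.0; value != value detects NaN.
  return !(value == 0.0 || value != value);
}
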
class FloatingPointHelper : public AllStatic {
 public:
  enum ArgLocation {
    ARGS_ON_STACK,
    ARGS_IN_REGISTERS
  };

  // Code pattern for loading a floating point value. The input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // the operand is in the register |number|. Returns the operand as a
  // floating point number on the FPU stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
  // Returns operands as floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register scratch,
                                ArgLocation arg_location = ARGS_ON_STACK);

  // Similar to LoadFloatOperand but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);

  // Checks that the two floating point numbers on top of the FPU stack
  // have int32 values.
  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
                                         Label* non_int32);

  // Takes the operands in edx and eax and loads them as integers in eax
  // and ecx.
  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                     bool use_sse3,
                                     Label* operand_conversion_failure);

  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
  // operands are pushed on the stack, and that their conversions to int32
  // are in eax and ecx.  Checks that the original numbers were in the int32
  // range.
  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                           bool use_sse3,
                                           Label* not_int32);

  // Assumes that operands are smis or heap numbers and loads them
  // into xmm0 and xmm1. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm);

  // Test if operands are numbers (smi or HeapNumber objects), and load
  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
  // either operand is not a number.  Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);

  // Similar to LoadSSE2Operands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);

  // Checks that the two floating point numbers loaded into xmm0 and xmm1
  // have int32 values.
  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                        Label* non_int32,
                                        Register scratch);
};

// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
// trashed registers.
static void IntegerConvert(MacroAssembler* masm,
                           Register source,
                           bool use_sse3,
                           Label* conversion_failure) {
  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
  Label done, right_exponent, normal_exponent;
  Register scratch = ebx;
  Register scratch2 = edi;
  // Get exponent word.
  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  if (use_sse3) {
    CpuFeatures::Scope scope(SSE3);
    // Check whether the exponent is too big for a 64 bit signed integer.
    static const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
    __ j(greater_equal, conversion_failure);
    // Load x87 register with heap number.
    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
    // Reserve space for 64 bit answer.
    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
    // Do conversion, which cannot fail because we checked the exponent.
    __ fisttp_d(Operand(esp, 0));
    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
  } else {
    // Load ecx with zero.  We use this either for the final shift or
    // for the answer.
    __ xor_(ecx, Operand(ecx));
    // Check whether the exponent matches a 32 bit signed int that cannot be
    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
    // exponent is 30 (biased).  This is the exponent that we are fastest at and
    // also the highest exponent we can handle here.
    const uint32_t non_smi_exponent =
        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
    // If we have a match of the int32-but-not-Smi exponent then skip some
    // logic.
    __ j(equal, &right_exponent);
    // If the exponent is higher than that then go to slow case.  This catches
    // numbers that don't fit in a signed int32, infinities and NaNs.
    __ j(less, &normal_exponent);

    {
      // Handle a big exponent.  The only reason we have this code is that the
      // >>> operator has a tendency to generate numbers with an exponent of 31.
      const uint32_t big_non_smi_exponent =
          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
      __ j(not_equal, conversion_failure);
      // We have the big exponent, typically from >>>.  This means the number is
      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
      __ mov(scratch2, scratch);
      __ and_(scratch2, HeapNumber::kMantissaMask);
      // Put back the implicit 1.
      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
      // Shift up the mantissa bits to take up the space the exponent used to
      // take. We just OR'ed in the implicit bit, which took care of one bit;
      // since we want to use the full unsigned range, we subtract one more
      // from the shift distance.
      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
      __ shl(scratch2, big_shift_distance);
      // Get the second half of the double.
      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
      // Shift down 21 bits to get the most significant 11 bits of the low
      // mantissa word.
      __ shr(ecx, 32 - big_shift_distance);
      __ or_(ecx, Operand(scratch2));
      // We have the answer in ecx, but we may need to negate it.
      __ test(scratch, Operand(scratch));
      __ j(positive, &done);
      __ neg(ecx);
      __ jmp(&done);
    }

    __ bind(&normal_exponent);
    // Exponent word in scratch, exponent part of exponent word in scratch2.
    // Zero in ecx.
    // We know the exponent is smaller than 30 (biased).  If it is less than
    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
    // it rounds to zero.
    const uint32_t zero_exponent =
        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
    __ sub(Operand(scratch2), Immediate(zero_exponent));
    // ecx already has a Smi zero.
    __ j(less, &done);

    // We have a shifted exponent between 0 and 30 in scratch2.
    __ shr(scratch2, HeapNumber::kExponentShift);
    __ mov(ecx, Immediate(30));
    __ sub(ecx, Operand(scratch2));

    __ bind(&right_exponent);
    // Here ecx is the shift, scratch is the exponent word.
    // Get the top bits of the mantissa.
    __ and_(scratch, HeapNumber::kMantissaMask);
    // Put back the implicit 1.
    __ or_(scratch, 1 << HeapNumber::kExponentShift);
    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We have kExponentShift + 1 significant bits in the low end of the
    // word.  Shift them to the top bits.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    __ shl(scratch, shift_distance);
    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
    // Shift down 22 bits to get the most significant 10 bits of the low
    // mantissa word.
    __ shr(scratch2, 32 - shift_distance);
    __ or_(scratch2, Operand(scratch));
    // Move down according to the exponent.
    __ shr_cl(scratch2);
    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
    // we may need to fix the sign.
    Label negative;
    __ xor_(ecx, Operand(ecx));
    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
    __ j(greater, &negative, Label::kNear);
    __ mov(ecx, scratch2);
    __ jmp(&done, Label::kNear);
    __ bind(&negative);
    __ sub(ecx, Operand(scratch2));
    __ bind(&done);
  }
}

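// A portable sketch of the non-SSE3 path of IntegerConvert above: pull the
// int32 value of an IEEE-754 double out of its raw bits with the same
// mask/shift scheme. It assumes the caller has already excluded exponents
// that do not fit (the assembly bails out to conversion_failure for those)
// and that memcpy and the stdint-style integer types are visible through
// the V8 headers.
static inline int32_t IntegerConvertSketch(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // Sign|exponent|mantissa.
  uint32_t lo = static_cast<uint32_t>(bits);        // Low mantissa word.
  int exponent = static_cast<int>((hi >> 20) & 0x7FF) - 1023;  // Unbiased.
  if (exponent < 0) return 0;  // |d| < 1.0 truncates to zero.
  // Reassemble the 53-bit mantissa, restoring the implicit leading 1.
  uint64_t mantissa =
      ((static_cast<uint64_t>(hi & 0xFFFFF) | 0x100000) << 32) | lo;
  uint32_t result = static_cast<uint32_t>(mantissa >> (52 - exponent));
  return (hi & 0x80000000u) ? static_cast<int32_t>(0u - result)
                            : static_cast<int32_t>(result);
}
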
void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }
  stream->Add("UnaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              UnaryOpIC::GetName(operand_type_));
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case UnaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
      break;
  }
}


void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.

  __ push(eax);  // the operand
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(mode_)));
  __ push(Immediate(Smi::FromInt(operand_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateSmiStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateSmiStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
                     Label::kNear, Label::kNear, Label::kNear);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* undo,
                                     Label* slow,
                                     Label::Distance non_smi_near,
                                     Label::Distance undo_near,
                                     Label::Distance slow_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // We can't handle -0 with smis, so use a type transition for that case.
  __ test(eax, Operand(eax));
  __ j(zero, slow, slow_near);

  // Try optimistic subtraction '0 - value', saving operand in eax for undo.
  __ mov(edx, Operand(eax));
  __ Set(eax, Immediate(0));
  __ sub(eax, Operand(edx));
  __ j(overflow, undo, undo_near);
  __ ret(0);
}

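// Why the optimistic '0 - value' above may be attempted on the tagged word
// directly: with a zero tag, negating the tagged word negates the smi. The
// only inputs that must bail out are 0 (its negation, -0, is not a smi) and
// the minimum smi, whose negation overflows the smi range and is caught by
// the overflow flag. A host sketch using a checked subtract (the
// __builtin_sub_overflow intrinsic is a GCC/Clang assumption):
static inline bool NegateSmiSketch(int32_t tagged, int32_t* result) {
  if (tagged == 0) return false;  // Would produce -0; take the slow path.
  // Returns false exactly when the assembly would take the undo path.
  return !__builtin_sub_overflow(0, tagged, result);
}
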
void UnaryOpStub::GenerateSmiCodeBitNot(
    MacroAssembler* masm,
    Label* non_smi,
    Label::Distance non_smi_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // Flip bits and revert inverted smi-tag.
  __ not_(eax);
  __ and_(eax, ~kSmiTagMask);
  __ ret(0);
}

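// The identity behind the flip-and-mask sequence above: with a zero smi
// tag, ~(n << 1) differs from (~n) << 1 only in the (now set) tag bit, so
// clearing kSmiTagMask after the NOT yields the smi encoding of ~n with no
// untag/retag round trip. Host sketch:
static inline int32_t SmiBitNotSketch(int32_t tagged) {
  return ~tagged & ~1;  // Equals (~(tagged >> 1)) << 1, i.e. the smi of ~n.
}
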
void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
  __ mov(eax, Operand(edx));
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateHeapNumberStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateHeapNumberStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateHeapNumberStubBitNot(
    MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, slow);

  if (mode_ == UNARY_OVERWRITE) {
    __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
            Immediate(HeapNumber::kSignMask));  // Flip sign.
  } else {
    __ mov(edx, Operand(eax));
    // edx: operand

    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    __ EnterInternalFrame();
    __ push(edx);
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    __ pop(edx);
    __ LeaveInternalFrame();

    __ bind(&heapnumber_allocated);
    // eax: allocated 'empty' number
    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
  }
  __ ret(0);
}

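// Host-side sketch of the UNARY_OVERWRITE fast path above: negating a
// double is a single xor of the IEEE-754 sign bit (what the assembly does
// with HeapNumber::kSignMask on the exponent word); no FPU arithmetic is
// needed. Assumes memcpy and the stdint-style types are visible through
// the V8 headers.
static inline double FlipDoubleSignSketch(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  bits ^= static_cast<uint64_t>(1) << 63;  // Flip the sign bit.
  memcpy(&d, &bits, sizeof(bits));
  return d;
}
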
void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
                                               Label* slow) {
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, slow);

  // Convert the heap number in eax to an untagged integer in ecx.
  IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);

  // Do the bitwise operation and check if the result fits in a smi.
  Label try_float;
  __ not_(ecx);
  __ cmp(ecx, 0xc0000000);
  __ j(sign, &try_float, Label::kNear);

  // Tag the result as a smi and we're done.
  STATIC_ASSERT(kSmiTagSize == 1);
  __ lea(eax, Operand(ecx, times_2, kSmiTag));
  __ ret(0);

  // Try to store the result in a heap number.
  __ bind(&try_float);
  if (mode_ == UNARY_NO_OVERWRITE) {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ mov(ebx, eax);
    __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    __ EnterInternalFrame();
    // Push the original HeapNumber on the stack. The integer value can't
    // be stored since it's untagged and not in the smi range (so we can't
    // smi-tag it). We'll recalculate the value after the GC instead.
    __ push(ebx);
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    // New HeapNumber is in eax.
    __ pop(edx);
    __ LeaveInternalFrame();
    // IntegerConvert uses ebx and edi as scratch registers.
    // This conversion won't go slow-case.
    IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
    __ not_(ecx);

    __ bind(&heapnumber_allocated);
  }
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ cvtsi2sd(xmm0, Operand(ecx));
    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
  } else {
    __ push(ecx);
    __ fild_s(Operand(esp, 0));
    __ pop(ecx);
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  }
  __ ret(0);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateGenericStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateGenericStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
  // Handle the slow case by jumping to the corresponding JavaScript builtin.
  __ pop(ecx);  // Pop return address.
  __ push(eax);
  __ push(ecx);  // Push return address.
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address.
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  // Left and right arguments are already on top of the stack.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


void BinaryOpStub::Generate(MacroAssembler* masm) {
  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case BinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case BinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case BinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case BinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case BinaryOpIC::BOTH_STRING:
      GenerateBothStringStub(masm);
      break;
    case BinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case BinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }
  stream->Add("BinaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              BinaryOpIC::GetName(operands_type_));
}


void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division.  Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op_ == Token::DIV || op_ == Token::MOD) {
    left = eax;
    right = ebx;
    __ mov(ebx, eax);
    __ mov(eax, edx);
  }

  // 2. Prepare the smi check of both operands by ORing them together.
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op_) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result.  Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, Operand(left));
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));
      combined = right;
      break;

    default:
      break;
  }

  // 3. Perform the smi check of the operands.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
  __ JumpIfNotSmi(combined, &not_smis);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op_) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, Operand(left));  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      // - 0x80000000: high bit would be lost when smi tagging.
      // - 0x40000000: this number would convert to negative when smi
      //   tagging.
      // These two cases can only happen with shifts by 0 or 1 when
      // handed a valid smi.
      __ test(left, Immediate(0xc0000000));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::ADD:
      ASSERT(right.is(eax));
      __ add(right, Operand(left));  // Addition is commutative.
      __ j(overflow, &use_fp_on_smis);
      break;

    case Token::SUB:
      __ sub(left, Operand(right));
      __ j(overflow, &use_fp_on_smis);
      __ mov(eax, left);
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
      // We can't revert the multiplication if the result is not a smi
      // so save the right operand.
      __ mov(ebx, right);
      // Remove tag from one of the operands (but keep sign).
      __ SmiUntag(right);
      // Do multiplication.
      __ imul(right, Operand(left));  // Multiplication is commutative.
      __ j(overflow, &use_fp_on_smis);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
      break;

    case Token::DIV:
      // We can't revert the division if the result is not a smi so
      // save the left operand.
      __ mov(edi, left);
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &use_fp_on_smis);
      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for the corner case of dividing the most negative smi by
      // -1. We cannot use the overflow flag, since it is not set by the
      // idiv instruction.
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      __ cmp(eax, 0x40000000);
      __ j(equal, &use_fp_on_smis);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
      // Check that the remainder is zero.
      __ test(edx, Operand(edx));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(eax);
      break;

    case Token::MOD:
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &not_smis);

      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(edx, combined, slow);
      // Move remainder to register eax.
      __ mov(eax, edx);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in eax.  Some operations have registers pushed.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      __ ret(0);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      __ ret(2 * kPointerSize);
      break;
    default:
      UNREACHABLE();
  }

  // 6. For some operations emit inline code to perform floating point
  // operations on known smis (e.g., if the result of the operation
  // overflowed the smi range).
  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
    __ bind(&use_fp_on_smis);
    switch (op_) {
      // Undo the effects of some operations, and some register moves.
      case Token::SHL:
        // The arguments are saved on the stack, and only used from there.
        break;
      case Token::ADD:
        // Revert right = right + left.
        __ sub(right, Operand(left));
        break;
      case Token::SUB:
        // Revert left = left - right.
        __ add(left, Operand(right));
        break;
      case Token::MUL:
        // Right was clobbered but a copy is in ebx.
        __ mov(right, ebx);
        break;
      case Token::DIV:
        // Left was clobbered but a copy is in edi.  Right is in ebx for
        // division.  They should be in eax, ebx for the jump to not_smis.
        __ mov(eax, edi);
        break;
      default:
        // No other operators jump to use_fp_on_smis.
        break;
    }
    __ jmp(&not_smis);
  } else {
    ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
    switch (op_) {
      case Token::SHL:
      case Token::SHR: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // The result we want is in left == edx, so we can put the allocated
        // heap number in eax.
        __ AllocateHeapNumber(eax, ecx, ebx, slow);
        // Store the result in the HeapNumber and return.
        // It's OK to overwrite the arguments on the stack because we
        // are about to return.
        if (op_ == Token::SHR) {
          __ mov(Operand(esp, 1 * kPointerSize), left);
          __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
          __ fild_d(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        } else {
          ASSERT_EQ(Token::SHL, op_);
          if (CpuFeatures::IsSupported(SSE2)) {
            CpuFeatures::Scope use_sse2(SSE2);
            __ cvtsi2sd(xmm0, Operand(left));
            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          } else {
            __ mov(Operand(esp, 1 * kPointerSize), left);
            __ fild_s(Operand(esp, 1 * kPointerSize));
            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          }
        }
        __ ret(2 * kPointerSize);
        break;
      }

      case Token::ADD:
      case Token::SUB:
      case Token::MUL:
      case Token::DIV: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // Restore arguments to edx, eax.
        switch (op_) {
          case Token::ADD:
            // Revert right = right + left.
            __ sub(right, Operand(left));
            break;
          case Token::SUB:
            // Revert left = left - right.
            __ add(left, Operand(right));
            break;
          case Token::MUL:
            // Right was clobbered but a copy is in ebx.
            __ mov(right, ebx);
            break;
          case Token::DIV:
            // Left was clobbered but a copy is in edi.  Right is in ebx for
            // division.
            __ mov(edx, edi);
            __ mov(eax, right);
            break;
          default: UNREACHABLE();
            break;
        }
        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
        } else {  // SSE2 not available, use FPU.
          FloatingPointHelper::LoadFloatSmis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
        }
        __ mov(eax, ecx);
        __ ret(0);
        break;
      }

      default:
        break;
    }
  }

  // 7. Non-smi operands, fall out to the non-smi code with the operands in
  // edx and eax.
  Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
  switch (op_) {
    case Token::BIT_OR:
    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Right operand is saved in ecx and eax was destroyed by the smi
      // check.
      __ mov(eax, ecx);
      break;

    case Token::DIV:
    case Token::MOD:
      // Operands are in eax, ebx at this point.
      __ mov(edx, eax);
      __ mov(eax, ebx);
      break;

    default:
      break;
  }
}

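// The 0xc0000000 tests used throughout GenerateSmiCode encode the untagged
// smi range check. An untagged smi lies in [-2^30, 2^30); as an unsigned
// word that is [0, 0x40000000) or [0xc0000000, 2^32), so 'cmp r, 0xc0000000'
// followed by 'j sign' jumps exactly for the out-of-range middle band, and
// 'test r, 0xc0000000' checks that an unsigned result has neither of its
// top two bits set. Host sketch of both forms:
static inline bool SignedFitsInSmiSketch(int32_t v) {
  return v >= -(1 << 30) && v < (1 << 30);  // What cmp + j(sign) implements.
}
static inline bool UnsignedFitsInSmiSketch(uint32_t v) {
  return (v & 0xc0000000u) == 0;  // What test + j(not_zero) implements (SHR).
}
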
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label call_runtime;

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      GenerateRegisterArgsPush(masm);
      break;
    default:
      UNREACHABLE();
  }

  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
  } else {
    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
  }
  __ bind(&call_runtime);
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      GenerateTypeTransition(masm);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // Try to add the arguments as strings; otherwise, transition to the
  // generic BinaryOpIC type.
  GenerateAddStrings(masm);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime);

  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}


void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::INT32);

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      Label not_floats;
      Label not_int32;
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatures::Scope use_sse2(SSE2);
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
        switch (op_) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
        // If the expected result type is currently INT32, check that the
        // actual result really is an int32.
        if (result_type_ <= BinaryOpIC::INT32) {
          __ cvttsd2si(ecx, Operand(xmm0));
          __ cvtsi2sd(xmm2, Operand(ecx));
          __ ucomisd(xmm0, xmm2);
          __ j(not_zero, &not_int32);
          __ j(carry, &not_int32);
        }
        GenerateHeapResultAllocation(masm, &call_runtime);
        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        __ ret(0);
      } else {  // SSE2 not available, use FPU.
        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
        FloatingPointHelper::LoadFloatOperands(
            masm,
            ecx,
            FloatingPointHelper::ARGS_IN_REGISTERS);
        FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
        switch (op_) {
          case Token::ADD: __ faddp(1); break;
          case Token::SUB: __ fsubp(1); break;
          case Token::MUL: __ fmulp(1); break;
          case Token::DIV: __ fdivp(1); break;
          default: UNREACHABLE();
        }
        Label after_alloc_failure;
        GenerateHeapResultAllocation(masm, &after_alloc_failure);
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(0);
        __ bind(&after_alloc_failure);
        __ ffree();
        __ jmp(&call_runtime);
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      GenerateTypeTransition(masm);
      break;
    }

    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      GenerateRegisterArgsPush(masm);
      Label not_floats;
      Label not_int32;
      Label non_smi_result;
      /*  {
        CpuFeatures::Scope use_sse2(SSE2);
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
        }*/
      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
                                                  use_sse3_,
                                                  &not_floats);
      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
                                                        &not_int32);
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
        default: UNREACHABLE();
      }
      if (op_ == Token::SHR) {
        // Check if result is non-negative and fits in a smi.
        __ test(eax, Immediate(0xc0000000));
        __ j(not_zero, &call_runtime);
      } else {
        // Check if result fits in a smi.
        __ cmp(eax, 0xc0000000);
        __ j(negative, &non_smi_result);
      }
      // Tag smi result and return.
      __ SmiTag(eax);
      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.

      // All ops except SHR return a signed int32 that we load in
      // a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, Operand(eax));  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          __ cvtsi2sd(xmm0, Operand(ebx));
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If an allocation fails, or SHR or MOD hit a hard case,
  // use the runtime system to get the correct result.
  __ bind(&call_runtime);

  switch (op_) {
    case Token::ADD:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  Factory* factory = masm->isolate()->factory();

  // Convert oddball arguments to numbers: undefined becomes 0 for
  // bitwise ops and NaN for the other ops.
1602  Label check, done;
1603  __ cmp(edx, factory->undefined_value());
1604  __ j(not_equal, &check, Label::kNear);
1605  if (Token::IsBitOp(op_)) {
1606    __ xor_(edx, Operand(edx));
1607  } else {
1608    __ mov(edx, Immediate(factory->nan_value()));
1609  }
1610  __ jmp(&done, Label::kNear);
1611  __ bind(&check);
1612  __ cmp(eax, factory->undefined_value());
1613  __ j(not_equal, &done, Label::kNear);
1614  if (Token::IsBitOp(op_)) {
1615    __ xor_(eax, Operand(eax));
1616  } else {
1617    __ mov(eax, Immediate(factory->nan_value()));
1618  }
1619  __ bind(&done);
1620
1621  GenerateHeapNumberStub(masm);
1622}
1623
1624
1625void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1626  Label call_runtime;
1627
1628  // Floating point case.
1629  switch (op_) {
1630    case Token::ADD:
1631    case Token::SUB:
1632    case Token::MUL:
1633    case Token::DIV: {
1634      Label not_floats;
1635      if (CpuFeatures::IsSupported(SSE2)) {
1636        CpuFeatures::Scope use_sse2(SSE2);
1637        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1638
1639        switch (op_) {
1640          case Token::ADD: __ addsd(xmm0, xmm1); break;
1641          case Token::SUB: __ subsd(xmm0, xmm1); break;
1642          case Token::MUL: __ mulsd(xmm0, xmm1); break;
1643          case Token::DIV: __ divsd(xmm0, xmm1); break;
1644          default: UNREACHABLE();
1645        }
1646        GenerateHeapResultAllocation(masm, &call_runtime);
1647        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1648        __ ret(0);
1649      } else {  // SSE2 not available, use FPU.
1650        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1651        FloatingPointHelper::LoadFloatOperands(
1652            masm,
1653            ecx,
1654            FloatingPointHelper::ARGS_IN_REGISTERS);
1655        switch (op_) {
1656          case Token::ADD: __ faddp(1); break;
1657          case Token::SUB: __ fsubp(1); break;
1658          case Token::MUL: __ fmulp(1); break;
1659          case Token::DIV: __ fdivp(1); break;
1660          default: UNREACHABLE();
1661        }
1662        Label after_alloc_failure;
1663        GenerateHeapResultAllocation(masm, &after_alloc_failure);
1664        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1665        __ ret(0);
1666        __ bind(&after_alloc_failure);
1667        __ ffree();
1668        __ jmp(&call_runtime);
1669      }
1670
1671      __ bind(&not_floats);
1672      GenerateTypeTransition(masm);
1673      break;
1674    }
1675
1676    case Token::MOD: {
1677      // For MOD we go directly to runtime in the non-smi case.
1678      break;
1679    }
1680    case Token::BIT_OR:
1681    case Token::BIT_AND:
1682    case Token::BIT_XOR:
1683    case Token::SAR:
1684    case Token::SHL:
1685    case Token::SHR: {
1686      GenerateRegisterArgsPush(masm);
1687      Label not_floats;
1688      Label non_smi_result;
1689      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1690                                                  use_sse3_,
1691                                                  &not_floats);
1692      switch (op_) {
1693        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
1694        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
1695        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
1696        case Token::SAR: __ sar_cl(eax); break;
1697        case Token::SHL: __ shl_cl(eax); break;
1698        case Token::SHR: __ shr_cl(eax); break;
1699        default: UNREACHABLE();
1700      }
1701      if (op_ == Token::SHR) {
1702        // Check if result is non-negative and fits in a smi.
1703        __ test(eax, Immediate(0xc0000000));
1704        __ j(not_zero, &call_runtime);
1705      } else {
1706        // Check if result fits in a smi.
1707        __ cmp(eax, 0xc0000000);
1708        __ j(negative, &non_smi_result);
1709      }
1710      // Tag smi result and return.
1711      __ SmiTag(eax);
1712      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
1713
1714      // All ops except SHR return a signed int32 that we load in
1715      // a HeapNumber.
1716      if (op_ != Token::SHR) {
1717        __ bind(&non_smi_result);
1718        // Allocate a heap number if needed.
1719        __ mov(ebx, Operand(eax));  // ebx: result
1720        Label skip_allocation;
1721        switch (mode_) {
1722          case OVERWRITE_LEFT:
1723          case OVERWRITE_RIGHT:
1724            // If the operand was an object, we skip the
1725            // allocation of a heap number.
1726            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1727                                1 * kPointerSize : 2 * kPointerSize));
1728            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1729            // Fall through!
1730          case NO_OVERWRITE:
1731            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1732            __ bind(&skip_allocation);
1733            break;
1734          default: UNREACHABLE();
1735        }
1736        // Store the result in the HeapNumber and return.
1737        if (CpuFeatures::IsSupported(SSE2)) {
1738          CpuFeatures::Scope use_sse2(SSE2);
1739          __ cvtsi2sd(xmm0, Operand(ebx));
1740          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1741        } else {
1742          __ mov(Operand(esp, 1 * kPointerSize), ebx);
1743          __ fild_s(Operand(esp, 1 * kPointerSize));
1744          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1745        }
1746        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
1747      }
1748
1749      __ bind(&not_floats);
1750      GenerateTypeTransitionWithSavedArgs(masm);
1751      break;
1752    }
1753    default: UNREACHABLE(); break;
1754  }
1755
1756  // If an allocation fails, or SHR or MOD hit a hard case,
1757  // use the runtime system to get the correct result.
1758  __ bind(&call_runtime);
1759
1760  switch (op_) {
1761    case Token::ADD:
1762      GenerateRegisterArgsPush(masm);
1763      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1764      break;
1765    case Token::SUB:
1766      GenerateRegisterArgsPush(masm);
1767      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1768      break;
1769    case Token::MUL:
1770      GenerateRegisterArgsPush(masm);
1771      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1772      break;
1773    case Token::DIV:
1774      GenerateRegisterArgsPush(masm);
1775      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1776      break;
1777    case Token::MOD:
1778      GenerateRegisterArgsPush(masm);
1779      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1780      break;
1781    case Token::BIT_OR:
1782      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1783      break;
1784    case Token::BIT_AND:
1785      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1786      break;
1787    case Token::BIT_XOR:
1788      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1789      break;
1790    case Token::SAR:
1791      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1792      break;
1793    case Token::SHL:
1794      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1795      break;
1796    case Token::SHR:
1797      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1798      break;
1799    default:
1800      UNREACHABLE();
1801  }
1802}
1803
1804
1805void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
1806  Label call_runtime;
1807
1808  Counters* counters = masm->isolate()->counters();
1809  __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
1810
1811  switch (op_) {
1812    case Token::ADD:
1813    case Token::SUB:
1814    case Token::MUL:
1815    case Token::DIV:
1816      break;
1817    case Token::MOD:
1818    case Token::BIT_OR:
1819    case Token::BIT_AND:
1820    case Token::BIT_XOR:
1821    case Token::SAR:
1822    case Token::SHL:
1823    case Token::SHR:
1824      GenerateRegisterArgsPush(masm);
1825      break;
1826    default:
1827      UNREACHABLE();
1828  }
1829
1830  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1831
1832  // Floating point case.
1833  switch (op_) {
1834    case Token::ADD:
1835    case Token::SUB:
1836    case Token::MUL:
1837    case Token::DIV: {
1838      Label not_floats;
1839      if (CpuFeatures::IsSupported(SSE2)) {
1840        CpuFeatures::Scope use_sse2(SSE2);
1841        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1842
1843        switch (op_) {
1844          case Token::ADD: __ addsd(xmm0, xmm1); break;
1845          case Token::SUB: __ subsd(xmm0, xmm1); break;
1846          case Token::MUL: __ mulsd(xmm0, xmm1); break;
1847          case Token::DIV: __ divsd(xmm0, xmm1); break;
1848          default: UNREACHABLE();
1849        }
1850        GenerateHeapResultAllocation(masm, &call_runtime);
1851        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1852        __ ret(0);
1853      } else {  // SSE2 not available, use FPU.
1854        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1855        FloatingPointHelper::LoadFloatOperands(
1856            masm,
1857            ecx,
1858            FloatingPointHelper::ARGS_IN_REGISTERS);
1859        switch (op_) {
1860          case Token::ADD: __ faddp(1); break;
1861          case Token::SUB: __ fsubp(1); break;
1862          case Token::MUL: __ fmulp(1); break;
1863          case Token::DIV: __ fdivp(1); break;
1864          default: UNREACHABLE();
1865        }
1866        Label after_alloc_failure;
1867        GenerateHeapResultAllocation(masm, &after_alloc_failure);
1868        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1869        __ ret(0);
        __ bind(&after_alloc_failure);
        __ ffree();
        __ jmp(&call_runtime);
      }
      __ bind(&not_floats);
      break;
    }
1877    case Token::MOD: {
1878      // For MOD we go directly to runtime in the non-smi case.
1879      break;
1880    }
1881    case Token::BIT_OR:
1882    case Token::BIT_AND:
    case Token::BIT_XOR:
1884    case Token::SAR:
1885    case Token::SHL:
1886    case Token::SHR: {
1887      Label non_smi_result;
1888      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1889                                                  use_sse3_,
1890                                                  &call_runtime);
1891      switch (op_) {
1892        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
1893        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
1894        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
1895        case Token::SAR: __ sar_cl(eax); break;
1896        case Token::SHL: __ shl_cl(eax); break;
1897        case Token::SHR: __ shr_cl(eax); break;
1898        default: UNREACHABLE();
1899      }
1900      if (op_ == Token::SHR) {
1901        // Check if result is non-negative and fits in a smi.
1902        __ test(eax, Immediate(0xc0000000));
1903        __ j(not_zero, &call_runtime);
1904      } else {
1905        // Check if result fits in a smi.
1906        __ cmp(eax, 0xc0000000);
1907        __ j(negative, &non_smi_result);
1908      }
1909      // Tag smi result and return.
1910      __ SmiTag(eax);
1911      __ ret(2 * kPointerSize);  // Drop the arguments from the stack.
1912
1913      // All ops except SHR return a signed int32 that we load in
1914      // a HeapNumber.
1915      if (op_ != Token::SHR) {
1916        __ bind(&non_smi_result);
1917        // Allocate a heap number if needed.
1918        __ mov(ebx, Operand(eax));  // ebx: result
1919        Label skip_allocation;
1920        switch (mode_) {
1921          case OVERWRITE_LEFT:
1922          case OVERWRITE_RIGHT:
1923            // If the operand was an object, we skip the
            // allocation of a heap number.
1925            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1926                                1 * kPointerSize : 2 * kPointerSize));
1927            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1928            // Fall through!
1929          case NO_OVERWRITE:
1930            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1931            __ bind(&skip_allocation);
1932            break;
1933          default: UNREACHABLE();
1934        }
1935        // Store the result in the HeapNumber and return.
1936        if (CpuFeatures::IsSupported(SSE2)) {
1937          CpuFeatures::Scope use_sse2(SSE2);
1938          __ cvtsi2sd(xmm0, Operand(ebx));
1939          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1940        } else {
1941          __ mov(Operand(esp, 1 * kPointerSize), ebx);
1942          __ fild_s(Operand(esp, 1 * kPointerSize));
1943          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1944        }
1945        __ ret(2 * kPointerSize);
1946      }
1947      break;
1948    }
1949    default: UNREACHABLE(); break;
1950  }
1951
1952  // If all else fails, use the runtime system to get the correct
1953  // result.
1954  __ bind(&call_runtime);
1955  switch (op_) {
1956    case Token::ADD: {
1957      GenerateAddStrings(masm);
1958      GenerateRegisterArgsPush(masm);
1959      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1960      break;
1961    }
1962    case Token::SUB:
1963      GenerateRegisterArgsPush(masm);
1964      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1965      break;
1966    case Token::MUL:
1967      GenerateRegisterArgsPush(masm);
1968      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1969      break;
1970    case Token::DIV:
1971      GenerateRegisterArgsPush(masm);
1972      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1973      break;
1974    case Token::MOD:
1975      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1976      break;
1977    case Token::BIT_OR:
1978      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1979      break;
1980    case Token::BIT_AND:
1981      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1982      break;
1983    case Token::BIT_XOR:
1984      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1985      break;
1986    case Token::SAR:
1987      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1988      break;
1989    case Token::SHL:
1990      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1991      break;
1992    case Token::SHR:
1993      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1994      break;
1995    default:
1996      UNREACHABLE();
1997  }
1998}
1999
2000
2001void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2002  ASSERT(op_ == Token::ADD);
2003  Label left_not_string, call_runtime;
2004
2005  // Registers containing left and right operands respectively.
2006  Register left = edx;
2007  Register right = eax;
2008
2009  // Test if left operand is a string.
2010  __ JumpIfSmi(left, &left_not_string, Label::kNear);
2011  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
2012  __ j(above_equal, &left_not_string, Label::kNear);
2013
2014  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2015  GenerateRegisterArgsPush(masm);
2016  __ TailCallStub(&string_add_left_stub);
2017
2018  // Left operand is not a string, test right.
2019  __ bind(&left_not_string);
2020  __ JumpIfSmi(right, &call_runtime, Label::kNear);
2021  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
2022  __ j(above_equal, &call_runtime, Label::kNear);
2023
2024  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2025  GenerateRegisterArgsPush(masm);
2026  __ TailCallStub(&string_add_right_stub);
2027
2028  // Neither argument is a string.
2029  __ bind(&call_runtime);
2030}
2031
2032
2033void BinaryOpStub::GenerateHeapResultAllocation(
2034    MacroAssembler* masm,
2035    Label* alloc_failure) {
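  // In outline: if the overwritable operand already holds a heap number it
  // is reused as the result; otherwise a fresh HeapNumber is allocated.
  // eax and edx are kept intact until allocation succeeds so a runtime
  // fallback can still see the original arguments.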
2036  Label skip_allocation;
2037  OverwriteMode mode = mode_;
2038  switch (mode) {
2039    case OVERWRITE_LEFT: {
2040      // If the argument in edx is already an object, we skip the
2041      // allocation of a heap number.
2042      __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
2043      // Allocate a heap number for the result. Keep eax and edx intact
2044      // for the possible runtime call.
2045      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now edx can be overwritten, losing one of the arguments, as we are
      // done with it and will not need it any more.
      __ mov(edx, Operand(ebx));
      __ bind(&skip_allocation);
      // Use the object in edx as the result holder.
2051      __ mov(eax, Operand(edx));
2052      break;
2053    }
2054    case OVERWRITE_RIGHT:
2055      // If the argument in eax is already an object, we skip the
2056      // allocation of a heap number.
2057      __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2058      // Fall through!
2059    case NO_OVERWRITE:
2060      // Allocate a heap number for the result. Keep eax and edx intact
2061      // for the possible runtime call.
2062      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now eax can be overwritten, losing one of the arguments, as we are
      // done with it and will not need it any more.
2065      __ mov(eax, ebx);
2066      __ bind(&skip_allocation);
2067      break;
2068    default: UNREACHABLE();
2069  }
2070}
2071
2072
2073void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
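  // Re-pushes the register arguments beneath the return address. Stack
  // afterwards: esp[0] = return address, esp[4] = eax, esp[8] = edx.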
2074  __ pop(ecx);
2075  __ push(edx);
2076  __ push(eax);
2077  __ push(ecx);
2078}
2079
2080
2081void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2082  // TAGGED case:
2083  //   Input:
2084  //     esp[4]: tagged number input argument (should be number).
2085  //     esp[0]: return address.
2086  //   Output:
2087  //     eax: tagged double result.
2088  // UNTAGGED case:
  //   Input:
2090  //     esp[0]: return address.
2091  //     xmm1: untagged double input argument
2092  //   Output:
2093  //     xmm1: untagged double result.
2094
2095  Label runtime_call;
2096  Label runtime_call_clear_stack;
2097  Label skip_cache;
2098  const bool tagged = (argument_type_ == TAGGED);
2099  if (tagged) {
2100    // Test that eax is a number.
2101    Label input_not_smi;
2102    Label loaded;
2103    __ mov(eax, Operand(esp, kPointerSize));
2104    __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
2105    // Input is a smi. Untag and load it onto the FPU stack.
2106    // Then load the low and high words of the double into ebx, edx.
2107    STATIC_ASSERT(kSmiTagSize == 1);
2108    __ sar(eax, 1);
2109    __ sub(Operand(esp), Immediate(2 * kPointerSize));
2110    __ mov(Operand(esp, 0), eax);
2111    __ fild_s(Operand(esp, 0));
2112    __ fst_d(Operand(esp, 0));
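    // fst_d (unlike fstp_d) keeps the value in ST(0); the two pops below
    // read its 64 bits back into ebx (low word) and edx (high word).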
    __ pop(ebx);  // Low 32 bits of the double.
    __ pop(edx);  // High 32 bits of the double.
2115    __ jmp(&loaded, Label::kNear);
2116    __ bind(&input_not_smi);
2117    // Check if input is a HeapNumber.
2118    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2119    Factory* factory = masm->isolate()->factory();
2120    __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
2121    __ j(not_equal, &runtime_call);
2122    // Input is a HeapNumber. Push it on the FPU stack and load its
2123    // low and high words into ebx, edx.
2124    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2125    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2126    __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2127
2128    __ bind(&loaded);
2129  } else {  // UNTAGGED.
2130    if (CpuFeatures::IsSupported(SSE4_1)) {
2131      CpuFeatures::Scope sse4_scope(SSE4_1);
2132      __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
2133    } else {
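      // Without SSE4.1: shuffle the high word of xmm1 into the low lane of
      // xmm0, then extract it to edx.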
2134      __ pshufd(xmm0, xmm1, 0x1);
2135      __ movd(Operand(edx), xmm0);
2136    }
2137    __ movd(Operand(ebx), xmm1);
2138  }
2139
2140  // ST[0] or xmm1  == double value
2141  // ebx = low 32 bits of double value
2142  // edx = high 32 bits of double value
2143  // Compute hash (the shifts are arithmetic):
2144  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2145  __ mov(ecx, ebx);
2146  __ xor_(ecx, Operand(edx));
2147  __ mov(eax, ecx);
2148  __ sar(eax, 16);
2149  __ xor_(ecx, Operand(eax));
2150  __ mov(eax, ecx);
2151  __ sar(eax, 8);
2152  __ xor_(ecx, Operand(eax));
2153  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
2154  __ and_(Operand(ecx),
2155          Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
2156
2157  // ST[0] or xmm1 == double value.
2158  // ebx = low 32 bits of double value.
2159  // edx = high 32 bits of double value.
2160  // ecx = TranscendentalCache::hash(double value).
2161  ExternalReference cache_array =
2162      ExternalReference::transcendental_cache_array_address(masm->isolate());
2163  __ mov(eax, Immediate(cache_array));
2164  int cache_array_index =
2165      type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
2166  __ mov(eax, Operand(eax, cache_array_index));
  // eax now points to the cache for type type_.
2168  // If NULL, the cache hasn't been initialized yet, so go through runtime.
2169  __ test(eax, Operand(eax));
2170  __ j(zero, &runtime_call_clear_stack);
2171#ifdef DEBUG
2172  // Check that the layout of cache elements match expectations.
2173  { TranscendentalCache::SubCache::Element test_elem[2];
2174    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2175    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2176    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2177    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2178    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
2180    CHECK_EQ(0, elem_in0 - elem_start);
2181    CHECK_EQ(kIntSize, elem_in1 - elem_start);
2182    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2183  }
2184#endif
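  // Each cache element is laid out as (illustrative sketch):
  //   struct Element { uint32_t in[2]; Object* output; };  // 12 bytes.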
2185  // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
2186  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2187  __ lea(ecx, Operand(eax, ecx, times_4, 0));
2188  // Check if cache matches: Double value is stored in uint32_t[2] array.
2189  Label cache_miss;
2190  __ cmp(ebx, Operand(ecx, 0));
2191  __ j(not_equal, &cache_miss, Label::kNear);
2192  __ cmp(edx, Operand(ecx, kIntSize));
2193  __ j(not_equal, &cache_miss, Label::kNear);
2194  // Cache hit!
2195  __ mov(eax, Operand(ecx, 2 * kIntSize));
2196  if (tagged) {
2197    __ fstp(0);
2198    __ ret(kPointerSize);
2199  } else {  // UNTAGGED.
2200    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2201    __ Ret();
2202  }
2203
2204  __ bind(&cache_miss);
2205  // Update cache with new value.
2206  // We are short on registers, so use no_reg as scratch.
2207  // This gives slightly larger code.
2208  if (tagged) {
2209    __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2210  } else {  // UNTAGGED.
2211    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2212    __ sub(Operand(esp), Immediate(kDoubleSize));
2213    __ movdbl(Operand(esp, 0), xmm1);
2214    __ fld_d(Operand(esp, 0));
2215    __ add(Operand(esp), Immediate(kDoubleSize));
2216  }
2217  GenerateOperation(masm);
2218  __ mov(Operand(ecx, 0), ebx);
2219  __ mov(Operand(ecx, kIntSize), edx);
2220  __ mov(Operand(ecx, 2 * kIntSize), eax);
2221  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2222  if (tagged) {
2223    __ ret(kPointerSize);
2224  } else {  // UNTAGGED.
2225    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2226    __ Ret();
2227
2228    // Skip cache and return answer directly, only in untagged case.
2229    __ bind(&skip_cache);
2230    __ sub(Operand(esp), Immediate(kDoubleSize));
2231    __ movdbl(Operand(esp, 0), xmm1);
2232    __ fld_d(Operand(esp, 0));
2233    GenerateOperation(masm);
2234    __ fstp_d(Operand(esp, 0));
2235    __ movdbl(xmm1, Operand(esp, 0));
2236    __ add(Operand(esp), Immediate(kDoubleSize));
2237    // We return the value in xmm1 without adding it to the cache, but
2238    // we cause a scavenging GC so that future allocations will succeed.
2239    __ EnterInternalFrame();
2240    // Allocate an unused object bigger than a HeapNumber.
2241    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2242    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2243    __ LeaveInternalFrame();
2244    __ Ret();
2245  }
2246
2247  // Call runtime, doing whatever allocation and cleanup is necessary.
2248  if (tagged) {
2249    __ bind(&runtime_call_clear_stack);
2250    __ fstp(0);
2251    __ bind(&runtime_call);
2252    ExternalReference runtime =
2253        ExternalReference(RuntimeFunction(), masm->isolate());
2254    __ TailCallExternalReference(runtime, 1, 1);
2255  } else {  // UNTAGGED.
2256    __ bind(&runtime_call_clear_stack);
2257    __ bind(&runtime_call);
2258    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2259    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2260    __ EnterInternalFrame();
2261    __ push(eax);
2262    __ CallRuntime(RuntimeFunction(), 1);
2263    __ LeaveInternalFrame();
2264    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2265    __ Ret();
2266  }
2267}
2268
2269
2270Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2271  switch (type_) {
2272    case TranscendentalCache::SIN: return Runtime::kMath_sin;
2273    case TranscendentalCache::COS: return Runtime::kMath_cos;
2274    case TranscendentalCache::LOG: return Runtime::kMath_log;
2275    default:
2276      UNIMPLEMENTED();
2277      return Runtime::kAbort;
2278  }
2279}
2280
2281
2282void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
2283  // Only free register is edi.
2284  // Input value is on FP stack, and also in ebx/edx.
2285  // Input value is possibly in xmm1.
2286  // Address of result (a newly allocated HeapNumber) may be in eax.
2287  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
2288    // Both fsin and fcos require arguments in the range +/-2^63 and
2289    // return NaN for infinities and NaN. They can share all code except
2290    // the actual fsin/fcos operation.
2291    Label in_range, done;
    // If the argument is outside the range -2^63..2^63, fsin/fcos don't
    // work, so we must first reduce it to the appropriate range.
2294    __ mov(edi, edx);
2295    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
2296    int supported_exponent_limit =
2297        (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2298    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
2299    __ j(below, &in_range, Label::kNear);
    // Check for infinity and NaN. Both yield NaN for sin and cos.
2301    __ cmp(Operand(edi), Immediate(0x7ff00000));
2302    Label non_nan_result;
2303    __ j(not_equal, &non_nan_result, Label::kNear);
2304    // Input is +/-Infinity or NaN. Result is NaN.
2305    __ fstp(0);
2306    // NaN is represented by 0x7ff8000000000000.
2307    __ push(Immediate(0x7ff80000));
2308    __ push(Immediate(0));
2309    __ fld_d(Operand(esp, 0));
2310    __ add(Operand(esp), Immediate(2 * kPointerSize));
2311    __ jmp(&done, Label::kNear);
2312
2313    __ bind(&non_nan_result);
2314
    // Use fprem1 to restrict the argument to the range +/-2*PI.
2316    __ mov(edi, eax);  // Save eax before using fnstsw_ax.
2317    __ fldpi();
2318    __ fadd(0);
2319    __ fld(1);
2320    // FPU Stack: input, 2*pi, input.
2321    {
2322      Label no_exceptions;
2323      __ fwait();
2324      __ fnstsw_ax();
2325      // Clear if Illegal Operand or Zero Division exceptions are set.
2326      __ test(Operand(eax), Immediate(5));
2327      __ j(zero, &no_exceptions, Label::kNear);
2328      __ fnclex();
2329      __ bind(&no_exceptions);
2330    }
2331
2332    // Compute st(0) % st(1)
2333    {
2334      Label partial_remainder_loop;
2335      __ bind(&partial_remainder_loop);
2336      __ fprem1();
2337      __ fwait();
2338      __ fnstsw_ax();
2339      __ test(Operand(eax), Immediate(0x400 /* C2 */));
      // If C2 is set, the computation produced only a partial result.
      // Loop to continue the computation.
2342      __ j(not_zero, &partial_remainder_loop);
2343    }
2344    // FPU Stack: input, 2*pi, input % 2*pi
2345    __ fstp(2);
2346    __ fstp(0);
2347    __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
2348
2349    // FPU Stack: input % 2*pi
2350    __ bind(&in_range);
2351    switch (type_) {
2352      case TranscendentalCache::SIN:
2353        __ fsin();
2354        break;
2355      case TranscendentalCache::COS:
2356        __ fcos();
2357        break;
2358      default:
2359        UNREACHABLE();
2360    }
2361    __ bind(&done);
2362  } else {
2363    ASSERT(type_ == TranscendentalCache::LOG);
2364    __ fldln2();
2365    __ fxch();
2366    __ fyl2x();
2367  }
2368}
2369
2370
2371// Input: edx, eax are the left and right objects of a bit op.
2372// Output: eax, ecx are left and right integers for a bit op.
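// Conceptually, each operand is converted as in this sketch (anything else
// bails out through conversion_failure):
//   int32_t AsBitOpInt(Object* x) {
//     if (IsSmi(x)) return SmiValue(x);
//     if (IsUndefined(x)) return 0;       // ECMA-262, section 9.5.
//     if (IsHeapNumber(x)) return TruncateToInt32(x);  // via IntegerConvert.
//     // otherwise: jump to conversion_failure.
//   }
// (IsSmi/SmiValue/TruncateToInt32 are illustrative names, not real APIs.)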
2373void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
2374                                                 bool use_sse3,
2375                                                 Label* conversion_failure) {
2376  // Check float operands.
2377  Label arg1_is_object, check_undefined_arg1;
2378  Label arg2_is_object, check_undefined_arg2;
2379  Label load_arg2, done;
2380
2381  // Test if arg1 is a Smi.
2382  __ JumpIfNotSmi(edx, &arg1_is_object);
2383
2384  __ SmiUntag(edx);
2385  __ jmp(&load_arg2);
2386
2387  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2388  __ bind(&check_undefined_arg1);
2389  Factory* factory = masm->isolate()->factory();
2390  __ cmp(edx, factory->undefined_value());
2391  __ j(not_equal, conversion_failure);
2392  __ mov(edx, Immediate(0));
2393  __ jmp(&load_arg2);
2394
2395  __ bind(&arg1_is_object);
2396  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2397  __ cmp(ebx, factory->heap_number_map());
2398  __ j(not_equal, &check_undefined_arg1);
2399
2400  // Get the untagged integer version of the edx heap number in ecx.
2401  IntegerConvert(masm, edx, use_sse3, conversion_failure);
2402  __ mov(edx, ecx);
2403
2404  // Here edx has the untagged integer, eax has a Smi or a heap number.
2405  __ bind(&load_arg2);
2406
2407  // Test if arg2 is a Smi.
2408  __ JumpIfNotSmi(eax, &arg2_is_object);
2409
2410  __ SmiUntag(eax);
2411  __ mov(ecx, eax);
2412  __ jmp(&done);
2413
2414  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2415  __ bind(&check_undefined_arg2);
2416  __ cmp(eax, factory->undefined_value());
2417  __ j(not_equal, conversion_failure);
2418  __ mov(ecx, Immediate(0));
2419  __ jmp(&done);
2420
2421  __ bind(&arg2_is_object);
2422  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2423  __ cmp(ebx, factory->heap_number_map());
2424  __ j(not_equal, &check_undefined_arg2);
2425
2426  // Get the untagged integer version of the eax heap number in ecx.
2427  IntegerConvert(masm, eax, use_sse3, conversion_failure);
2428  __ bind(&done);
2429  __ mov(eax, edx);
2430}
2431
2432
2433void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
2434                                                       bool use_sse3,
2435                                                       Label* not_int32) {
  // Deliberately empty: the int32 check for loaded integers is not yet
  // implemented, so all loaded integers are accepted here.
  return;
2437}
2438
2439
2440void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
2441                                           Register number) {
2442  Label load_smi, done;
2443
2444  __ JumpIfSmi(number, &load_smi, Label::kNear);
2445  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
2446  __ jmp(&done, Label::kNear);
2447
2448  __ bind(&load_smi);
2449  __ SmiUntag(number);
2450  __ push(number);
2451  __ fild_s(Operand(esp, 0));
2452  __ pop(number);
2453
2454  __ bind(&done);
2455}
2456
2457
2458void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
2459  Label load_smi_edx, load_eax, load_smi_eax, done;
2460  // Load operand in edx into xmm0.
2461  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2462  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2463
2464  __ bind(&load_eax);
2465  // Load operand in eax into xmm1.
2466  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2467  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2468  __ jmp(&done, Label::kNear);
2469
2470  __ bind(&load_smi_edx);
2471  __ SmiUntag(edx);  // Untag smi before converting to float.
2472  __ cvtsi2sd(xmm0, Operand(edx));
2473  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
2474  __ jmp(&load_eax);
2475
2476  __ bind(&load_smi_eax);
2477  __ SmiUntag(eax);  // Untag smi before converting to float.
2478  __ cvtsi2sd(xmm1, Operand(eax));
2479  __ SmiTag(eax);  // Retag smi for heap number overwriting test.
2480
2481  __ bind(&done);
2482}
2483
2484
2485void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
2486                                           Label* not_numbers) {
2487  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
2488  // Load operand in edx into xmm0, or branch to not_numbers.
2489  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2490  Factory* factory = masm->isolate()->factory();
2491  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
2492  __ j(not_equal, not_numbers);  // Argument in edx is not a number.
2493  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2494  __ bind(&load_eax);
2495  // Load operand in eax into xmm1, or branch to not_numbers.
2496  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2497  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
2498  __ j(equal, &load_float_eax, Label::kNear);
2499  __ jmp(not_numbers);  // Argument in eax is not a number.
2500  __ bind(&load_smi_edx);
2501  __ SmiUntag(edx);  // Untag smi before converting to float.
2502  __ cvtsi2sd(xmm0, Operand(edx));
2503  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
2504  __ jmp(&load_eax);
2505  __ bind(&load_smi_eax);
2506  __ SmiUntag(eax);  // Untag smi before converting to float.
2507  __ cvtsi2sd(xmm1, Operand(eax));
2508  __ SmiTag(eax);  // Retag smi for heap number overwriting test.
2509  __ jmp(&done, Label::kNear);
2510  __ bind(&load_float_eax);
2511  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2512  __ bind(&done);
2513}
2514
2515
2516void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2517                                       Register scratch) {
2518  const Register left = edx;
2519  const Register right = eax;
2520  __ mov(scratch, left);
2521  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
2522  __ SmiUntag(scratch);
2523  __ cvtsi2sd(xmm0, Operand(scratch));
2524
2525  __ mov(scratch, right);
2526  __ SmiUntag(scratch);
2527  __ cvtsi2sd(xmm1, Operand(scratch));
2528}
2529
2530
2531void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
2532                                                    Label* non_int32,
2533                                                    Register scratch) {
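  // Round-trip check: truncate each double to int32 and convert back; the
  // operand is an int32 exactly when the result compares equal. After
  // ucomisd, not_zero catches a difference and carry catches the
  // unordered (NaN) case.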
2534  __ cvttsd2si(scratch, Operand(xmm0));
2535  __ cvtsi2sd(xmm2, Operand(scratch));
2536  __ ucomisd(xmm0, xmm2);
2537  __ j(not_zero, non_int32);
2538  __ j(carry, non_int32);
2539  __ cvttsd2si(scratch, Operand(xmm1));
2540  __ cvtsi2sd(xmm2, Operand(scratch));
2541  __ ucomisd(xmm1, xmm2);
2542  __ j(not_zero, non_int32);
2543  __ j(carry, non_int32);
2544}
2545
2546
2547void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
2548                                            Register scratch,
2549                                            ArgLocation arg_location) {
2550  Label load_smi_1, load_smi_2, done_load_1, done;
2551  if (arg_location == ARGS_IN_REGISTERS) {
2552    __ mov(scratch, edx);
2553  } else {
2554    __ mov(scratch, Operand(esp, 2 * kPointerSize));
2555  }
2556  __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
2557  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2558  __ bind(&done_load_1);
2559
2560  if (arg_location == ARGS_IN_REGISTERS) {
2561    __ mov(scratch, eax);
2562  } else {
2563    __ mov(scratch, Operand(esp, 1 * kPointerSize));
2564  }
2565  __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
2566  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2567  __ jmp(&done, Label::kNear);
2568
2569  __ bind(&load_smi_1);
2570  __ SmiUntag(scratch);
2571  __ push(scratch);
2572  __ fild_s(Operand(esp, 0));
2573  __ pop(scratch);
2574  __ jmp(&done_load_1);
2575
2576  __ bind(&load_smi_2);
2577  __ SmiUntag(scratch);
2578  __ push(scratch);
2579  __ fild_s(Operand(esp, 0));
2580  __ pop(scratch);
2581
2582  __ bind(&done);
2583}
2584
2585
2586void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
2587                                        Register scratch) {
2588  const Register left = edx;
2589  const Register right = eax;
2590  __ mov(scratch, left);
2591  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
2592  __ SmiUntag(scratch);
2593  __ push(scratch);
2594  __ fild_s(Operand(esp, 0));
2595
2596  __ mov(scratch, right);
2597  __ SmiUntag(scratch);
2598  __ mov(Operand(esp, 0), scratch);
2599  __ fild_s(Operand(esp, 0));
2600  __ pop(scratch);
2601}
2602
2603
2604void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
2605                                             Label* non_float,
2606                                             Register scratch) {
2607  Label test_other, done;
  // Test that both operands are numbers (heap numbers or smis);
  // jump to non_float if either of them is not.
2610  __ JumpIfSmi(edx, &test_other, Label::kNear);
2611  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
2612  Factory* factory = masm->isolate()->factory();
2613  __ cmp(scratch, factory->heap_number_map());
2614  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN
2615
2616  __ bind(&test_other);
2617  __ JumpIfSmi(eax, &done, Label::kNear);
2618  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
2619  __ cmp(scratch, factory->heap_number_map());
2620  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN
2621
2622  // Fall-through: Both operands are numbers.
2623  __ bind(&done);
2624}
2625
2626
2627void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
2628                                                     Label* non_int32) {
  // Deliberately empty: the int32 check for float operands is not yet
  // implemented, so all float operands are accepted here.
  return;
2630}
2631
2632
2633void MathPowStub::Generate(MacroAssembler* masm) {
2634  // Registers are used as follows:
2635  // edx = base
2636  // eax = exponent
2637  // ecx = temporary, result
2638
2639  CpuFeatures::Scope use_sse2(SSE2);
2640  Label allocate_return, call_runtime;
2641
2642  // Load input parameters.
2643  __ mov(edx, Operand(esp, 2 * kPointerSize));
2644  __ mov(eax, Operand(esp, 1 * kPointerSize));
2645
2646  // Save 1 in xmm3 - we need this several times later on.
2647  __ mov(ecx, Immediate(1));
2648  __ cvtsi2sd(xmm3, Operand(ecx));
2649
2650  Label exponent_nonsmi;
2651  Label base_nonsmi;
2652  // If the exponent is a heap number go to that specific case.
2653  __ JumpIfNotSmi(eax, &exponent_nonsmi);
2654  __ JumpIfNotSmi(edx, &base_nonsmi);
2655
2656  // Optimized version when both exponent and base are smis.
2657  Label powi;
2658  __ SmiUntag(edx);
2659  __ cvtsi2sd(xmm0, Operand(edx));
2660  __ jmp(&powi);
  // Exponent is a smi and base is a heap number.
2662  __ bind(&base_nonsmi);
2663  Factory* factory = masm->isolate()->factory();
2664  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
2665         factory->heap_number_map());
2666  __ j(not_equal, &call_runtime);
2667
2668  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2669
2670  // Optimized version of pow if exponent is a smi.
2671  // xmm0 contains the base.
2672  __ bind(&powi);
2673  __ SmiUntag(eax);
2674
2675  // Save exponent in base as we need to check if exponent is negative later.
2676  // We know that base and exponent are in different registers.
2677  __ mov(edx, eax);
2678
2679  // Get absolute value of exponent.
2680  Label no_neg;
2681  __ cmp(eax, 0);
2682  __ j(greater_equal, &no_neg, Label::kNear);
2683  __ neg(eax);
2684  __ bind(&no_neg);
2685
2686  // Load xmm1 with 1.
2687  __ movsd(xmm1, xmm3);
2688  Label while_true;
2689  Label no_multiply;
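  // The loop below computes base^|exponent| by exponentiation by squaring.
  // Roughly, in C:
  //   double result = 1.0;                 // xmm1
  //   while (exponent != 0) {              // eax holds |exponent|
  //     if (exponent & 1) result *= base;  // bit tested via shr's carry
  //     base *= base;                      // xmm0
  //     exponent >>= 1;
  //   }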
2690
2691  __ bind(&while_true);
2692  __ shr(eax, 1);
2693  __ j(not_carry, &no_multiply, Label::kNear);
2694  __ mulsd(xmm1, xmm0);
2695  __ bind(&no_multiply);
2696  __ mulsd(xmm0, xmm0);
2697  __ j(not_zero, &while_true);
2698
  // edx (the base register) holds the original exponent value - if the
  // exponent is negative, return 1/result.
2701  __ test(edx, Operand(edx));
2702  __ j(positive, &allocate_return);
  // Special case if xmm1 has reached infinity.
  __ mov(ecx, Immediate(0x7F800000));  // Single-precision +Infinity bits.
2705  __ movd(xmm0, Operand(ecx));
2706  __ cvtss2sd(xmm0, xmm0);
2707  __ ucomisd(xmm0, xmm1);
2708  __ j(equal, &call_runtime);
2709  __ divsd(xmm3, xmm1);
2710  __ movsd(xmm1, xmm3);
2711  __ jmp(&allocate_return);
2712
  // The exponent (or both operands) is a heap number - in any case we now
  // work on doubles.
2715  __ bind(&exponent_nonsmi);
2716  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
2717         factory->heap_number_map());
2718  __ j(not_equal, &call_runtime);
2719  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
  // Test if the exponent is NaN.
2721  __ ucomisd(xmm1, xmm1);
2722  __ j(parity_even, &call_runtime);
2723
2724  Label base_not_smi;
2725  Label handle_special_cases;
2726  __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
2727  __ SmiUntag(edx);
2728  __ cvtsi2sd(xmm0, Operand(edx));
2729  __ jmp(&handle_special_cases, Label::kNear);
2730
2731  __ bind(&base_not_smi);
2732  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
2733         factory->heap_number_map());
2734  __ j(not_equal, &call_runtime);
2735  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
2736  __ and_(ecx, HeapNumber::kExponentMask);
2737  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
2738  // base is NaN or +/-Infinity
2739  __ j(greater_equal, &call_runtime);
2740  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2741
2742  // base is in xmm0 and exponent is in xmm1.
2743  __ bind(&handle_special_cases);
2744  Label not_minus_half;
2745  // Test for -0.5.
2746  // Load xmm2 with -0.5.
2747  __ mov(ecx, Immediate(0xBF000000));
2748  __ movd(xmm2, Operand(ecx));
2749  __ cvtss2sd(xmm2, xmm2);
2750  // xmm2 now has -0.5.
2751  __ ucomisd(xmm2, xmm1);
2752  __ j(not_equal, &not_minus_half, Label::kNear);
2753
2754  // Calculates reciprocal of square root.
2755  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
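  // Adding +0 maps -0 to +0 and leaves all other values unchanged, so the
  // xorps/addsd pair below normalizes the sign of zero before sqrtsd.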
2756  __ xorps(xmm1, xmm1);
2757  __ addsd(xmm1, xmm0);
2758  __ sqrtsd(xmm1, xmm1);
2759  __ divsd(xmm3, xmm1);
2760  __ movsd(xmm1, xmm3);
2761  __ jmp(&allocate_return);
2762
2763  // Test for 0.5.
2764  __ bind(&not_minus_half);
2765  // Load xmm2 with 0.5.
2766  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
2767  __ addsd(xmm2, xmm3);
2768  // xmm2 now has 0.5.
2769  __ ucomisd(xmm2, xmm1);
2770  __ j(not_equal, &call_runtime);
2771  // Calculates square root.
2772  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
2773  __ xorps(xmm1, xmm1);
2774  __ addsd(xmm1, xmm0);
2775  __ sqrtsd(xmm1, xmm1);
2776
2777  __ bind(&allocate_return);
2778  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
2779  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
2780  __ mov(eax, ecx);
2781  __ ret(2 * kPointerSize);
2782
2783  __ bind(&call_runtime);
2784  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2785}
2786
2787
2788void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
2789  // The key is in edx and the parameter count is in eax.
2790
2791  // The displacement is used for skipping the frame pointer on the
2792  // stack. It is the offset of the last parameter (if any) relative
2793  // to the frame pointer.
2794  static const int kDisplacement = 1 * kPointerSize;
2795
2796  // Check that the key is a smi.
2797  Label slow;
2798  __ JumpIfNotSmi(edx, &slow);
2799
2800  // Check if the calling frame is an arguments adaptor frame.
2801  Label adaptor;
2802  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2803  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
2804  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2805  __ j(equal, &adaptor, Label::kNear);
2806
2807  // Check index against formal parameters count limit passed in
2808  // through register eax. Use unsigned comparison to get negative
2809  // check for free.
2810  __ cmp(edx, Operand(eax));
2811  __ j(above_equal, &slow);
2812
2813  // Read the argument from the stack and return it.
2814  STATIC_ASSERT(kSmiTagSize == 1);
2815  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
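  // eax (count) and edx (key) are tagged smis, i.e. value << 1, so the
  // times_2 scaling below yields value * kPointerSize. Net effect, as a
  // sketch: result = *(ebp + (count - key) * kPointerSize + kDisplacement).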
2816  __ lea(ebx, Operand(ebp, eax, times_2, 0));
2817  __ neg(edx);
2818  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
2819  __ ret(0);
2820
2821  // Arguments adaptor case: Check index against actual arguments
2822  // limit found in the arguments adaptor frame. Use unsigned
2823  // comparison to get negative check for free.
2824  __ bind(&adaptor);
2825  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2826  __ cmp(edx, Operand(ecx));
2827  __ j(above_equal, &slow);
2828
2829  // Read the argument from the stack and return it.
2830  STATIC_ASSERT(kSmiTagSize == 1);
2831  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
2832  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
2833  __ neg(edx);
2834  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
2835  __ ret(0);
2836
2837  // Slow-case: Handle non-smi or out-of-bounds access to arguments
2838  // by calling the runtime system.
2839  __ bind(&slow);
2840  __ pop(ebx);  // Return address.
2841  __ push(edx);
2842  __ push(ebx);
2843  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
2844}
2845
2846
2847void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2848  // esp[0] : return address
2849  // esp[4] : number of parameters
2850  // esp[8] : receiver displacement
2851  // esp[12] : function
2852
2853  // Check if the calling frame is an arguments adaptor frame.
2854  Label runtime;
2855  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2856  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
2857  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2858  __ j(not_equal, &runtime, Label::kNear);
2859
2860  // Patch the arguments.length and the parameters pointer.
2861  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2862  __ mov(Operand(esp, 1 * kPointerSize), ecx);
  __ lea(edx, Operand(edx, ecx, times_2,
                      StandardFrameConstants::kCallerSPOffset));
2865  __ mov(Operand(esp, 2 * kPointerSize), edx);
2866
2867  __ bind(&runtime);
2868  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2869}
2870
2871
2872void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2873  // esp[0] : return address
2874  // esp[4] : number of parameters (tagged)
2875  // esp[8] : receiver displacement
2876  // esp[12] : function
2877
2878  // ebx = parameter count (tagged)
2879  __ mov(ebx, Operand(esp, 1 * kPointerSize));
2880
2881  // Check if the calling frame is an arguments adaptor frame.
2882  // TODO(rossberg): Factor out some of the bits that are shared with the other
2883  // Generate* functions.
2884  Label runtime;
2885  Label adaptor_frame, try_allocate;
2886  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2887  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
2888  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2889  __ j(equal, &adaptor_frame, Label::kNear);
2890
2891  // No adaptor, parameter count = argument count.
2892  __ mov(ecx, ebx);
2893  __ jmp(&try_allocate, Label::kNear);
2894
2895  // We have an adaptor frame. Patch the parameters pointer.
2896  __ bind(&adaptor_frame);
2897  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2898  __ lea(edx, Operand(edx, ecx, times_2,
2899                      StandardFrameConstants::kCallerSPOffset));
2900  __ mov(Operand(esp, 2 * kPointerSize), edx);
2901
2902  // ebx = parameter count (tagged)
2903  // ecx = argument count (tagged)
2904  // esp[4] = parameter count (tagged)
2905  // esp[8] = address of receiver argument
2906  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
2907  __ cmp(ebx, Operand(ecx));
2908  __ j(less_equal, &try_allocate, Label::kNear);
2909  __ mov(ebx, ecx);
2910
2911  __ bind(&try_allocate);
2912
2913  // Save mapped parameter count.
2914  __ push(ebx);
2915
2916  // Compute the sizes of backing store, parameter map, and arguments object.
2917  // 1. Parameter map, has 2 extra words containing context and backing store.
2918  const int kParameterMapHeaderSize =
2919      FixedArray::kHeaderSize + 2 * kPointerSize;
2920  Label no_parameter_map;
2921  __ test(ebx, Operand(ebx));
2922  __ j(zero, &no_parameter_map, Label::kNear);
2923  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
2924  __ bind(&no_parameter_map);
2925
2926  // 2. Backing store.
2927  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
2928
2929  // 3. Arguments object.
2930  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
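  // Total size, as a sketch (ebx/ecx hold tagged counts, so the times_2
  // scalings above supply the factor kPointerSize):
  //   size = (mapped_count == 0 ? 0 : kParameterMapHeaderSize
  //                                   + mapped_count * kPointerSize)
  //        + FixedArray::kHeaderSize + argument_count * kPointerSize
  //        + Heap::kArgumentsObjectSize;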
2931
2932  // Do the allocation of all three objects in one go.
2933  __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
2934
2935  // eax = address of new object(s) (tagged)
2936  // ecx = argument count (tagged)
2937  // esp[0] = mapped parameter count (tagged)
2938  // esp[8] = parameter count (tagged)
2939  // esp[12] = address of receiver argument
2940  // Get the arguments boilerplate from the current (global) context into edi.
2941  Label has_mapped_parameters, copy;
2942  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2943  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
2944  __ mov(ebx, Operand(esp, 0 * kPointerSize));
2945  __ test(ebx, Operand(ebx));
2946  __ j(not_zero, &has_mapped_parameters, Label::kNear);
2947  __ mov(edi, Operand(edi,
2948         Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
2949  __ jmp(&copy, Label::kNear);
2950
2951  __ bind(&has_mapped_parameters);
2952  __ mov(edi, Operand(edi,
2953            Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
2954  __ bind(&copy);
2955
2956  // eax = address of new object (tagged)
2957  // ebx = mapped parameter count (tagged)
2958  // ecx = argument count (tagged)
2959  // edi = address of boilerplate object (tagged)
2960  // esp[0] = mapped parameter count (tagged)
2961  // esp[8] = parameter count (tagged)
2962  // esp[12] = address of receiver argument
2963  // Copy the JS object part.
2964  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2965    __ mov(edx, FieldOperand(edi, i));
2966    __ mov(FieldOperand(eax, i), edx);
2967  }
2968
  // Set up the callee in-object property.
2970  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2971  __ mov(edx, Operand(esp, 4 * kPointerSize));
2972  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
2973                      Heap::kArgumentsCalleeIndex * kPointerSize),
2974         edx);
2975
2976  // Use the length (smi tagged) and set that as an in-object property too.
2977  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2978  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
2979                      Heap::kArgumentsLengthIndex * kPointerSize),
2980         ecx);
2981
  // Set up the elements pointer in the allocated arguments object.
2983  // If we allocated a parameter map, edi will point there, otherwise to the
2984  // backing store.
2985  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
2986  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
2987
2988  // eax = address of new object (tagged)
2989  // ebx = mapped parameter count (tagged)
2990  // ecx = argument count (tagged)
2991  // edi = address of parameter map or backing store (tagged)
2992  // esp[0] = mapped parameter count (tagged)
2993  // esp[8] = parameter count (tagged)
2994  // esp[12] = address of receiver argument
2995  // Free a register.
2996  __ push(eax);
2997
2998  // Initialize parameter map. If there are no mapped arguments, we're done.
2999  Label skip_parameter_map;
3000  __ test(ebx, Operand(ebx));
3001  __ j(zero, &skip_parameter_map);
3002
3003  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3004         Immediate(FACTORY->non_strict_arguments_elements_map()));
3005  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
3006  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
3007  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
3008  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
3009  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
3010
3011  // Copy the parameter slots and the holes in the arguments.
3012  // We need to fill in mapped_parameter_count slots. They index the context,
3013  // where parameters are stored in reverse order, at
3014  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
3016  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
3017  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3018  // We loop from right to left.
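  // For example, with parameter_count == 3 and mapped_parameter_count == 2,
  // the two map slots receive indices MIN_CONTEXT_SLOTS + 2 and
  // MIN_CONTEXT_SLOTS + 1, in that order.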
3019  Label parameters_loop, parameters_test;
3020  __ push(ecx);
3021  __ mov(eax, Operand(esp, 2 * kPointerSize));
3022  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3023  __ add(ebx, Operand(esp, 4 * kPointerSize));
3024  __ sub(ebx, Operand(eax));
3025  __ mov(ecx, FACTORY->the_hole_value());
3026  __ mov(edx, edi);
3027  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
3028  // eax = loop variable (tagged)
3029  // ebx = mapping index (tagged)
3030  // ecx = the hole value
3031  // edx = address of parameter map (tagged)
3032  // edi = address of backing store (tagged)
3033  // esp[0] = argument count (tagged)
3034  // esp[4] = address of new object (tagged)
3035  // esp[8] = mapped parameter count (tagged)
3036  // esp[16] = parameter count (tagged)
3037  // esp[20] = address of receiver argument
3038  __ jmp(&parameters_test, Label::kNear);
3039
3040  __ bind(&parameters_loop);
3041  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
3042  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
3043  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
3044  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
3045  __ bind(&parameters_test);
3046  __ test(eax, Operand(eax));
3047  __ j(not_zero, &parameters_loop, Label::kNear);
3048  __ pop(ecx);
3049
3050  __ bind(&skip_parameter_map);
3051
3052  // ecx = argument count (tagged)
3053  // edi = address of backing store (tagged)
3054  // esp[0] = address of new object (tagged)
3055  // esp[4] = mapped parameter count (tagged)
3056  // esp[12] = parameter count (tagged)
3057  // esp[16] = address of receiver argument
3058  // Copy arguments header and remaining slots (if there are any).
3059  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3060         Immediate(FACTORY->fixed_array_map()));
3061  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3062
3063  Label arguments_loop, arguments_test;
3064  __ mov(ebx, Operand(esp, 1 * kPointerSize));
3065  __ mov(edx, Operand(esp, 4 * kPointerSize));
3066  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
3067  __ sub(Operand(edx), ebx);
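  // ebx is the mapped parameter count as a smi (2 * count), so subtracting it
  // twice from the receiver address subtracts count * kPointerSize, skipping
  // the arguments already aliased through the parameter map; the loop below
  // copies only the remaining unmapped arguments into the backing store.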
3068  __ jmp(&arguments_test, Label::kNear);
3069
3070  __ bind(&arguments_loop);
3071  __ sub(Operand(edx), Immediate(kPointerSize));
3072  __ mov(eax, Operand(edx, 0));
3073  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
3074  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
3075
3076  __ bind(&arguments_test);
3077  __ cmp(ebx, Operand(ecx));
3078  __ j(less, &arguments_loop, Label::kNear);
3079
3080  // Restore.
3081  __ pop(eax);  // Address of arguments object.
3082  __ pop(ebx);  // Parameter count.
3083
3084  // Return and remove the on-stack parameters.
3085  __ ret(3 * kPointerSize);
3086
3087  // Do the runtime call to allocate the arguments object.
3088  __ bind(&runtime);
3089  __ pop(eax);  // Remove saved parameter count.
3090  __ mov(Operand(esp, 1 * kPointerSize), ecx);  // Patch argument count.
3091  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
3092}
3093
3094
3095void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3096  // esp[0] : return address
3097  // esp[4] : number of parameters
3098  // esp[8] : receiver displacement
3099  // esp[12] : function
3100
3101  // Check if the calling frame is an arguments adaptor frame.
3102  Label adaptor_frame, try_allocate, runtime;
3103  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3104  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3105  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3106  __ j(equal, &adaptor_frame);
3107
3108  // Get the length from the frame.
3109  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3110  __ jmp(&try_allocate);
3111
3112  // Patch the arguments.length and the parameters pointer.
3113  __ bind(&adaptor_frame);
3114  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3115  __ mov(Operand(esp, 1 * kPointerSize), ecx);
3116  __ lea(edx, Operand(edx, ecx, times_2,
3117                      StandardFrameConstants::kCallerSPOffset));
3118  __ mov(Operand(esp, 2 * kPointerSize), edx);
3119
3120  // Try the new space allocation. Start out with computing the size of
3121  // the arguments object and the elements array.
3122  Label add_arguments_object;
3123  __ bind(&try_allocate);
3124  __ test(ecx, Operand(ecx));
3125  __ j(zero, &add_arguments_object, Label::kNear);
3126  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
3127  __ bind(&add_arguments_object);
3128  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));
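  // E.g. zero arguments allocate only Heap::kArgumentsObjectSizeStrict bytes
  // (no elements array); n > 0 arguments add FixedArray::kHeaderSize plus
  // n * kPointerSize (ecx is a smi, so times_2 scales it to pointer size).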
3129
3130  // Do the allocation of both objects in one go.
3131  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
3132
3133  // Get the arguments boilerplate from the current (global) context.
3134  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3135  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3136  const int offset =
3137      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
3138  __ mov(edi, Operand(edi, offset));
3139
3140  // Copy the JS object part.
3141  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3142    __ mov(ebx, FieldOperand(edi, i));
3143    __ mov(FieldOperand(eax, i), ebx);
3144  }
3145
3146  // Get the length (smi tagged) and set that as an in-object property too.
3147  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3148  __ mov(ecx, Operand(esp, 1 * kPointerSize));
3149  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3150                      Heap::kArgumentsLengthIndex * kPointerSize),
3151         ecx);
3152
3153  // If there are no actual arguments, we're done.
3154  Label done;
3155  __ test(ecx, Operand(ecx));
3156  __ j(zero, &done);
3157
3158  // Get the parameters pointer from the stack.
3159  __ mov(edx, Operand(esp, 2 * kPointerSize));
3160
3161  // Set up the elements pointer in the allocated arguments object and
3162  // initialize the header in the elements fixed array.
3163  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
3164  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3165  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3166         Immediate(FACTORY->fixed_array_map()));
3167
3168  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3169  // Untag the length for the loop below.
3170  __ SmiUntag(ecx);
3171
3172  // Copy the fixed array slots.
3173  Label loop;
3174  __ bind(&loop);
3175  __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
3176  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
3177  __ add(Operand(edi), Immediate(kPointerSize));
3178  __ sub(Operand(edx), Immediate(kPointerSize));
3179  __ dec(ecx);
3180  __ j(not_zero, &loop);
3181
3182  // Return and remove the on-stack parameters.
3183  __ bind(&done);
3184  __ ret(3 * kPointerSize);
3185
3186  // Do the runtime call to allocate the arguments object.
3187  __ bind(&runtime);
3188  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
3189}
3190
3191
3192void RegExpExecStub::Generate(MacroAssembler* masm) {
3193  // Just jump directly to runtime if native RegExp is not selected at compile
3194  // time, or if the regexp entry in generated code is turned off by the
3195  // runtime switch.
3196#ifdef V8_INTERPRETED_REGEXP
3197  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3198#else  // V8_INTERPRETED_REGEXP
3199  if (!FLAG_regexp_entry_native) {
3200    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3201    return;
3202  }
3203
3204  // Stack frame on entry.
3205  //  esp[0]: return address
3206  //  esp[4]: last_match_info (expected JSArray)
3207  //  esp[8]: previous index
3208  //  esp[12]: subject string
3209  //  esp[16]: JSRegExp object
3210
3211  static const int kLastMatchInfoOffset = 1 * kPointerSize;
3212  static const int kPreviousIndexOffset = 2 * kPointerSize;
3213  static const int kSubjectOffset = 3 * kPointerSize;
3214  static const int kJSRegExpOffset = 4 * kPointerSize;
3215
3216  Label runtime, invoke_regexp;
3217
3218  // Ensure that a RegExp stack is allocated.
3219  ExternalReference address_of_regexp_stack_memory_address =
3220      ExternalReference::address_of_regexp_stack_memory_address(
3221          masm->isolate());
3222  ExternalReference address_of_regexp_stack_memory_size =
3223      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
3224  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3225  __ test(ebx, Operand(ebx));
3226  __ j(zero, &runtime);
3227
3228  // Check that the first argument is a JSRegExp object.
3229  __ mov(eax, Operand(esp, kJSRegExpOffset));
3230  STATIC_ASSERT(kSmiTag == 0);
3231  __ JumpIfSmi(eax, &runtime);
3232  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
3233  __ j(not_equal, &runtime);
3234  // Check that the RegExp has been compiled (data contains a fixed array).
3235  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3236  if (FLAG_debug_code) {
3237    __ test(ecx, Immediate(kSmiTagMask));
3238    __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
3239    __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
3240    __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
3241  }
3242
3243  // ecx: RegExp data (FixedArray)
3244  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3245  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
3246  __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
3247  __ j(not_equal, &runtime);
3248
3249  // ecx: RegExp data (FixedArray)
3250  // Check that the number of captures fits in the static offsets vector buffer.
3251  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3252  // Calculate number of capture registers (number_of_captures + 1) * 2. This
3253  // uses the assumption that smis are 2 * their untagged value.
3254  STATIC_ASSERT(kSmiTag == 0);
3255  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3256  __ add(Operand(edx), Immediate(2));  // edx was a smi.
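  // E.g. a regexp with two capture groups has number_of_captures == 2, held
  // as the smi 4; adding 2 yields 6 == (2 + 1) * 2 registers, a start and an
  // end offset for the whole match and for each group.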
3257  // Check that the static offsets vector buffer is large enough.
3258  __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
3259  __ j(above, &runtime);
3260
3261  // ecx: RegExp data (FixedArray)
3262  // edx: Number of capture registers
3263  // Check that the second argument is a string.
3264  __ mov(eax, Operand(esp, kSubjectOffset));
3265  __ JumpIfSmi(eax, &runtime);
3266  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
3267  __ j(NegateCondition(is_string), &runtime);
3268  // Get the length of the string to ebx.
3269  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
3270
3271  // ebx: Length of subject string as a smi
3272  // ecx: RegExp data (FixedArray)
3273  // edx: Number of capture registers
3274  // Check that the third argument is a positive smi less than the subject
3275  // string length. A negative value will be greater (unsigned comparison).
3276  __ mov(eax, Operand(esp, kPreviousIndexOffset));
3277  __ JumpIfNotSmi(eax, &runtime);
3278  __ cmp(eax, Operand(ebx));
3279  __ j(above_equal, &runtime);
3280
3281  // ecx: RegExp data (FixedArray)
3282  // edx: Number of capture registers
3283  // Check that the fourth object is a JSArray object.
3284  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3285  __ JumpIfSmi(eax, &runtime);
3286  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
3287  __ j(not_equal, &runtime);
3288  // Check that the JSArray is in fast case.
3289  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3290  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
3291  Factory* factory = masm->isolate()->factory();
3292  __ cmp(eax, factory->fixed_array_map());
3293  __ j(not_equal, &runtime);
3294  // Check that the last match info has space for the capture registers and the
3295  // additional information.
3296  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
3297  __ SmiUntag(eax);
3298  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
3299  __ cmp(edx, Operand(eax));
3300  __ j(greater, &runtime);
3301
3302  // ecx: RegExp data (FixedArray)
3303  // Check the representation and encoding of the subject string.
3304  Label seq_ascii_string, seq_two_byte_string, check_code;
3305  __ mov(eax, Operand(esp, kSubjectOffset));
3306  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3307  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
3308  // First check for flat two byte string.
3309  __ and_(ebx,
3310          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
3311  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
3312  __ j(zero, &seq_two_byte_string);
3313  // Any other flat string must be a flat ascii string.
3314  __ test(Operand(ebx),
3315          Immediate(kIsNotStringMask | kStringRepresentationMask));
3316  __ j(zero, &seq_ascii_string);
3317
3318  // Check for flat cons string.
3319  // A flat cons string is a cons string where the second part is the empty
3320  // string. In that case the subject string is just the first part of the cons
3321  // string. Also in this case the first part of the cons string is known to be
3322  // a sequential string or an external string.
3323  STATIC_ASSERT(kExternalStringTag != 0);
3324  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
3325  __ test(Operand(ebx),
3326          Immediate(kIsNotStringMask | kExternalStringTag));
3327  __ j(not_zero, &runtime);
3328  // String is a cons string.
3329  __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
3330  __ cmp(Operand(edx), factory->empty_string());
3331  __ j(not_equal, &runtime);
3332  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
3333  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3334  // String is a cons string with empty second part.
3335  // eax: first part of cons string.
3336  // ebx: map of first part of cons string.
3337  // Is first part a flat two byte string?
3338  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3339            kStringRepresentationMask | kStringEncodingMask);
3340  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
3341  __ j(zero, &seq_two_byte_string);
3342  // Any other flat string must be ascii.
3343  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3344            kStringRepresentationMask);
3345  __ j(not_zero, &runtime);
3346
3347  __ bind(&seq_ascii_string);
3348  // eax: subject string (flat ascii)
3349  // ecx: RegExp data (FixedArray)
3350  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
3351  __ Set(edi, Immediate(1));  // Type is ascii.
3352  __ jmp(&check_code);
3353
3354  __ bind(&seq_two_byte_string);
3355  // eax: subject string (flat two byte)
3356  // ecx: RegExp data (FixedArray)
3357  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
3358  __ Set(edi, Immediate(0));  // Type is two byte.
3359
3360  __ bind(&check_code);
3361  // Check that the irregexp code has been generated for the actual string
3362  // encoding. If it has, the field contains a code object; otherwise it
3363  // contains a smi (code flushing support).
3364  __ JumpIfSmi(edx, &runtime);
3365
3366  // eax: subject string
3367  // edx: code
3368  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
3369  // Load used arguments before starting to push arguments for call to native
3370  // RegExp code to avoid handling changing stack height.
3371  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
3372  __ SmiUntag(ebx);  // Previous index from smi.
3373
3374  // eax: subject string
3375  // ebx: previous index
3376  // edx: code
3377  // edi: encoding of subject string (1 if ascii, 0 if two_byte);
3378  // All checks done. Now push arguments for native regexp code.
3379  Counters* counters = masm->isolate()->counters();
3380  __ IncrementCounter(counters->regexp_entry_native(), 1);
3381
3382  // Isolates: note we add an additional parameter here (isolate pointer).
3383  static const int kRegExpExecuteArguments = 8;
3384  __ EnterApiExitFrame(kRegExpExecuteArguments);
3385
3386  // Argument 8: Pass current isolate address.
3387  __ mov(Operand(esp, 7 * kPointerSize),
3388      Immediate(ExternalReference::isolate_address()));
3389
3390  // Argument 7: Indicate that this is a direct call from JavaScript.
3391  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
3392
3393  // Argument 6: Start (high end) of backtracking stack memory area.
3394  __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
3395  __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3396  __ mov(Operand(esp, 5 * kPointerSize), ecx);
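  // The generated regexp code grows the backtracking stack downwards, so the
  // high end (base address plus size) is what gets passed in.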
3397
3398  // Argument 5: static offsets vector buffer.
3399  __ mov(Operand(esp, 4 * kPointerSize),
3400         Immediate(ExternalReference::address_of_static_offsets_vector(
3401             masm->isolate())));
3402
3403  // Argument 4: End of string data
3404  // Argument 3: Start of string data
3405  Label setup_two_byte, setup_rest;
3406  __ test(edi, Operand(edi));
3407  __ mov(edi, FieldOperand(eax, String::kLengthOffset));
3408  __ j(zero, &setup_two_byte, Label::kNear);
3409  __ SmiUntag(edi);
3410  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
3411  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
3412  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
3413  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
3414  __ jmp(&setup_rest, Label::kNear);
3415
3416  __ bind(&setup_two_byte);
3417  STATIC_ASSERT(kSmiTag == 0);
3418  STATIC_ASSERT(kSmiTagSize == 1);  // edi is a smi (already multiplied by 2).
3419  __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
3420  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
3421  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
3422  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
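  // edi still holds the smi length here, i.e. 2 * length, which is exactly
  // the byte length of a two-byte string, so times_1 scaling yields the end
  // of the string data; ebx (the untagged previous index) is scaled by
  // times_2 to convert characters to bytes.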
3423
3424  __ bind(&setup_rest);
3425
3426  // Argument 2: Previous index.
3427  __ mov(Operand(esp, 1 * kPointerSize), ebx);
3428
3429  // Argument 1: Subject string.
3430  __ mov(Operand(esp, 0 * kPointerSize), eax);
3431
3432  // Locate the code entry and call it.
3433  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
3434  __ call(Operand(edx));
3435
3436  // Drop arguments and come back to JS mode.
3437  __ LeaveApiExitFrame();
3438
3439  // Check the result.
3440  Label success;
3441  __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
3442  __ j(equal, &success);
3443  Label failure;
3444  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
3445  __ j(equal, &failure);
3446  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
3447  // If not exception, it can only be retry. Handle that in the runtime system.
3448  __ j(not_equal, &runtime);
3449  // Result must now be exception. If there is no pending exception already, a
3450  // stack overflow (on the backtrack stack) was detected in RegExp code, but
3451  // the exception has not been created yet. Handle that in the runtime system.
3452  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3453  ExternalReference pending_exception(Isolate::k_pending_exception_address,
3454                                      masm->isolate());
3455  __ mov(edx,
3456         Operand::StaticVariable(ExternalReference::the_hole_value_location(
3457             masm->isolate())));
3458  __ mov(eax, Operand::StaticVariable(pending_exception));
3459  __ cmp(edx, Operand(eax));
3460  __ j(equal, &runtime);
3461  // For exception, throw the exception again.
3462
3463  // Clear the pending exception variable.
3464  __ mov(Operand::StaticVariable(pending_exception), edx);
3465
3466  // Special handling of termination exceptions which are uncatchable
3467  // by javascript code.
3468  __ cmp(eax, factory->termination_exception());
3469  Label throw_termination_exception;
3470  __ j(equal, &throw_termination_exception);
3471
3472  // Handle normal exception by following handler chain.
3473  __ Throw(eax);
3474
3475  __ bind(&throw_termination_exception);
3476  __ ThrowUncatchable(TERMINATION, eax);
3477
3478  __ bind(&failure);
3479  // For failure to match, return null.
3480  __ mov(Operand(eax), factory->null_value());
3481  __ ret(4 * kPointerSize);
3482
3483  // Load RegExp data.
3484  __ bind(&success);
3485  __ mov(eax, Operand(esp, kJSRegExpOffset));
3486  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3487  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3488  // Calculate number of capture registers (number_of_captures + 1) * 2.
3489  STATIC_ASSERT(kSmiTag == 0);
3490  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3491  __ add(Operand(edx), Immediate(2));  // edx was a smi.
3492
3493  // edx: Number of capture registers
3494  // Load last_match_info which is still known to be a fast case JSArray.
3495  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3496  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3497
3498  // ebx: last_match_info backing store (FixedArray)
3499  // edx: number of capture registers
3500  // Store the capture count.
3501  __ SmiTag(edx);  // Number of capture registers to smi.
3502  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
3503  __ SmiUntag(edx);  // Number of capture registers back from smi.
3504  // Store last subject and last input.
3505  __ mov(eax, Operand(esp, kSubjectOffset));
3506  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
3507  __ mov(ecx, ebx);
3508  __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
3509  __ mov(eax, Operand(esp, kSubjectOffset));
3510  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
3511  __ mov(ecx, ebx);
3512  __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
3513
3514  // Get the static offsets vector filled by the native regexp code.
3515  ExternalReference address_of_static_offsets_vector =
3516      ExternalReference::address_of_static_offsets_vector(masm->isolate());
3517  __ mov(ecx, Immediate(address_of_static_offsets_vector));
3518
3519  // ebx: last_match_info backing store (FixedArray)
3520  // ecx: offsets vector
3521  // edx: number of capture registers
3522  Label next_capture, done;
3523  // Capture register counter starts from number of capture registers and
3524  // counts down until wrapping after zero.
3525  __ bind(&next_capture);
3526  __ sub(Operand(edx), Immediate(1));
3527  __ j(negative, &done, Label::kNear);
3528  // Read the value from the static offsets vector buffer.
3529  __ mov(edi, Operand(ecx, edx, times_int_size, 0));
3530  __ SmiTag(edi);
3531  // Store the smi value in the last match info.
3532  __ mov(FieldOperand(ebx,
3533                      edx,
3534                      times_pointer_size,
3535                      RegExpImpl::kFirstCaptureOffset),
3536         edi);
3537  __ jmp(&next_capture);
3538  __ bind(&done);
3539
3540  // Return last match info.
3541  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3542  __ ret(4 * kPointerSize);
3543
3544  // Do the runtime call to execute the regexp.
3545  __ bind(&runtime);
3546  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3547#endif  // V8_INTERPRETED_REGEXP
3548}
3549
3550
3551void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
3552  const int kMaxInlineLength = 100;
3553  Label slowcase;
3554  Label done;
3555  __ mov(ebx, Operand(esp, kPointerSize * 3));
3556  __ JumpIfNotSmi(ebx, &slowcase);
3557  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
3558  __ j(above, &slowcase);
3559  // Smi-tagging is equivalent to multiplying by 2.
3560  STATIC_ASSERT(kSmiTag == 0);
3561  STATIC_ASSERT(kSmiTagSize == 1);
3562  // Allocate RegExpResult followed by FixedArray with size in ebx.
3563  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
3564  // Elements:  [Map][Length][..elements..]
3565  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
3566                        times_half_pointer_size,
3567                        ebx,  // In: Number of elements (times 2, being a smi)
3568                        eax,  // Out: Start of allocation (tagged).
3569                        ecx,  // Out: End of allocation.
3570                        edx,  // Scratch register
3571                        &slowcase,
3572                        TAG_OBJECT);
3573  // eax: Start of allocated area, object-tagged.
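  // ebx was a smi (2 * number of elements) scaled by times_half_pointer_size
  // (i.e. by 2), so the variable part of the allocation came out to exactly
  // number_of_elements * kPointerSize bytes.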
3574
3575  // Set JSArray map to global.regexp_result_map().
3576  // Set empty properties FixedArray.
3577  // Set elements to point to FixedArray allocated right after the JSArray.
3578  // Interleave operations for better latency.
3579  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
3580  Factory* factory = masm->isolate()->factory();
3581  __ mov(ecx, Immediate(factory->empty_fixed_array()));
3582  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
3583  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
3584  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
3585  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
3586  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
3587  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
3588
3589  // Set input, index and length fields from arguments.
3590  __ mov(ecx, Operand(esp, kPointerSize * 1));
3591  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
3592  __ mov(ecx, Operand(esp, kPointerSize * 2));
3593  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
3594  __ mov(ecx, Operand(esp, kPointerSize * 3));
3595  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
3596
3597  // Fill out the elements FixedArray.
3598  // eax: JSArray.
3599  // ebx: FixedArray.
3600  // ecx: Number of elements in array, as smi.
3601
3602  // Set map.
3603  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
3604         Immediate(factory->fixed_array_map()));
3605  // Set length.
3606  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
3607  // Fill contents of fixed-array with the-hole.
3608  __ SmiUntag(ecx);
3609  __ mov(edx, Immediate(factory->the_hole_value()));
3610  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
3611  // Fill fixed array elements with hole.
3612  // eax: JSArray.
3613  // ecx: Number of elements to fill.
3614  // ebx: Start of elements in FixedArray.
3615  // edx: the hole.
3616  Label loop;
3617  __ test(ecx, Operand(ecx));
3618  __ bind(&loop);
3619  __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
3620  __ sub(Operand(ecx), Immediate(1));
3621  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
3622  __ jmp(&loop);
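  // The test above seeds the flags for the first loop iteration; afterwards
  // the sub updates them, so the loop exits once ecx reaches zero (or if it
  // was zero to begin with).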
3623
3624  __ bind(&done);
3625  __ ret(3 * kPointerSize);
3626
3627  __ bind(&slowcase);
3628  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
3629}
3630
3631
3632void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
3633                                                         Register object,
3634                                                         Register result,
3635                                                         Register scratch1,
3636                                                         Register scratch2,
3637                                                         bool object_is_smi,
3638                                                         Label* not_found) {
3639  // Use of registers. Register result is used as a temporary.
3640  Register number_string_cache = result;
3641  Register mask = scratch1;
3642  Register scratch = scratch2;
3643
3644  // Load the number string cache.
3645  ExternalReference roots_address =
3646      ExternalReference::roots_address(masm->isolate());
3647  __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
3648  __ mov(number_string_cache,
3649         Operand::StaticArray(scratch, times_pointer_size, roots_address));
3650  // Make the hash mask from the length of the number string cache. It
3651  // contains two elements (number and string) for each cache entry.
3652  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
3653  __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
3654  __ sub(Operand(mask), Immediate(1));  // Make mask.
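  // E.g. a cache with 128 entries is a FixedArray of length 256, stored as
  // the smi 512; shifting right by kSmiTagSize + 1 == 2 recovers 128, and
  // subtracting 1 gives the mask 127.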
3655
3656  // Calculate the entry in the number string cache. The hash value in the
3657  // number string cache for smis is just the smi value, and the hash for
3658  // doubles is the xor of the upper and lower words. See
3659  // Heap::GetNumberStringCache.
3660  Label smi_hash_calculated;
3661  Label load_result_from_cache;
3662  if (object_is_smi) {
3663    __ mov(scratch, object);
3664    __ SmiUntag(scratch);
3665  } else {
3666    Label not_smi;
3667    STATIC_ASSERT(kSmiTag == 0);
3668    __ JumpIfNotSmi(object, &not_smi, Label::kNear);
3669    __ mov(scratch, object);
3670    __ SmiUntag(scratch);
3671    __ jmp(&smi_hash_calculated, Label::kNear);
3672    __ bind(&not_smi);
3673    __ cmp(FieldOperand(object, HeapObject::kMapOffset),
3674           masm->isolate()->factory()->heap_number_map());
3675    __ j(not_equal, not_found);
3676    STATIC_ASSERT(8 == kDoubleSize);
3677    __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
3678    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
3679    // Object is heap number and hash is now in scratch. Calculate cache index.
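    // E.g. the heap number 1.0 has bits 0x3FF0000000000000, so its hash is
    // 0x00000000 ^ 0x3FF00000 == 0x3FF00000 before masking.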
3680    __ and_(scratch, Operand(mask));
3681    Register index = scratch;
3682    Register probe = mask;
3683    __ mov(probe,
3684           FieldOperand(number_string_cache,
3685                        index,
3686                        times_twice_pointer_size,
3687                        FixedArray::kHeaderSize));
3688    __ JumpIfSmi(probe, not_found);
3689    if (CpuFeatures::IsSupported(SSE2)) {
3690      CpuFeatures::Scope fscope(SSE2);
3691      __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
3692      __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
3693      __ ucomisd(xmm0, xmm1);
3694    } else {
3695      __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
3696      __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
3697      __ FCmp();
3698    }
3699    __ j(parity_even, not_found);  // Bail out if NaN is involved.
3700    __ j(not_equal, not_found);  // The cache did not contain this value.
3701    __ jmp(&load_result_from_cache, Label::kNear);
3702  }
3703
3704  __ bind(&smi_hash_calculated);
3705  // Object is smi and hash is now in scratch. Calculate cache index.
3706  __ and_(scratch, Operand(mask));
3707  Register index = scratch;
3708  // Check if the entry is the smi we are looking for.
3709  __ cmp(object,
3710         FieldOperand(number_string_cache,
3711                      index,
3712                      times_twice_pointer_size,
3713                      FixedArray::kHeaderSize));
3714  __ j(not_equal, not_found);
3715
3716  // Get the result from the cache.
3717  __ bind(&load_result_from_cache);
3718  __ mov(result,
3719         FieldOperand(number_string_cache,
3720                      index,
3721                      times_twice_pointer_size,
3722                      FixedArray::kHeaderSize + kPointerSize));
3723  Counters* counters = masm->isolate()->counters();
3724  __ IncrementCounter(counters->number_to_string_native(), 1);
3725}
3726
3727
3728void NumberToStringStub::Generate(MacroAssembler* masm) {
3729  Label runtime;
3730
3731  __ mov(ebx, Operand(esp, kPointerSize));
3732
3733  // Generate code to lookup number in the number string cache.
3734  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
3735  __ ret(1 * kPointerSize);
3736
3737  __ bind(&runtime);
3738  // Handle number to string in the runtime system if not found in the cache.
3739  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
3740}
3741
3742
3743static int NegativeComparisonResult(Condition cc) {
3744  ASSERT(cc != equal);
3745  ASSERT((cc == less) || (cc == less_equal)
3746      || (cc == greater) || (cc == greater_equal));
3747  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
3748}
3749
3750void CompareStub::Generate(MacroAssembler* masm) {
3751  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3752
3753  Label check_unequal_objects, done;
3754
3755  // Compare two smis if required.
3756  if (include_smi_compare_) {
3757    Label non_smi, smi_done;
3758    __ mov(ecx, Operand(edx));
3759    __ or_(ecx, Operand(eax));
3760    __ JumpIfNotSmi(ecx, &non_smi);
3761    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
3762    __ j(no_overflow, &smi_done);
3763    __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
3764    __ bind(&smi_done);
3765    __ mov(eax, edx);
3766    __ ret(0);
3767    __ bind(&non_smi);
3768  } else if (FLAG_debug_code) {
3769    __ mov(ecx, Operand(edx));
3770    __ or_(ecx, Operand(eax));
3771    __ test(ecx, Immediate(kSmiTagMask));
3772    __ Assert(not_zero, "Unexpected smi operands.");
3773  }
3774
3775  // NOTICE! This code is only reached after a smi-fast-case check, so
3776  // it is certain that at least one operand isn't a smi.
3777
3778  // Identical objects can be compared fast, but there are some tricky cases
3779  // for NaN and undefined.
3780  {
3781    Label not_identical;
3782    __ cmp(eax, Operand(edx));
3783    __ j(not_equal, &not_identical);
3784
3785    if (cc_ != equal) {
3786      // Check for undefined.  undefined OP undefined is false even though
3787      // undefined == undefined.
3788      Label check_for_nan;
3789      __ cmp(edx, masm->isolate()->factory()->undefined_value());
3790      __ j(not_equal, &check_for_nan, Label::kNear);
3791      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
3792      __ ret(0);
3793      __ bind(&check_for_nan);
3794    }
3795
3796    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
3797  // so we do the second-best thing: test it ourselves.
3798    // Note: if cc_ != equal, never_nan_nan_ is not used.
3799    if (never_nan_nan_ && (cc_ == equal)) {
3800      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
3801      __ ret(0);
3802    } else {
3803      Label heap_number;
3804      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3805             Immediate(masm->isolate()->factory()->heap_number_map()));
3806      __ j(equal, &heap_number, Label::kNear);
3807      if (cc_ != equal) {
3808        // Call runtime on identical JSObjects.  Otherwise return equal.
3809        __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
3810        __ j(above_equal, &not_identical);
3811      }
3812      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
3813      __ ret(0);
3814
3815      __ bind(&heap_number);
3816      // It is a heap number, so return non-equal if it's NaN and equal if
3817      // it's not NaN.
3818      // The representation of NaN values has all exponent bits (52..62) set,
3819      // and not all mantissa bits (0..51) clear.
3820      // We only accept QNaNs, which have bit 51 set.
3821      // Read top bits of double representation (second word of value).
3822
3823      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
3824      // all bits in the mask are set. We only need to check the word
3825      // that contains the exponent and high bit of the mantissa.
3826      STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
3827      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
3828      __ Set(eax, Immediate(0));
3829      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
3830      // bits.
3831      __ add(edx, Operand(edx));
3832      __ cmp(edx, kQuietNaNHighBitsMask << 1);
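      // Assuming kQuietNaNHighBitsMask == 0xfff << (51 - 32), its doubled
      // value is 0xFFF00000; a QNaN high word such as 0x7FF80000 doubles to
      // 0xFFF00000 and compares above_equal, while 1.0's high word
      // 0x3FF00000 doubles to 0x7FE00000 and compares below.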
3833      if (cc_ == equal) {
3834        STATIC_ASSERT(EQUAL != 1);
3835        __ setcc(above_equal, eax);
3836        __ ret(0);
3837      } else {
3838        Label nan;
3839        __ j(above_equal, &nan, Label::kNear);
3840        __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
3841        __ ret(0);
3842        __ bind(&nan);
3843        __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
3844        __ ret(0);
3845      }
3846    }
3847
3848    __ bind(&not_identical);
3849  }
3850
3851  // Strict equality can quickly decide whether objects are equal.
3852  // Non-strict object equality is slower, so it is handled later in the stub.
3853  if (cc_ == equal && strict_) {
3854    Label slow;  // Fallthrough label.
3855    Label not_smis;
3856    // If we're doing a strict equality comparison, we don't have to do
3857    // type conversion, so we generate code to do fast comparison for objects
3858    // and oddballs. Non-smi numbers and strings still go through the usual
3859    // slow-case code.
3860    // If either is a Smi (we know that not both are), then they can only
3861    // be equal if the other is a HeapNumber. If so, use the slow case.
3862    STATIC_ASSERT(kSmiTag == 0);
3863    ASSERT_EQ(0, Smi::FromInt(0));
3864    __ mov(ecx, Immediate(kSmiTagMask));
3865    __ and_(ecx, Operand(eax));
3866    __ test(ecx, Operand(edx));
3867    __ j(not_zero, &not_smis, Label::kNear);
3868    // One operand is a smi.
3869
3870    // Check whether the non-smi is a heap number.
3871    STATIC_ASSERT(kSmiTagMask == 1);
3872    // ecx still holds eax & kSmiTagMask, which is either zero or one.
3873    __ sub(Operand(ecx), Immediate(0x01));
3874    __ mov(ebx, edx);
3875    __ xor_(ebx, Operand(eax));
3876    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
3877    __ xor_(ebx, Operand(eax));
3878    // if eax was smi, ebx is now edx, else eax.
3879
3880    // Check if the non-smi operand is a heap number.
3881    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
3882           Immediate(masm->isolate()->factory()->heap_number_map()));
3883    // If heap number, handle it in the slow case.
3884    __ j(equal, &slow);
3885    // Return non-equal (ebx is not zero)
3886    __ mov(eax, ebx);
3887    __ ret(0);
3888
3889    __ bind(&not_smis);
3890    // If either operand is a JSObject or an oddball value, then they are not
3891    // equal since their pointers are different.
3892    // There is no test for undetectability in strict equality.
3893
3894    // Get the type of the first operand.
3895    // If the first object is a JS object, we have done pointer comparison.
3896    Label first_non_object;
3897    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
3898    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
3899    __ j(below, &first_non_object, Label::kNear);
3900
3901    // Return non-zero (eax is not zero)
3902    Label return_not_equal;
3903    STATIC_ASSERT(kHeapObjectTag != 0);
3904    __ bind(&return_not_equal);
3905    __ ret(0);
3906
3907    __ bind(&first_non_object);
3908    // Check for oddballs: true, false, null, undefined.
3909    __ CmpInstanceType(ecx, ODDBALL_TYPE);
3910    __ j(equal, &return_not_equal);
3911
3912    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
3913    __ j(above_equal, &return_not_equal);
3914
3915    // Check for oddballs: true, false, null, undefined.
3916    __ CmpInstanceType(ecx, ODDBALL_TYPE);
3917    __ j(equal, &return_not_equal);
3918
3919    // Fall through to the general case.
3920    __ bind(&slow);
3921  }
3922
3923  // Generate the number comparison code.
3924  if (include_number_compare_) {
3925    Label non_number_comparison;
3926    Label unordered;
3927    if (CpuFeatures::IsSupported(SSE2)) {
3928      CpuFeatures::Scope use_sse2(SSE2);
3929      CpuFeatures::Scope use_cmov(CMOV);
3930
3931      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
3932      __ ucomisd(xmm0, xmm1);
3933
3934      // Don't base result on EFLAGS when a NaN is involved.
3935      __ j(parity_even, &unordered);
3936      // Return a result of -1, 0, or 1, based on EFLAGS.
3937      __ mov(eax, 0);  // equal
3938      __ mov(ecx, Immediate(Smi::FromInt(1)));
3939      __ cmov(above, eax, Operand(ecx));
3940      __ mov(ecx, Immediate(Smi::FromInt(-1)));
3941      __ cmov(below, eax, Operand(ecx));
3942      __ ret(0);
3943    } else {
3944      FloatingPointHelper::CheckFloatOperands(
3945          masm, &non_number_comparison, ebx);
3946      FloatingPointHelper::LoadFloatOperand(masm, eax);
3947      FloatingPointHelper::LoadFloatOperand(masm, edx);
3948      __ FCmp();
3949
3950      // Don't base result on EFLAGS when a NaN is involved.
3951      __ j(parity_even, &unordered);
3952
3953      Label below_label, above_label;
3954      // Return a result of -1, 0, or 1, based on EFLAGS.
3955      __ j(below, &below_label);
3956      __ j(above, &above_label);
3957
3958      __ Set(eax, Immediate(0));
3959      __ ret(0);
3960
3961      __ bind(&below_label);
3962      __ mov(eax, Immediate(Smi::FromInt(-1)));
3963      __ ret(0);
3964
3965      __ bind(&above_label);
3966      __ mov(eax, Immediate(Smi::FromInt(1)));
3967      __ ret(0);
3968    }
3969
3970    // If one of the numbers was NaN, then the result is always false.
3971    // The cc is never not-equal.
3972    __ bind(&unordered);
3973    ASSERT(cc_ != not_equal);
3974    if (cc_ == less || cc_ == less_equal) {
3975      __ mov(eax, Immediate(Smi::FromInt(1)));
3976    } else {
3977      __ mov(eax, Immediate(Smi::FromInt(-1)));
3978    }
3979    __ ret(0);
3980
3981    // The number comparison code did not provide a valid result.
3982    __ bind(&non_number_comparison);
3983  }
3984
3985  // Fast negative check for symbol-to-symbol equality.
3986  Label check_for_strings;
3987  if (cc_ == equal) {
3988    BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
3989    BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
3990
3991    // We've already checked for object identity, so if both operands
3992    // are symbols they aren't equal. Register eax already holds a
3993    // non-zero value, which indicates not equal, so just return.
3994    __ ret(0);
3995  }
3996
3997  __ bind(&check_for_strings);
3998
3999  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4000                                         &check_unequal_objects);
4001
4002  // Inline comparison of ascii strings.
4003  if (cc_ == equal) {
4004    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
4005                                                     edx,
4006                                                     eax,
4007                                                     ecx,
4008                                                     ebx);
4009  } else {
4010    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4011                                                       edx,
4012                                                       eax,
4013                                                       ecx,
4014                                                       ebx,
4015                                                       edi);
4016  }
4017#ifdef DEBUG
4018  __ Abort("Unexpected fall-through from string comparison");
4019#endif
4020
4021  __ bind(&check_unequal_objects);
4022  if (cc_ == equal && !strict_) {
4023    // Non-strict equality.  Objects are unequal if
4024    // they are both JSObjects and not undetectable,
4025    // and their pointers are different.
4026    Label not_both_objects;
4027    Label return_unequal;
4028    // At most one is a smi, so we can test for smi by adding the two.
4029    // A smi plus a heap object has the low bit set, a heap object plus
4030    // a heap object has the low bit clear.
4031    STATIC_ASSERT(kSmiTag == 0);
4032    STATIC_ASSERT(kSmiTagMask == 1);
4033    __ lea(ecx, Operand(eax, edx, times_1, 0));
4034    __ test(ecx, Immediate(kSmiTagMask));
4035    __ j(not_zero, &not_both_objects, Label::kNear);
4036    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4037    __ j(below, &not_both_objects, Label::kNear);
4038    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
4039    __ j(below, &not_both_objects, Label::kNear);
4040    // We do not bail out after this point.  Both are JSObjects, and
4041    // they are equal if and only if both are undetectable.
4042    // The and of the undetectable flags is 1 if and only if they are equal.
4043    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4044              1 << Map::kIsUndetectable);
4045    __ j(zero, &return_unequal, Label::kNear);
4046    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4047              1 << Map::kIsUndetectable);
4048    __ j(zero, &return_unequal, Label::kNear);
4049    // The objects are both undetectable, so they both compare as the value
4050    // undefined, and are equal.
4051    __ Set(eax, Immediate(EQUAL));
4052    __ bind(&return_unequal);
4053    // Return non-equal by returning the non-zero object pointer in eax,
4054    // or return equal if we fell through to here.
4055    __ ret(0);
4056    __ bind(&not_both_objects);
4057  }
4058
4059  // Push arguments below the return address.
4060  __ pop(ecx);
4061  __ push(edx);
4062  __ push(eax);
4063
4064  // Figure out which native to call and setup the arguments.
4065  Builtins::JavaScript builtin;
4066  if (cc_ == equal) {
4067    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4068  } else {
4069    builtin = Builtins::COMPARE;
4070    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4071  }
4072
4073  // Restore return address on the stack.
4074  __ push(ecx);
4075
4076  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4077  // tagged as a small integer.
4078  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4079}
4080
4081
4082void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
4083                                    Label* label,
4084                                    Register object,
4085                                    Register scratch) {
4086  __ JumpIfSmi(object, label);
4087  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
4088  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
4089  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
4090  __ cmp(scratch, kSymbolTag | kStringTag);
4091  __ j(not_equal, label);
4092}
4093
4094
4095void StackCheckStub::Generate(MacroAssembler* masm) {
4096  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
4097}
4098
4099
4100void CallFunctionStub::Generate(MacroAssembler* masm) {
4101  Label slow;
4102
4103  // The receiver might implicitly be the global object. This is
4104  // indicated by passing the hole as the receiver to the call
4105  // function stub.
4106  if (ReceiverMightBeImplicit()) {
4107    Label call;
4108    // Get the receiver from the stack.
4109    // +1 ~ return address
4110    __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
4111    // Call as function is indicated with the hole.
4112    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
4113    __ j(not_equal, &call, Label::kNear);
4114    // Patch the receiver on the stack with the global receiver object.
4115    __ mov(ebx, GlobalObjectOperand());
4116    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
4117    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
4118    __ bind(&call);
4119  }
4120
4121  // Get the function to call from the stack.
4122  // +2 ~ receiver, return address
4123  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
4124
4125  // Check that the function really is a JavaScript function.
4126  __ JumpIfSmi(edi, &slow);
4127  // Go to the slow case if we do not have a function.
4128  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4129  __ j(not_equal, &slow);
4130
4131  // Fast-case: Just invoke the function.
4132  ParameterCount actual(argc_);
4133
4134  if (ReceiverMightBeImplicit()) {
4135    Label call_as_function;
4136    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
4137    __ j(equal, &call_as_function);
4138    __ InvokeFunction(edi,
4139                      actual,
4140                      JUMP_FUNCTION,
4141                      NullCallWrapper(),
4142                      CALL_AS_METHOD);
4143    __ bind(&call_as_function);
4144  }
4145  __ InvokeFunction(edi,
4146                    actual,
4147                    JUMP_FUNCTION,
4148                    NullCallWrapper(),
4149                    CALL_AS_FUNCTION);
4150
4151  // Slow-case: Non-function called.
4152  __ bind(&slow);
4153  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4154  // of the original receiver from the call site).
4155  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
4156  __ Set(eax, Immediate(argc_));
4157  __ Set(ebx, Immediate(0));
4158  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
4159  Handle<Code> adaptor =
4160      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
4161  __ SetCallKind(ecx, CALL_AS_METHOD);
4162  __ jmp(adaptor, RelocInfo::CODE_TARGET);
4163}
4164
4165
4166bool CEntryStub::NeedsImmovableCode() {
4167  return false;
4168}
4169
4170
4171void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
4172  __ Throw(eax);
4173}
4174
4175
4176void CEntryStub::GenerateCore(MacroAssembler* masm,
4177                              Label* throw_normal_exception,
4178                              Label* throw_termination_exception,
4179                              Label* throw_out_of_memory_exception,
4180                              bool do_gc,
4181                              bool always_allocate_scope) {
4182  // eax: result parameter for PerformGC, if any
4183  // ebx: pointer to C function  (C callee-saved)
4184  // ebp: frame pointer  (restored after C call)
4185  // esp: stack pointer  (restored after C call)
4186  // edi: number of arguments including receiver  (C callee-saved)
4187  // esi: pointer to the first argument (C callee-saved)
4188
4189  // Result returned in eax, or eax+edx if result_size_ is 2.
4190
4191  // Check stack alignment.
4192  if (FLAG_debug_code) {
4193    __ CheckStackAlignment();
4194  }
4195
4196  if (do_gc) {
4197    // Pass failure code returned from last attempt as first argument to
4198    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
4199    // stack alignment is known to be correct. This function takes one argument
4200    // which is passed on the stack, and we know that the stack has been
4201    // prepared to pass at least one argument.
4202    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
4203    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
4204  }
4205
4206  ExternalReference scope_depth =
4207      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
4208  if (always_allocate_scope) {
4209    __ inc(Operand::StaticVariable(scope_depth));
4210  }
4211
4212  // Call C function.
4213  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
4214  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
4215  __ mov(Operand(esp, 2 * kPointerSize),
4216         Immediate(ExternalReference::isolate_address()));
4217  __ call(Operand(ebx));
4218  // Result is in eax or edx:eax - do not destroy these registers!
4219
4220  if (always_allocate_scope) {
4221    __ dec(Operand::StaticVariable(scope_depth));
4222  }
4223
4224  // Make sure we're not trying to return 'the hole' from the runtime
4225  // call as this may lead to crashes in the IC code later.
4226  if (FLAG_debug_code) {
4227    Label okay;
4228    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
4229    __ j(not_equal, &okay, Label::kNear);
4230    __ int3();
4231    __ bind(&okay);
4232  }
4233
4234  // Check for failure result.
4235  Label failure_returned;
4236  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4237  __ lea(ecx, Operand(eax, 1));
4238  // Lower 2 bits of ecx are 0 iff eax has failure tag.
4239  __ test(ecx, Immediate(kFailureTagMask));
4240  __ j(zero, &failure_returned);
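  // A failure has kFailureTag (0b11, assuming the usual two-bit tag) in its
  // low bits, so eax + 1 is 4-aligned exactly for failures, as the
  // STATIC_ASSERT above guarantees.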
4241
4242  ExternalReference pending_exception_address(
4243      Isolate::k_pending_exception_address, masm->isolate());
4244
4245  // Check that there is no pending exception, otherwise we
4246  // should have returned some failure value.
4247  if (FLAG_debug_code) {
4248    __ push(edx);
4249    __ mov(edx, Operand::StaticVariable(
4250        ExternalReference::the_hole_value_location(masm->isolate())));
4251    Label okay;
4252    __ cmp(edx, Operand::StaticVariable(pending_exception_address));
4253    // Cannot use Check here, as it attempts to generate a call into the runtime.
4254    __ j(equal, &okay, Label::kNear);
4255    __ int3();
4256    __ bind(&okay);
4257    __ pop(edx);
4258  }
4259
4260  // Exit the JavaScript to C++ exit frame.
4261  __ LeaveExitFrame(save_doubles_);
4262  __ ret(0);
4263
4264  // Handling of failure.
4265  __ bind(&failure_returned);
4266
4267  Label retry;
4268  // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
4269  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4270  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4271  __ j(zero, &retry);
4272
4273  // Special handling of out of memory exceptions.
4274  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4275  __ j(equal, throw_out_of_memory_exception);
4276
4277  // Retrieve the pending exception and clear the variable.
4278  ExternalReference the_hole_location =
4279      ExternalReference::the_hole_value_location(masm->isolate());
4280  __ mov(eax, Operand::StaticVariable(pending_exception_address));
4281  __ mov(edx, Operand::StaticVariable(the_hole_location));
4282  __ mov(Operand::StaticVariable(pending_exception_address), edx);
4283
4284  // Special handling of termination exceptions which are uncatchable
4285  // by javascript code.
4286  __ cmp(eax, masm->isolate()->factory()->termination_exception());
4287  __ j(equal, throw_termination_exception);
4288
4289  // Handle normal exception.
4290  __ jmp(throw_normal_exception);
4291
4292  // Retry.
4293  __ bind(&retry);
4294}
4295
4296
4297void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
4298                                          UncatchableExceptionType type) {
4299  __ ThrowUncatchable(type, eax);
4300}
4301
4302
4303void CEntryStub::Generate(MacroAssembler* masm) {
4304  // eax: number of arguments including receiver
4305  // ebx: pointer to C function  (C callee-saved)
4306  // ebp: frame pointer  (restored after C call)
4307  // esp: stack pointer  (restored after C call)
4308  // esi: current context (C callee-saved)
4309  // edi: JS function of the caller (C callee-saved)
4310
4311  // NOTE: Invocations of builtins may return failure objects instead
4312  // of a proper result. The builtin entry handles this by performing
4313  // a garbage collection and retrying the builtin (twice).
4314
4315  // Enter the exit frame that transitions from JavaScript to C++.
4316  __ EnterExitFrame(save_doubles_);
4317
4318  // eax: result parameter for PerformGC, if any (setup below)
4319  // ebx: pointer to builtin function  (C callee-saved)
4320  // ebp: frame pointer  (restored after C call)
4321  // esp: stack pointer  (restored after C call)
4322  // edi: number of arguments including receiver (C callee-saved)
4323  // esi: argv pointer (C callee-saved)
4324
4325  Label throw_normal_exception;
4326  Label throw_termination_exception;
4327  Label throw_out_of_memory_exception;
4328
4329  // Call into the runtime system.
4330  GenerateCore(masm,
4331               &throw_normal_exception,
4332               &throw_termination_exception,
4333               &throw_out_of_memory_exception,
4334               false,
4335               false);
4336
4337  // Do space-specific GC and retry runtime call.
4338  GenerateCore(masm,
4339               &throw_normal_exception,
4340               &throw_termination_exception,
4341               &throw_out_of_memory_exception,
4342               true,
4343               false);
4344
4345  // Do full GC and retry runtime call one final time.
4346  Failure* failure = Failure::InternalError();
4347  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
4348  GenerateCore(masm,
4349               &throw_normal_exception,
4350               &throw_termination_exception,
4351               &throw_out_of_memory_exception,
4352               true,
4353               true);
4354
4355  __ bind(&throw_out_of_memory_exception);
4356  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
4357
4358  __ bind(&throw_termination_exception);
4359  GenerateThrowUncatchable(masm, TERMINATION);
4360
4361  __ bind(&throw_normal_exception);
4362  GenerateThrowTOS(masm);
4363}
4364
4365
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  Label invoke, exit;
  Label not_outermost_js, not_outermost_js_2;

  // Set up the frame.
  __ push(ebp);
  __ mov(ebp, Operand(esp));

  // Push marker in two places.
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  __ push(Immediate(Smi::FromInt(marker)));  // context slot
  __ push(Immediate(Smi::FromInt(marker)));  // function slot
  // Save callee-saved registers (C calling conventions).
  __ push(edi);
  __ push(esi);
  __ push(ebx);

  // Save copies of the top frame descriptor on the stack.
  ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, masm->isolate());
  __ push(Operand::StaticVariable(c_entry_fp));
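  // The entry frame built so far, relative to ebp:
  //   [ebp + 0]  : saved ebp
  //   [ebp - 4]  : entry frame marker (context slot)
  //   [ebp - 8]  : entry frame marker (function slot)
  //   [ebp - 12] : saved edi
  //   [ebp - 16] : saved esi
  //   [ebp - 20] : saved ebx
  //   [ebp - 24] : saved c_entry_fp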

  // If this is the outermost JS call, set js_entry_sp value.
  ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address,
                                masm->isolate());
  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ j(not_equal, &not_outermost_js);
  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
  __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ jmp(&cont);
  __ bind(&not_outermost_js);
  __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);

  // Call a faked try-block that does the invoke.
  __ call(&invoke);

  // Caught exception: Store result (exception) in the pending
  // exception field in the JSEnv and return a failure sentinel.
  ExternalReference pending_exception(Isolate::k_pending_exception_address,
                                      masm->isolate());
  __ mov(Operand::StaticVariable(pending_exception), eax);
  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
  __ jmp(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);

  // Clear any pending exceptions.
  ExternalReference the_hole_location =
      ExternalReference::the_hole_value_location(masm->isolate());
  __ mov(edx, Operand::StaticVariable(the_hole_location));
  __ mov(Operand::StaticVariable(pending_exception), edx);

  // Fake a receiver (NULL).
  __ push(Immediate(0));  // receiver

  // Invoke the function by calling through the JS entry trampoline
  // builtin and pop the faked function when we return. Notice that we
  // cannot store a reference to the trampoline code directly in this
  // stub, because the builtin stubs may not have been generated yet.
  if (is_construct) {
    ExternalReference construct_entry(
        Builtins::kJSConstructEntryTrampoline,
        masm->isolate());
    __ mov(edx, Immediate(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline,
                            masm->isolate());
    __ mov(edx, Immediate(entry));
  }
  __ mov(edx, Operand(edx, 0));  // deref address
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ call(Operand(edx));

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);
  // Check if the current stack frame is marked as the outermost JS frame.
  __ pop(ebx);
  __ cmp(Operand(ebx),
         Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ j(not_equal, &not_outermost_js_2);
  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ bind(&not_outermost_js_2);

  // Restore the top frame descriptor from the stack.
  __ pop(Operand::StaticVariable(ExternalReference(
      Isolate::k_c_entry_fp_address,
      masm->isolate())));

  // Restore callee-saved registers (C calling conventions).
  __ pop(ebx);
  __ pop(esi);
  __ pop(edi);
  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers

  // Restore frame pointer and return.
  __ pop(ebp);
  __ ret(0);
}


// Generate stub code for instanceof.
// This code can patch a call site's inlined cache of the instanceof check,
// which looks like this:
//
//   81 ff XX XX XX XX   cmp    edi, <the hole, patched to a map>
//   75 0a               jne    <some near label>
//   b8 XX XX XX XX      mov    eax, <the hole, patched to either true or false>
//
// If call site patching is requested the stack will have the delta from the
// return address to the cmp instruction just below the return address. This
// also means that call site patching can only take place with arguments in
// registers. TOS looks like this when call site patching is requested:
//
//   esp[0] : return address
//   esp[4] : delta from return address to cmp instruction
//
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Call site inlining and patching implies arguments in registers.
  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());

  // Fixed register usage throughout the stub.
  Register object = eax;  // Object (lhs).
  Register map = ebx;  // Map of the object.
  Register function = edx;  // Function (rhs).
  Register prototype = edi;  // Prototype of the function.
  Register scratch = ecx;

  // Constants describing the call site code to patch.
  static const int kDeltaToCmpImmediate = 2;
  static const int kDeltaToMov = 8;
  static const int kDeltaToMovImmediate = 9;
  static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
  static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
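  // These deltas follow from the code sequence shown above: the cmp
  // immediate starts 2 bytes into the 6-byte cmp instruction, the jne
  // occupies the next 2 bytes, so the mov opcode sits at byte 8 and its
  // immediate at byte 9.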

  ExternalReference roots_address =
      ExternalReference::roots_address(masm->isolate());

  ASSERT_EQ(object.code(), InstanceofStub::left().code());
  ASSERT_EQ(function.code(), InstanceofStub::right().code());

  // Get the object and function - they are always both needed.
  Label slow, not_js_object;
  if (!HasArgsInRegisters()) {
    __ mov(object, Operand(esp, 2 * kPointerSize));
    __ mov(function, Operand(esp, 1 * kPointerSize));
  }

  // Check that the left hand is a JS object.
  __ JumpIfSmi(object, &not_js_object);
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

  // If there is a call site cache don't look in the global cache, but do the
  // real lookup and update the call site cache.
  if (!HasCallSiteInlineCheck()) {
    // Look up the function and the map in the instanceof cache.
    Label miss;
    __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
    __ cmp(function,
           Operand::StaticArray(scratch, times_pointer_size, roots_address));
    __ j(not_equal, &miss, Label::kNear);
    __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
    __ cmp(map, Operand::StaticArray(
        scratch, times_pointer_size, roots_address));
    __ j(not_equal, &miss, Label::kNear);
    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
    __ mov(eax, Operand::StaticArray(
        scratch, times_pointer_size, roots_address));
    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
    __ bind(&miss);
  }

  // Get the prototype of the function.
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);

  // Check that the function prototype is a JS object.
  __ JumpIfSmi(prototype, &slow);
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
  if (!HasCallSiteInlineCheck()) {
    __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
    __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
           map);
    __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
    __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
           function);
  } else {
    // The constants for the code patching are based on no push instructions
    // at the call site.
    ASSERT(HasArgsInRegisters());
    // Get return address and delta to inlined map check.
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
      __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
      __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
      __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
    }
    __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
  }

  // Loop through the prototype chain of the object looking for the function
  // prototype.
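  // Roughly: for (p = object.map.prototype; p != null; p = p.map.prototype)
  //   if (p == prototype) goto is_instance;
  // Falling off the chain at null means is_not_instance.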
  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
  Label loop, is_instance, is_not_instance;
  __ bind(&loop);
  __ cmp(scratch, Operand(prototype));
  __ j(equal, &is_instance, Label::kNear);
  Factory* factory = masm->isolate()->factory();
  __ cmp(Operand(scratch), Immediate(factory->null_value()));
  __ j(equal, &is_not_instance, Label::kNear);
  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
  __ jmp(&loop);

  __ bind(&is_instance);
  if (!HasCallSiteInlineCheck()) {
    __ Set(eax, Immediate(0));
    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
    __ mov(Operand::StaticArray(scratch,
                                times_pointer_size, roots_address), eax);
  } else {
    __ mov(eax, factory->true_value());
    // Get return address and delta to inlined map check.
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
      __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
    }
    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
    if (!ReturnTrueFalseObject()) {
      __ Set(eax, Immediate(0));
    }
  }
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&is_not_instance);
  if (!HasCallSiteInlineCheck()) {
    __ Set(eax, Immediate(Smi::FromInt(1)));
    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
    __ mov(Operand::StaticArray(
        scratch, times_pointer_size, roots_address), eax);
  } else {
    __ mov(eax, factory->false_value());
    // Get return address and delta to inlined map check.
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
      __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
    }
    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
    if (!ReturnTrueFalseObject()) {
      __ Set(eax, Immediate(Smi::FromInt(1)));
    }
  }
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  Label object_not_null, object_not_null_or_smi;
  __ bind(&not_js_object);
  // Before null, smi and string value checks, check that the rhs is a
  // function, as for a non-function rhs an exception needs to be thrown.
  __ JumpIfSmi(function, &slow);
  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
  __ j(not_equal, &slow);

  // Null is not an instance of anything.
  __ cmp(object, factory->null_value());
  __ j(not_equal, &object_not_null);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&object_not_null);
  // Smi values are not instances of anything.
  __ JumpIfNotSmi(object, &object_not_null_or_smi);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&object_not_null_or_smi);
  // String values are not instances of anything.
  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
  __ j(NegateCondition(is_string), &slow);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  // Slow-case: Go through the JavaScript implementation.
  __ bind(&slow);
  if (!ReturnTrueFalseObject()) {
    // Tail call the builtin which returns 0 or 1.
    if (HasArgsInRegisters()) {
      // Push arguments below return address.
      __ pop(scratch);
      __ push(object);
      __ push(function);
      __ push(scratch);
    }
    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
  } else {
    // Call the builtin and convert 0/1 to true/false.
    __ EnterInternalFrame();
    __ push(object);
    __ push(function);
    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
    __ LeaveInternalFrame();
    Label true_value, done;
    __ test(eax, Operand(eax));
    __ j(zero, &true_value, Label::kNear);
    __ mov(eax, factory->false_value());
    __ jmp(&done, Label::kNear);
    __ bind(&true_value);
    __ mov(eax, factory->true_value());
    __ bind(&done);
    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
  }
}


Register InstanceofStub::left() { return eax; }


Register InstanceofStub::right() { return edx; }


int CompareStub::MinorKey() {
  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
  // stubs, the never-NaN-NaN condition is only taken into account if the
  // condition is equals.
  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
  return ConditionField::encode(static_cast<unsigned>(cc_))
         | RegisterField::encode(false)   // lhs_ and rhs_ are not used
         | StrictField::encode(strict_)
         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
         | IncludeNumberCompareField::encode(include_number_compare_)
         | IncludeSmiCompareField::encode(include_smi_compare_);
}


// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
void CompareStub::PrintName(StringStream* stream) {
  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
  const char* cc_name;
  switch (cc_) {
    case less: cc_name = "LT"; break;
    case greater: cc_name = "GT"; break;
    case less_equal: cc_name = "LE"; break;
    case greater_equal: cc_name = "GE"; break;
    case equal: cc_name = "EQ"; break;
    case not_equal: cc_name = "NE"; break;
    default: cc_name = "UnknownCondition"; break;
  }
  bool is_equality = cc_ == equal || cc_ == not_equal;
  stream->Add("CompareStub_%s", cc_name);
  if (strict_ && is_equality) stream->Add("_STRICT");
  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
  if (!include_number_compare_) stream->Add("_NO_NUMBER");
  if (!include_smi_compare_) stream->Add("_NO_SMI");
}


// -------------------------------------------------------------------------
// StringCharCodeAtGenerator

void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  Label flat_string;
  Label ascii_string;
  Label got_char_code;

  // If the receiver is a smi, trigger the non-string case.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(object_, receiver_not_string_);

  // Fetch the instance type of the receiver into result register.
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  // If the receiver is not a string, trigger the non-string case.
  __ test(result_, Immediate(kIsNotStringMask));
  __ j(not_zero, receiver_not_string_);

  // If the index is not a smi, trigger the non-smi case.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(index_, &index_not_smi_);

  // Put smi-tagged index into scratch register.
  __ mov(scratch_, index_);
  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
  __ j(above_equal, index_out_of_range_);

  // We need special handling for non-flat strings.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result_, Immediate(kStringRepresentationMask));
  __ j(zero, &flat_string);

  // Handle non-flat strings.
  __ test(result_, Immediate(kIsConsStringMask));
  __ j(zero, &call_runtime_);

  // ConsString.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
         Immediate(masm->isolate()->factory()->empty_string()));
  __ j(not_equal, &call_runtime_);
  // Get the first of the two strings and load its instance type.
  __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  // If the first cons component is also non-flat, then go to runtime.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result_, Immediate(kStringRepresentationMask));
  __ j(not_zero, &call_runtime_);

  // Check for 1-byte or 2-byte string.
  __ bind(&flat_string);
  STATIC_ASSERT(kAsciiStringTag != 0);
  __ test(result_, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii_string);

  // 2-byte string.
  // Load the 2-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzx_w(result_, FieldOperand(object_,
                                   scratch_, times_1,  // Scratch is smi-tagged.
                                   SeqTwoByteString::kHeaderSize));
  __ jmp(&got_char_code);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii_string);
  __ SmiUntag(scratch_);
  __ movzx_b(result_, FieldOperand(object_,
                                   scratch_, times_1,
                                   SeqAsciiString::kHeaderSize));
  __ bind(&got_char_code);
  __ SmiTag(result_);
  __ bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
  __ Abort("Unexpected fallthrough to CharCodeAt slow case");

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              masm->isolate()->factory()->heap_number_map(),
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ push(index_);
  __ push(index_);  // Consumed by runtime conversion function.
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }
  if (!scratch_.is(eax)) {
    // Save the conversion result before the pop instructions below
    // have a chance to overwrite it.
    __ mov(scratch_, eax);
  }
  __ pop(index_);
  __ pop(object_);
  // Reload the instance type.
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(scratch_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ jmp(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code of getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ push(index_);
  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
}


// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
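  // A single test rejects both non-smis (the smi tag bit is set) and smi
  // char codes above kMaxAsciiCharCode (a payload bit above the ascii range
  // is set); the power-of-two assert above is what makes the mask below
  // exact.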
  __ test(code_,
          Immediate(kSmiTagMask |
                    ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
  __ j(not_zero, &slow_case_);

  Factory* factory = masm->isolate()->factory();
  __ Set(result_, Immediate(factory->single_character_string_cache()));
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiShiftSize == 0);
  // At this point code register contains smi tagged ascii char code.
  __ mov(result_, FieldOperand(result_,
                               code_, times_half_pointer_size,
                               FixedArray::kHeaderSize));
  __ cmp(result_, factory->undefined_value());
  __ j(equal, &slow_case_);
  __ bind(&exit_);
}


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
  __ Abort("Unexpected fallthrough to CharFromCode slow case");

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort("Unexpected fallthrough from CharFromCode slow case");
}


// -------------------------------------------------------------------------
// StringCharAtGenerator

void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
  char_code_at_generator_.GenerateFast(masm);
  char_from_code_generator_.GenerateFast(masm);
}


void StringCharAtGenerator::GenerateSlow(
    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
  char_code_at_generator_.GenerateSlow(masm, call_helper);
  char_from_code_generator_.GenerateSlow(masm, call_helper);
}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label string_add_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

  // Load the two arguments.
  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  if (flags_ == NO_STRING_ADD_FLAGS) {
    __ JumpIfSmi(eax, &string_add_runtime);
    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
    __ j(above_equal, &string_add_runtime);

    // First argument is a string, test second.
    __ JumpIfSmi(edx, &string_add_runtime);
    __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
    __ j(above_equal, &string_add_runtime);
  } else {
    // Here at least one of the arguments is definitely a string.
    // We convert the one that is not known to be a string.
    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
      GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
                              &call_builtin);
      builtin_id = Builtins::STRING_ADD_RIGHT;
    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
      GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
                              &call_builtin);
      builtin_id = Builtins::STRING_ADD_LEFT;
    }
  }

  // Both arguments are strings.
  // eax: first string
  // edx: second string
  // Check if either of the strings is empty. In that case return the other.
  Label second_not_zero_length, both_not_zero_length;
  __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ test(ecx, Operand(ecx));
  __ j(not_zero, &second_not_zero_length, Label::kNear);
  // Second string is empty, result is first string which is already in eax.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);
  __ bind(&second_not_zero_length);
  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ test(ebx, Operand(ebx));
  __ j(not_zero, &both_not_zero_length, Label::kNear);
  // First string is empty, result is second string which is in edx.
  __ mov(eax, edx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Both strings are non-empty.
  // eax: first string
  // ebx: length of first string as a smi
  // ecx: length of second string as a smi
  // edx: second string
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result, longer_than_two;
  __ bind(&both_not_zero_length);
  __ add(ebx, Operand(ecx));
  STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
  // Handle exceptionally long strings in the runtime system.
  __ j(overflow, &string_add_runtime);
  // Use the symbol table when adding two one character strings, as it
  // helps later optimizations to return a symbol here.
  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
  __ j(not_equal, &longer_than_two);

  // Check that both strings are non-external ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
                                         &string_add_runtime);

  // Get the two characters forming the new string.
  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));

  // Try to look up the two character string in the symbol table. If it is
  // not found just allocate a new one.
  Label make_two_character_string, make_two_character_string_no_reload;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, ebx, ecx, eax, edx, edi,
      &make_two_character_string_no_reload, &make_two_character_string);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Allocate a two character string.
  __ bind(&make_two_character_string);
  // Reload the arguments.
  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
  // Get the two characters forming the new string.
  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
  __ bind(&make_two_character_string_no_reload);
  __ IncrementCounter(counters->string_add_make_two_char(), 1);
  __ AllocateAsciiString(eax,  // Result.
                         2,    // Length.
                         edi,  // Scratch 1.
                         edx,  // Scratch 2.
                         &string_add_runtime);
  // Pack both characters in ebx.
  __ shl(ecx, kBitsPerByte);
  __ or_(ebx, Operand(ecx));
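  // ebx now holds char 1 in byte 0 and char 2 in byte 1, matching the
  // in-memory layout of a sequential ascii string body, so the single
  // 16-bit store below writes both characters at once.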
  // Set the characters in the new string.
  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&longer_than_two);
  // Check if resulting string will be flat.
  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
  __ j(below, &string_add_flat_result);

  // If the result is not supposed to be flat, allocate a cons string object.
  // If both strings are ascii the result is an ascii cons string.
  Label non_ascii, allocated, ascii_data;
  __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
  __ and_(ecx, Operand(edi));
  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
  __ test(ecx, Immediate(kAsciiStringTag));
  __ j(zero, &non_ascii);
  __ bind(&ascii_data);
  // Allocate an ascii cons string.
  __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
  __ bind(&allocated);
  // Fill the fields of the cons string.
  if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
  __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
  __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
         Immediate(String::kEmptyHashField));
  __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
  __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
  __ mov(eax, ecx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);
  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
  // to contain only ascii characters.
  // ecx: first instance type AND second instance type.
  // edi: second instance type.
  __ test(ecx, Immediate(kAsciiDataHintMask));
  __ j(not_zero, &ascii_data);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ xor_(edi, Operand(ecx));
  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
  __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
  __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
  __ j(equal, &ascii_data);
  // Allocate a two byte cons string.
  __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
  __ jmp(&allocated);

  // Handle creating a flat result. First check that both strings are not
  // external strings.
  // eax: first string
  // ebx: length of resulting flat string as a smi
  // edx: second string
  __ bind(&string_add_flat_result);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ and_(ecx, kStringRepresentationMask);
  __ cmp(ecx, kExternalStringTag);
  __ j(equal, &string_add_runtime);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ and_(ecx, kStringRepresentationMask);
  __ cmp(ecx, kExternalStringTag);
  __ j(equal, &string_add_runtime);
  // Now check if both strings are ascii strings.
  // eax: first string
  // ebx: length of resulting flat string as a smi
  // edx: second string
  Label non_ascii_string_add_flat_result;
  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
  __ j(zero, &non_ascii_string_add_flat_result);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
  __ j(zero, &string_add_runtime);

  // Both strings are ascii strings.  As they are short they are both flat.
  // ebx: length of resulting flat string as a smi
  __ SmiUntag(ebx);
  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
  // eax: result string
  __ mov(ecx, eax);
  // Locate first character of result.
  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Load first argument and locate first character.
  __ mov(edx, Operand(esp, 2 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: first character of result
  // edx: first char of first argument
  // edi: length of first argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
  // Load second argument and locate first character.
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: next character of result
  // edx: first char of second argument
  // edi: length of second argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Handle creating a flat two byte result.
  // eax: first string - known to be two byte
  // ebx: length of resulting flat string as a smi
  // edx: second string
  __ bind(&non_ascii_string_add_flat_result);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
  __ j(not_zero, &string_add_runtime);
  // Both strings are two byte strings. As they are short they are both
  // flat.
  __ SmiUntag(ebx);
  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
  // eax: result string
  __ mov(ecx, eax);
  // Locate first character of result.
  __ add(Operand(ecx),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load first argument and locate first character.
  __ mov(edx, Operand(esp, 2 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ add(Operand(edx),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: first character of result
  // edx: first char of first argument
  // edi: length of first argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
  // Load second argument and locate first character.
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ add(Operand(edx),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: next character of result
  // edx: first char of second argument
  // edi: length of second argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Just jump to runtime to add the two strings.
  __ bind(&string_add_runtime);
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
  }
}


void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
  __ j(below, &done);

  // Check the number to string cache.
  Label not_cached;
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
                                                      arg,
                                                      scratch1,
                                                      scratch2,
                                                      scratch3,
                                                      false,
                                                      &not_cached);
  __ mov(arg, scratch1);
  __ mov(Operand(esp, stack_offset), arg);
  __ jmp(&done);

  // Check if the argument is a safe string wrapper.
  __ bind(&not_cached);
  __ JumpIfSmi(arg, slow);
  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
  __ j(not_equal, slow);
  __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
            1 << Map::kStringWrapperSafeForDefaultValueOf);
  __ j(zero, slow);
  __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
  __ mov(Operand(esp, stack_offset), arg);

  __ bind(&done);
}


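// Roughly equivalent to this C sketch (one character per iteration; the
// callers pass count > 0, which the loop below relies on):
//
//   do {
//     *dest++ = *src++;  // bytes if ascii, 16-bit units otherwise
//   } while (--count != 0);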
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          bool ascii) {
  Label loop;
  __ bind(&loop);
  // This loop just copies one character at a time, as it is only used for
  // very short strings.
  if (ascii) {
    __ mov_b(scratch, Operand(src, 0));
    __ mov_b(Operand(dest, 0), scratch);
    __ add(Operand(src), Immediate(1));
    __ add(Operand(dest), Immediate(1));
  } else {
    __ mov_w(scratch, Operand(src, 0));
    __ mov_w(Operand(dest, 0), scratch);
    __ add(Operand(src), Immediate(2));
    __ add(Operand(dest), Immediate(2));
  }
  __ sub(Operand(count), Immediate(1));
  __ j(not_zero, &loop);
}


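// Strategy below: convert the character count to a byte count, bulk-copy
// whole doublewords with rep movs, then finish the remaining 0-3 bytes one
// at a time. Zero-character copies are handled up front.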
void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
                                             Register dest,
                                             Register src,
                                             Register count,
                                             Register scratch,
                                             bool ascii) {
  // Copy characters using rep movs of doublewords.
  // The destination is aligned on a 4 byte boundary because we are
  // copying to the beginning of a newly allocated string.
  ASSERT(dest.is(edi));  // rep movs destination
  ASSERT(src.is(esi));  // rep movs source
  ASSERT(count.is(ecx));  // rep movs count
  ASSERT(!scratch.is(dest));
  ASSERT(!scratch.is(src));
  ASSERT(!scratch.is(count));

  // Nothing to do for zero characters.
  Label done;
  __ test(count, Operand(count));
  __ j(zero, &done);

  // Make count the number of bytes to copy.
  if (!ascii) {
    __ shl(count, 1);
  }

  // Don't enter the rep movs if there are less than 4 bytes to copy.
  Label last_bytes;
  __ test(count, Immediate(~3));
  __ j(zero, &last_bytes, Label::kNear);

  // Copy from esi to edi using the rep movs instruction.
  __ mov(scratch, count);
  __ sar(count, 2);  // Number of doublewords to copy.
  __ cld();
  __ rep_movs();

  // Find number of bytes left.
  __ mov(count, scratch);
  __ and_(count, 3);

  // Check if there are more bytes to copy.
  __ bind(&last_bytes);
  __ test(count, Operand(count));
  __ j(zero, &done);

  // Copy remaining characters.
  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(src, 0));
  __ mov_b(Operand(dest, 0), scratch);
  __ add(Operand(src), Immediate(1));
  __ add(Operand(dest), Immediate(1));
  __ sub(Operand(count), Immediate(1));
  __ j(not_zero, &loop);

  __ bind(&done);
}


void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
                                                        Register c1,
                                                        Register c2,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Label* not_probed,
                                                        Label* not_found) {
  // Register scratch3 is the general scratch register in this function.
  Register scratch = scratch3;

  // Make sure that both characters are not digits, as such strings have a
  // different hash algorithm. Don't try to look for these in the symbol
  // table.
  Label not_array_index;
  __ mov(scratch, c1);
  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
  __ j(above, &not_array_index, Label::kNear);
  __ mov(scratch, c2);
  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
  __ j(below_equal, not_probed);

  __ bind(&not_array_index);
  // Calculate the two character string hash.
  Register hash = scratch1;
  GenerateHashInit(masm, hash, c1, scratch);
  GenerateHashAddCharacter(masm, hash, c2, scratch);
  GenerateHashGetHash(masm, hash, scratch);

  // Collect the two characters in a register.
  Register chars = c1;
  __ shl(c2, kBitsPerByte);
  __ or_(chars, Operand(c2));

  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:  hash of two character string.

  // Load the symbol table.
  Register symbol_table = c2;
  ExternalReference roots_address =
      ExternalReference::roots_address(masm->isolate());
  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
  __ mov(symbol_table,
         Operand::StaticArray(scratch, times_pointer_size, roots_address));

  // Calculate capacity mask from the symbol table capacity.
  Register mask = scratch2;
  __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
  __ SmiUntag(mask);
  __ sub(Operand(mask), Immediate(1));

  // Registers
  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:         hash of two character string
  // symbol_table: symbol table
  // mask:         capacity mask
  // scratch:      -

  // Perform a number of probes in the symbol table.
5402  Label found_in_symbol_table;
5403  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
5404  for (int i = 0; i < kProbes; i++) {
5405    // Calculate entry in symbol table.
5406    __ mov(scratch, hash);
5407    if (i > 0) {
5408      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
5409    }
5410    __ and_(scratch, Operand(mask));
5411
5412    // Load the entry from the symbol table.
5413    Register candidate = scratch;  // Scratch register contains candidate.
5414    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5415    __ mov(candidate,
5416           FieldOperand(symbol_table,
5417                        scratch,
5418                        times_pointer_size,
5419                        SymbolTable::kElementsStartOffset));
5420
5421    // If entry is undefined no string with this hash can be found.
5422    Factory* factory = masm->isolate()->factory();
5423    __ cmp(candidate, factory->undefined_value());
5424    __ j(equal, not_found);
5425    __ cmp(candidate, factory->null_value());
5426    __ j(equal, &next_probe[i]);
5427
5428    // If length is not 2 the string is not a candidate.
5429    __ cmp(FieldOperand(candidate, String::kLengthOffset),
5430           Immediate(Smi::FromInt(2)));
5431    __ j(not_equal, &next_probe[i]);
5432
5433    // As we are out of registers save the mask on the stack and use that
5434    // register as a temporary.
5435    __ push(mask);
5436    Register temp = mask;
5437
5438    // Check that the candidate is a non-external ascii string.
5439    __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
5440    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5441    __ JumpIfInstanceTypeIsNotSequentialAscii(
5442        temp, temp, &next_probe_pop_mask[i]);
5443
5444    // Check if the two characters match.
5445    __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
5446    __ and_(temp, 0x0000ffff);
5447    __ cmp(chars, Operand(temp));
5448    __ j(equal, &found_in_symbol_table);
5449    __ bind(&next_probe_pop_mask[i]);
5450    __ pop(mask);
5451    __ bind(&next_probe[i]);
5452  }
5453
5454  // No matching 2 character string found by probing.
5455  __ jmp(not_found);
5456
5457  // Scratch register contains result when we fall through to here.
5458  Register result = scratch;
5459  __ bind(&found_in_symbol_table);
5460  __ pop(mask);  // Pop saved mask from the stack.
5461  if (!result.is(eax)) {
5462    __ mov(eax, result);
5463  }
5464}
5465
5466
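// Taken together, the three hash helpers below compute the string hash
// roughly as in this C sketch (for reference only; note the stub uses
// arithmetic right shifts):
//
//   uint32_t hash = 0;
//   for (each character c) {
//     hash += c;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (hash == 0) hash = 27;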
void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character,
                                    Register scratch) {
  // hash = character + (character << 10);
  __ mov(hash, character);
  __ shl(hash, 10);
  __ add(hash, Operand(character));
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ sar(scratch, 6);
  __ xor_(hash, Operand(scratch));
}


void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character,
                                            Register scratch) {
  // hash += character;
  __ add(hash, Operand(character));
  // hash += hash << 10;
  __ mov(scratch, hash);
  __ shl(scratch, 10);
  __ add(hash, Operand(scratch));
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ sar(scratch, 6);
  __ xor_(hash, Operand(scratch));
}


void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash,
                                       Register scratch) {
  // hash += hash << 3;
  __ mov(scratch, hash);
  __ shl(scratch, 3);
  __ add(hash, Operand(scratch));
  // hash ^= hash >> 11;
  __ mov(scratch, hash);
  __ sar(scratch, 11);
  __ xor_(hash, Operand(scratch));
  // hash += hash << 15;
  __ mov(scratch, hash);
  __ shl(scratch, 15);
  __ add(hash, Operand(scratch));

  // if (hash == 0) hash = 27;
  Label hash_not_zero;
  __ test(hash, Operand(hash));
  __ j(not_zero, &hash_not_zero, Label::kNear);
  __ mov(hash, Immediate(27));
  __ bind(&hash_not_zero);
}


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: to
  //  esp[8]: from
  //  esp[12]: string

  // Make sure first argument is a string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(eax, &runtime);
  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
  __ j(NegateCondition(is_string), &runtime);

  // eax: string
  // ebx: instance type

  // Calculate length of sub string using the smi values.
  Label result_longer_than_two;
  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
  __ JumpIfNotSmi(ecx, &runtime);
  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
  __ JumpIfNotSmi(edx, &runtime);
  __ sub(ecx, Operand(edx));
  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
  Label return_eax;
  __ j(equal, &return_eax);
  // Special handling of sub-strings of length 1 and 2. One character strings
  // are handled in the runtime system (looked up in the single character
  // cache). Two character strings are looked for in the symbol cache.
  __ SmiUntag(ecx);  // Result length is no longer smi.
  __ cmp(ecx, 2);
  __ j(greater, &result_longer_than_two);
  __ j(less, &runtime);

  // Sub string of length 2 requested.
  // eax: string
  // ebx: instance type
  // ecx: sub string length (value is 2)
  // edx: from index (smi)
  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);

  // Get the two characters forming the sub string.
  __ SmiUntag(edx);  // From index is no longer smi.
  __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
  __ movzx_b(ecx,
             FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));

  // Try to look up the two character string in the symbol table.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, ebx, ecx, eax, edx, edi,
      &make_two_character_string, &make_two_character_string);
  __ ret(3 * kPointerSize);

  __ bind(&make_two_character_string);
  // Set up registers for allocating the two character string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ Set(ecx, Immediate(2));

  __ bind(&result_longer_than_two);
  // eax: string
  // ebx: instance type
  // ecx: result string length
  // Check for flat ascii string.
  Label non_ascii_flat;
  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);

  // Allocate the result.
  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  __ SmiUntag(ebx);
  __ add(esi, Operand(ebx));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
  __ mov(esi, edx);  // Restore esi.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  __ bind(&non_ascii_flat);
  // eax: string
  // ebx: instance type & (kStringRepresentationMask | kStringEncodingMask)
  // ecx: result string length
  // Check for flat two byte string.
  __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
  __ j(not_equal, &runtime);

  // Allocate the result.
  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  // As from is a smi it is 2 times the value which matches the size of a two
  // byte character.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(esi, Operand(ebx));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
  __ mov(esi, edx);  // Restore esi.

  __ bind(&return_eax);
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
}


void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ mov(length, FieldOperand(left, String::kLengthOffset));
  __ cmp(length, FieldOperand(right, String::kLengthOffset));
  __ j(equal, &check_zero_length, Label::kNear);
  __ bind(&strings_not_equal);
  __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
  __ ret(0);

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ test(length, Operand(length));
  __ j(not_zero, &compare_chars, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Compare characters.
  __ bind(&compare_chars);
  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
                                &strings_not_equal, Label::kNear);

  // Characters are equal.
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);
}


5706void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
5707                                                        Register left,
5708                                                        Register right,
5709                                                        Register scratch1,
5710                                                        Register scratch2,
5711                                                        Register scratch3) {
5712  Counters* counters = masm->isolate()->counters();
5713  __ IncrementCounter(counters->string_compare_native(), 1);
5714
5715  // Find minimum length.
5716  Label left_shorter;
5717  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
5718  __ mov(scratch3, scratch1);
5719  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
5720
5721  Register length_delta = scratch3;
5722
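  // The branch below consumes the flags set by the sub above; the register
  // alias declaration in between emits no code.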
  __ j(less_equal, &left_shorter, Label::kNear);
  // Right string is shorter. Change scratch1 to be length of right string.
  __ sub(scratch1, Operand(length_delta));
  __ bind(&left_shorter);

  Register min_length = scratch1;

  // If either length is zero, just compare lengths.
  Label compare_lengths;
  __ test(min_length, Operand(min_length));
  __ j(zero, &compare_lengths, Label::kNear);

  // Compare characters.
  Label result_not_equal;
  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
                                &result_not_equal, Label::kNear);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  __ test(length_delta, Operand(length_delta));
  __ j(not_zero, &result_not_equal, Label::kNear);

  // Result is EQUAL.
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  Label result_greater;
  __ bind(&result_not_equal);
  __ j(greater, &result_greater, Label::kNear);

  // Result is LESS.
  __ Set(eax, Immediate(Smi::FromInt(LESS)));
  __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
  __ ret(0);
}


void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch,
    Label* chars_not_equal,
    Label::Distance chars_not_equal_near) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
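  //
  // A plain C sketch of the same pattern (illustrative only; p and use()
  // are hypothetical):
  //
  //   const char* p_end = p + length;   // one past the last character
  //   for (int i = -length; i != 0; i++) {
  //     use(p_end[i]);                  // visits p[0] .. p[length - 1]
  //   }
  //
  // The increment's zero flag doubles as the loop-termination test.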
  __ SmiUntag(length);
  __ lea(left,
         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
  __ lea(right,
         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
  __ neg(length);
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(left, index, times_1, 0));
  __ cmpb(scratch, Operand(right, index, times_1, 0));
  __ j(not_equal, chars_not_equal, chars_not_equal_near);
  __ add(Operand(index), Immediate(1));
  __ j(not_zero, &loop);
}


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: right string
  //  esp[8]: left string

  __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
  __ mov(eax, Operand(esp, 1 * kPointerSize));  // right

  Label not_same;
  __ cmp(edx, Operand(eax));
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&not_same);

  // Check that both objects are sequential ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);

  // Compare flat ascii strings.
  // Drop arguments from the stack.
  __ pop(ecx);
  __ add(Operand(esp), Immediate(2 * kPointerSize));
  __ push(ecx);
  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  Label miss;
  __ mov(ecx, Operand(edx));
  __ or_(ecx, Operand(eax));
  __ JumpIfNotSmi(ecx, &miss, Label::kNear);

  if (GetCondition() == equal) {
    // For equality we do not care about the sign of the result.
    __ sub(eax, Operand(edx));
  } else {
    Label done;
    __ sub(edx, Operand(eax));
    __ j(no_overflow, &done, Label::kNear);
    // Correct sign of result in case of overflow.
    __ not_(edx);
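    // (On signed overflow the computed sign is the opposite of the true
    // sign of edx - eax, and NOT flips the sign bit. The magnitude is
    // wrong afterwards, but only the sign of the result matters here.)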
    __ bind(&done);
    __ mov(eax, edx);
  }
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  Label generic_stub;
  Label unordered;
  Label miss;
  __ mov(ecx, Operand(edx));
  __ and_(ecx, Operand(eax));
  __ JumpIfSmi(ecx, &generic_stub, Label::kNear);

  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);
  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);

  // Inline the double comparison, falling back to the general compare
  // stub if NaN is involved or SSE2 or CMOV is unsupported.
  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
    CpuFeatures::Scope scope1(SSE2);
    CpuFeatures::Scope scope2(CMOV);

    // Load the left and right operands.
    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));

    // Compare the operands.
    __ ucomisd(xmm0, xmm1);

    // Don't base the result on EFLAGS when a NaN is involved.
    __ j(parity_even, &unordered, Label::kNear);

    // Return a result of -1, 0, or 1, based on EFLAGS.
    // Use mov rather than xor to clear eax, because xor would clobber
    // EFLAGS.
    __ mov(eax, 0);  // equal
    __ mov(ecx, Immediate(Smi::FromInt(1)));
    __ cmov(above, eax, Operand(ecx));
    __ mov(ecx, Immediate(Smi::FromInt(-1)));
    __ cmov(below, eax, Operand(ecx));
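    // eax now holds Smi::FromInt(1) if xmm0 > xmm1, Smi::FromInt(-1) if
    // xmm0 < xmm1, and 0 (the zero smi) if they are equal.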
    __ ret(0);

    __ bind(&unordered);
  }

  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
  __ bind(&generic_stub);
  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SYMBOLS);
  ASSERT(GetCondition() == equal);

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;

  // Check that both operands are heap objects.
  Label miss;
  __ mov(tmp1, Operand(left));
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, Operand(right));
  __ JumpIfSmi(tmp1, &miss, Label::kNear);

  // Check that both operands are symbols.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(tmp1, Operand(tmp2));
  __ test(tmp1, Immediate(kIsSymbolMask));
  __ j(zero, &miss, Label::kNear);

  // Symbols are compared by identity.
  Label done;
  __ cmp(left, Operand(right));
  // If they are not equal, eax must be non-zero to signal NOT_EQUAL; this
  // holds because eax still contains the right operand, a heap object
  // pointer, which is guaranteed to be non-zero.
  ASSERT(right.is(eax));
  __ j(not_equal, &done, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRINGS);
  ASSERT(GetCondition() == equal);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;
  Register tmp3 = edi;

  // Check that both operands are heap objects.
  __ mov(tmp1, Operand(left));
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, Operand(right));
  __ JumpIfSmi(tmp1, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  __ mov(tmp3, tmp1);
  STATIC_ASSERT(kNotStringTag != 0);
  __ or_(tmp3, Operand(tmp2));
  __ test(tmp3, Immediate(kIsNotStringMask));
  __ j(not_zero, &miss);

  // Fast check for identical strings.
  Label not_same;
  __ cmp(left, Operand(right));
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Handle not identical strings.
  __ bind(&not_same);

  // Check that both strings are symbols. If they are, we're done
  // because we already know they are not identical.
  Label do_compare;
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(tmp1, Operand(tmp2));
  __ test(tmp1, Immediate(kIsSymbolMask));
  __ j(zero, &do_compare, Label::kNear);
  // eax must be non-zero to signal NOT_EQUAL; this holds because eax still
  // contains the right operand, a heap object pointer, which is guaranteed
  // to be non-zero.
  ASSERT(right.is(eax));
  __ ret(0);

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ bind(&do_compare);
  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);

  // Compare flat ASCII strings. Returns when done.
  StringCompareStub::GenerateFlatAsciiStringEquals(
      masm, left, right, tmp1, tmp2);

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ pop(tmp1);  // Return address.
  __ push(left);
  __ push(right);
  __ push(tmp1);
  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  Label miss;
  __ mov(ecx, Operand(edx));
  __ and_(ecx, Operand(eax));
  __ JumpIfSmi(ecx, &miss, Label::kNear);

  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);
  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);

  ASSERT(GetCondition() == equal);
  __ sub(eax, Operand(edx));
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  // Save the registers.
  __ pop(ecx);
  __ push(edx);
  __ push(eax);
  __ push(ecx);

  // Call the runtime system in a fresh internal frame.
  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
                                             masm->isolate());
  __ EnterInternalFrame();
  __ push(edx);
  __ push(eax);
  __ push(Immediate(Smi::FromInt(op_)));
  __ CallExternalReference(miss, 3);
  __ LeaveInternalFrame();

  // Compute the entry point of the rewritten stub.
  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
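  // (The miss handler returns the rewritten Code object in eax; its first
  // instruction starts immediately after the Code header.)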

  // Restore registers.
  __ pop(ecx);
  __ pop(eax);
  __ pop(edx);
  __ push(ecx);

  // Do a tail call to the rewritten stub.
  __ jmp(Operand(edi));
}


// Helper function used to check that the dictionary doesn't contain
// the property. This function may return false negatives, so the code
// at |miss| must always fall back to a complete property check.
// This function is safe to call if the receiver has fast properties.
// Name must be a symbol and receiver must be a heap object.
MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
    MacroAssembler* masm,
    Label* miss,
    Label* done,
    Register properties,
    String* name,
    Register r0) {
  ASSERT(name->IsSymbol());

  // If none of the slots probed for the hash value contains the name, and
  // one of the probes hits an unused slot (its name is the undefined
  // value), the hash table is guaranteed not to contain the property. This
  // holds even if some of the probed slots hold deleted properties (their
  // names are the null value).
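  //
  // Sketch of the probe sequence (illustrative only), assuming a
  // power-of-two capacity so that mask == capacity - 1:
  //
  //   int slot(int hash, int i, int mask) {
  //     return (hash + i + i * i) & mask;  // quadratic probing
  //   }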
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = r0;
    // The capacity is a smi and always a power of two (2^n).
    __ mov(index, FieldOperand(properties, kCapacityOffset));
    __ dec(index);
    __ and_(Operand(index),
           Immediate(Smi::FromInt(name->Hash() +
                                   StringDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
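    // (lea computes index + index * 2 in a single instruction without
    // clobbering the flags.)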
    Register entity_name = r0;
    // An undefined value in this slot means the name is not in the
    // dictionary.
    ASSERT_EQ(kSmiTagSize, 1);
    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                kElementsStartOffset - kHeapObjectTag));
    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
    __ j(equal, done);

    // Stop if the property was found.
    __ cmp(entity_name, Handle<String>(name));
    __ j(equal, miss);

    // Check if the entry name is not a symbol.
    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
    __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
              kIsSymbolMask);
    __ j(zero, miss);
  }

  StringDictionaryLookupStub stub(properties,
                                  r0,
                                  r0,
                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
  __ push(Immediate(Handle<Object>(name)));
  __ push(Immediate(name->Hash()));
  MaybeObject* result = masm->TryCallStub(&stub);
  if (result->IsFailure()) return result;
  __ test(r0, Operand(r0));
  __ j(not_zero, miss);
  __ jmp(done);
  return result;
}


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r0|. Jump to the |miss| label
// otherwise.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register r0,
                                                        Register r1) {
  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  __ mov(r1, FieldOperand(elements, kCapacityOffset));
  __ shr(r1, kSmiTagSize);  // convert smi to int
  __ dec(r1);

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
    __ shr(r0, String::kHashShift);
    if (i > 0) {
      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(r0, Operand(r1));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3

    // Check if the key is identical to the name.
    __ cmp(name, Operand(elements,
                         r0,
                         times_4,
                         kElementsStartOffset - kHeapObjectTag));
    __ j(equal, done);
  }

  StringDictionaryLookupStub stub(elements,
                                  r1,
                                  r0,
                                  POSITIVE_LOOKUP);
  __ push(name);
  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
  __ shr(r0, String::kHashShift);
  __ push(r0);
  __ CallStub(&stub);

  __ test(r1, Operand(r1));
  __ j(zero, miss);
  __ jmp(done);
}


void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // Stack frame on entry:
  //  esp[0 * kPointerSize]: return address.
  //  esp[1 * kPointerSize]: key's hash.
  //  esp[2 * kPointerSize]: key.
  // Registers:
  //  dictionary_: StringDictionary to probe.
  //  result_: used as scratch.
  //  index_: will hold the index of the entry if the lookup is successful.
  //          It might alias with result_.
  // Returns:
  //  result_ is zero if the lookup failed, non-zero otherwise.

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  Register scratch = result_;

  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
  __ dec(scratch);
  __ SmiUntag(scratch);
  __ push(scratch);
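  // The pushed mask shifts the incoming stack arguments: the key's hash is
  // now at esp[2 * kPointerSize] and the key itself at
  // esp[3 * kPointerSize].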

  // If none of the slots probed for the hash value contains the name, and
  // one of the probes hits an unused slot (its name is the undefined
  // value), the hash table is guaranteed not to contain the property. This
  // holds even if some of the probed slots hold deleted properties (their
  // names are the null value).
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(scratch, Operand(esp, 2 * kPointerSize));
    if (i > 0) {
      __ add(Operand(scratch),
             Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(esp, 0));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.

    // An undefined value in this slot means the name is not in the
    // dictionary.
    ASSERT_EQ(kSmiTagSize, 1);
    __ mov(scratch, Operand(dictionary_,
                            index_,
                            times_pointer_size,
                            kElementsStartOffset - kHeapObjectTag));
    __ cmp(scratch, masm->isolate()->factory()->undefined_value());
    __ j(equal, &not_in_dictionary);

    // Stop if the property was found.
    __ cmp(scratch, Operand(esp, 3 * kPointerSize));
    __ j(equal, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // If we hit a non-symbol key during a negative lookup, we have to
      // bail out, as this key might be equal to the key we are looking
      // for.

      // Check if the entry name is not a symbol.
      __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
      __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
                kIsSymbolMask);
      __ j(zero, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing a negative lookup, probing failure should be treated
  // as a lookup success. For a positive lookup, probing failure should be
  // treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result_, Immediate(0));
    __ Drop(1);
    __ ret(2 * kPointerSize);
  }

  __ bind(&in_dictionary);
  __ mov(result_, Immediate(1));
  __ Drop(1);
  __ ret(2 * kPointerSize);

  __ bind(&not_in_dictionary);
  __ mov(result_, Immediate(0));
  __ Drop(1);
  __ ret(2 * kPointerSize);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32