code-stubs-ia32.cc revision 692be65d6b06edd9ff4cfc4c308555b7c99c1191
1// Copyright 2011 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6//     * Redistributions of source code must retain the above copyright
7//       notice, this list of conditions and the following disclaimer.
8//     * Redistributions in binary form must reproduce the above
9//       copyright notice, this list of conditions and the following
10//       disclaimer in the documentation and/or other materials provided
11//       with the distribution.
12//     * Neither the name of Google Inc. nor the names of its
13//       contributors may be used to endorse or promote products derived
14//       from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#if defined(V8_TARGET_ARCH_IA32)
31
32#include "bootstrapper.h"
33#include "code-stubs.h"
34#include "isolate.h"
35#include "jsregexp.h"
36#include "regexp-macro-assembler.h"
37
38namespace v8 {
39namespace internal {
40
41#define __ ACCESS_MASM(masm)
42
void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in eax.
  // Fast paths: a Smi or a HeapNumber is already a number and is returned
  // unchanged; everything else tail-calls the generic TO_NUMBER builtin.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
  __ ret(0);

  __ bind(&check_heap_number);
  // Compare the argument's map against the canonical heap-number map.
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  Factory* factory = masm->isolate()->factory();
  __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
  __ j(not_equal, &call_builtin, Label::kNear);
  __ ret(0);

  __ bind(&call_builtin);
  // Re-push the argument beneath the return address so the builtin sees
  // it as its (only) stack argument, then jump (not call) into it.
  __ pop(ecx);  // Pop return address.
  __ push(eax);
  __ push(ecx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
62
63
void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in esi.
  // On failure to allocate, falls back to Runtime::kNewClosure.
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  // Strict-mode functions use a distinct function map.
  int map_index = strict_mode_ == kStrictMode
      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
      : Context::FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  Factory* factory = masm->isolate()->factory();
  __ mov(ebx, Immediate(factory->empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(factory->the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(factory->undefined_value()));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  // Rebuild the runtime arguments (context, shared info, false_value —
  // presumably the pretenure flag; confirm against Runtime_NewClosure)
  // beneath the saved return address.
  __ bind(&gc);
  __ pop(ecx);  // Temporarily remove return address.
  __ pop(edx);
  __ push(esi);
  __ push(edx);
  __ push(Immediate(factory->false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
117
118
void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Allocate and initialize a function context with slots_ extra slots
  // beyond the fixed minimum; falls back to the runtime when new-space
  // allocation fails. Takes the closure as the single stack argument.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Setup the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->function_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // Setup the fixed slots.
  __ Set(ebx, Immediate(0));  // Set to NULL.
  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);

  // Initialize the rest of the slots to undefined.
  __ mov(ebx, factory->undefined_value());
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
  }

  // Return and remove the on-stack parameter. The new context is both
  // the return value (eax) and the current context (esi).
  __ mov(esi, Operand(eax));
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
160
161
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Clone a boilerplate JSArray (header plus, when length_ > 0, its
  // elements FixedArray) in one new-space allocation. Falls back to
  // Runtime::kCreateArrayLiteralShallow when there is no boilerplate
  // yet or allocation fails.
  //
  // Stack layout on entry:
  //
  // [esp + kPointerSize]: constant elements.
  // [esp + (2 * kPointerSize)]: literal index.
  // [esp + (3 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into ecx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ mov(ecx, Operand(esp, 3 * kPointerSize));
  __ mov(eax, Operand(esp, 2 * kPointerSize));
  // The literal index is a Smi, so scaling by half a pointer converts it
  // into a FixedArray element offset.
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  Factory* factory = masm->isolate()->factory();
  __ cmp(ecx, factory->undefined_value());
  __ j(equal, &slow_case);

  if (FLAG_debug_code) {
    // Verify the boilerplate's elements have the map the clone mode
    // expects (writable vs. copy-on-write FixedArray).
    const char* message;
    Handle<Map> expected_map;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map = factory->fixed_array_map();
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map = factory->fixed_cow_array_map();
    }
    __ push(ecx);
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
    __ Assert(equal, message);
    __ pop(ecx);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);

  // Copy the JS array part. The elements pointer is skipped when a fresh
  // elements array will be attached below.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(eax, i), ebx);
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and setup the
    // elements pointer in the resulting object.
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ lea(edx, Operand(eax, JSArray::kSize));
    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);

    // Copy the elements array.
    for (int i = 0; i < elements_size; i += kPointerSize) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(edx, i), ebx);
    }
  }

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
237
238
239// The stub expects its argument on the stack and returns its result in tos_:
240// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // Specialized ToBoolean conversion: only the type cases recorded in
  // types_ are compiled in. A value of any other type falls through to
  // &patch, which transitions the stub to a more general one.
  Label patch;
  Factory* factory = masm->isolate()->factory();
  const Register argument = eax;
  const Register map = edx;

  if (!types_.IsEmpty()) {
    __ mov(argument, Operand(esp, 1 * kPointerSize));
  }

  // undefined -> false
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);

  // Boolean -> its value
  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);

  // 'null' -> false.
  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);

  if (types_.Contains(SMI)) {
    // Smis: 0 -> false, all other -> true
    Label not_smi;
    __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ mov(tos_, argument);
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_smi);
  } else if (types_.NeedsMap()) {
    // If we need a map later and have a Smi -> patch.
    __ JumpIfSmi(argument, &patch, Label::kNear);
  }

  if (types_.NeedsMap()) {
    __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));

    if (types_.CanBeUndetectable()) {
      __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                1 << Map::kIsUndetectable);
      // Undetectable -> false.
      Label not_undetectable;
      __ j(zero, &not_undetectable, Label::kNear);
      __ Set(tos_, Immediate(0));
      __ ret(1 * kPointerSize);
      __ bind(&not_undetectable);
    }
  }

  if (types_.Contains(SPEC_OBJECT)) {
    // spec object -> true.
    Label not_js_object;
    __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
    __ j(below, &not_js_object, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, Immediate(1));
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_js_object);
  }

  if (types_.Contains(STRING)) {
    // String value -> false iff empty.
    Label not_string;
    __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
    __ j(above_equal, &not_string, Label::kNear);
    // The Smi-encoded length is zero exactly for the empty string, so it
    // can be returned directly as the boolean result.
    __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
    __ ret(1 * kPointerSize);  // the string length is OK as the return value
    __ bind(&not_string);
  }

  if (types_.Contains(HEAP_NUMBER)) {
    // heap number -> false iff +0, -0, or NaN.
    // FCmp against 0.0 sets ZF both for equality and for the unordered
    // (NaN) case, so a single j(zero) covers all three false values.
    Label not_heap_number, false_result;
    __ cmp(map, factory->heap_number_map());
    __ j(not_equal, &not_heap_number, Label::kNear);
    __ fldz();
    __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
    __ FCmp();
    __ j(zero, &false_result, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, Immediate(1));
    }
    __ ret(1 * kPointerSize);
    __ bind(&false_result);
    __ Set(tos_, Immediate(0));
    __ ret(1 * kPointerSize);
    __ bind(&not_heap_number);
  }

  __ bind(&patch);
  GenerateTypeTransition(masm);
}
337
338
// Emit a compare-and-return sequence for one oddball value, but only if
// 'type' is among the cases this stub specializes for. On a match the
// fixed boolean 'result' is returned in tos_; otherwise control falls
// through to the next check.
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                 Type type,
                                 Heap::RootListIndex value,
                                 bool result) {
  const Register argument = eax;
  if (types_.Contains(type)) {
    // If we see an expected oddball, return its ToBoolean value tos_.
    Label different_value;
    __ CompareRoot(argument, value);
    __ j(not_equal, &different_value, Label::kNear);
    if (!result) {
      // If we have to return zero, there is no way around clearing tos_.
      __ Set(tos_, Immediate(0));
    } else if (!tos_.is(argument)) {
      // If we have to return non-zero, we can re-use the argument if it is the
      // same register as the result, because we never see Smi-zero here.
      __ Set(tos_, Immediate(1));
    }
    __ ret(1 * kPointerSize);
    __ bind(&different_value);
  }
}
361
362
// Fall-back path: hand the operand plus this stub's configuration
// (result register code and observed-type bits) to the IC runtime so it
// can patch the call site with a more general ToBoolean stub.
void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Get return address, operand is now on top of stack.
  __ push(Immediate(Smi::FromInt(tos_.code())));
  __ push(Immediate(Smi::FromInt(types_.ToByte())));
  __ push(ecx);  // Push return address.
  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
      3,
      1);
}
375
376
// Stateless collection of code-emission helpers shared by the arithmetic
// stubs for loading, checking and converting floating-point operands,
// with variants for the x87 FPU stack and for SSE2 registers.
class FloatingPointHelper : public AllStatic {
 public:
  // Where the two operands live on entry to a load helper.
  enum ArgLocation {
    ARGS_ON_STACK,
    ARGS_IN_REGISTERS
  };

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in register number. Returns operand as floating point number
  // on FPU stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
  // Returns operands as floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register scratch,
                                ArgLocation arg_location = ARGS_ON_STACK);

  // Similar to LoadFloatOperand but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);

  // Checks that the two floating point numbers on top of the FPU stack
  // have int32 values.
  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
                                         Label* non_int32);

  // Takes the operands in edx and eax and loads them as integers in eax
  // and ecx.
  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                     bool use_sse3,
                                     Label* operand_conversion_failure);

  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
  // operands are pushed on the stack, and that their conversions to int32
  // are in eax and ecx.  Checks that the original numbers were in the int32
  // range.
  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                           bool use_sse3,
                                           Label* not_int32);

  // Assumes that operands are smis or heap numbers and loads them
  // into xmm0 and xmm1. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm);

  // Test if operands are numbers (smi or HeapNumber objects), and load
  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
  // either operand is not a number.  Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);

  // Similar to LoadSSE2Operands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);

  // Checks that the two floating point numbers loaded into xmm0 and xmm1
  // have int32 values.
  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                        Label* non_int32,
                                        Register scratch);
};
449
450
451// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
452// is faster than using the built-in instructions on floating point registers.
453// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
454// trashed registers.
static void IntegerConvert(MacroAssembler* masm,
                           Register source,
                           bool use_sse3,
                           Label* conversion_failure) {
  // Truncates the heap number in 'source' to an untagged int32 in ecx
  // (see the comment above: trashes ebx and edi; source must not be ecx,
  // ebx or edi). With SSE3 the hardware FISTTP instruction is used; the
  // fallback extracts the mantissa and shifts it by hand.
  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
  Label done, right_exponent, normal_exponent;
  Register scratch = ebx;
  Register scratch2 = edi;
  // Get exponent word.
  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  if (use_sse3) {
    CpuFeatures::Scope scope(SSE3);
    // Check whether the exponent is too big for a 64 bit signed integer.
    static const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
    __ j(greater_equal, conversion_failure);
    // Load x87 register with heap number.
    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
    // Reserve space for 64 bit answer.
    __ sub(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
    // Do conversion (truncate-toward-zero), which cannot fail because we
    // checked the exponent.
    __ fisttp_d(Operand(esp, 0));
    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
    __ add(Operand(esp), Immediate(sizeof(uint64_t)));  // Nolint.
  } else {
    // Load ecx with zero.  We use this either for the final shift or
    // for the answer.
    __ xor_(ecx, Operand(ecx));
    // Check whether the exponent matches a 32 bit signed int that cannot be
    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
    // exponent is 30 (biased).  This is the exponent that we are fastest at and
    // also the highest exponent we can handle here.
    const uint32_t non_smi_exponent =
        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
    __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
    // If we have a match of the int32-but-not-Smi exponent then skip some
    // logic.
    __ j(equal, &right_exponent, Label::kNear);
    // If the exponent is higher than that then go to slow case.  This catches
    // numbers that don't fit in a signed int32, infinities and NaNs.
    __ j(less, &normal_exponent, Label::kNear);

    {
      // Handle a big exponent.  The only reason we have this code is that the
      // >>> operator has a tendency to generate numbers with an exponent of 31.
      const uint32_t big_non_smi_exponent =
          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
      __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
      __ j(not_equal, conversion_failure);
      // We have the big exponent, typically from >>>.  This means the number is
      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
      __ mov(scratch2, scratch);
      __ and_(scratch2, HeapNumber::kMantissaMask);
      // Put back the implicit 1.
      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
      // Shift up the mantissa bits to take up the space the exponent used to
      // take. We just orred in the implicit bit so that took care of one and
      // we want to use the full unsigned range so we subtract 1 bit from the
      // shift distance.
      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
      __ shl(scratch2, big_shift_distance);
      // Get the second half of the double.
      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
      // Shift down 21 bits to get the most significant 11 bits or the low
      // mantissa word.
      __ shr(ecx, 32 - big_shift_distance);
      __ or_(ecx, Operand(scratch2));
      // We have the answer in ecx, but we may need to negate it.
      // The sign bit is still in the original exponent word in scratch.
      __ test(scratch, Operand(scratch));
      __ j(positive, &done, Label::kNear);
      __ neg(ecx);
      __ jmp(&done, Label::kNear);
    }

    __ bind(&normal_exponent);
    // Exponent word in scratch, exponent part of exponent word in scratch2.
    // Zero in ecx.
    // We know the exponent is smaller than 30 (biased).  If it is less than
    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
    // it rounds to zero.
    const uint32_t zero_exponent =
        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
    __ sub(Operand(scratch2), Immediate(zero_exponent));
    // ecx already has a Smi zero.
    __ j(less, &done, Label::kNear);

    // We have a shifted exponent between 0 and 30 in scratch2.
    // Convert it into a right-shift count for the mantissa: 30 - exponent.
    __ shr(scratch2, HeapNumber::kExponentShift);
    __ mov(ecx, Immediate(30));
    __ sub(ecx, Operand(scratch2));

    __ bind(&right_exponent);
    // Here ecx is the shift, scratch is the exponent word.
    // Get the top bits of the mantissa.
    __ and_(scratch, HeapNumber::kMantissaMask);
    // Put back the implicit 1.
    __ or_(scratch, 1 << HeapNumber::kExponentShift);
    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We have kExponentShift + 1 significant bits int he low end of the
    // word.  Shift them to the top bits.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    __ shl(scratch, shift_distance);
    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
    // Shift down 22 bits to get the most significant 10 bits or the low
    // mantissa word.
    __ shr(scratch2, 32 - shift_distance);
    __ or_(scratch2, Operand(scratch));
    // Move down according to the exponent.
    __ shr_cl(scratch2);
    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
    // we may need to fix the sign.
    Label negative;
    __ xor_(ecx, Operand(ecx));
    // A negative double has its sign bit set, making the exponent word
    // compare below zero.
    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
    __ j(greater, &negative, Label::kNear);
    __ mov(ecx, scratch2);
    __ jmp(&done, Label::kNear);
    __ bind(&negative);
    // ecx is zero here, so 0 - scratch2 negates the magnitude.
    __ sub(ecx, Operand(scratch2));
    __ bind(&done);
  }
}
584
585
586void UnaryOpStub::PrintName(StringStream* stream) {
587  const char* op_name = Token::Name(op_);
588  const char* overwrite_name = NULL;  // Make g++ happy.
589  switch (mode_) {
590    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
591    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
592  }
593  stream->Add("UnaryOpStub_%s_%s_%s",
594              op_name,
595              overwrite_name,
596              UnaryOpIC::GetName(operand_type_));
597}
598
599
600// TODO(svenpanne): Use virtual functions instead of switch.
601void UnaryOpStub::Generate(MacroAssembler* masm) {
602  switch (operand_type_) {
603    case UnaryOpIC::UNINITIALIZED:
604      GenerateTypeTransition(masm);
605      break;
606    case UnaryOpIC::SMI:
607      GenerateSmiStub(masm);
608      break;
609    case UnaryOpIC::HEAP_NUMBER:
610      GenerateHeapNumberStub(masm);
611      break;
612    case UnaryOpIC::GENERIC:
613      GenerateGenericStub(masm);
614      break;
615  }
616}
617
618
// Fall-back path: pass the operand together with this stub's operation,
// overwrite mode and observed operand type to the IC runtime, which
// patches the call site with a more specialized stub.
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.

  __ push(eax);  // the operand
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(mode_)));
  __ push(Immediate(Smi::FromInt(operand_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
634
635
636// TODO(svenpanne): Use virtual functions instead of switch.
637void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
638  switch (op_) {
639    case Token::SUB:
640      GenerateSmiStubSub(masm);
641      break;
642    case Token::BIT_NOT:
643      GenerateSmiStubBitNot(masm);
644      break;
645    default:
646      UNREACHABLE();
647  }
648}
649
650
// Smi-specialized unary minus: the fast path is emitted by
// GenerateSmiCodeSub; on overflow the operand is restored and, like the
// non-smi and -0 cases, the stub transitions to a more general one.
void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
                     Label::kNear, Label::kNear, Label::kNear);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
661
662
// Smi-specialized bitwise-not: emit the fast path and transition to a
// more general stub for non-smi operands.
void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}
669
670
// Emits the smi fast path for unary minus on the operand in eax.
// Jumps to non_smi for a non-smi operand, to slow for zero (whose
// negation, -0, is not representable as a smi), and to undo when the
// subtraction overflows (negating the most negative smi).
void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* undo,
                                     Label* slow,
                                     Label::Distance non_smi_near,
                                     Label::Distance undo_near,
                                     Label::Distance slow_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // We can't handle -0 with smis, so use a type transition for that case.
  __ test(eax, Operand(eax));
  __ j(zero, slow, slow_near);

  // Try optimistic subtraction '0 - value', saving the operand in edx so
  // GenerateSmiCodeUndo can restore it on overflow.
  __ mov(edx, Operand(eax));
  __ Set(eax, Immediate(0));
  __ sub(eax, Operand(edx));
  __ j(overflow, undo, undo_near);
  __ ret(0);
}
692
693
// Emits the smi fast path for bitwise-not on the operand in eax; jumps
// to non_smi for non-smi operands. A smi's ~ never overflows, so there
// is no slow case.
void UnaryOpStub::GenerateSmiCodeBitNot(
    MacroAssembler* masm,
    Label* non_smi,
    Label::Distance non_smi_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // Flip bits and revert inverted smi-tag: not_ also inverts the tag
  // bits, so masking them back off restores a valid smi encoding.
  __ not_(eax);
  __ and_(eax, ~kSmiTagMask);
  __ ret(0);
}
706
707
// Undo an optimistic smi negation by restoring the original operand,
// which GenerateSmiCodeSub stashed in edx, back into eax.
void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
  __ mov(eax, Operand(edx));
}
711
712
713// TODO(svenpanne): Use virtual functions instead of switch.
714void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
715  switch (op_) {
716    case Token::SUB:
717      GenerateHeapNumberStubSub(masm);
718      break;
719    case Token::BIT_NOT:
720      GenerateHeapNumberStubBitNot(masm);
721      break;
722    default:
723      UNREACHABLE();
724  }
725}
726
727
// Heap-number-specialized unary minus: tries the smi fast path first,
// then the heap-number path. Failures of the heap-number path
// transition the stub; a non--0 smi slow case (zero operand) goes
// straight to the generic builtin fallback.
void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}
740
741
// Heap-number-specialized bitwise-not: smi fast path first, then the
// heap-number path; anything else transitions the stub.
void UnaryOpStub::GenerateHeapNumberStubBitNot(
    MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}
751
752
// Negates the heap number in eax by flipping its IEEE-754 sign bit.
// In overwrite mode the operand is mutated in place; otherwise a fresh
// heap number is allocated (via the runtime if new space is full) and
// filled with the sign-flipped value. Jumps to slow for non-heap-number
// operands.
void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, slow);

  if (mode_ == UNARY_OVERWRITE) {
    __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
            Immediate(HeapNumber::kSignMask));  // Flip sign.
  } else {
    __ mov(edx, Operand(eax));
    // edx: operand

    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated, Label::kNear);

    __ bind(&slow_allocate_heapnumber);
    // Preserve the operand across the GC-triggering runtime call.
    __ EnterInternalFrame();
    __ push(edx);
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    __ pop(edx);
    __ LeaveInternalFrame();

    __ bind(&heapnumber_allocated);
    // eax: allocated 'empty' number
    // Copy the value word-by-word, flipping the sign bit in the word
    // that holds exponent and sign.
    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
  }
  __ ret(0);
}
787
788
// Applies JavaScript ~ to the heap number in eax: truncate to int32,
// complement, then return either a smi (when the result fits) or a heap
// number holding the integer result. Jumps to slow for non-heap-number
// operands or failed conversion.
void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
                                               Label* slow) {
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, slow);

  // Convert the heap number in eax to an untagged integer in ecx.
  IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);

  // Do the bitwise operation and check if the result fits in a smi.
  // Values >= 0xc0000000 or < 0x40000000... i.e. outside the 31-bit smi
  // range set the sign flag via this compare.
  Label try_float;
  __ not_(ecx);
  __ cmp(ecx, 0xc0000000);
  __ j(sign, &try_float, Label::kNear);

  // Tag the result as a smi and we're done.
  STATIC_ASSERT(kSmiTagSize == 1);
  __ lea(eax, Operand(ecx, times_2, kSmiTag));
  __ ret(0);

  // Try to store the result in a heap number.
  __ bind(&try_float);
  if (mode_ == UNARY_NO_OVERWRITE) {
    // In overwrite mode the operand's heap number (still in eax) is
    // reused; otherwise allocate a fresh one here.
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ mov(ebx, eax);
    __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    __ EnterInternalFrame();
    // Push the original HeapNumber on the stack. The integer value can't
    // be stored since it's untagged and not in the smi range (so we can't
    // smi-tag it). We'll recalculate the value after the GC instead.
    __ push(ebx);
    __ CallRuntime(Runtime::kNumberAlloc, 0);
    // New HeapNumber is in eax.
    __ pop(edx);
    __ LeaveInternalFrame();
    // IntegerConvert uses ebx and edi as scratch registers.
    // This conversion won't go slow-case.
    IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
    __ not_(ecx);

    __ bind(&heapnumber_allocated);
  }
  // Store the int32 result in ecx into the heap number in eax as a
  // double, using SSE2 when available and the x87 FPU otherwise.
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ cvtsi2sd(xmm0, Operand(ecx));
    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
  } else {
    __ push(ecx);
    __ fild_s(Operand(esp, 0));
    __ pop(ecx);
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  }
  __ ret(0);
}
846
847
848// TODO(svenpanne): Use virtual functions instead of switch.
849void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
850  switch (op_) {
851    case Token::SUB:
852      GenerateGenericStubSub(masm);
853      break;
854    case Token::BIT_NOT:
855      GenerateGenericStubBitNot(masm);
856      break;
857    default:
858      UNREACHABLE();
859  }
860}
861
862
void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm)  {
  // Generic unary minus: smi fast path first, then the heap-number
  // path, finally the generic builtin-call fallback for everything
  // else.  Label order matters: each path falls through to the next.
  Label non_smi, undo, slow;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}
873
874
void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  // Generic bitwise-not: smi fast path first, then the heap-number
  // path, finally the generic builtin-call fallback.
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}
883
884
void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
  // Handle the slow case by jumping to the corresponding JavaScript builtin.
  // The operand in eax is re-pushed as the builtin's argument, under
  // the preserved return address, before the tail call.
  __ pop(ecx);  // pop return address.
  __ push(eax);
  __ push(ecx);  // push return address
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}
901
902
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  // Tail-calls the BinaryOp_Patch IC utility with the two operands
  // (edx, eax) plus this stub's identifying data so the call site can
  // be patched to a more specialized stub.
  __ pop(ecx);  // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,  // Argument count: 2 operands + key + op + type info.
      1);
}
924
925
926// Prepare for a type transition runtime call when the args are already on
927// the stack, under the return address.
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
  // Same as GenerateTypeTransition, but the two operands are already
  // on the stack under the return address, so only the stub data is
  // pushed here.
  __ pop(ecx);  // Save return address.
  // Left and right arguments are already on top of the stack.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,  // Argument count: 2 operands + key + op + type info.
      1);
}
947
948
949void BinaryOpStub::Generate(MacroAssembler* masm) {
950  switch (operands_type_) {
951    case BinaryOpIC::UNINITIALIZED:
952      GenerateTypeTransition(masm);
953      break;
954    case BinaryOpIC::SMI:
955      GenerateSmiStub(masm);
956      break;
957    case BinaryOpIC::INT32:
958      GenerateInt32Stub(masm);
959      break;
960    case BinaryOpIC::HEAP_NUMBER:
961      GenerateHeapNumberStub(masm);
962      break;
963    case BinaryOpIC::ODDBALL:
964      GenerateOddballStub(masm);
965      break;
966    case BinaryOpIC::BOTH_STRING:
967      GenerateBothStringStub(masm);
968      break;
969    case BinaryOpIC::STRING:
970      GenerateStringStub(masm);
971      break;
972    case BinaryOpIC::GENERIC:
973      GenerateGeneric(masm);
974      break;
975    default:
976      UNREACHABLE();
977  }
978}
979
980
981void BinaryOpStub::PrintName(StringStream* stream) {
982  const char* op_name = Token::Name(op_);
983  const char* overwrite_name;
984  switch (mode_) {
985    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
986    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
987    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
988    default: overwrite_name = "UnknownOverwrite"; break;
989  }
990  stream->Add("BinaryOpStub_%s_%s_%s",
991              op_name,
992              overwrite_name,
993              BinaryOpIC::GetName(operands_type_));
994}
995
996
// Emits the fast path for a binary operation on two smi operands.
// On entry the left operand is in edx and the right in eax (for the
// stack-argument ops, copies also live on the stack under the return
// address).  On a non-smi operand, control falls out the bottom with
// the operands (re)placed in edx and eax for the caller's non-smi
// code.  |slow| is taken on hard cases (MOD negative zero, heap
// number allocation failure).  |allow_heapnumber_results| selects
// whether results that overflow the smi range may be returned as
// freshly allocated heap numbers, or must instead bail out through
// the not_smis path.
void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division.  Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op_ == Token::DIV || op_ == Token::MOD) {
    left = eax;
    right = ebx;
    __ mov(ebx, eax);
    __ mov(eax, edx);
  }


  // 2. Prepare the smi check of both operands by oring them together.
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op_) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result.  Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, Operand(left));
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));
      combined = right;
      break;

    default:
      break;
  }

  // 3. Perform the smi check of the operands.
  // A tagged non-smi has its low bit set, which survives the or above.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
  __ JumpIfNotSmi(combined, &not_smis);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op_) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, Operand(left));  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      // SAR of a smi-range value always stays in smi range, so no
      // overflow check is needed here.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      // - 0x80000000: high bit would be lost when smi tagging.
      // - 0x40000000: this number would convert to negative when
      // Smi tagging these two cases can only happen with shifts
      // by 0 or 1 when handed a valid smi.
      __ test(left, Immediate(0xc0000000));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::ADD:
      ASSERT(right.is(eax));
      __ add(right, Operand(left));  // Addition is commutative.
      __ j(overflow, &use_fp_on_smis);
      break;

    case Token::SUB:
      __ sub(left, Operand(right));
      __ j(overflow, &use_fp_on_smis);
      __ mov(eax, left);
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
      // We can't revert the multiplication if the result is not a smi
      // so save the right operand.
      __ mov(ebx, right);
      // Remove tag from one of the operands (but keep sign).
      __ SmiUntag(right);
      // Do multiplication.
      __ imul(right, Operand(left));  // Multiplication is commutative.
      __ j(overflow, &use_fp_on_smis);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
      break;

    case Token::DIV:
      // We can't revert the division if the result is not a smi so
      // save the left operand.
      __ mov(edi, left);
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &use_fp_on_smis);
      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for the corner case of dividing the most negative smi by
      // -1. We cannot use the overflow flag, since it is not set by idiv
      // instruction.
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      __ cmp(eax, 0x40000000);
      __ j(equal, &use_fp_on_smis);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
      // Check that the remainder is zero.
      __ test(edx, Operand(edx));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(eax);
      break;

    case Token::MOD:
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &not_smis);

      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(edx, combined, slow);
      // Move remainder to register eax.
      __ mov(eax, edx);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in eax.  Some operations have registers pushed.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      __ ret(0);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      // These ops had their two arguments pushed by the caller; drop
      // them on return.
      __ ret(2 * kPointerSize);
      break;
    default:
      UNREACHABLE();
  }

  // 6. For some operations emit inline code to perform floating point
  // operations on known smis (e.g., if the result of the operation
  // overflowed the smi range).
  if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
    // Heap number results are not allowed: undo any clobbering done in
    // step 4 so the operands are back in place, then take the non-smi
    // exit.
    __ bind(&use_fp_on_smis);
    switch (op_) {
      // Undo the effects of some operations, and some register moves.
      case Token::SHL:
        // The arguments are saved on the stack, and only used from there.
        break;
      case Token::ADD:
        // Revert right = right + left.
        __ sub(right, Operand(left));
        break;
      case Token::SUB:
        // Revert left = left - right.
        __ add(left, Operand(right));
        break;
      case Token::MUL:
        // Right was clobbered but a copy is in ebx.
        __ mov(right, ebx);
        break;
      case Token::DIV:
        // Left was clobbered but a copy is in edi.  Right is in ebx for
        // division.  They should be in eax, ebx for jump to not_smi.
        __ mov(eax, edi);
        break;
      default:
        // No other operators jump to use_fp_on_smis.
        break;
    }
    __ jmp(&not_smis);
  } else {
    ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
    switch (op_) {
      case Token::SHL:
      case Token::SHR: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // Result we want is in left == edx, so we can put the allocated heap
        // number in eax.
        __ AllocateHeapNumber(eax, ecx, ebx, slow);
        // Store the result in the HeapNumber and return.
        // It's OK to overwrite the arguments on the stack because we
        // are about to return.
        if (op_ == Token::SHR) {
          // SHR result is unsigned; build a 64-bit zero-extended value
          // on the stack and load it as a signed 64-bit integer.
          __ mov(Operand(esp, 1 * kPointerSize), left);
          __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
          __ fild_d(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        } else {
          ASSERT_EQ(Token::SHL, op_);
          if (CpuFeatures::IsSupported(SSE2)) {
            CpuFeatures::Scope use_sse2(SSE2);
            __ cvtsi2sd(xmm0, Operand(left));
            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          } else {
            __ mov(Operand(esp, 1 * kPointerSize), left);
            __ fild_s(Operand(esp, 1 * kPointerSize));
            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          }
        }
        __ ret(2 * kPointerSize);
        break;
      }

      case Token::ADD:
      case Token::SUB:
      case Token::MUL:
      case Token::DIV: {
        Comment perform_float(masm, "-- Perform float operation on smis");
        __ bind(&use_fp_on_smis);
        // Restore arguments to edx, eax.
        switch (op_) {
          case Token::ADD:
            // Revert right = right + left.
            __ sub(right, Operand(left));
            break;
          case Token::SUB:
            // Revert left = left - right.
            __ add(left, Operand(right));
            break;
          case Token::MUL:
            // Right was clobbered but a copy is in ebx.
            __ mov(right, ebx);
            break;
          case Token::DIV:
            // Left was clobbered but a copy is in edi.  Right is in ebx for
            // division.
            __ mov(edx, edi);
            __ mov(eax, right);
            break;
          default: UNREACHABLE();
            break;
        }
        // Allocate the result number in ecx so edx/eax stay intact for
        // the operand loads below.
        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
        } else {  // SSE2 not available, use FPU.
          FloatingPointHelper::LoadFloatSmis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
        }
        __ mov(eax, ecx);
        __ ret(0);
        break;
      }

      default:
        break;
    }
  }

  // 7. Non-smi operands, fall out to the non-smi code with the operands in
  // edx and eax.
  Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
  switch (op_) {
    case Token::BIT_OR:
    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Right operand is saved in ecx and eax was destroyed by the smi
      // check.
      __ mov(eax, ecx);
      break;

    case Token::DIV:
    case Token::MOD:
      // Operands are in eax, ebx at this point.
      __ mov(edx, eax);
      __ mov(eax, ebx);
      break;

    default:
      break;
  }
}
1370
1371
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  // Stub body for the SMI type-feedback state: run the inline smi fast
  // path and transition the IC to a more general state if it fails.
  Label call_runtime;

  // The bit/shift/mod ops expect their arguments on the stack as well
  // as in registers (see GenerateSmiCode's stack-dropping returns).
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      GenerateRegisterArgsPush(masm);
      break;
    default:
      UNREACHABLE();
  }

  // Only allow heap number results once the recorded result type has
  // outgrown SMI; otherwise force a transition so the result type is
  // re-recorded.
  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
  } else {
    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
  }
  __ bind(&call_runtime);
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      GenerateTypeTransition(masm);
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    default:
      UNREACHABLE();
  }
}
1421
1422
void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  // Stub body for the STRING state; only ADD records this state.
  ASSERT(operands_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  // Try to add arguments as strings, otherwise, transition to the generic
  // BinaryOpIC type.
  // NOTE(review): GenerateAddStrings presumably falls through when the
  // operands are not both strings -- confirm in its definition.
  GenerateAddStrings(masm);
  GenerateTypeTransition(masm);
}
1431
1432
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;

  // Test if left operand is a string.
  // (Instance types below FIRST_NONSTRING_TYPE are strings.)
  __ JumpIfSmi(left, &call_runtime, Label::kNear);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime, Label::kNear);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime, Label::kNear);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
  __ j(above_equal, &call_runtime, Label::kNear);

  // Both are strings: hand off to the string-add stub, which skips its
  // own operand checks since they were just performed.
  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}
1461
1462
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  // Stub body for the INT32 state: operands are heap numbers (or smis)
  // known to hold int32-representable values.  Falls back to a type
  // transition when an operand or result does not fit int32, and to
  // the runtime builtins for hard cases.
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::INT32);

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      Label not_floats;
      Label not_int32;
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatures::Scope use_sse2(SSE2);
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
        switch (op_) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
        // Check result type if it is currently Int32.
        // Round-trip double->int32->double; a mismatch (not_zero) or
        // unordered result, i.e. NaN (carry), means it is not int32.
        if (result_type_ <= BinaryOpIC::INT32) {
          __ cvttsd2si(ecx, Operand(xmm0));
          __ cvtsi2sd(xmm2, Operand(ecx));
          __ ucomisd(xmm0, xmm2);
          __ j(not_zero, &not_int32);
          __ j(carry, &not_int32);
        }
        GenerateHeapResultAllocation(masm, &call_runtime);
        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        __ ret(0);
      } else {  // SSE2 not available, use FPU.
        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
        FloatingPointHelper::LoadFloatOperands(
            masm,
            ecx,
            FloatingPointHelper::ARGS_IN_REGISTERS);
        FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
        switch (op_) {
          case Token::ADD: __ faddp(1); break;
          case Token::SUB: __ fsubp(1); break;
          case Token::MUL: __ fmulp(1); break;
          case Token::DIV: __ fdivp(1); break;
          default: UNREACHABLE();
        }
        Label after_alloc_failure;
        GenerateHeapResultAllocation(masm, &after_alloc_failure);
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(0);
        __ bind(&after_alloc_failure);
        // Pop the pending FPU result before going to the runtime.
        __ ffree();
        __ jmp(&call_runtime);
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      GenerateTypeTransition(masm);
      break;
    }

    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      GenerateRegisterArgsPush(masm);
      Label not_floats;
      Label not_int32;
      Label non_smi_result;
      // NOTE(review): disabled SSE2 int32-check variant left in place
      // below; dead code, candidate for removal.
      /*  {
        CpuFeatures::Scope use_sse2(SSE2);
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
        FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
        }*/
      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
                                                  use_sse3_,
                                                  &not_floats);
      FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
                                                        &not_int32);
      // Left operand is in eax, right (shift count) in ecx.
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
        default: UNREACHABLE();
      }
      if (op_ == Token::SHR) {
        // Check if result is non-negative and fits in a smi.
        __ test(eax, Immediate(0xc0000000));
        __ j(not_zero, &call_runtime);
      } else {
        // Check if result fits in a smi.
        __ cmp(eax, 0xc0000000);
        __ j(negative, &non_smi_result, Label::kNear);
      }
      // Tag smi result and return.
      __ SmiTag(eax);
      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.

      // All ops except SHR return a signed int32 that we load in
      // a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, Operand(eax));  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          __ cvtsi2sd(xmm0, Operand(ebx));
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
      }

      __ bind(&not_floats);
      __ bind(&not_int32);
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If an allocation fails, or SHR or MOD hit a hard case,
  // use the runtime system to get the correct result.
  __ bind(&call_runtime);

  switch (op_) {
    case Token::ADD:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      // For the bit/shift ops the arguments were already pushed above.
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}
1662
1663
void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  // Stub body for the ODDBALL state: replace undefined operands with
  // their numeric conversion, then continue as a heap-number operation.
  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateAddStrings(masm);
  }

  Factory* factory = masm->isolate()->factory();

  // Convert odd ball arguments to numbers.
  // undefined converts to 0 for bit ops (xor reg,reg produces smi 0
  // since kSmiTag == 0) and to NaN for the arithmetic ops.
  Label check, done;
  __ cmp(edx, factory->undefined_value());
  __ j(not_equal, &check, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(edx, Operand(edx));
  } else {
    __ mov(edx, Immediate(factory->nan_value()));
  }
  __ jmp(&done, Label::kNear);
  __ bind(&check);
  // Same conversion for the right operand.
  __ cmp(eax, factory->undefined_value());
  __ j(not_equal, &done, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(eax, Operand(eax));
  } else {
    __ mov(eax, Immediate(factory->nan_value()));
  }
  __ bind(&done);

  GenerateHeapNumberStub(masm);
}
1695
1696
// Stub variant for operands recorded as heap numbers.  Computes the result
// in floating point (SSE2 when available, x87 otherwise) for ADD/SUB/MUL/DIV,
// and as untagged int32 for the bitwise/shift ops.  Falls back to a type
// transition when an operand is not a number, and to the runtime builtins
// for allocation failure and the hard SHR/MOD cases.
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label call_runtime;

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      Label not_floats;
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatures::Scope use_sse2(SSE2);
        // Left operand -> xmm0, right operand -> xmm1; jumps to not_floats
        // if either argument is not a number.
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);

        switch (op_) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
        // Box the double result into a (possibly reused) HeapNumber in eax.
        GenerateHeapResultAllocation(masm, &call_runtime);
        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        __ ret(0);
      } else {  // SSE2 not available, use FPU.
        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
        // Push both operands onto the x87 stack.
        FloatingPointHelper::LoadFloatOperands(
            masm,
            ecx,
            FloatingPointHelper::ARGS_IN_REGISTERS);
        switch (op_) {
          case Token::ADD: __ faddp(1); break;
          case Token::SUB: __ fsubp(1); break;
          case Token::MUL: __ fmulp(1); break;
          case Token::DIV: __ fdivp(1); break;
          default: UNREACHABLE();
        }
        Label after_alloc_failure;
        GenerateHeapResultAllocation(masm, &after_alloc_failure);
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(0);
        __ bind(&after_alloc_failure);
        // Drop the pending x87 result before falling back to the runtime.
        __ ffree();
        __ jmp(&call_runtime);
      }

      __ bind(&not_floats);
      GenerateTypeTransition(masm);
      break;
    }

    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      // Save the operands on the stack for a possible type transition
      // or for the 2-argument ret below.
      GenerateRegisterArgsPush(masm);
      Label not_floats;
      Label non_smi_result;
      // Left operand -> edx/eax path inside the helper; on return eax and
      // ecx hold the untagged int32 left/right values.
      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
                                                  use_sse3_,
                                                  &not_floats);
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
        default: UNREACHABLE();
      }
      if (op_ == Token::SHR) {
        // Check if result is non-negative and fits in a smi.
        __ test(eax, Immediate(0xc0000000));
        __ j(not_zero, &call_runtime);
      } else {
        // Check if result fits in a smi.
        __ cmp(eax, 0xc0000000);
        __ j(negative, &non_smi_result, Label::kNear);
      }
      // Tag smi result and return.
      __ SmiTag(eax);
      __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.

      // All ops except SHR return a signed int32 that we load in
      // a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, Operand(eax));  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          __ cvtsi2sd(xmm0, Operand(ebx));
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          // No SSE2: convert the int32 via the FPU, using one of the pushed
          // argument slots as scratch memory.
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
      }

      __ bind(&not_floats);
      GenerateTypeTransitionWithSavedArgs(masm);
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If an allocation fails, or SHR or MOD hit a hard case,
  // use the runtime system to get the correct result.
  __ bind(&call_runtime);

  switch (op_) {
    case Token::ADD:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    // The bit ops already pushed their arguments above, so no
    // GenerateRegisterArgsPush here.
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}
1875
1876
// Generic (untyped) BinaryOpStub: tries smi arithmetic first, then floating
// point / int32 code, and finally falls back to the runtime builtins.
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime;

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->generic_binary_stub_calls(), 1);

  // The bitwise/shift/MOD paths expect their arguments on the stack;
  // push them up front for those ops.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
      break;
    case Token::MOD:
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR:
      GenerateRegisterArgsPush(masm);
      break;
    default:
      UNREACHABLE();
  }

  // Fast path: both operands are smis.
  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

  // Floating point case.
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      Label not_floats;
      if (CpuFeatures::IsSupported(SSE2)) {
        CpuFeatures::Scope use_sse2(SSE2);
        // Left operand -> xmm0, right operand -> xmm1; jumps to not_floats
        // if either argument is not a number.
        FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);

        switch (op_) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
        // Box the double result into a (possibly reused) HeapNumber in eax.
        GenerateHeapResultAllocation(masm, &call_runtime);
        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        __ ret(0);
      } else {  // SSE2 not available, use FPU.
        FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
        // Push both operands onto the x87 stack.
        FloatingPointHelper::LoadFloatOperands(
            masm,
            ecx,
            FloatingPointHelper::ARGS_IN_REGISTERS);
        switch (op_) {
          case Token::ADD: __ faddp(1); break;
          case Token::SUB: __ fsubp(1); break;
          case Token::MUL: __ fmulp(1); break;
          case Token::DIV: __ fdivp(1); break;
          default: UNREACHABLE();
        }
        Label after_alloc_failure;
        GenerateHeapResultAllocation(masm, &after_alloc_failure);
        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        __ ret(0);
        __ bind(&after_alloc_failure);
          // Drop the pending x87 result before falling back to the runtime.
          __ ffree();
          __ jmp(&call_runtime);
      }
        __ bind(&not_floats);
        break;
      }
    case Token::MOD: {
      // For MOD we go directly to runtime in the non-smi case.
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
      case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      Label non_smi_result;
      // On return eax and ecx hold the untagged int32 left/right values;
      // non-numbers go straight to the runtime.
      FloatingPointHelper::LoadUnknownsAsIntegers(masm,
                                                  use_sse3_,
                                                  &call_runtime);
      switch (op_) {
        case Token::BIT_OR:  __ or_(eax, Operand(ecx)); break;
        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
        case Token::SAR: __ sar_cl(eax); break;
        case Token::SHL: __ shl_cl(eax); break;
        case Token::SHR: __ shr_cl(eax); break;
        default: UNREACHABLE();
      }
      if (op_ == Token::SHR) {
        // Check if result is non-negative and fits in a smi.
        __ test(eax, Immediate(0xc0000000));
        __ j(not_zero, &call_runtime);
      } else {
        // Check if result fits in a smi.
        __ cmp(eax, 0xc0000000);
        __ j(negative, &non_smi_result, Label::kNear);
      }
      // Tag smi result and return.
      __ SmiTag(eax);
      __ ret(2 * kPointerSize);  // Drop the arguments from the stack.

      // All ops except SHR return a signed int32 that we load in
      // a HeapNumber.
      if (op_ != Token::SHR) {
        __ bind(&non_smi_result);
        // Allocate a heap number if needed.
        __ mov(ebx, Operand(eax));  // ebx: result
        Label skip_allocation;
        switch (mode_) {
          case OVERWRITE_LEFT:
          case OVERWRITE_RIGHT:
            // If the operand was an object, we skip the
            // allocation of a heap number.
            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                1 * kPointerSize : 2 * kPointerSize));
            __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
            // Fall through!
          case NO_OVERWRITE:
            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
            __ bind(&skip_allocation);
            break;
          default: UNREACHABLE();
        }
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          __ cvtsi2sd(xmm0, Operand(ebx));
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          // No SSE2: convert the int32 via the FPU, using one of the pushed
          // argument slots as scratch memory.
          __ mov(Operand(esp, 1 * kPointerSize), ebx);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        __ ret(2 * kPointerSize);
      }
      break;
    }
    default: UNREACHABLE(); break;
  }

  // If all else fails, use the runtime system to get the correct
  // result.
  __ bind(&call_runtime);
  switch (op_) {
    case Token::ADD: {
      // ADD may still be a string concatenation; try that before the
      // builtin.
      GenerateAddStrings(masm);
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    }
    case Token::SUB:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      GenerateRegisterArgsPush(masm);
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    // MOD and the bit ops pushed their arguments at the top of this
    // function, so no GenerateRegisterArgsPush here.
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}
2071
2072
2073void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2074  ASSERT(op_ == Token::ADD);
2075  Label left_not_string, call_runtime;
2076
2077  // Registers containing left and right operands respectively.
2078  Register left = edx;
2079  Register right = eax;
2080
2081  // Test if left operand is a string.
2082  __ JumpIfSmi(left, &left_not_string, Label::kNear);
2083  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
2084  __ j(above_equal, &left_not_string, Label::kNear);
2085
2086  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2087  GenerateRegisterArgsPush(masm);
2088  __ TailCallStub(&string_add_left_stub);
2089
2090  // Left operand is not a string, test right.
2091  __ bind(&left_not_string);
2092  __ JumpIfSmi(right, &call_runtime, Label::kNear);
2093  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
2094  __ j(above_equal, &call_runtime, Label::kNear);
2095
2096  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2097  GenerateRegisterArgsPush(masm);
2098  __ TailCallStub(&string_add_right_stub);
2099
2100  // Neither argument is a string.
2101  __ bind(&call_runtime);
2102}
2103
2104
// Ensures eax holds a HeapNumber that may receive the result.  Depending on
// the overwrite mode, either reuses one of the operand heap numbers (edx for
// OVERWRITE_LEFT, eax for OVERWRITE_RIGHT) or allocates a fresh one, jumping
// to alloc_failure if allocation fails.  eax and edx stay intact for a
// possible runtime call until the reuse/allocation decision is made.
void BinaryOpStub::GenerateHeapResultAllocation(
    MacroAssembler* masm,
    Label* alloc_failure) {
  Label skip_allocation;
  OverwriteMode mode = mode_;
  switch (mode) {
    case OVERWRITE_LEFT: {
      // If the argument in edx is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
      // Allocate a heap number for the result. Keep eax and edx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now edx can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ mov(edx, Operand(ebx));
      __ bind(&skip_allocation);
      // Use object in edx as a result holder
      __ mov(eax, Operand(edx));
      break;
    }
    case OVERWRITE_RIGHT:
      // If the argument in eax is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
      // Fall through!
    case NO_OVERWRITE:
      // Allocate a heap number for the result. Keep eax and edx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now eax can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ mov(eax, ebx);
      __ bind(&skip_allocation);
      break;
    default: UNREACHABLE();
  }
}
2143
2144
// Pushes the two operand registers (edx, eax) below the return address,
// so that they become stack arguments for a builtin or stub call.
// Clobbers ecx.
void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ pop(ecx);   // Pop return address.
  __ push(edx);  // Push left operand.
  __ push(eax);  // Push right operand.
  __ push(ecx);  // Re-push return address on top.
}
2151
2152
// Computes a transcendental function (sin/cos/log, per type_) with a lookup
// in the transcendental cache.  On a cache hit the cached HeapNumber is
// returned; on a miss the value is computed, stored in the cache, and
// returned.  Falls back to the runtime for non-number input or failed
// allocation.
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // TAGGED case:
  //   Input:
  //     esp[4]: tagged number input argument (should be number).
  //     esp[0]: return address.
  //   Output:
  //     eax: tagged double result.
  // UNTAGGED case:
  //   Input::
  //     esp[0]: return address.
  //     xmm1: untagged double input argument
  //   Output:
  //     xmm1: untagged double result.

  Label runtime_call;
  Label runtime_call_clear_stack;
  Label skip_cache;
  const bool tagged = (argument_type_ == TAGGED);
  if (tagged) {
    // Test that eax is a number.
    Label input_not_smi;
    Label loaded;
    __ mov(eax, Operand(esp, kPointerSize));
    __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
    // Input is a smi. Untag and load it onto the FPU stack.
    // Then load the low and high words of the double into ebx, edx.
    STATIC_ASSERT(kSmiTagSize == 1);
    __ sar(eax, 1);
    __ sub(Operand(esp), Immediate(2 * kPointerSize));
    __ mov(Operand(esp, 0), eax);
    __ fild_s(Operand(esp, 0));
    __ fst_d(Operand(esp, 0));
    __ pop(edx);
    __ pop(ebx);
    __ jmp(&loaded, Label::kNear);
    __ bind(&input_not_smi);
    // Check if input is a HeapNumber.
    __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
    Factory* factory = masm->isolate()->factory();
    __ cmp(Operand(ebx), Immediate(factory->heap_number_map()));
    __ j(not_equal, &runtime_call);
    // Input is a HeapNumber. Push it on the FPU stack and load its
    // low and high words into ebx, edx.
    __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
    __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
    __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));

    __ bind(&loaded);
  } else {  // UNTAGGED.
    // Extract the two 32-bit halves of xmm1 into edx (high) and ebx (low).
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatures::Scope sse4_scope(SSE4_1);
      __ pextrd(Operand(edx), xmm1, 0x1);  // copy xmm1[63..32] to edx.
    } else {
      __ pshufd(xmm0, xmm1, 0x1);
      __ movd(Operand(edx), xmm0);
    }
    __ movd(Operand(ebx), xmm1);
  }

  // ST[0] or xmm1  == double value
  // ebx = low 32 bits of double value
  // edx = high 32 bits of double value
  // Compute hash (the shifts are arithmetic):
  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
  __ mov(ecx, ebx);
  __ xor_(ecx, Operand(edx));
  __ mov(eax, ecx);
  __ sar(eax, 16);
  __ xor_(ecx, Operand(eax));
  __ mov(eax, ecx);
  __ sar(eax, 8);
  __ xor_(ecx, Operand(eax));
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
  __ and_(Operand(ecx),
          Immediate(TranscendentalCache::SubCache::kCacheSize - 1));

  // ST[0] or xmm1 == double value.
  // ebx = low 32 bits of double value.
  // edx = high 32 bits of double value.
  // ecx = TranscendentalCache::hash(double value).
  ExternalReference cache_array =
      ExternalReference::transcendental_cache_array_address(masm->isolate());
  __ mov(eax, Immediate(cache_array));
  int cache_array_index =
      type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
  __ mov(eax, Operand(eax, cache_array_index));
  // Eax points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ test(eax, Operand(eax));
  __ j(zero, &runtime_call_clear_stack);
#ifdef DEBUG
  // Check that the layout of cache elements match expectations.
  { TranscendentalCache::SubCache::Element test_elem[2];
    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
    CHECK_EQ(0, elem_in0 - elem_start);
    CHECK_EQ(kIntSize, elem_in1 - elem_start);
    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
  }
#endif
  // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
  __ lea(ecx, Operand(eax, ecx, times_4, 0));
  // Check if cache matches: Double value is stored in uint32_t[2] array.
  Label cache_miss;
  __ cmp(ebx, Operand(ecx, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  __ cmp(edx, Operand(ecx, kIntSize));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Cache hit!
  __ mov(eax, Operand(ecx, 2 * kIntSize));
  if (tagged) {
    // Drop the input value from the x87 stack and return the cached
    // HeapNumber.
    __ fstp(0);
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
    __ Ret();
  }

  __ bind(&cache_miss);
  // Update cache with new value.
  // We are short on registers, so use no_reg as scratch.
  // This gives slightly larger code.
  if (tagged) {
    __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
  } else {  // UNTAGGED.
    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
    // Move the input from xmm1 onto the x87 stack for GenerateOperation.
    __ sub(Operand(esp), Immediate(kDoubleSize));
    __ movdbl(Operand(esp, 0), xmm1);
    __ fld_d(Operand(esp, 0));
    __ add(Operand(esp), Immediate(kDoubleSize));
  }
  GenerateOperation(masm);
  // Store input words and the result HeapNumber into the cache entry.
  __ mov(Operand(ecx, 0), ebx);
  __ mov(Operand(ecx, kIntSize), edx);
  __ mov(Operand(ecx, 2 * kIntSize), eax);
  __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  if (tagged) {
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
    __ Ret();

    // Skip cache and return answer directly, only in untagged case.
    __ bind(&skip_cache);
    __ sub(Operand(esp), Immediate(kDoubleSize));
    __ movdbl(Operand(esp, 0), xmm1);
    __ fld_d(Operand(esp, 0));
    GenerateOperation(masm);
    __ fstp_d(Operand(esp, 0));
    __ movdbl(xmm1, Operand(esp, 0));
    __ add(Operand(esp), Immediate(kDoubleSize));
    // We return the value in xmm1 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    __ EnterInternalFrame();
    // Allocate an unused object bigger than a HeapNumber.
    __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    __ LeaveInternalFrame();
    __ Ret();
  }

  // Call runtime, doing whatever allocation and cleanup is necessary.
  if (tagged) {
    __ bind(&runtime_call_clear_stack);
    // Drop the input value from the x87 stack before the tail call.
    __ fstp(0);
    __ bind(&runtime_call);
    ExternalReference runtime =
        ExternalReference(RuntimeFunction(), masm->isolate());
    __ TailCallExternalReference(runtime, 1, 1);
  } else {  // UNTAGGED.
    __ bind(&runtime_call_clear_stack);
    __ bind(&runtime_call);
    // Box the xmm1 argument, call the runtime function, and unbox the
    // result back into xmm1.
    __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
    __ EnterInternalFrame();
    __ push(eax);
    __ CallRuntime(RuntimeFunction(), 1);
    __ LeaveInternalFrame();
    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
    __ Ret();
  }
}
2340
2341
2342Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2343  switch (type_) {
2344    case TranscendentalCache::SIN: return Runtime::kMath_sin;
2345    case TranscendentalCache::COS: return Runtime::kMath_cos;
2346    case TranscendentalCache::LOG: return Runtime::kMath_log;
2347    default:
2348      UNIMPLEMENTED();
2349      return Runtime::kAbort;
2350  }
2351}
2352
2353
// Applies the transcendental operation (per type_) to the value on top of
// the x87 stack, leaving the result there.  For SIN/COS, first reduces the
// argument into fsin/fcos range using fprem1, and produces NaN for
// infinities and NaN inputs.
void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
  // Only free register is edi.
  // Input value is on FP stack, and also in ebx/edx.
  // Input value is possibly in xmm1.
  // Address of result (a newly allocated HeapNumber) may be in eax.
  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
    // Both fsin and fcos require arguments in the range +/-2^63 and
    // return NaN for infinities and NaN. They can share all code except
    // the actual fsin/fcos operation.
    Label in_range, done;
    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
    // work. We must reduce it to the appropriate range.
    __ mov(edi, edx);
    __ and_(Operand(edi), Immediate(0x7ff00000));  // Exponent only.
    int supported_exponent_limit =
        (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
    __ cmp(Operand(edi), Immediate(supported_exponent_limit));
    __ j(below, &in_range, Label::kNear);
    // Check for infinity and NaN. Both return NaN for sin.
    __ cmp(Operand(edi), Immediate(0x7ff00000));
    Label non_nan_result;
    __ j(not_equal, &non_nan_result, Label::kNear);
    // Input is +/-Infinity or NaN. Result is NaN.
    __ fstp(0);
    // NaN is represented by 0x7ff8000000000000.
    __ push(Immediate(0x7ff80000));
    __ push(Immediate(0));
    __ fld_d(Operand(esp, 0));
    __ add(Operand(esp), Immediate(2 * kPointerSize));
    __ jmp(&done, Label::kNear);

    __ bind(&non_nan_result);

    // Use fpmod to restrict argument to the range +/-2*PI.
    __ mov(edi, eax);  // Save eax before using fnstsw_ax.
    __ fldpi();
    __ fadd(0);  // 2*pi on the stack.
    __ fld(1);
    // FPU Stack: input, 2*pi, input.
    {
      Label no_exceptions;
      __ fwait();
      __ fnstsw_ax();
      // Clear if Illegal Operand or Zero Division exceptions are set.
      __ test(Operand(eax), Immediate(5));
      __ j(zero, &no_exceptions, Label::kNear);
      __ fnclex();
      __ bind(&no_exceptions);
    }

    // Compute st(0) % st(1)
    {
      Label partial_remainder_loop;
      __ bind(&partial_remainder_loop);
      __ fprem1();
      __ fwait();
      __ fnstsw_ax();
      __ test(Operand(eax), Immediate(0x400 /* C2 */));
      // If C2 is set, computation only has partial result. Loop to
      // continue computation.
      __ j(not_zero, &partial_remainder_loop);
    }
    // FPU Stack: input, 2*pi, input % 2*pi
    __ fstp(2);  // Replace original input with the reduced value.
    __ fstp(0);  // Pop 2*pi.
    __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).

    // FPU Stack: input % 2*pi
    __ bind(&in_range);
    switch (type_) {
      case TranscendentalCache::SIN:
        __ fsin();
        break;
      case TranscendentalCache::COS:
        __ fcos();
        break;
      default:
        UNREACHABLE();
    }
    __ bind(&done);
  } else {
    // Natural log: ln(x) = ln(2) * log2(x), computed via fyl2x.
    ASSERT(type_ == TranscendentalCache::LOG);
    __ fldln2();
    __ fxch();
    __ fyl2x();
  }
}
2441
2442
2443// Input: edx, eax are the left and right objects of a bit op.
2444// Output: eax, ecx are left and right integers for a bit op.
2445void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
2446                                                 bool use_sse3,
2447                                                 Label* conversion_failure) {
2448  // Check float operands.
2449  Label arg1_is_object, check_undefined_arg1;
2450  Label arg2_is_object, check_undefined_arg2;
2451  Label load_arg2, done;
2452
2453  // Test if arg1 is a Smi.
2454  __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
2455
2456  __ SmiUntag(edx);
2457  __ jmp(&load_arg2);
2458
2459  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2460  __ bind(&check_undefined_arg1);
2461  Factory* factory = masm->isolate()->factory();
2462  __ cmp(edx, factory->undefined_value());
2463  __ j(not_equal, conversion_failure);
2464  __ mov(edx, Immediate(0));
2465  __ jmp(&load_arg2);
2466
2467  __ bind(&arg1_is_object);
2468  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2469  __ cmp(ebx, factory->heap_number_map());
2470  __ j(not_equal, &check_undefined_arg1);
2471
2472  // Get the untagged integer version of the edx heap number in ecx.
2473  IntegerConvert(masm, edx, use_sse3, conversion_failure);
2474  __ mov(edx, ecx);
2475
2476  // Here edx has the untagged integer, eax has a Smi or a heap number.
2477  __ bind(&load_arg2);
2478
2479  // Test if arg2 is a Smi.
2480  __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
2481
2482  __ SmiUntag(eax);
2483  __ mov(ecx, eax);
2484  __ jmp(&done);
2485
2486  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2487  __ bind(&check_undefined_arg2);
2488  __ cmp(eax, factory->undefined_value());
2489  __ j(not_equal, conversion_failure);
2490  __ mov(ecx, Immediate(0));
2491  __ jmp(&done);
2492
2493  __ bind(&arg2_is_object);
2494  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2495  __ cmp(ebx, factory->heap_number_map());
2496  __ j(not_equal, &check_undefined_arg2);
2497
2498  // Get the untagged integer version of the eax heap number in ecx.
2499  IntegerConvert(masm, eax, use_sse3, conversion_failure);
2500  __ bind(&done);
2501  __ mov(eax, edx);
2502}
2503
2504
// Intentionally a no-op on ia32: emits no code and never jumps to not_int32.
// NOTE(review): presumably kept for interface parity with other ports where
// the loaded integers can exceed int32 range -- confirm against the other
// architecture versions of this helper.
void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                                       bool use_sse3,
                                                       Label* not_int32) {
  return;
}
2510
2511
// Pushes the value held in |number| (a smi or a heap number) onto the x87
// FPU stack.  Note: in the smi case |number| is left holding the untagged
// integer value afterwards (the push/pop round-trips the untagged value).
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
                                           Register number) {
  Label load_smi, done;

  __ JumpIfSmi(number, &load_smi, Label::kNear);
  // Heap number: load its double payload directly.
  __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
  __ jmp(&done, Label::kNear);

  __ bind(&load_smi);
  // Smi: untag, then convert the int32 via memory with fild_s.
  __ SmiUntag(number);
  __ push(number);
  __ fild_s(Operand(esp, 0));
  __ pop(number);

  __ bind(&done);
}
2528
2529
// Loads the operands edx and eax (each a smi or a heap number) into xmm0 and
// xmm1 respectively.  Both registers keep their original tagged values on
// exit.  This variant assumes both operands are numbers (no type check).
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
  Label load_smi_edx, load_eax, load_smi_eax, done;
  // Load operand in edx into xmm0.
  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));

  __ bind(&load_eax);
  // Load operand in eax into xmm1.
  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
  __ jmp(&done, Label::kNear);

  __ bind(&load_smi_edx);
  __ SmiUntag(edx);  // Untag smi before converting to float.
  __ cvtsi2sd(xmm0, Operand(edx));
  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
  __ jmp(&load_eax);

  __ bind(&load_smi_eax);
  __ SmiUntag(eax);  // Untag smi before converting to float.
  __ cvtsi2sd(xmm1, Operand(eax));
  __ SmiTag(eax);  // Retag smi for heap number overwriting test.

  __ bind(&done);
}
2555
2556
// Loads the operands edx and eax (each a smi or a heap number) into xmm0 and
// xmm1 respectively, jumping to not_numbers if either operand is neither a
// smi nor a heap number.  Both registers keep their tagged values on exit.
void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
                                           Label* not_numbers) {
  Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
  // Load operand in edx into xmm0, or branch to not_numbers.
  __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
  Factory* factory = masm->isolate()->factory();
  __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
  __ j(not_equal, not_numbers);  // Argument in edx is not a number.
  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
  __ bind(&load_eax);
  // Load operand in eax into xmm1, or branch to not_numbers.
  __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
  __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
  __ j(equal, &load_float_eax, Label::kNear);
  __ jmp(not_numbers);  // Argument in eax is not a number.
  __ bind(&load_smi_edx);
  __ SmiUntag(edx);  // Untag smi before converting to float.
  __ cvtsi2sd(xmm0, Operand(edx));
  __ SmiTag(edx);  // Retag smi for heap number overwriting test.
  __ jmp(&load_eax);
  __ bind(&load_smi_eax);
  __ SmiUntag(eax);  // Untag smi before converting to float.
  __ cvtsi2sd(xmm1, Operand(eax));
  __ SmiTag(eax);  // Retag smi for heap number overwriting test.
  __ jmp(&done, Label::kNear);
  __ bind(&load_float_eax);
  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
  __ bind(&done);
}
2586
2587
// Loads the smi operands edx (left) and eax (right) into xmm0 and xmm1.
// Both inputs must already be known to be smis.  |scratch| is clobbered;
// the operand registers themselves are preserved (only the scratch copy
// is untagged).
void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
                                       Register scratch) {
  const Register left = edx;
  const Register right = eax;
  __ mov(scratch, left);
  // NOTE(review): this ASSERT guards the mov above (scratch == right
  // would have clobbered |right| before it is read below).  It would
  // read more naturally placed before the mov, but as a codegen-time
  // C++ check its position does not affect the emitted code.
  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
  __ SmiUntag(scratch);
  __ cvtsi2sd(xmm0, Operand(scratch));

  __ mov(scratch, right);
  __ SmiUntag(scratch);
  __ cvtsi2sd(xmm1, Operand(scratch));
}
2601
2602
// Branches to |non_int32| unless both xmm0 and xmm1 hold doubles that
// are exactly representable as int32.  Each value is round-tripped
// through cvttsd2si/cvtsi2sd and compared against the original with
// ucomisd: not_zero (ZF clear) catches an inexact value, carry (CF set)
// catches the unordered case.  An out-of-range double makes cvttsd2si
// produce 0x80000000, which also fails the round-trip comparison.
// Clobbers |scratch| and xmm2.
void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                                    Label* non_int32,
                                                    Register scratch) {
  __ cvttsd2si(scratch, Operand(xmm0));
  __ cvtsi2sd(xmm2, Operand(scratch));
  __ ucomisd(xmm0, xmm2);
  __ j(not_zero, non_int32);
  __ j(carry, non_int32);
  __ cvttsd2si(scratch, Operand(xmm1));
  __ cvtsi2sd(xmm2, Operand(scratch));
  __ ucomisd(xmm1, xmm2);
  __ j(not_zero, non_int32);
  __ j(carry, non_int32);
}
2617
2618
// Pushes both operands onto the x87 FPU stack: on exit ST(1) holds the
// first operand (edx or esp[8]) and ST(0) the second (eax or esp[4]),
// selected by |arg_location|.  Operands must already be known to be
// smis or heap numbers; no type check is emitted.  |scratch| is
// clobbered; the operand registers/stack slots are left untouched.
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                            Register scratch,
                                            ArgLocation arg_location) {
  Label load_smi_1, load_smi_2, done_load_1, done;
  if (arg_location == ARGS_IN_REGISTERS) {
    __ mov(scratch, edx);
  } else {
    __ mov(scratch, Operand(esp, 2 * kPointerSize));
  }
  __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ bind(&done_load_1);

  if (arg_location == ARGS_IN_REGISTERS) {
    __ mov(scratch, eax);
  } else {
    __ mov(scratch, Operand(esp, 1 * kPointerSize));
  }
  __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
  __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
  __ jmp(&done, Label::kNear);

  // Out-of-line smi paths: fild_s needs a memory operand, so stage the
  // untagged integer through the stack.
  __ bind(&load_smi_1);
  __ SmiUntag(scratch);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);
  __ jmp(&done_load_1);

  __ bind(&load_smi_2);
  __ SmiUntag(scratch);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);

  __ bind(&done);
}
2656
2657
// Pushes the smi operands edx (left) and eax (right) onto the x87 FPU
// stack: on exit ST(1) holds left and ST(0) holds right.  Both inputs
// must already be known to be smis.  |scratch| is clobbered; edx and
// eax are preserved.  A single machine-stack slot is reused for both
// fild_s loads (push ... mov ... pop).
void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
                                        Register scratch) {
  const Register left = edx;
  const Register right = eax;
  __ mov(scratch, left);
  // Codegen-time check: scratch == right would have clobbered |right|
  // before it is read below.
  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
  __ SmiUntag(scratch);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));

  __ mov(scratch, right);
  __ SmiUntag(scratch);
  // Overwrite the slot in place instead of push/pop-ing a second time.
  __ mov(Operand(esp, 0), scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);
}
2674
2675
// Branches to |non_float| unless both edx and eax are numbers, i.e.
// each is either a smi or a heap number.  Falls through when both are
// numbers.  Clobbers |scratch|; edx and eax are preserved.
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                             Label* non_float,
                                             Register scratch) {
  Label test_other, done;
  // Test if both operands are floats or smi -> scratch=k_is_float;
  // Otherwise scratch = k_not_float.
  __ JumpIfSmi(edx, &test_other, Label::kNear);
  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
  Factory* factory = masm->isolate()->factory();
  __ cmp(scratch, factory->heap_number_map());
  __ j(not_equal, non_float);  // argument in edx is not a number -> NaN

  __ bind(&test_other);
  __ JumpIfSmi(eax, &done, Label::kNear);
  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(scratch, factory->heap_number_map());
  __ j(not_equal, non_float);  // argument in eax is not a number -> NaN

  // Fall-through: Both operands are numbers.
  __ bind(&done);
}
2697
2698
2699void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
2700                                                     Label* non_int32) {
2701  return;
2702}
2703
2704
2705void MathPowStub::Generate(MacroAssembler* masm) {
2706  // Registers are used as follows:
2707  // edx = base
2708  // eax = exponent
2709  // ecx = temporary, result
2710
2711  CpuFeatures::Scope use_sse2(SSE2);
2712  Label allocate_return, call_runtime;
2713
2714  // Load input parameters.
2715  __ mov(edx, Operand(esp, 2 * kPointerSize));
2716  __ mov(eax, Operand(esp, 1 * kPointerSize));
2717
2718  // Save 1 in xmm3 - we need this several times later on.
2719  __ mov(ecx, Immediate(1));
2720  __ cvtsi2sd(xmm3, Operand(ecx));
2721
2722  Label exponent_nonsmi;
2723  Label base_nonsmi;
2724  // If the exponent is a heap number go to that specific case.
2725  __ JumpIfNotSmi(eax, &exponent_nonsmi);
2726  __ JumpIfNotSmi(edx, &base_nonsmi);
2727
2728  // Optimized version when both exponent and base are smis.
2729  Label powi;
2730  __ SmiUntag(edx);
2731  __ cvtsi2sd(xmm0, Operand(edx));
2732  __ jmp(&powi);
2733  // exponent is smi and base is a heapnumber.
2734  __ bind(&base_nonsmi);
2735  Factory* factory = masm->isolate()->factory();
2736  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
2737         factory->heap_number_map());
2738  __ j(not_equal, &call_runtime);
2739
2740  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2741
2742  // Optimized version of pow if exponent is a smi.
2743  // xmm0 contains the base.
2744  __ bind(&powi);
2745  __ SmiUntag(eax);
2746
2747  // Save exponent in base as we need to check if exponent is negative later.
2748  // We know that base and exponent are in different registers.
2749  __ mov(edx, eax);
2750
2751  // Get absolute value of exponent.
2752  Label no_neg;
2753  __ cmp(eax, 0);
2754  __ j(greater_equal, &no_neg, Label::kNear);
2755  __ neg(eax);
2756  __ bind(&no_neg);
2757
2758  // Load xmm1 with 1.
2759  __ movsd(xmm1, xmm3);
2760  Label while_true;
2761  Label no_multiply;
2762
2763  __ bind(&while_true);
2764  __ shr(eax, 1);
2765  __ j(not_carry, &no_multiply, Label::kNear);
2766  __ mulsd(xmm1, xmm0);
2767  __ bind(&no_multiply);
2768  __ mulsd(xmm0, xmm0);
2769  __ j(not_zero, &while_true);
2770
2771  // base has the original value of the exponent - if the exponent  is
2772  // negative return 1/result.
2773  __ test(edx, Operand(edx));
2774  __ j(positive, &allocate_return);
2775  // Special case if xmm1 has reached infinity.
2776  __ mov(ecx, Immediate(0x7FB00000));
2777  __ movd(xmm0, Operand(ecx));
2778  __ cvtss2sd(xmm0, xmm0);
2779  __ ucomisd(xmm0, xmm1);
2780  __ j(equal, &call_runtime);
2781  __ divsd(xmm3, xmm1);
2782  __ movsd(xmm1, xmm3);
2783  __ jmp(&allocate_return);
2784
2785  // exponent (or both) is a heapnumber - no matter what we should now work
2786  // on doubles.
2787  __ bind(&exponent_nonsmi);
2788  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
2789         factory->heap_number_map());
2790  __ j(not_equal, &call_runtime);
2791  __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2792  // Test if exponent is nan.
2793  __ ucomisd(xmm1, xmm1);
2794  __ j(parity_even, &call_runtime);
2795
2796  Label base_not_smi;
2797  Label handle_special_cases;
2798  __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
2799  __ SmiUntag(edx);
2800  __ cvtsi2sd(xmm0, Operand(edx));
2801  __ jmp(&handle_special_cases, Label::kNear);
2802
2803  __ bind(&base_not_smi);
2804  __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
2805         factory->heap_number_map());
2806  __ j(not_equal, &call_runtime);
2807  __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
2808  __ and_(ecx, HeapNumber::kExponentMask);
2809  __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
2810  // base is NaN or +/-Infinity
2811  __ j(greater_equal, &call_runtime);
2812  __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2813
2814  // base is in xmm0 and exponent is in xmm1.
2815  __ bind(&handle_special_cases);
2816  Label not_minus_half;
2817  // Test for -0.5.
2818  // Load xmm2 with -0.5.
2819  __ mov(ecx, Immediate(0xBF000000));
2820  __ movd(xmm2, Operand(ecx));
2821  __ cvtss2sd(xmm2, xmm2);
2822  // xmm2 now has -0.5.
2823  __ ucomisd(xmm2, xmm1);
2824  __ j(not_equal, &not_minus_half, Label::kNear);
2825
2826  // Calculates reciprocal of square root.
2827  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
2828  __ xorps(xmm1, xmm1);
2829  __ addsd(xmm1, xmm0);
2830  __ sqrtsd(xmm1, xmm1);
2831  __ divsd(xmm3, xmm1);
2832  __ movsd(xmm1, xmm3);
2833  __ jmp(&allocate_return);
2834
2835  // Test for 0.5.
2836  __ bind(&not_minus_half);
2837  // Load xmm2 with 0.5.
2838  // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
2839  __ addsd(xmm2, xmm3);
2840  // xmm2 now has 0.5.
2841  __ ucomisd(xmm2, xmm1);
2842  __ j(not_equal, &call_runtime);
2843  // Calculates square root.
2844  // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
2845  __ xorps(xmm1, xmm1);
2846  __ addsd(xmm1, xmm0);
2847  __ sqrtsd(xmm1, xmm1);
2848
2849  __ bind(&allocate_return);
2850  __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
2851  __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
2852  __ mov(eax, ecx);
2853  __ ret(2 * kPointerSize);
2854
2855  __ bind(&call_runtime);
2856  __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
2857}
2858
2859
// Reads the indexed argument of the current function's arguments object.
// On entry edx holds the key and eax the formal parameter count, both
// expected to be smis.  Returns the argument in eax.  Falls back to the
// runtime (Runtime::kGetArgumentsProperty) for a non-smi key or an
// out-of-bounds index.
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in edx and the parameter count is in eax.

  // The displacement is used for skipping the frame pointer on the
  // stack. It is the offset of the last parameter (if any) relative
  // to the frame pointer.
  static const int kDisplacement = 1 * kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(edx, &slow, Label::kNear);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor, Label::kNear);

  // Check index against formal parameters count limit passed in
  // through register eax. Use unsigned comparison to get negative
  // check for free.
  __ cmp(edx, Operand(eax));
  __ j(above_equal, &slow, Label::kNear);

  // Read the argument from the stack and return it.
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
  // eax/edx are smis (value * 2), so times_2 scaling below yields
  // value * kPointerSize.
  __ lea(ebx, Operand(ebp, eax, times_2, 0));
  __ neg(edx);
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
  __ ret(0);

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(edx, Operand(ecx));
  __ j(above_equal, &slow, Label::kNear);

  // Read the argument from the stack and return it.
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
  __ neg(edx);
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
  __ ret(0);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ pop(ebx);  // Return address.
  __ push(edx);
  __ push(ebx);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
2917
2918
// Slow path for materializing a non-strict arguments object: if the
// caller went through an arguments adaptor frame, patches the on-stack
// argument count and parameters pointer to the adaptor frame's values,
// then unconditionally tail-calls Runtime::kNewArgumentsFast.
void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
  // esp[0] : return address
  // esp[4] : number of parameters
  // esp[8] : receiver displacement
  // esp[12] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &runtime, Label::kNear);

  // Patch the arguments.length and the parameters pointer.
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(Operand(esp, 1 * kPointerSize), ecx);
  // ecx is a smi (count * 2), so times_2 yields count * kPointerSize.
  __ lea(edx, Operand(edx, ecx, times_2,
              StandardFrameConstants::kCallerSPOffset));
  __ mov(Operand(esp, 2 * kPointerSize), edx);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
2942
2943
2944void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2945  // esp[0] : return address
2946  // esp[4] : number of parameters (tagged)
2947  // esp[8] : receiver displacement
2948  // esp[12] : function
2949
2950  // ebx = parameter count (tagged)
2951  __ mov(ebx, Operand(esp, 1 * kPointerSize));
2952
2953  // Check if the calling frame is an arguments adaptor frame.
2954  // TODO(rossberg): Factor out some of the bits that are shared with the other
2955  // Generate* functions.
2956  Label runtime;
2957  Label adaptor_frame, try_allocate;
2958  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2959  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
2960  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2961  __ j(equal, &adaptor_frame, Label::kNear);
2962
2963  // No adaptor, parameter count = argument count.
2964  __ mov(ecx, ebx);
2965  __ jmp(&try_allocate, Label::kNear);
2966
2967  // We have an adaptor frame. Patch the parameters pointer.
2968  __ bind(&adaptor_frame);
2969  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
2970  __ lea(edx, Operand(edx, ecx, times_2,
2971                      StandardFrameConstants::kCallerSPOffset));
2972  __ mov(Operand(esp, 2 * kPointerSize), edx);
2973
2974  // ebx = parameter count (tagged)
2975  // ecx = argument count (tagged)
2976  // esp[4] = parameter count (tagged)
2977  // esp[8] = address of receiver argument
2978  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
2979  __ cmp(ebx, Operand(ecx));
2980  __ j(less_equal, &try_allocate, Label::kNear);
2981  __ mov(ebx, ecx);
2982
2983  __ bind(&try_allocate);
2984
2985  // Save mapped parameter count.
2986  __ push(ebx);
2987
2988  // Compute the sizes of backing store, parameter map, and arguments object.
2989  // 1. Parameter map, has 2 extra words containing context and backing store.
2990  const int kParameterMapHeaderSize =
2991      FixedArray::kHeaderSize + 2 * kPointerSize;
2992  Label no_parameter_map;
2993  __ test(ebx, Operand(ebx));
2994  __ j(zero, &no_parameter_map, Label::kNear);
2995  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
2996  __ bind(&no_parameter_map);
2997
2998  // 2. Backing store.
2999  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
3000
3001  // 3. Arguments object.
3002  __ add(Operand(ebx), Immediate(Heap::kArgumentsObjectSize));
3003
3004  // Do the allocation of all three objects in one go.
3005  __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
3006
3007  // eax = address of new object(s) (tagged)
3008  // ecx = argument count (tagged)
3009  // esp[0] = mapped parameter count (tagged)
3010  // esp[8] = parameter count (tagged)
3011  // esp[12] = address of receiver argument
3012  // Get the arguments boilerplate from the current (global) context into edi.
3013  Label has_mapped_parameters, copy;
3014  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3015  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3016  __ mov(ebx, Operand(esp, 0 * kPointerSize));
3017  __ test(ebx, Operand(ebx));
3018  __ j(not_zero, &has_mapped_parameters, Label::kNear);
3019  __ mov(edi, Operand(edi,
3020         Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
3021  __ jmp(&copy, Label::kNear);
3022
3023  __ bind(&has_mapped_parameters);
3024  __ mov(edi, Operand(edi,
3025            Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
3026  __ bind(&copy);
3027
3028  // eax = address of new object (tagged)
3029  // ebx = mapped parameter count (tagged)
3030  // ecx = argument count (tagged)
3031  // edi = address of boilerplate object (tagged)
3032  // esp[0] = mapped parameter count (tagged)
3033  // esp[8] = parameter count (tagged)
3034  // esp[12] = address of receiver argument
3035  // Copy the JS object part.
3036  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3037    __ mov(edx, FieldOperand(edi, i));
3038    __ mov(FieldOperand(eax, i), edx);
3039  }
3040
3041  // Setup the callee in-object property.
3042  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
3043  __ mov(edx, Operand(esp, 4 * kPointerSize));
3044  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3045                      Heap::kArgumentsCalleeIndex * kPointerSize),
3046         edx);
3047
3048  // Use the length (smi tagged) and set that as an in-object property too.
3049  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3050  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3051                      Heap::kArgumentsLengthIndex * kPointerSize),
3052         ecx);
3053
3054  // Setup the elements pointer in the allocated arguments object.
3055  // If we allocated a parameter map, edi will point there, otherwise to the
3056  // backing store.
3057  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
3058  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3059
3060  // eax = address of new object (tagged)
3061  // ebx = mapped parameter count (tagged)
3062  // ecx = argument count (tagged)
3063  // edi = address of parameter map or backing store (tagged)
3064  // esp[0] = mapped parameter count (tagged)
3065  // esp[8] = parameter count (tagged)
3066  // esp[12] = address of receiver argument
3067  // Free a register.
3068  __ push(eax);
3069
3070  // Initialize parameter map. If there are no mapped arguments, we're done.
3071  Label skip_parameter_map;
3072  __ test(ebx, Operand(ebx));
3073  __ j(zero, &skip_parameter_map);
3074
3075  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3076         Immediate(FACTORY->non_strict_arguments_elements_map()));
3077  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
3078  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
3079  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
3080  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
3081  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
3082
3083  // Copy the parameter slots and the holes in the arguments.
3084  // We need to fill in mapped_parameter_count slots. They index the context,
3085  // where parameters are stored in reverse order, at
3086  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
3087  // The mapped parameter thus need to get indices
3088  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
3089  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3090  // We loop from right to left.
3091  Label parameters_loop, parameters_test;
3092  __ push(ecx);
3093  __ mov(eax, Operand(esp, 2 * kPointerSize));
3094  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3095  __ add(ebx, Operand(esp, 4 * kPointerSize));
3096  __ sub(ebx, Operand(eax));
3097  __ mov(ecx, FACTORY->the_hole_value());
3098  __ mov(edx, edi);
3099  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
3100  // eax = loop variable (tagged)
3101  // ebx = mapping index (tagged)
3102  // ecx = the hole value
3103  // edx = address of parameter map (tagged)
3104  // edi = address of backing store (tagged)
3105  // esp[0] = argument count (tagged)
3106  // esp[4] = address of new object (tagged)
3107  // esp[8] = mapped parameter count (tagged)
3108  // esp[16] = parameter count (tagged)
3109  // esp[20] = address of receiver argument
3110  __ jmp(&parameters_test, Label::kNear);
3111
3112  __ bind(&parameters_loop);
3113  __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
3114  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
3115  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
3116  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
3117  __ bind(&parameters_test);
3118  __ test(eax, Operand(eax));
3119  __ j(not_zero, &parameters_loop, Label::kNear);
3120  __ pop(ecx);
3121
3122  __ bind(&skip_parameter_map);
3123
3124  // ecx = argument count (tagged)
3125  // edi = address of backing store (tagged)
3126  // esp[0] = address of new object (tagged)
3127  // esp[4] = mapped parameter count (tagged)
3128  // esp[12] = parameter count (tagged)
3129  // esp[16] = address of receiver argument
3130  // Copy arguments header and remaining slots (if there are any).
3131  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3132         Immediate(FACTORY->fixed_array_map()));
3133  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3134
3135  Label arguments_loop, arguments_test;
3136  __ mov(ebx, Operand(esp, 1 * kPointerSize));
3137  __ mov(edx, Operand(esp, 4 * kPointerSize));
3138  __ sub(Operand(edx), ebx);  // Is there a smarter way to do negative scaling?
3139  __ sub(Operand(edx), ebx);
3140  __ jmp(&arguments_test, Label::kNear);
3141
3142  __ bind(&arguments_loop);
3143  __ sub(Operand(edx), Immediate(kPointerSize));
3144  __ mov(eax, Operand(edx, 0));
3145  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
3146  __ add(Operand(ebx), Immediate(Smi::FromInt(1)));
3147
3148  __ bind(&arguments_test);
3149  __ cmp(ebx, Operand(ecx));
3150  __ j(less, &arguments_loop, Label::kNear);
3151
3152  // Restore.
3153  __ pop(eax);  // Address of arguments object.
3154  __ pop(ebx);  // Parameter count.
3155
3156  // Return and remove the on-stack parameters.
3157  __ ret(3 * kPointerSize);
3158
3159  // Do the runtime call to allocate the arguments object.
3160  __ bind(&runtime);
3161  __ pop(eax);  // Remove saved parameter count.
3162  __ mov(Operand(esp, 1 * kPointerSize), ecx);  // Patch argument count.
3163  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
3164}
3165
3166
// Materializes a strict-mode arguments object (no callee property, no
// mapped parameters): allocates the arguments object and its elements
// array in one new-space allocation and copies the actual arguments in.
// Falls back to Runtime::kNewStrictArgumentsFast on allocation failure.
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // esp[0] : return address
  // esp[4] : number of parameters
  // esp[8] : receiver displacement
  // esp[12] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor_frame, Label::kNear);

  // Get the length from the frame.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));
  __ jmp(&try_allocate, Label::kNear);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(Operand(esp, 1 * kPointerSize), ecx);
  // ecx is a smi (count * 2), so times_2 yields count * kPointerSize.
  __ lea(edx, Operand(edx, ecx, times_2,
                      StandardFrameConstants::kCallerSPOffset));
  __ mov(Operand(esp, 2 * kPointerSize), edx);

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ test(ecx, Operand(ecx));
  __ j(zero, &add_arguments_object, Label::kNear);
  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSizeStrict));

  // Do the allocation of both objects in one go.
  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);

  // Get the arguments boilerplate from the current (global) context.
  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
  const int offset =
      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
  __ mov(edi, Operand(edi, offset));

  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ mov(ebx, FieldOperand(edi, i));
    __ mov(FieldOperand(eax, i), ebx);
  }

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ mov(ecx, Operand(esp, 1 * kPointerSize));
  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                      Heap::kArgumentsLengthIndex * kPointerSize),
         ecx);

  // If there are no actual arguments, we're done.
  Label done;
  __ test(ecx, Operand(ecx));
  __ j(zero, &done, Label::kNear);

  // Get the parameters pointer from the stack.
  __ mov(edx, Operand(esp, 2 * kPointerSize));

  // Setup the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
         Immediate(FACTORY->fixed_array_map()));

  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
  // Untag the length for the loop below.
  __ SmiUntag(ecx);

  // Copy the fixed array slots.  edx walks down the caller's argument
  // area (skipping the receiver) while edi walks up the fixed array.
  Label loop;
  __ bind(&loop);
  __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
  __ add(Operand(edi), Immediate(kPointerSize));
  __ sub(Operand(edx), Immediate(kPointerSize));
  __ dec(ecx);
  __ j(not_zero, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
3262
3263
3264void RegExpExecStub::Generate(MacroAssembler* masm) {
3265  // Just jump directly to runtime if native RegExp is not selected at compile
3266  // time or if regexp entry in generated code is turned off runtime switch or
3267  // at compilation.
3268#ifdef V8_INTERPRETED_REGEXP
3269  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3270#else  // V8_INTERPRETED_REGEXP
3271  if (!FLAG_regexp_entry_native) {
3272    __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3273    return;
3274  }
3275
3276  // Stack frame on entry.
3277  //  esp[0]: return address
3278  //  esp[4]: last_match_info (expected JSArray)
3279  //  esp[8]: previous index
3280  //  esp[12]: subject string
3281  //  esp[16]: JSRegExp object
3282
3283  static const int kLastMatchInfoOffset = 1 * kPointerSize;
3284  static const int kPreviousIndexOffset = 2 * kPointerSize;
3285  static const int kSubjectOffset = 3 * kPointerSize;
3286  static const int kJSRegExpOffset = 4 * kPointerSize;
3287
3288  Label runtime, invoke_regexp;
3289
3290  // Ensure that a RegExp stack is allocated.
3291  ExternalReference address_of_regexp_stack_memory_address =
3292      ExternalReference::address_of_regexp_stack_memory_address(
3293          masm->isolate());
3294  ExternalReference address_of_regexp_stack_memory_size =
3295      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
3296  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3297  __ test(ebx, Operand(ebx));
3298  __ j(zero, &runtime);
3299
3300  // Check that the first argument is a JSRegExp object.
3301  __ mov(eax, Operand(esp, kJSRegExpOffset));
3302  STATIC_ASSERT(kSmiTag == 0);
3303  __ JumpIfSmi(eax, &runtime);
3304  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
3305  __ j(not_equal, &runtime);
3306  // Check that the RegExp has been compiled (data contains a fixed array).
3307  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3308  if (FLAG_debug_code) {
3309    __ test(ecx, Immediate(kSmiTagMask));
3310    __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
3311    __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
3312    __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
3313  }
3314
3315  // ecx: RegExp data (FixedArray)
3316  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3317  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
3318  __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
3319  __ j(not_equal, &runtime);
3320
3321  // ecx: RegExp data (FixedArray)
3322  // Check that the number of captures fit in the static offsets vector buffer.
3323  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3324  // Calculate number of capture registers (number_of_captures + 1) * 2. This
3325  // uses the asumption that smis are 2 * their untagged value.
3326  STATIC_ASSERT(kSmiTag == 0);
3327  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3328  __ add(Operand(edx), Immediate(2));  // edx was a smi.
3329  // Check that the static offsets vector buffer is large enough.
3330  __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
3331  __ j(above, &runtime);
3332
3333  // ecx: RegExp data (FixedArray)
3334  // edx: Number of capture registers
3335  // Check that the second argument is a string.
3336  __ mov(eax, Operand(esp, kSubjectOffset));
3337  __ JumpIfSmi(eax, &runtime);
3338  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
3339  __ j(NegateCondition(is_string), &runtime);
3340  // Get the length of the string to ebx.
3341  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
3342
3343  // ebx: Length of subject string as a smi
3344  // ecx: RegExp data (FixedArray)
3345  // edx: Number of capture registers
3346  // Check that the third argument is a positive smi less than the subject
3347  // string length. A negative value will be greater (unsigned comparison).
3348  __ mov(eax, Operand(esp, kPreviousIndexOffset));
3349  __ JumpIfNotSmi(eax, &runtime);
3350  __ cmp(eax, Operand(ebx));
3351  __ j(above_equal, &runtime);
3352
3353  // ecx: RegExp data (FixedArray)
3354  // edx: Number of capture registers
3355  // Check that the fourth object is a JSArray object.
3356  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3357  __ JumpIfSmi(eax, &runtime);
3358  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
3359  __ j(not_equal, &runtime);
3360  // Check that the JSArray is in fast case.
3361  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3362  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
3363  Factory* factory = masm->isolate()->factory();
3364  __ cmp(eax, factory->fixed_array_map());
3365  __ j(not_equal, &runtime);
3366  // Check that the last match info has space for the capture registers and the
3367  // additional information.
3368  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
3369  __ SmiUntag(eax);
3370  __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
3371  __ cmp(edx, Operand(eax));
3372  __ j(greater, &runtime);
3373
3374  // Reset offset for possibly sliced string.
3375  __ Set(edi, Immediate(0));
3376  // ecx: RegExp data (FixedArray)
3377  // Check the representation and encoding of the subject string.
3378  Label seq_ascii_string, seq_two_byte_string, check_code;
3379  __ mov(eax, Operand(esp, kSubjectOffset));
3380  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3381  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
3382  // First check for flat two byte string.
3383  __ and_(ebx,
3384          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
3385  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
3386  __ j(zero, &seq_two_byte_string, Label::kNear);
3387  // Any other flat string must be a flat ascii string.
3388  __ and_(Operand(ebx),
3389          Immediate(kIsNotStringMask | kStringRepresentationMask));
3390  __ j(zero, &seq_ascii_string, Label::kNear);
3391
3392  // Check for flat cons string or sliced string.
3393  // A flat cons string is a cons string where the second part is the empty
3394  // string. In that case the subject string is just the first part of the cons
3395  // string. Also in this case the first part of the cons string is known to be
3396  // a sequential string or an external string.
3397  // In the case of a sliced string its offset has to be taken into account.
3398  Label cons_string, check_encoding;
3399  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
3400  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
3401  __ cmp(Operand(ebx), Immediate(kExternalStringTag));
3402  __ j(less, &cons_string);
3403  __ j(equal, &runtime);
3404
3405  // String is sliced.
3406  __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
3407  __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
3408  // edi: offset of sliced string, smi-tagged.
3409  // eax: parent string.
3410  __ jmp(&check_encoding, Label::kNear);
3411  // String is a cons string, check whether it is flat.
3412  __ bind(&cons_string);
3413  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
3414  __ j(not_equal, &runtime);
3415  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
3416  __ bind(&check_encoding);
3417  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3418  // eax: first part of cons string or parent of sliced string.
3419  // ebx: map of first part of cons string or map of parent of sliced string.
3420  // Is first part of cons or parent of slice a flat two byte string?
3421  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3422            kStringRepresentationMask | kStringEncodingMask);
3423  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
3424  __ j(zero, &seq_two_byte_string, Label::kNear);
3425  // Any other flat string must be ascii.
3426  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3427            kStringRepresentationMask);
3428  __ j(not_zero, &runtime);
3429
3430  __ bind(&seq_ascii_string);
3431  // eax: subject string (flat ascii)
3432  // ecx: RegExp data (FixedArray)
3433  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
3434  __ Set(ecx, Immediate(1));  // Type is ascii.
3435  __ jmp(&check_code, Label::kNear);
3436
3437  __ bind(&seq_two_byte_string);
3438  // eax: subject string (flat two byte)
3439  // ecx: RegExp data (FixedArray)
3440  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
3441  __ Set(ecx, Immediate(0));  // Type is two byte.
3442
3443  __ bind(&check_code);
3444  // Check that the irregexp code has been generated for the actual string
3445  // encoding. If it has, the field contains a code object otherwise it contains
3446  // a smi (code flushing support).
3447  __ JumpIfSmi(edx, &runtime);
3448
3449  // eax: subject string
3450  // edx: code
3451  // ecx: encoding of subject string (1 if ascii, 0 if two_byte);
3452  // Load used arguments before starting to push arguments for call to native
3453  // RegExp code to avoid handling changing stack height.
3454  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
3455  __ SmiUntag(ebx);  // Previous index from smi.
3456
3457  // eax: subject string
3458  // ebx: previous index
3459  // edx: code
3460  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
3461  // All checks done. Now push arguments for native regexp code.
3462  Counters* counters = masm->isolate()->counters();
3463  __ IncrementCounter(counters->regexp_entry_native(), 1);
3464
3465  // Isolates: note we add an additional parameter here (isolate pointer).
3466  static const int kRegExpExecuteArguments = 8;
3467  __ EnterApiExitFrame(kRegExpExecuteArguments);
3468
3469  // Argument 8: Pass current isolate address.
3470  __ mov(Operand(esp, 7 * kPointerSize),
3471      Immediate(ExternalReference::isolate_address()));
3472
3473  // Argument 7: Indicate that this is a direct call from JavaScript.
3474  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
3475
3476  // Argument 6: Start (high end) of backtracking stack memory area.
3477  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
3478  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3479  __ mov(Operand(esp, 5 * kPointerSize), esi);
3480
3481  // Argument 5: static offsets vector buffer.
3482  __ mov(Operand(esp, 4 * kPointerSize),
3483         Immediate(ExternalReference::address_of_static_offsets_vector(
3484             masm->isolate())));
3485
3486  // Argument 2: Previous index.
3487  __ mov(Operand(esp, 1 * kPointerSize), ebx);
3488
3489  // Argument 1: Original subject string.
3490  // The original subject is in the previous stack frame. Therefore we have to
3491  // use ebp, which points exactly to one pointer size below the previous esp.
3492  // (Because creating a new stack frame pushes the previous ebp onto the stack
3493  // and thereby moves up esp by one kPointerSize.)
3494  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
3495  __ mov(Operand(esp, 0 * kPointerSize), esi);
3496
3497  // esi: original subject string
3498  // eax: underlying subject string
3499  // ebx: previous index
3500  // ecx: encoding of subject string (1 if ascii 0 if two_byte);
3501  // edx: code
3502  // Argument 4: End of string data
3503  // Argument 3: Start of string data
3504  // Prepare start and end index of the input.
3505  // Load the length from the original sliced string if that is the case.
3506  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
3507  __ add(esi, Operand(edi));  // Calculate input end wrt offset.
3508  __ SmiUntag(edi);
3509  __ add(ebx, Operand(edi));  // Calculate input start wrt offset.
3510
3511  // ebx: start index of the input string
3512  // esi: end index of the input string
3513  Label setup_two_byte, setup_rest;
3514  __ test(ecx, Operand(ecx));
3515  __ j(zero, &setup_two_byte, Label::kNear);
3516  __ SmiUntag(esi);
3517  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
3518  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
3519  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
3520  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
3521  __ jmp(&setup_rest, Label::kNear);
3522
3523  __ bind(&setup_two_byte);
3524  STATIC_ASSERT(kSmiTag == 0);
3525  STATIC_ASSERT(kSmiTagSize == 1);  // esi is smi (powered by 2).
3526  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
3527  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
3528  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
3529  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
3530
3531  __ bind(&setup_rest);
3532
3533  // Locate the code entry and call it.
3534  __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
3535  __ call(Operand(edx));
3536
3537  // Drop arguments and come back to JS mode.
3538  __ LeaveApiExitFrame();
3539
3540  // Check the result.
3541  Label success;
3542  __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
3543  __ j(equal, &success);
3544  Label failure;
3545  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
3546  __ j(equal, &failure);
3547  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
3548  // If not exception it can only be retry. Handle that in the runtime system.
3549  __ j(not_equal, &runtime);
3550  // Result must now be exception. If there is no pending exception already a
3551  // stack overflow (on the backtrack stack) was detected in RegExp code but
3552  // haven't created the exception yet. Handle that in the runtime system.
3553  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3554  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
3555                                      masm->isolate());
3556  __ mov(edx,
3557         Operand::StaticVariable(ExternalReference::the_hole_value_location(
3558             masm->isolate())));
3559  __ mov(eax, Operand::StaticVariable(pending_exception));
3560  __ cmp(edx, Operand(eax));
3561  __ j(equal, &runtime);
3562  // For exception, throw the exception again.
3563
3564  // Clear the pending exception variable.
3565  __ mov(Operand::StaticVariable(pending_exception), edx);
3566
3567  // Special handling of termination exceptions which are uncatchable
3568  // by javascript code.
3569  __ cmp(eax, factory->termination_exception());
3570  Label throw_termination_exception;
3571  __ j(equal, &throw_termination_exception, Label::kNear);
3572
3573  // Handle normal exception by following handler chain.
3574  __ Throw(eax);
3575
3576  __ bind(&throw_termination_exception);
3577  __ ThrowUncatchable(TERMINATION, eax);
3578
3579  __ bind(&failure);
3580  // For failure to match, return null.
3581  __ mov(Operand(eax), factory->null_value());
3582  __ ret(4 * kPointerSize);
3583
3584  // Load RegExp data.
3585  __ bind(&success);
3586  __ mov(eax, Operand(esp, kJSRegExpOffset));
3587  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3588  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3589  // Calculate number of capture registers (number_of_captures + 1) * 2.
3590  STATIC_ASSERT(kSmiTag == 0);
3591  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3592  __ add(Operand(edx), Immediate(2));  // edx was a smi.
3593
3594  // edx: Number of capture registers
3595  // Load last_match_info which is still known to be a fast case JSArray.
3596  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3597  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3598
3599  // ebx: last_match_info backing store (FixedArray)
3600  // edx: number of capture registers
3601  // Store the capture count.
3602  __ SmiTag(edx);  // Number of capture registers to smi.
3603  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
3604  __ SmiUntag(edx);  // Number of capture registers back from smi.
3605  // Store last subject and last input.
3606  __ mov(eax, Operand(esp, kSubjectOffset));
3607  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
3608  __ mov(ecx, ebx);
3609  __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
3610  __ mov(eax, Operand(esp, kSubjectOffset));
3611  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
3612  __ mov(ecx, ebx);
3613  __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
3614
3615  // Get the static offsets vector filled by the native regexp code.
3616  ExternalReference address_of_static_offsets_vector =
3617      ExternalReference::address_of_static_offsets_vector(masm->isolate());
3618  __ mov(ecx, Immediate(address_of_static_offsets_vector));
3619
3620  // ebx: last_match_info backing store (FixedArray)
3621  // ecx: offsets vector
3622  // edx: number of capture registers
3623  Label next_capture, done;
3624  // Capture register counter starts from number of capture registers and
3625  // counts down until wraping after zero.
3626  __ bind(&next_capture);
3627  __ sub(Operand(edx), Immediate(1));
3628  __ j(negative, &done, Label::kNear);
3629  // Read the value from the static offsets vector buffer.
3630  __ mov(edi, Operand(ecx, edx, times_int_size, 0));
3631  __ SmiTag(edi);
3632  // Store the smi value in the last match info.
3633  __ mov(FieldOperand(ebx,
3634                      edx,
3635                      times_pointer_size,
3636                      RegExpImpl::kFirstCaptureOffset),
3637                      edi);
3638  __ jmp(&next_capture);
3639  __ bind(&done);
3640
3641  // Return last match info.
3642  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3643  __ ret(4 * kPointerSize);
3644
3645  // Do the runtime call to execute the regexp.
3646  __ bind(&runtime);
3647  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3648#endif  // V8_INTERPRETED_REGEXP
3649}
3650
3651
// Builds a JSRegExpResult (a JSArray carrying extra "index" and "input"
// fields) inline in new space when the requested element count is small,
// tail-calling the runtime otherwise.
//
// Stack on entry (above the return address):
//   esp[1 * kPointerSize]: input string (copied into the "input" field)
//   esp[2 * kPointerSize]: match index (copied into the "index" field)
//   esp[3 * kPointerSize]: number of elements, as a smi
void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  // Results longer than this are built by the runtime instead of inline.
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  // Load the length argument; it must be a smi within the inline limit.
  // The unsigned "above" comparison also rejects negative smis.
  __ mov(ebx, Operand(esp, kPointerSize * 3));
  __ JumpIfNotSmi(ebx, &slowcase);
  __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
  __ j(above, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2, so the smi-tagged
  // length scaled by times_half_pointer_size gives the byte size below.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  // Allocate RegExpResult followed by FixedArray with size in ebx.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
                        times_half_pointer_size,
                        ebx,  // In: Number of elements (times 2, being a smi)
                        eax,  // Out: Start of allocation (tagged).
                        ecx,  // Out: End of allocation.
                        edx,  // Scratch register
                        &slowcase,
                        TAG_OBJECT);
  // eax: Start of allocated area, object-tagged.

  // Set JSArray map to global.regexp_result_map().
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
  Factory* factory = masm->isolate()->factory();
  __ mov(ecx, Immediate(factory->empty_fixed_array()));
  // ebx now points at the elements FixedArray, which lives immediately
  // after the JSRegExpResult in the single allocation above.
  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);

  // Set input, index and length fields from arguments.
  __ mov(ecx, Operand(esp, kPointerSize * 1));
  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
  __ mov(ecx, Operand(esp, kPointerSize * 2));
  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
  __ mov(ecx, Operand(esp, kPointerSize * 3));
  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);

  // Fill out the elements FixedArray.
  // eax: JSArray.
  // ebx: FixedArray.
  // ecx: Number of elements in array, as smi.

  // Set map.
  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
         Immediate(factory->fixed_array_map()));
  // Set length.
  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
  // Fill contents of fixed-array with the-hole.
  __ SmiUntag(ecx);
  __ mov(edx, Immediate(factory->the_hole_value()));
  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
  // Fill fixed array elements with hole.
  // eax: JSArray.
  // ecx: Number of elements to fill.
  // ebx: Start of elements in FixedArray.
  // edx: the hole.
  Label loop;
  // Set the flags for the first (possibly zero-trip) loop iteration; the
  // loop itself counts ecx down to zero, storing one hole per element.
  __ test(ecx, Operand(ecx));
  __ bind(&loop);
  __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
  __ sub(Operand(ecx), Immediate(1));
  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
  __ jmp(&loop);

  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Length too large, not a smi, or allocation failed: defer to the runtime.
  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}
3731
3732
// Looks up |object| in the number-string cache.  On a hit, falls through
// with the cached string in |result|; on a miss (or, when |object_is_smi|
// is false, when |object| is neither a smi nor a heap number) jumps to
// |not_found|.  |result|, |scratch1| and |scratch2| are clobbered; note
// that |result| is reused as the cache pointer during the lookup.
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  ExternalReference roots_address =
      ExternalReference::roots_address(masm->isolate());
  __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
  __ mov(number_string_cache,
         Operand::StaticArray(scratch, times_pointer_size, roots_address));
  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
  __ sub(Operand(mask), Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label smi_hash_calculated;
  Label load_result_from_cache;
  if (object_is_smi) {
    // Caller guarantees a smi: the hash is simply the untagged value.
    __ mov(scratch, object);
    __ SmiUntag(scratch);
  } else {
    Label not_smi;
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfNotSmi(object, &not_smi, Label::kNear);
    __ mov(scratch, object);
    __ SmiUntag(scratch);
    __ jmp(&smi_hash_calculated, Label::kNear);
    __ bind(&not_smi);
    // Anything that is not a heap number cannot be in the cache.
    __ cmp(FieldOperand(object, HeapObject::kMapOffset),
           masm->isolate()->factory()->heap_number_map());
    __ j(not_equal, not_found);
    // Hash the double by xoring its two 32-bit halves.
    STATIC_ASSERT(8 == kDoubleSize);
    __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
    // Object is heap number and hash is now in scratch. Calculate cache index.
    __ and_(scratch, Operand(mask));
    Register index = scratch;
    Register probe = mask;
    // Load the cached number slot; a smi there marks an empty entry.
    __ mov(probe,
           FieldOperand(number_string_cache,
                        index,
                        times_twice_pointer_size,
                        FixedArray::kHeaderSize));
    __ JumpIfSmi(probe, not_found);
    // Compare the double values with SSE2 when available, x87 otherwise.
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatures::Scope fscope(SSE2);
      __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
      __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
      __ ucomisd(xmm0, xmm1);
    } else {
      __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
      __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
      __ FCmp();
    }
    __ j(parity_even, not_found);  // Bail out if NaN is involved.
    __ j(not_equal, not_found);  // The cache did not contain this value.
    __ jmp(&load_result_from_cache, Label::kNear);
  }

  __ bind(&smi_hash_calculated);
  // Object is smi and hash is now in scratch. Calculate cache index.
  __ and_(scratch, Operand(mask));
  Register index = scratch;
  // Check if the entry is the smi we are looking for.
  __ cmp(object,
         FieldOperand(number_string_cache,
                      index,
                      times_twice_pointer_size,
                      FixedArray::kHeaderSize));
  __ j(not_equal, not_found);

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  // The string sits in the slot right after the matching number.
  __ mov(result,
         FieldOperand(number_string_cache,
                      index,
                      times_twice_pointer_size,
                      FixedArray::kHeaderSize + kPointerSize));
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->number_to_string_native(), 1);
}
3827
3828
// Converts the number argument on the stack to a string: first probes the
// number-string cache and, on a miss, defers to the runtime.
void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Load the single argument (the number to convert).
  __ mov(ebx, Operand(esp, kPointerSize));

  // Generate code to lookup number in the number string cache.
  // Falls through with the string in eax on a cache hit.
  GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
  __ ret(1 * kPointerSize);

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}
3842
3843
3844static int NegativeComparisonResult(Condition cc) {
3845  ASSERT(cc != equal);
3846  ASSERT((cc == less) || (cc == less_equal)
3847      || (cc == greater) || (cc == greater_equal));
3848  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
3849}
3850
3851void CompareStub::Generate(MacroAssembler* masm) {
3852  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3853
3854  Label check_unequal_objects;
3855
3856  // Compare two smis if required.
3857  if (include_smi_compare_) {
3858    Label non_smi, smi_done;
3859    __ mov(ecx, Operand(edx));
3860    __ or_(ecx, Operand(eax));
3861    __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
3862    __ sub(edx, Operand(eax));  // Return on the result of the subtraction.
3863    __ j(no_overflow, &smi_done, Label::kNear);
3864    __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
3865    __ bind(&smi_done);
3866    __ mov(eax, edx);
3867    __ ret(0);
3868    __ bind(&non_smi);
3869  } else if (FLAG_debug_code) {
3870    __ mov(ecx, Operand(edx));
3871    __ or_(ecx, Operand(eax));
3872    __ test(ecx, Immediate(kSmiTagMask));
3873    __ Assert(not_zero, "Unexpected smi operands.");
3874  }
3875
3876  // NOTICE! This code is only reached after a smi-fast-case check, so
3877  // it is certain that at least one operand isn't a smi.
3878
3879  // Identical objects can be compared fast, but there are some tricky cases
3880  // for NaN and undefined.
3881  {
3882    Label not_identical;
3883    __ cmp(eax, Operand(edx));
3884    __ j(not_equal, &not_identical);
3885
3886    if (cc_ != equal) {
3887      // Check for undefined.  undefined OP undefined is false even though
3888      // undefined == undefined.
3889      Label check_for_nan;
3890      __ cmp(edx, masm->isolate()->factory()->undefined_value());
3891      __ j(not_equal, &check_for_nan, Label::kNear);
3892      __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
3893      __ ret(0);
3894      __ bind(&check_for_nan);
3895    }
3896
3897    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
3898    // so we do the second best thing - test it ourselves.
3899    // Note: if cc_ != equal, never_nan_nan_ is not used.
3900    if (never_nan_nan_ && (cc_ == equal)) {
3901      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
3902      __ ret(0);
3903    } else {
3904      Label heap_number;
3905      __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3906             Immediate(masm->isolate()->factory()->heap_number_map()));
3907      __ j(equal, &heap_number, Label::kNear);
3908      if (cc_ != equal) {
3909        // Call runtime on identical JSObjects.  Otherwise return equal.
3910        __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
3911        __ j(above_equal, &not_identical);
3912      }
3913      __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
3914      __ ret(0);
3915
3916      __ bind(&heap_number);
3917      // It is a heap number, so return non-equal if it's NaN and equal if
3918      // it's not NaN.
3919      // The representation of NaN values has all exponent bits (52..62) set,
3920      // and not all mantissa bits (0..51) clear.
3921      // We only accept QNaNs, which have bit 51 set.
3922      // Read top bits of double representation (second word of value).
3923
3924      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
3925      // all bits in the mask are set. We only need to check the word
3926      // that contains the exponent and high bit of the mantissa.
3927      STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
3928      __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
3929      __ Set(eax, Immediate(0));
3930      // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
3931      // bits.
3932      __ add(edx, Operand(edx));
3933      __ cmp(edx, kQuietNaNHighBitsMask << 1);
3934      if (cc_ == equal) {
3935        STATIC_ASSERT(EQUAL != 1);
3936        __ setcc(above_equal, eax);
3937        __ ret(0);
3938      } else {
3939        Label nan;
3940        __ j(above_equal, &nan, Label::kNear);
3941        __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
3942        __ ret(0);
3943        __ bind(&nan);
3944        __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
3945        __ ret(0);
3946      }
3947    }
3948
3949    __ bind(&not_identical);
3950  }
3951
3952  // Strict equality can quickly decide whether objects are equal.
3953  // Non-strict object equality is slower, so it is handled later in the stub.
3954  if (cc_ == equal && strict_) {
3955    Label slow;  // Fallthrough label.
3956    Label not_smis;
3957    // If we're doing a strict equality comparison, we don't have to do
3958    // type conversion, so we generate code to do fast comparison for objects
3959    // and oddballs. Non-smi numbers and strings still go through the usual
3960    // slow-case code.
3961    // If either is a Smi (we know that not both are), then they can only
3962    // be equal if the other is a HeapNumber. If so, use the slow case.
3963    STATIC_ASSERT(kSmiTag == 0);
3964    ASSERT_EQ(0, Smi::FromInt(0));
3965    __ mov(ecx, Immediate(kSmiTagMask));
3966    __ and_(ecx, Operand(eax));
3967    __ test(ecx, Operand(edx));
3968    __ j(not_zero, &not_smis, Label::kNear);
3969    // One operand is a smi.
3970
3971    // Check whether the non-smi is a heap number.
3972    STATIC_ASSERT(kSmiTagMask == 1);
3973    // ecx still holds eax & kSmiTag, which is either zero or one.
3974    __ sub(Operand(ecx), Immediate(0x01));
3975    __ mov(ebx, edx);
3976    __ xor_(ebx, Operand(eax));
3977    __ and_(ebx, Operand(ecx));  // ebx holds either 0 or eax ^ edx.
3978    __ xor_(ebx, Operand(eax));
3979    // if eax was smi, ebx is now edx, else eax.
3980
3981    // Check if the non-smi operand is a heap number.
3982    __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
3983           Immediate(masm->isolate()->factory()->heap_number_map()));
3984    // If heap number, handle it in the slow case.
3985    __ j(equal, &slow, Label::kNear);
3986    // Return non-equal (ebx is not zero)
3987    __ mov(eax, ebx);
3988    __ ret(0);
3989
3990    __ bind(&not_smis);
3991    // If either operand is a JSObject or an oddball value, then they are not
3992    // equal since their pointers are different
3993    // There is no test for undetectability in strict equality.
3994
3995    // Get the type of the first operand.
3996    // If the first object is a JS object, we have done pointer comparison.
3997    Label first_non_object;
3998    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
3999    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4000    __ j(below, &first_non_object, Label::kNear);
4001
4002    // Return non-zero (eax is not zero)
4003    Label return_not_equal;
4004    STATIC_ASSERT(kHeapObjectTag != 0);
4005    __ bind(&return_not_equal);
4006    __ ret(0);
4007
4008    __ bind(&first_non_object);
4009    // Check for oddballs: true, false, null, undefined.
4010    __ CmpInstanceType(ecx, ODDBALL_TYPE);
4011    __ j(equal, &return_not_equal);
4012
4013    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
4014    __ j(above_equal, &return_not_equal);
4015
4016    // Check for oddballs: true, false, null, undefined.
4017    __ CmpInstanceType(ecx, ODDBALL_TYPE);
4018    __ j(equal, &return_not_equal);
4019
4020    // Fall through to the general case.
4021    __ bind(&slow);
4022  }
4023
4024  // Generate the number comparison code.
4025  if (include_number_compare_) {
4026    Label non_number_comparison;
4027    Label unordered;
4028    if (CpuFeatures::IsSupported(SSE2)) {
4029      CpuFeatures::Scope use_sse2(SSE2);
4030      CpuFeatures::Scope use_cmov(CMOV);
4031
4032      FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
4033      __ ucomisd(xmm0, xmm1);
4034
4035      // Don't base result on EFLAGS when a NaN is involved.
4036      __ j(parity_even, &unordered, Label::kNear);
4037      // Return a result of -1, 0, or 1, based on EFLAGS.
4038      __ mov(eax, 0);  // equal
4039      __ mov(ecx, Immediate(Smi::FromInt(1)));
4040      __ cmov(above, eax, Operand(ecx));
4041      __ mov(ecx, Immediate(Smi::FromInt(-1)));
4042      __ cmov(below, eax, Operand(ecx));
4043      __ ret(0);
4044    } else {
4045      FloatingPointHelper::CheckFloatOperands(
4046          masm, &non_number_comparison, ebx);
4047      FloatingPointHelper::LoadFloatOperand(masm, eax);
4048      FloatingPointHelper::LoadFloatOperand(masm, edx);
4049      __ FCmp();
4050
4051      // Don't base result on EFLAGS when a NaN is involved.
4052      __ j(parity_even, &unordered, Label::kNear);
4053
4054      Label below_label, above_label;
4055      // Return a result of -1, 0, or 1, based on EFLAGS.
4056      __ j(below, &below_label, Label::kNear);
4057      __ j(above, &above_label, Label::kNear);
4058
4059      __ Set(eax, Immediate(0));
4060      __ ret(0);
4061
4062      __ bind(&below_label);
4063      __ mov(eax, Immediate(Smi::FromInt(-1)));
4064      __ ret(0);
4065
4066      __ bind(&above_label);
4067      __ mov(eax, Immediate(Smi::FromInt(1)));
4068      __ ret(0);
4069    }
4070
4071    // If one of the numbers was NaN, then the result is always false.
4072    // The cc is never not-equal.
4073    __ bind(&unordered);
4074    ASSERT(cc_ != not_equal);
4075    if (cc_ == less || cc_ == less_equal) {
4076      __ mov(eax, Immediate(Smi::FromInt(1)));
4077    } else {
4078      __ mov(eax, Immediate(Smi::FromInt(-1)));
4079    }
4080    __ ret(0);
4081
4082    // The number comparison code did not provide a valid result.
4083    __ bind(&non_number_comparison);
4084  }
4085
4086  // Fast negative check for symbol-to-symbol equality.
4087  Label check_for_strings;
4088  if (cc_ == equal) {
4089    BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
4090    BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
4091
4092    // We've already checked for object identity, so if both operands
4093    // are symbols they aren't equal. Register eax already holds a
4094    // non-zero value, which indicates not equal, so just return.
4095    __ ret(0);
4096  }
4097
4098  __ bind(&check_for_strings);
4099
4100  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4101                                         &check_unequal_objects);
4102
4103  // Inline comparison of ascii strings.
4104  if (cc_ == equal) {
4105    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
4106                                                     edx,
4107                                                     eax,
4108                                                     ecx,
4109                                                     ebx);
4110  } else {
4111    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4112                                                       edx,
4113                                                       eax,
4114                                                       ecx,
4115                                                       ebx,
4116                                                       edi);
4117  }
4118#ifdef DEBUG
4119  __ Abort("Unexpected fall-through from string comparison");
4120#endif
4121
4122  __ bind(&check_unequal_objects);
4123  if (cc_ == equal && !strict_) {
4124    // Non-strict equality.  Objects are unequal if
4125    // they are both JSObjects and not undetectable,
4126    // and their pointers are different.
4127    Label not_both_objects;
4128    Label return_unequal;
4129    // At most one is a smi, so we can test for smi by adding the two.
4130    // A smi plus a heap object has the low bit set, a heap object plus
4131    // a heap object has the low bit clear.
4132    STATIC_ASSERT(kSmiTag == 0);
4133    STATIC_ASSERT(kSmiTagMask == 1);
4134    __ lea(ecx, Operand(eax, edx, times_1, 0));
4135    __ test(ecx, Immediate(kSmiTagMask));
4136    __ j(not_zero, &not_both_objects, Label::kNear);
4137    __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4138    __ j(below, &not_both_objects, Label::kNear);
4139    __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
4140    __ j(below, &not_both_objects, Label::kNear);
4141    // We do not bail out after this point.  Both are JSObjects, and
4142    // they are equal if and only if both are undetectable.
4143    // The and of the undetectable flags is 1 if and only if they are equal.
4144    __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4145              1 << Map::kIsUndetectable);
4146    __ j(zero, &return_unequal, Label::kNear);
4147    __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4148              1 << Map::kIsUndetectable);
4149    __ j(zero, &return_unequal, Label::kNear);
4150    // The objects are both undetectable, so they both compare as the value
4151    // undefined, and are equal.
4152    __ Set(eax, Immediate(EQUAL));
4153    __ bind(&return_unequal);
4154    // Return non-equal by returning the non-zero object pointer in eax,
4155    // or return equal if we fell through to here.
4156    __ ret(0);  // rax, rdx were pushed
4157    __ bind(&not_both_objects);
4158  }
4159
4160  // Push arguments below the return address.
4161  __ pop(ecx);
4162  __ push(edx);
4163  __ push(eax);
4164
4165  // Figure out which native to call and setup the arguments.
4166  Builtins::JavaScript builtin;
4167  if (cc_ == equal) {
4168    builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4169  } else {
4170    builtin = Builtins::COMPARE;
4171    __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4172  }
4173
4174  // Restore return address on the stack.
4175  __ push(ecx);
4176
4177  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4178  // tagged as a small integer.
4179  __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4180}
4181
4182
// Jumps to |label| unless |object| is a symbol.  |scratch| is clobbered.
// Used as a fast negative check for symbol-to-symbol equality.
void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
                                    Label* label,
                                    Register object,
                                    Register scratch) {
  // A smi is never a symbol.
  __ JumpIfSmi(object, label);
  // Load the instance type from the object's map.
  __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
  __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
  // Keep only the string and symbol bits of the instance type and require
  // both "is a string" and "is a symbol" to hold.
  __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
  __ cmp(scratch, kSymbolTag | kStringTag);
  __ j(not_equal, label);
}
4194
4195
// Delegate the stack-guard check to the runtime.  Tail-calling means the
// runtime call returns directly to this stub's caller.
void StackCheckStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
}
4199
4200
// Calls the function found on the stack with argc_ arguments.  Handles an
// implicit (hole) receiver, non-function callees, and function proxies.
void CallFunctionStub::Generate(MacroAssembler* masm) {
  Label slow, non_function;

  // The receiver might implicitly be the global object. This is
  // indicated by passing the hole as the receiver to the call
  // function stub.
  if (ReceiverMightBeImplicit()) {
    Label call;
    // Get the receiver from the stack.
    // +1 ~ return address
    __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
    // Call as function is indicated with the hole.
    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
    __ j(not_equal, &call, Label::kNear);
    // Patch the receiver on the stack with the global receiver object.
    __ mov(ebx, GlobalObjectOperand());
    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
    __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ebx);
    __ bind(&call);
  }

  // Get the function to call from the stack.
  // +2 ~ receiver, return address
  __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));

  // Check that the function really is a JavaScript function.
  __ JumpIfSmi(edi, &non_function);
  // Goto slow case if we do not have a function.
  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
  __ j(not_equal, &slow);

  // Fast-case: Just invoke the function.
  ParameterCount actual(argc_);

  if (ReceiverMightBeImplicit()) {
    Label call_as_function;
    // eax still holds the receiver loaded above; the hole indicates the
    // call site had no explicit receiver.
    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
    __ j(equal, &call_as_function);
    __ InvokeFunction(edi,
                      actual,
                      JUMP_FUNCTION,
                      NullCallWrapper(),
                      CALL_AS_METHOD);
    __ bind(&call_as_function);
  }
  __ InvokeFunction(edi,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    CALL_AS_FUNCTION);

  // Slow-case: Non-function called.
  __ bind(&slow);
  // Check for function proxy.  ecx holds the instance type set by the
  // CmpObjectType above.
  __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
  __ j(not_equal, &non_function);
  __ pop(ecx);
  __ push(edi);  // put proxy as additional argument under return address
  __ push(ecx);
  __ Set(eax, Immediate(argc_ + 1));
  __ Set(ebx, Immediate(0));
  __ SetCallKind(ecx, CALL_AS_FUNCTION);
  __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
  {
    Handle<Code> adaptor =
      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
    __ jmp(adaptor, RelocInfo::CODE_TARGET);
  }

  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
  // of the original receiver from the call site).
  __ bind(&non_function);
  __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
  __ Set(eax, Immediate(argc_));
  __ Set(ebx, Immediate(0));
  __ SetCallKind(ecx, CALL_AS_METHOD);
  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
  Handle<Code> adaptor =
      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
  __ jmp(adaptor, RelocInfo::CODE_TARGET);
}
4282
4283
// On ia32 the CEntry stub does not need to be pinned at a fixed address.
bool CEntryStub::NeedsImmovableCode() {
  return false;
}
4287
4288
// Throw a normal (catchable) exception; the exception value is in eax.
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
  __ Throw(eax);
}
4292
4293
// Emit one attempt at calling the C/runtime function in ebx.  On success the
// frame is left and we return to the JS caller.  On failure we either retry
// (RETRY_AFTER_GC, possibly after running a GC when |do_gc| is set on the
// next attempt) or jump to one of the throw labels.  When
// |always_allocate_scope| is set the call runs with the heap's
// always-allocate scope depth incremented.
void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_termination_exception,
                              Label* throw_out_of_memory_exception,
                              bool do_gc,
                              bool always_allocate_scope) {
  // eax: result parameter for PerformGC, if any
  // ebx: pointer to C function  (C callee-saved)
  // ebp: frame pointer  (restored after C call)
  // esp: stack pointer  (restored after C call)
  // edi: number of arguments including receiver  (C callee-saved)
  // esi: pointer to the first argument (C callee-saved)

  // Result returned in eax, or eax+edx if result_size_ is 2.

  // Check stack alignment.
  if (FLAG_debug_code) {
    __ CheckStackAlignment();
  }

  if (do_gc) {
    // Pass failure code returned from last attempt as first argument to
    // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
    // stack alignment is known to be correct. This function takes one argument
    // which is passed on the stack, and we know that the stack has been
    // prepared to pass at least one argument.
    __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
    __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
  }

  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
  if (always_allocate_scope) {
    __ inc(Operand::StaticVariable(scope_depth));
  }

  // Call C function.  Arguments go on the stack per the ia32 cdecl
  // convention: argc, argv, isolate.
  __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
  __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
  __ mov(Operand(esp, 2 * kPointerSize),
         Immediate(ExternalReference::isolate_address()));
  __ call(Operand(ebx));
  // Result is in eax or edx:eax - do not destroy these registers!

  if (always_allocate_scope) {
    __ dec(Operand::StaticVariable(scope_depth));
  }

  // Make sure we're not trying to return 'the hole' from the runtime
  // call as this may lead to crashes in the IC code later.
  if (FLAG_debug_code) {
    Label okay;
    __ cmp(eax, masm->isolate()->factory()->the_hole_value());
    __ j(not_equal, &okay, Label::kNear);
    __ int3();
    __ bind(&okay);
  }

  // Check for failure result.
  Label failure_returned;
  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  __ lea(ecx, Operand(eax, 1));
  // Lower 2 bits of ecx are 0 iff eax has failure tag.
  __ test(ecx, Immediate(kFailureTagMask));
  __ j(zero, &failure_returned);

  ExternalReference pending_exception_address(
      Isolate::kPendingExceptionAddress, masm->isolate());

  // Check that there is no pending exception, otherwise we
  // should have returned some failure value.
  if (FLAG_debug_code) {
    __ push(edx);
    __ mov(edx, Operand::StaticVariable(
        ExternalReference::the_hole_value_location(masm->isolate())));
    Label okay;
    __ cmp(edx, Operand::StaticVariable(pending_exception_address));
    // Cannot use check here as it attempts to generate call into runtime.
    __ j(equal, &okay, Label::kNear);
    __ int3();
    __ bind(&okay);
    __ pop(edx);
  }

  // Exit the JavaScript to C++ exit frame.
  __ LeaveExitFrame(save_doubles_);
  __ ret(0);

  // Handling of failure.
  __ bind(&failure_returned);

  Label retry;
  // If the returned exception is RETRY_AFTER_GC continue at retry label
  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ j(zero, &retry, Label::kNear);

  // Special handling of out of memory exceptions.
  __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
  __ j(equal, throw_out_of_memory_exception);

  // Retrieve the pending exception and clear the variable by storing
  // the hole back into it.
  ExternalReference the_hole_location =
      ExternalReference::the_hole_value_location(masm->isolate());
  __ mov(eax, Operand::StaticVariable(pending_exception_address));
  __ mov(edx, Operand::StaticVariable(the_hole_location));
  __ mov(Operand::StaticVariable(pending_exception_address), edx);

  // Special handling of termination exceptions which are uncatchable
  // by javascript code.
  __ cmp(eax, masm->isolate()->factory()->termination_exception());
  __ j(equal, throw_termination_exception);

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  // Retry.  The caller emits the retry path (another call attempt) after
  // this code.
  __ bind(&retry);
}
4413
4414
// Throw an uncatchable exception (out-of-memory or termination); the
// exception value is in eax.
void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                          UncatchableExceptionType type) {
  __ ThrowUncatchable(type, eax);
}
4419
4420
// Entry point for calls from JavaScript into C++.  Sets up an exit frame,
// then tries the runtime call up to three times: first directly, then with
// a GC, finally with a full GC under an always-allocate scope.  Failures
// that survive all attempts are dispatched to the throw paths.
void CEntryStub::Generate(MacroAssembler* masm) {
  // eax: number of arguments including receiver
  // ebx: pointer to C function  (C callee-saved)
  // ebp: frame pointer  (restored after C call)
  // esp: stack pointer  (restored after C call)
  // esi: current context (C callee-saved)
  // edi: JS function of the caller (C callee-saved)

  // NOTE: Invocations of builtins may return failure objects instead
  // of a proper result. The builtin entry handles this by performing
  // a garbage collection and retrying the builtin (twice).

  // Enter the exit frame that transitions from JavaScript to C++.
  __ EnterExitFrame(save_doubles_);

  // eax: result parameter for PerformGC, if any (setup below)
  // ebx: pointer to builtin function  (C callee-saved)
  // ebp: frame pointer  (restored after C call)
  // esp: stack pointer  (restored after C call)
  // edi: number of arguments including receiver (C callee-saved)
  // esi: argv pointer (C callee-saved)

  Label throw_normal_exception;
  Label throw_termination_exception;
  Label throw_out_of_memory_exception;

  // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               false,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               true);

  __ bind(&throw_out_of_memory_exception);
  GenerateThrowUncatchable(masm, OUT_OF_MEMORY);

  __ bind(&throw_termination_exception);
  GenerateThrowUncatchable(masm, TERMINATION);

  __ bind(&throw_normal_exception);
  GenerateThrowTOS(masm);
}
4482
4483
// Generate the JS entry frame: the C++-to-JavaScript transition.  Builds an
// entry frame, links a JS_ENTRY try-handler, invokes the (construct) entry
// trampoline, and tears everything down again.  On a caught exception the
// stub returns a Failure::Exception() sentinel instead of a result.
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  Label invoke, exit;
  Label not_outermost_js, not_outermost_js_2;

  // Set up frame.
  __ push(ebp);
  __ mov(ebp, Operand(esp));

  // Push marker in two places.
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  __ push(Immediate(Smi::FromInt(marker)));  // context slot
  __ push(Immediate(Smi::FromInt(marker)));  // function slot
  // Save callee-saved registers (C calling conventions).
  __ push(edi);
  __ push(esi);
  __ push(ebx);

  // Save copies of the top frame descriptor on the stack.
  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
  __ push(Operand::StaticVariable(c_entry_fp));

  // If this is the outermost JS call, set js_entry_sp value.
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
                                masm->isolate());
  __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ j(not_equal, &not_outermost_js, Label::kNear);
  __ mov(Operand::StaticVariable(js_entry_sp), ebp);
  __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ jmp(&cont, Label::kNear);
  __ bind(&not_outermost_js);
  __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);

  // Call a faked try-block that does the invoke.
  __ call(&invoke);

  // Caught exception: Store result (exception) in the pending
  // exception field in the JSEnv and return a failure sentinel.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      masm->isolate());
  __ mov(Operand::StaticVariable(pending_exception), eax);
  __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
  __ jmp(&exit);

  // Invoke: Link this frame into the handler chain.
  __ bind(&invoke);
  __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);

  // Clear any pending exceptions by storing the hole value into the
  // pending-exception slot.
  ExternalReference the_hole_location =
      ExternalReference::the_hole_value_location(masm->isolate());
  __ mov(edx, Operand::StaticVariable(the_hole_location));
  __ mov(Operand::StaticVariable(pending_exception), edx);

  // Fake a receiver (NULL).
  __ push(Immediate(0));  // receiver

  // Invoke the function by calling through JS entry trampoline
  // builtin and pop the faked function when we return. Notice that we
  // cannot store a reference to the trampoline code directly in this
  // stub, because the builtin stubs may not have been generated yet.
  if (is_construct) {
    ExternalReference construct_entry(
        Builtins::kJSConstructEntryTrampoline,
        masm->isolate());
    __ mov(edx, Immediate(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline,
                            masm->isolate());
    __ mov(edx, Immediate(entry));
  }
  __ mov(edx, Operand(edx, 0));  // deref address
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ call(Operand(edx));

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);
  // Check if the current stack frame is marked as the outermost JS frame.
  __ pop(ebx);
  __ cmp(Operand(ebx),
         Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ j(not_equal, &not_outermost_js_2);
  __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
  __ bind(&not_outermost_js_2);

  // Restore the top frame descriptor from the stack.
  __ pop(Operand::StaticVariable(ExternalReference(
      Isolate::kCEntryFPAddress,
      masm->isolate())));

  // Restore callee-saved registers (C calling conventions).
  __ pop(ebx);
  __ pop(esi);
  __ pop(edi);
  __ add(Operand(esp), Immediate(2 * kPointerSize));  // remove markers

  // Restore frame pointer and return.
  __ pop(ebp);
  __ ret(0);
}
4587
4588
4589// Generate stub code for instanceof.
// This code can patch a call site's inlined cache for the instanceof check,
4591// which looks like this.
4592//
4593//   81 ff XX XX XX XX   cmp    edi, <the hole, patched to a map>
4594//   75 0a               jne    <some near label>
4595//   b8 XX XX XX XX      mov    eax, <the hole, patched to either true or false>
4596//
4597// If call site patching is requested the stack will have the delta from the
4598// return address to the cmp instruction just below the return address. This
4599// also means that call site patching can only take place with arguments in
4600// registers. TOS looks like this when call site patching is requested
4601//
4602//   esp[0] : return address
4603//   esp[4] : delta from return address to cmp instruction
4604//
// Generate the instanceof stub.  Walks the prototype chain of the object
// (eax) looking for the prototype of the function (edx).  Results are cached
// either in the global instanceof cache roots or, when call-site patching is
// requested, directly at the inlined call site (see comment above).
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Call site inlining and patching implies arguments in registers.
  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());

  // Fixed register usage throughout the stub.
  Register object = eax;  // Object (lhs).
  Register map = ebx;  // Map of the object.
  Register function = edx;  // Function (rhs).
  Register prototype = edi;  // Prototype of the function.
  Register scratch = ecx;

  // Constants describing the call site code to patch.
  static const int kDeltaToCmpImmediate = 2;
  static const int kDeltaToMov = 8;
  static const int kDeltaToMovImmediate = 9;
  static const int8_t kCmpEdiImmediateByte1 = BitCast<int8_t, uint8_t>(0x81);
  static const int8_t kCmpEdiImmediateByte2 = BitCast<int8_t, uint8_t>(0xff);
  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);

  ExternalReference roots_address =
      ExternalReference::roots_address(masm->isolate());

  ASSERT_EQ(object.code(), InstanceofStub::left().code());
  ASSERT_EQ(function.code(), InstanceofStub::right().code());

  // Get the object and function - they are always both needed.
  Label slow, not_js_object;
  if (!HasArgsInRegisters()) {
    __ mov(object, Operand(esp, 2 * kPointerSize));
    __ mov(function, Operand(esp, 1 * kPointerSize));
  }

  // Check that the left hand is a JS object.
  __ JumpIfSmi(object, &not_js_object);
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

  // If there is a call site cache don't look in the global cache, but do the
  // real lookup and update the call site cache.
  if (!HasCallSiteInlineCheck()) {
    // Look up the function and the map in the instanceof cache.
    Label miss;
    __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
    __ cmp(function,
           Operand::StaticArray(scratch, times_pointer_size, roots_address));
    __ j(not_equal, &miss, Label::kNear);
    __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
    __ cmp(map, Operand::StaticArray(
        scratch, times_pointer_size, roots_address));
    __ j(not_equal, &miss, Label::kNear);
    // Cache hit: return the cached answer.
    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
    __ mov(eax, Operand::StaticArray(
        scratch, times_pointer_size, roots_address));
    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
    __ bind(&miss);
  }

  // Get the prototype of the function.
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow);

  // Check that the function prototype is a JS object.
  __ JumpIfSmi(prototype, &slow);
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
  if (!HasCallSiteInlineCheck()) {
  __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
  __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
  __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
         function);
  } else {
    // The constants for the code patching are based on no push instructions
    // at the call site.
    ASSERT(HasArgsInRegisters());
    // Get return address and delta to inlined map check.
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, 0), kCmpEdiImmediateByte1);
      __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
      __ cmpb(Operand(scratch, 1), kCmpEdiImmediateByte2);
      __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
    }
    // Patch the map into the cmp immediate at the call site.
    __ mov(Operand(scratch, kDeltaToCmpImmediate), map);
  }

  // Loop through the prototype chain of the object looking for the function
  // prototype.
  __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
  Label loop, is_instance, is_not_instance;
  __ bind(&loop);
  __ cmp(scratch, Operand(prototype));
  __ j(equal, &is_instance, Label::kNear);
  Factory* factory = masm->isolate()->factory();
  // Reaching null terminates the chain without finding the prototype.
  __ cmp(Operand(scratch), Immediate(factory->null_value()));
  __ j(equal, &is_not_instance, Label::kNear);
  __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
  __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
  __ jmp(&loop);

  __ bind(&is_instance);
  if (!HasCallSiteInlineCheck()) {
    // Smi 0 signals "is an instance"; also store it in the answer cache.
    __ Set(eax, Immediate(0));
    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
    __ mov(Operand::StaticArray(scratch,
                                times_pointer_size, roots_address), eax);
  } else {
    // Get return address and delta to inlined map check.
    __ mov(eax, factory->true_value());
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
      __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
    }
    // Patch true into the mov immediate at the call site.
    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
    if (!ReturnTrueFalseObject()) {
      __ Set(eax, Immediate(0));
    }
  }
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&is_not_instance);
  if (!HasCallSiteInlineCheck()) {
    // Smi 1 signals "is not an instance"; also store it in the answer cache.
    __ Set(eax, Immediate(Smi::FromInt(1)));
    __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
    __ mov(Operand::StaticArray(
        scratch, times_pointer_size, roots_address), eax);
  } else {
    // Get return address and delta to inlined map check.
    __ mov(eax, factory->false_value());
    __ mov(scratch, Operand(esp, 0 * kPointerSize));
    __ sub(scratch, Operand(esp, 1 * kPointerSize));
    if (FLAG_debug_code) {
      __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
      __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
    }
    // Patch false into the mov immediate at the call site.
    __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
    if (!ReturnTrueFalseObject()) {
      __ Set(eax, Immediate(Smi::FromInt(1)));
    }
  }
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  Label object_not_null, object_not_null_or_smi;
  __ bind(&not_js_object);
  // Before null, smi and string value checks, check that the rhs is a function
  // as for a non-function rhs an exception needs to be thrown.
  __ JumpIfSmi(function, &slow, Label::kNear);
  __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
  __ j(not_equal, &slow, Label::kNear);

  // Null is not an instance of anything.
  __ cmp(object, factory->null_value());
  __ j(not_equal, &object_not_null, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&object_not_null);
  // A smi is not an instance of anything.
  __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  __ bind(&object_not_null_or_smi);
  // A string is not an instance of anything.
  Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
  __ j(NegateCondition(is_string), &slow, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(1)));
  __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);

  // Slow-case: Go through the JavaScript implementation.
  __ bind(&slow);
  if (!ReturnTrueFalseObject()) {
    // Tail call the builtin which returns 0 or 1.
    if (HasArgsInRegisters()) {
      // Push arguments below return address.
      __ pop(scratch);
      __ push(object);
      __ push(function);
      __ push(scratch);
    }
    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
  } else {
    // Call the builtin and convert 0/1 to true/false.
    __ EnterInternalFrame();
    __ push(object);
    __ push(function);
    __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
    __ LeaveInternalFrame();
    Label true_value, done;
    __ test(eax, Operand(eax));
    __ j(zero, &true_value, Label::kNear);
    __ mov(eax, factory->false_value());
    __ jmp(&done, Label::kNear);
    __ bind(&true_value);
    __ mov(eax, factory->true_value());
    __ bind(&done);
    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
  }
}
4807
4808
// Register carrying the left-hand operand (the object) of instanceof.
Register InstanceofStub::left() { return eax; }
4810
4811
// Register carrying the right-hand operand (the function) of instanceof.
Register InstanceofStub::right() { return edx; }
4813
4814
4815int CompareStub::MinorKey() {
4816  // Encode the three parameters in a unique 16 bit value. To avoid duplicate
4817  // stubs the never NaN NaN condition is only taken into account if the
4818  // condition is equals.
4819  ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
4820  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4821  return ConditionField::encode(static_cast<unsigned>(cc_))
4822         | RegisterField::encode(false)   // lhs_ and rhs_ are not used
4823         | StrictField::encode(strict_)
4824         | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
4825         | IncludeNumberCompareField::encode(include_number_compare_)
4826         | IncludeSmiCompareField::encode(include_smi_compare_);
4827}
4828
4829
4830// Unfortunately you have to run without snapshots to see most of these
4831// names in the profile since most compare stubs end up in the snapshot.
4832void CompareStub::PrintName(StringStream* stream) {
4833  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4834  const char* cc_name;
4835  switch (cc_) {
4836    case less: cc_name = "LT"; break;
4837    case greater: cc_name = "GT"; break;
4838    case less_equal: cc_name = "LE"; break;
4839    case greater_equal: cc_name = "GE"; break;
4840    case equal: cc_name = "EQ"; break;
4841    case not_equal: cc_name = "NE"; break;
4842    default: cc_name = "UnknownCondition"; break;
4843  }
4844  bool is_equality = cc_ == equal || cc_ == not_equal;
4845  stream->Add("CompareStub_%s", cc_name);
4846  if (strict_ && is_equality) stream->Add("_STRICT");
4847  if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
4848  if (!include_number_compare_) stream->Add("_NO_NUMBER");
4849  if (!include_smi_compare_) stream->Add("_NO_SMI");
4850}
4851
4852
4853// -------------------------------------------------------------------------
4854// StringCharCodeAtGenerator
4855
// Fast path: loads the character code of the string in object_ at the smi
// index index_ into result_ (as a smi). Non-smi indices, out-of-range
// indices, non-string receivers and strings that must be flattened escape
// to the generator's out-of-line labels, which are bound in GenerateSlow.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  Label flat_string;
  Label ascii_string;
  Label got_char_code;
  Label sliced_string;

  // If the receiver is a smi trigger the non-string case.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(object_, receiver_not_string_);

  // Fetch the instance type of the receiver into result register.
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  // If the receiver is not a string trigger the non-string case.
  __ test(result_, Immediate(kIsNotStringMask));
  __ j(not_zero, receiver_not_string_);

  // If the index is non-smi trigger the non-smi case.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(index_, &index_not_smi_);

  // Put smi-tagged index into scratch register.
  __ mov(scratch_, index_);
  // GenerateSlow jumps back here after converting a heap-number index.
  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
  __ j(above_equal, index_out_of_range_);

  // We need special handling for non-flat strings.
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result_, Immediate(kStringRepresentationMask));
  __ j(zero, &flat_string);

  // Handle non-flat strings: dispatch on the representation tag
  // (cons < external < sliced by the asserts below).
  __ and_(result_, kStringRepresentationMask);
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  __ cmp(result_, kExternalStringTag);
  __ j(greater, &sliced_string, Label::kNear);
  __ j(equal, &call_runtime_);

  // ConsString.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  Label assure_seq_string;
  __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
         Immediate(masm->isolate()->factory()->empty_string()));
  __ j(not_equal, &call_runtime_);
  // Get the first of the two strings and load its instance type.
  __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
  __ jmp(&assure_seq_string, Label::kNear);

  // SlicedString, unpack and add offset.
  // NOTE(review): the slice offset and the index are presumably both
  // smi-tagged here, so plain addition keeps the result smi-tagged.
  __ bind(&sliced_string);
  __ add(scratch_, FieldOperand(object_, SlicedString::kOffsetOffset));
  __ mov(object_, FieldOperand(object_, SlicedString::kParentOffset));

  // Assure that we are dealing with a sequential string. Go to runtime if not.
  __ bind(&assure_seq_string);
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result_, Immediate(kStringRepresentationMask));
  __ j(not_zero, &call_runtime_);
  __ jmp(&flat_string, Label::kNear);

  // Check for 1-byte or 2-byte string.
  __ bind(&flat_string);
  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result_, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii_string, Label::kNear);

  // 2-byte string.
  // Load the 2-byte character code into the result register.
  // The smi tag (shift by 1) conveniently doubles the index to a byte offset.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzx_w(result_, FieldOperand(object_,
                                   scratch_, times_1,  // Scratch is smi-tagged.
                                   SeqTwoByteString::kHeaderSize));
  __ jmp(&got_char_code, Label::kNear);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii_string);
  __ SmiUntag(scratch_);
  __ movzx_b(result_, FieldOperand(object_,
                                   scratch_, times_1,
                                   SeqAsciiString::kHeaderSize));
  __ bind(&got_char_code);
  __ SmiTag(result_);
  __ bind(&exit_);
}
4951
4952
// Out-of-line continuation of GenerateFast. Converts a heap-number index to
// a smi via the runtime and re-enters the fast path, or calls
// Runtime::kStringCharCodeAt when the fast path cannot handle the string
// (e.g. it needs flattening). Must never be reached by fall-through.
void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
  __ Abort("Unexpected fallthrough to CharCodeAt slow case");

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              masm->isolate()->factory()->heap_number_map(),
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  // Save object_ and index_ across the call; the third push is the actual
  // runtime argument.
  __ push(object_);
  __ push(index_);
  __ push(index_);  // Consumed by runtime conversion function.
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }
  if (!scratch_.is(eax)) {
    // Save the conversion result before the pop instructions below
    // have a chance to overwrite it.
    __ mov(scratch_, eax);
  }
  __ pop(index_);
  __ pop(object_);
  // Reload the instance type.
  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfNotSmi(scratch_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ jmp(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code of getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ push(index_);
  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort("Unexpected fallthrough from CharCodeAt slow case");
}
5008
5009
5010// -------------------------------------------------------------------------
5011// StringCharFromCodeGenerator
5012
// Fast path: for a smi char code in the ascii range, fetch the cached
// one-character string from the single character string cache. Bails out
// to slow_case_ if code_ is not a smi, is above kMaxAsciiCharCode, or the
// cache holds undefined (no cached string) for this code.
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
  // One combined test: any set bit means either a non-smi tag or a char
  // code above the ascii range (the mask covers all higher value bits).
  __ test(code_,
          Immediate(kSmiTagMask |
                    ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
  __ j(not_zero, &slow_case_);

  Factory* factory = masm->isolate()->factory();
  __ Set(result_, Immediate(factory->single_character_string_cache()));
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiShiftSize == 0);
  // At this point code register contains smi tagged ascii char code.
  // The smi tag (x2) combines with times_half_pointer_size to index the
  // FixedArray by the untagged char code.
  __ mov(result_, FieldOperand(result_,
                               code_, times_half_pointer_size,
                               FixedArray::kHeaderSize));
  __ cmp(result_, factory->undefined_value());
  __ j(equal, &slow_case_);
  __ bind(&exit_);
}
5036
5037
// Out-of-line slow case: calls Runtime::kCharFromCode with code_ and moves
// the resulting string into result_, then rejoins the fast path at exit_.
// Must never be reached by fall-through.
void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
  __ Abort("Unexpected fallthrough to CharFromCode slow case");

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  if (!result_.is(eax)) {
    __ mov(result_, eax);
  }
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort("Unexpected fallthrough from CharFromCode slow case");
}
5054
5055
5056// -------------------------------------------------------------------------
5057// StringCharAtGenerator
5058
// Emits the fast paths of both sub-generators back to back: char-code-at
// produces a character code which char-from-code turns into a one-character
// string. The registers connecting the two are presumably wired together
// where the sub-generators are constructed — confirm in the class header.
void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
  char_code_at_generator_.GenerateFast(masm);
  char_from_code_generator_.GenerateFast(masm);
}
5063
5064
// Emits the out-of-line slow cases of both sub-generators, in the same
// order as their fast paths were emitted by GenerateFast.
void StringCharAtGenerator::GenerateSlow(
    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
  char_code_at_generator_.GenerateSlow(masm, call_helper);
  char_from_code_generator_.GenerateSlow(masm, call_helper);
}
5070
5071
// Generates the string '+' stub. Expects the first operand at
// esp[2 * kPointerSize] and the second at esp[kPointerSize]; on every
// successful exit the result string is in eax and both arguments are
// popped (ret(2 * kPointerSize)). Falls back to Runtime::kStringAdd for
// cases the stub does not handle, or to the ADD builtin when an operand
// cannot be converted to a string here.
void StringAddStub::Generate(MacroAssembler* masm) {
  Label string_add_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

  // Load the two arguments.
  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  if (flags_ == NO_STRING_ADD_FLAGS) {
    __ JumpIfSmi(eax, &string_add_runtime);
    __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
    __ j(above_equal, &string_add_runtime);

    // First argument is a string, test second.
    __ JumpIfSmi(edx, &string_add_runtime);
    __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
    __ j(above_equal, &string_add_runtime);
  } else {
    // Here at least one of the arguments is definitely a string.
    // We convert the one that is not known to be a string.
    if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
      GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
                              &call_builtin);
      builtin_id = Builtins::STRING_ADD_RIGHT;
    } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
      ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
      GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
                              &call_builtin);
      builtin_id = Builtins::STRING_ADD_LEFT;
    }
  }

  // Both arguments are strings.
  // eax: first string
  // edx: second string
  // Check if either of the strings are empty. In that case return the other.
  Label second_not_zero_length, both_not_zero_length;
  __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
  STATIC_ASSERT(kSmiTag == 0);
  // A zero smi length has a zero bit pattern, so a plain test suffices.
  __ test(ecx, Operand(ecx));
  __ j(not_zero, &second_not_zero_length, Label::kNear);
  // Second string is empty, result is first string which is already in eax.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);
  __ bind(&second_not_zero_length);
  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ test(ebx, Operand(ebx));
  __ j(not_zero, &both_not_zero_length, Label::kNear);
  // First string is empty, result is second string which is in edx.
  __ mov(eax, edx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Both strings are non-empty.
  // eax: first string
  // ebx: length of first string as a smi
  // ecx: length of second string as a smi
  // edx: second string
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result, longer_than_two;
  __ bind(&both_not_zero_length);
  __ add(ebx, Operand(ecx));
  STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
  // Handle exceptionally long strings in the runtime system.
  __ j(overflow, &string_add_runtime);
  // Use the symbol table when adding two one character strings, as it
  // helps later optimizations to return a symbol here.
  __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
  __ j(not_equal, &longer_than_two);

  // Check that both strings are non-external ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
                                         &string_add_runtime);

  // Get the two characters forming the new string.
  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));

  // Try to lookup two character string in symbol table. If it is not found
  // just allocate a new one.
  Label make_two_character_string, make_two_character_string_no_reload;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, ebx, ecx, eax, edx, edi,
      &make_two_character_string_no_reload, &make_two_character_string);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Allocate a two character string.
  __ bind(&make_two_character_string);
  // Reload the arguments (the probe above clobbered eax and edx).
  __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
  __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
  // Get the two characters forming the new string.
  __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
  __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
  __ bind(&make_two_character_string_no_reload);
  __ IncrementCounter(counters->string_add_make_two_char(), 1);
  __ AllocateAsciiString(eax,  // Result.
                         2,    // Length.
                         edi,  // Scratch 1.
                         edx,  // Scratch 2.
                         &string_add_runtime);
  // Pack both characters in ebx.
  __ shl(ecx, kBitsPerByte);
  __ or_(ebx, Operand(ecx));
  // Set the characters in the new string.
  __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&longer_than_two);
  // Check if resulting string will be flat.
  __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
  __ j(below, &string_add_flat_result);

  // If result is not supposed to be flat allocate a cons string object. If both
  // strings are ascii the result is an ascii cons string.
  Label non_ascii, allocated, ascii_data;
  __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
  // AND the instance types: the ascii encoding bit survives only if both
  // strings have it set.
  __ and_(ecx, Operand(edi));
  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(ecx, Immediate(kStringEncodingMask));
  __ j(zero, &non_ascii);
  __ bind(&ascii_data);
  // Allocate an ascii cons string.
  __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
  __ bind(&allocated);
  // Fill the fields of the cons string.
  if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
  __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
  __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
         Immediate(String::kEmptyHashField));
  __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
  __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
  __ mov(eax, ecx);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);
  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
  // to contain only ascii characters.
  // ecx: first instance type AND second instance type.
  // edi: second instance type.
  __ test(ecx, Immediate(kAsciiDataHintMask));
  __ j(not_zero, &ascii_data);
  // Also use ascii if exactly one string is ascii and the other carries
  // the ascii-data hint (checked via XOR of the instance types).
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ xor_(edi, Operand(ecx));
  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
  __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
  __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
  __ j(equal, &ascii_data);
  // Allocate a two byte cons string.
  __ AllocateTwoByteConsString(ecx, edi, no_reg, &string_add_runtime);
  __ jmp(&allocated);

  // Handle creating a flat result. First check that both strings are not
  // external strings.
  // eax: first string
  // ebx: length of resulting flat string as a smi
  // edx: second string
  __ bind(&string_add_flat_result);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ and_(ecx, kStringRepresentationMask);
  __ cmp(ecx, kExternalStringTag);
  __ j(equal, &string_add_runtime);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
  __ and_(ecx, kStringRepresentationMask);
  __ cmp(ecx, kExternalStringTag);
  __ j(equal, &string_add_runtime);
  // We cannot encounter sliced strings here since:
  STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
  // Now check if both strings are ascii strings.
  // eax: first string
  // ebx: length of resulting flat string as a smi
  // edx: second string
  Label non_ascii_string_add_flat_result;
  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
  __ j(zero, &non_ascii_string_add_flat_result);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
  __ j(zero, &string_add_runtime);

  // Both strings are ascii strings.  As they are short they are both flat.
  // ebx: length of resulting flat string as a smi
  __ SmiUntag(ebx);
  __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
  // eax: result string
  __ mov(ecx, eax);
  // Locate first character of result.
  __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Load first argument and locate first character.
  __ mov(edx, Operand(esp, 2 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: first character of result
  // edx: first char of first argument
  // edi: length of first argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
  // Load second argument and locate first character.
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: next character of result
  // edx: first char of second argument
  // edi: length of second argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Handle creating a flat two byte result.
  // eax: first string - known to be two byte
  // ebx: length of resulting flat string as a smi
  // edx: second string
  __ bind(&non_ascii_string_add_flat_result);
  __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
  __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kStringEncodingMask);
  __ j(not_zero, &string_add_runtime);
  // Both strings are two byte strings. As they are short they are both
  // flat.
  __ SmiUntag(ebx);
  __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
  // eax: result string
  __ mov(ecx, eax);
  // Locate first character of result.
  __ add(Operand(ecx),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load first argument and locate first character.
  __ mov(edx, Operand(esp, 2 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  __ add(Operand(edx),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: first character of result
  // edx: first char of first argument
  // edi: length of first argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
  // Load second argument and locate first character.
  __ mov(edx, Operand(esp, 1 * kPointerSize));
  __ mov(edi, FieldOperand(edx, String::kLengthOffset));
  __ SmiUntag(edi);
  // NOTE(review): uses SeqAsciiString::kHeaderSize in the two-byte path;
  // presumably equal to SeqTwoByteString::kHeaderSize — confirm.
  __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // eax: result string
  // ecx: next character of result
  // edx: first char of second argument
  // edi: length of second argument
  StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
  __ IncrementCounter(counters->string_add_native(), 1);
  __ ret(2 * kPointerSize);

  // Just jump to runtime to add the two strings.
  __ bind(&string_add_runtime);
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
  }
}
5348
5349
// Converts the value in register arg (also living at esp[stack_offset]) to
// a string if it is not one already: numbers are looked up in the
// number-to-string cache, and JSValue string wrappers are unwrapped when
// marked safe for default valueOf. Anything else jumps to slow. On
// conversion the result is written back both to arg and to the stack slot.
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
  __ j(below, &done);

  // Check the number to string cache.
  Label not_cached;
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
                                                      arg,
                                                      scratch1,
                                                      scratch2,
                                                      scratch3,
                                                      false,
                                                      &not_cached);
  __ mov(arg, scratch1);
  __ mov(Operand(esp, stack_offset), arg);
  __ jmp(&done);

  // Check if the argument is a safe string wrapper.
  __ bind(&not_cached);
  __ JumpIfSmi(arg, slow);
  __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
  __ j(not_equal, slow);
  __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
            1 << Map::kStringWrapperSafeForDefaultValueOf);
  __ j(zero, slow);
  __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
  __ mov(Operand(esp, stack_offset), arg);

  __ bind(&done);
}
5391
5392
5393void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5394                                          Register dest,
5395                                          Register src,
5396                                          Register count,
5397                                          Register scratch,
5398                                          bool ascii) {
5399  Label loop;
5400  __ bind(&loop);
5401  // This loop just copies one character at a time, as it is only used for very
5402  // short strings.
5403  if (ascii) {
5404    __ mov_b(scratch, Operand(src, 0));
5405    __ mov_b(Operand(dest, 0), scratch);
5406    __ add(Operand(src), Immediate(1));
5407    __ add(Operand(dest), Immediate(1));
5408  } else {
5409    __ mov_w(scratch, Operand(src, 0));
5410    __ mov_w(Operand(dest, 0), scratch);
5411    __ add(Operand(src), Immediate(2));
5412    __ add(Operand(dest), Immediate(2));
5413  }
5414  __ sub(Operand(count), Immediate(1));
5415  __ j(not_zero, &loop);
5416}
5417
5418
// Copies count characters from src to dest using rep movs on whole
// doublewords, followed by a byte loop for the remaining 0-3 bytes.
// dest, src and count must be edi, esi and ecx (the implicit rep movs
// operands). count is a character count and is doubled for two-byte
// strings. Clobbers src, dest, count and scratch.
void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
                                             Register dest,
                                             Register src,
                                             Register count,
                                             Register scratch,
                                             bool ascii) {
  // Copy characters using rep movs of doublewords.
  // The destination is aligned on a 4 byte boundary because we are
  // copying to the beginning of a newly allocated string.
  ASSERT(dest.is(edi));  // rep movs destination
  ASSERT(src.is(esi));  // rep movs source
  ASSERT(count.is(ecx));  // rep movs count
  ASSERT(!scratch.is(dest));
  ASSERT(!scratch.is(src));
  ASSERT(!scratch.is(count));

  // Nothing to do for zero characters.
  Label done;
  __ test(count, Operand(count));
  __ j(zero, &done);

  // Make count the number of bytes to copy.
  if (!ascii) {
    __ shl(count, 1);
  }

  // Don't enter the rep movs if there are less than 4 bytes to copy.
  Label last_bytes;
  __ test(count, Immediate(~3));
  __ j(zero, &last_bytes, Label::kNear);

  // Copy from esi to edi using rep movs instruction.
  __ mov(scratch, count);
  __ sar(count, 2);  // Number of doublewords to copy.
  __ cld();
  __ rep_movs();

  // Find number of bytes left.
  __ mov(count, scratch);
  __ and_(count, 3);

  // Check if there are more bytes to copy.
  __ bind(&last_bytes);
  __ test(count, Operand(count));
  __ j(zero, &done);

  // Copy remaining characters one byte at a time.
  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(src, 0));
  __ mov_b(Operand(dest, 0), scratch);
  __ add(Operand(src), Immediate(1));
  __ add(Operand(dest), Immediate(1));
  __ sub(Operand(count), Immediate(1));
  __ j(not_zero, &loop);

  __ bind(&done);
}
5477
5478
// Probes the symbol table for a two-character ascii string made of the
// character codes in c1 and c2. On a hit, control falls through with the
// symbol in eax. Jumps to not_probed when both characters are digits
// (array-index strings use a different hash, so the table is not probed)
// and to not_found when no matching symbol exists after kProbes attempts.
// Clobbers c1, c2 and all three scratch registers.
void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
                                                        Register c1,
                                                        Register c2,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Label* not_probed,
                                                        Label* not_found) {
  // Register scratch3 is the general scratch register in this function.
  Register scratch = scratch3;

  // Make sure that both characters are not digits as such strings have a
  // different hash algorithm. Don't try to look for these in the symbol table.
  Label not_array_index;
  __ mov(scratch, c1);
  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
  __ j(above, &not_array_index, Label::kNear);
  __ mov(scratch, c2);
  __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
  __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
  __ j(below_equal, not_probed);

  __ bind(&not_array_index);
  // Calculate the two character string hash.
  Register hash = scratch1;
  GenerateHashInit(masm, hash, c1, scratch);
  GenerateHashAddCharacter(masm, hash, c2, scratch);
  GenerateHashGetHash(masm, hash, scratch);

  // Collect the two characters in a register.
  Register chars = c1;
  __ shl(c2, kBitsPerByte);
  __ or_(chars, Operand(c2));

  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:  hash of two character string.

  // Load the symbol table.
  Register symbol_table = c2;
  ExternalReference roots_address =
      ExternalReference::roots_address(masm->isolate());
  __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
  __ mov(symbol_table,
         Operand::StaticArray(scratch, times_pointer_size, roots_address));

  // Calculate capacity mask from the symbol table capacity.
  Register mask = scratch2;
  __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
  __ SmiUntag(mask);
  __ sub(Operand(mask), Immediate(1));

  // Registers
  // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:         hash of two character string
  // symbol_table: symbol table
  // mask:         capacity mask
  // scratch:      -

  // Perform a fixed number of open-addressing probes in the symbol table;
  // the probe sequence is unrolled so each offset is a code-time constant.
  static const int kProbes = 4;
  Label found_in_symbol_table;
  Label next_probe[kProbes], next_probe_pop_mask[kProbes];
  Register candidate = scratch;  // Scratch register contains candidate.
  for (int i = 0; i < kProbes; i++) {
    // Calculate entry in symbol table.
    __ mov(scratch, hash);
    if (i > 0) {
      __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(mask));

    // Load the entry from the symbol table.
    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
    __ mov(candidate,
           FieldOperand(symbol_table,
                        scratch,
                        times_pointer_size,
                        SymbolTable::kElementsStartOffset));

    // If entry is undefined no string with this hash can be found.
    Factory* factory = masm->isolate()->factory();
    __ cmp(candidate, factory->undefined_value());
    __ j(equal, not_found);
    // Null marks a deleted entry: skip it but keep probing.
    __ cmp(candidate, factory->null_value());
    __ j(equal, &next_probe[i]);

    // If length is not 2 the string is not a candidate.
    __ cmp(FieldOperand(candidate, String::kLengthOffset),
           Immediate(Smi::FromInt(2)));
    __ j(not_equal, &next_probe[i]);

    // As we are out of registers save the mask on the stack and use that
    // register as a temporary.
    __ push(mask);
    Register temp = mask;

    // Check that the candidate is a non-external ascii string.
    __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
    __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ JumpIfInstanceTypeIsNotSequentialAscii(
        temp, temp, &next_probe_pop_mask[i]);

    // Check if the two characters match.
    __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
    __ and_(temp, 0x0000ffff);
    __ cmp(chars, Operand(temp));
    __ j(equal, &found_in_symbol_table);
    __ bind(&next_probe_pop_mask[i]);
    __ pop(mask);
    __ bind(&next_probe[i]);
  }

  // No matching 2 character string found by probing.
  __ jmp(not_found);

  // Scratch register contains result when we fall through to here.
  Register result = candidate;
  __ bind(&found_in_symbol_table);
  __ pop(mask);  // Pop saved mask from the stack.
  if (!result.is(eax)) {
    __ mov(eax, result);
  }
}
5603
5604
// Emits code that starts a string hash computation with the first character:
//   hash = (seed + character) + ((seed + character) << 10);
//   hash ^= hash >> 6;
// |hash| receives the result; |character| is read-only; |scratch| is
// clobbered.  The seed comes from the heap's string hash seed.
void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character,
                                    Register scratch) {
  // hash = (seed + character) + ((seed + character) << 10);
  if (Serializer::enabled()) {
    // When building a snapshot the seed is not a compile-time constant, so
    // load it from the roots array at runtime.
    ExternalReference roots_address =
        ExternalReference::roots_address(masm->isolate());
    __ mov(scratch, Immediate(Heap::kStringHashSeedRootIndex));
    __ mov(scratch, Operand::StaticArray(scratch,
                                         times_pointer_size,
                                         roots_address));
    __ add(scratch, Operand(character));
    __ mov(hash, scratch);
    __ shl(scratch, 10);
    __ add(hash, Operand(scratch));
  } else {
    // Outside the serializer the seed can be baked into the code as an
    // immediate; lea computes character + seed without touching flags.
    int32_t seed = masm->isolate()->heap()->StringHashSeed();
    __ lea(scratch, Operand(character, seed));
    __ shl(scratch, 10);
    __ lea(hash, Operand(scratch, character, times_1, seed));
  }
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ shr(scratch, 6);
  __ xor_(hash, Operand(scratch));
}
5632
5633
// Emits code that mixes one more character into a running string hash:
//   hash += character;
//   hash += hash << 10;
//   hash ^= hash >> 6;
// |hash| is updated in place; |character| is read-only; |scratch| is
// clobbered.
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character,
                                            Register scratch) {
  // hash += character;
  __ add(hash, Operand(character));
  // hash += hash << 10;
  __ mov(scratch, hash);
  __ shl(scratch, 10);
  __ add(hash, Operand(scratch));
  // hash ^= hash >> 6;
  __ mov(scratch, hash);
  __ shr(scratch, 6);
  __ xor_(hash, Operand(scratch));
}
5649
5650
// Emits the finalization steps of the string hash:
//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;
// then masks the result to String::kHashBitMask and replaces a zero hash
// with StringHasher::kZeroHash (a hash of 0 is reserved to mean
// "not computed").  |hash| is updated in place; |scratch| is clobbered.
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash,
                                       Register scratch) {
  // hash += hash << 3;
  __ mov(scratch, hash);
  __ shl(scratch, 3);
  __ add(hash, Operand(scratch));
  // hash ^= hash >> 11;
  __ mov(scratch, hash);
  __ shr(scratch, 11);
  __ xor_(hash, Operand(scratch));
  // hash += hash << 15;
  __ mov(scratch, hash);
  __ shl(scratch, 15);
  __ add(hash, Operand(scratch));

  __ and_(hash, String::kHashBitMask);

  // if (hash == 0) hash = 27;
  // The and_ above set the zero flag if the masked hash is zero.
  Label hash_not_zero;
  __ j(not_zero, &hash_not_zero, Label::kNear);
  __ mov(hash, Immediate(StringHasher::kZeroHash));
  __ bind(&hash_not_zero);
}
5675
5676
// Generates the fast path for taking a sub string.  Takes three arguments on
// the stack (string, from, to) and returns the result string in eax.  Handles
// only flat strings inline: length-2 results are looked up in the symbol
// table, longer results are either represented as sliced strings (when
// FLAG_string_slices is on and the result is long enough) or copied into a
// freshly allocated sequential string.  Everything else (length < 2,
// non-flat input, allocation failure) falls through to Runtime::kSubString.
void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: to
  //  esp[8]: from
  //  esp[12]: string

  // Make sure first argument is a string.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(eax, &runtime);
  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
  __ j(NegateCondition(is_string), &runtime);

  // eax: string
  // ebx: instance type

  // Calculate length of sub string using the smi values.
  Label result_longer_than_two;
  __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
  __ JumpIfNotSmi(ecx, &runtime);
  __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
  __ JumpIfNotSmi(edx, &runtime);
  __ sub(ecx, Operand(edx));
  __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
  Label return_eax;
  // If the requested length equals the whole string's length, return the
  // original string unchanged.
  __ j(equal, &return_eax);
  // Special handling of sub-strings of length 1 and 2. One character strings
  // are handled in the runtime system (looked up in the single character
  // cache). Two character strings are looked for in the symbol cache.
  __ SmiUntag(ecx);  // Result length is no longer smi.
  __ cmp(ecx, 2);
  __ j(greater, &result_longer_than_two);
  __ j(less, &runtime);

  // Sub string of length 2 requested.
  // eax: string
  // ebx: instance type
  // ecx: sub string length (value is 2)
  // edx: from index (smi)
  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);

  // Get the two characters forming the sub string.
  __ SmiUntag(edx);  // From index is no longer smi.
  __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
  __ movzx_b(ecx,
             FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));

  // Try to lookup two character string in symbol table.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, ebx, ecx, eax, edx, edi,
      &make_two_character_string, &make_two_character_string);
  __ ret(3 * kPointerSize);

  __ bind(&make_two_character_string);
  // Setup registers for allocating the two character string.
  // Not found in the symbol table: reload the string and instance type and
  // fall into the generic copy path below with length 2.
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  __ Set(ecx, Immediate(2));

  if (FLAG_string_slices) {
    Label copy_routine;
    // If coming from the make_two_character_string path, the string
    // is too short to be sliced anyways.
    STATIC_ASSERT(2 < SlicedString::kMinLength);
    __ jmp(&copy_routine);
    __ bind(&result_longer_than_two);

    // eax: string
    // ebx: instance type
    // ecx: sub string length
    // edx: from index (smi)
    Label allocate_slice, sliced_string, seq_string;
    __ cmp(ecx, SlicedString::kMinLength);
    // Short slice.  Copy instead of slicing.
    __ j(less, &copy_routine);
    STATIC_ASSERT(kSeqStringTag == 0);
    __ test(ebx, Immediate(kStringRepresentationMask));
    __ j(zero, &seq_string, Label::kNear);
    STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
    STATIC_ASSERT(kIsIndirectStringMask != 0);
    __ test(ebx, Immediate(kIsIndirectStringMask));
    // External string.  Jump to runtime.
    __ j(zero, &runtime);

    Factory* factory = masm->isolate()->factory();
    __ test(ebx, Immediate(kSlicedNotConsMask));
    __ j(not_zero, &sliced_string, Label::kNear);
    // Cons string.  Check whether it is flat, then fetch first part.
    __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
           factory->empty_string());
    __ j(not_equal, &runtime);
    __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
    __ jmp(&allocate_slice, Label::kNear);

    __ bind(&sliced_string);
    // Sliced string.  Fetch parent and correct start index by offset.
    __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
    __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
    __ jmp(&allocate_slice, Label::kNear);

    __ bind(&seq_string);
    // Sequential string.  Just move string to the right register.
    __ mov(edi, eax);

    __ bind(&allocate_slice);
    // edi: underlying subject string
    // ebx: instance type of original subject string
    // edx: offset
    // ecx: length
    // Allocate new sliced string.  At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string.  It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyways due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ test(ebx, Immediate(kStringEncodingMask));
    __ j(zero, &two_byte_slice, Label::kNear);
    __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
    __ jmp(&set_slice_header, Label::kNear);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
    __ bind(&set_slice_header);
    // Fill in the slice header: offset, length (re-tagged as smi), parent
    // and an empty hash field.
    __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
    __ SmiTag(ecx);
    __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
    __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
    __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
           Immediate(String::kEmptyHashField));
    __ jmp(&return_eax);

    __ bind(&copy_routine);
  } else {
    __ bind(&result_longer_than_two);
  }

  // eax: string
  // ebx: instance type
  // ecx: result string length
  // Check for flat ascii string
  Label non_ascii_flat;
  __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);

  // Allocate the result.
  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  __ SmiUntag(ebx);
  __ add(esi, Operand(ebx));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
  __ mov(esi, edx);  // Restore esi.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  __ bind(&non_ascii_flat);
  // eax: string
  // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
  // ecx: result string length
  // Check for flat two byte string
  __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
  __ j(not_equal, &runtime);

  // Allocate the result.
  __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);

  // eax: result string
  // ecx: result string length
  __ mov(edx, esi);  // esi used by following code.
  // Locate first character of result.
  __ mov(edi, eax);
  __ add(Operand(edi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // Load string argument and locate character of sub string start.
  __ mov(esi, Operand(esp, 3 * kPointerSize));
  __ add(Operand(esi),
         Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
  // As from is a smi it is 2 times the value which matches the size of a two
  // byte character.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(esi, Operand(ebx));

  // eax: result string
  // ecx: result length
  // edx: original value of esi
  // edi: first character of result
  // esi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
  __ mov(esi, edx);  // Restore esi.

  __ bind(&return_eax);
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(3 * kPointerSize);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
}
5898
5899
// Emits an equality check for two flat ASCII strings in |left| and |right|.
// Returns (via ret(0)) Smi::FromInt(EQUAL) in eax if the strings are equal
// and Smi::FromInt(NOT_EQUAL) otherwise.  |scratch1| and |scratch2| are
// clobbered; the caller must have verified both operands are sequential
// ASCII strings.
void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2) {
  Register length = scratch1;

  // Compare lengths.  Different lengths mean the strings cannot be equal.
  Label strings_not_equal, check_zero_length;
  __ mov(length, FieldOperand(left, String::kLengthOffset));
  __ cmp(length, FieldOperand(right, String::kLengthOffset));
  __ j(equal, &check_zero_length, Label::kNear);
  __ bind(&strings_not_equal);
  __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
  __ ret(0);

  // Check if the length is zero.  Two empty strings are equal.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ test(length, Operand(length));
  __ j(not_zero, &compare_chars, Label::kNear);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Compare characters.
  __ bind(&compare_chars);
  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
                                &strings_not_equal, Label::kNear);

  // Characters are equal.
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);
}
5934
5935
// Emits a three-way lexicographic comparison of two flat ASCII strings.
// Returns (via ret(0)) Smi::FromInt(LESS), EQUAL or GREATER in eax.
// Compares characters up to the shorter length; if that prefix is equal the
// length difference decides.  |scratch1|-|scratch3| are clobbered; the
// caller must have verified both operands are sequential ASCII strings.
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3) {
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_compare_native(), 1);

  // Find minimum length.
  Label left_shorter;
  __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
  __ mov(scratch3, scratch1);
  __ sub(scratch3, FieldOperand(right, String::kLengthOffset));

  // length_delta = left.length - right.length (as smis).
  Register length_delta = scratch3;

  __ j(less_equal, &left_shorter, Label::kNear);
  // Right string is shorter. Change scratch1 to be length of right string.
  __ sub(scratch1, Operand(length_delta));
  __ bind(&left_shorter);

  Register min_length = scratch1;

  // If either length is zero, just compare lengths.
  Label compare_lengths;
  __ test(min_length, Operand(min_length));
  __ j(zero, &compare_lengths, Label::kNear);

  // Compare characters.  Jumps to result_not_equal with the flags of the
  // first differing byte comparison still set.
  Label result_not_equal;
  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
                                &result_not_equal, Label::kNear);

  // Compare lengths -  strings up to min-length are equal.
  __ bind(&compare_lengths);
  __ test(length_delta, Operand(length_delta));
  __ j(not_zero, &result_not_equal, Label::kNear);

  // Result is EQUAL.
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  Label result_greater;
  __ bind(&result_not_equal);
  __ j(greater, &result_greater, Label::kNear);

  // Result is LESS.
  __ Set(eax, Immediate(Smi::FromInt(LESS)));
  __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Set(eax, Immediate(Smi::FromInt(GREATER)));
  __ ret(0);
}
5994
5995
// Emits a byte-wise comparison loop over |length| (a smi on entry, untagged
// here) characters of two sequential ASCII strings.  Jumps to
// |chars_not_equal| on the first mismatch, with the cmpb flags still set so
// the caller can branch on less/greater.  Falls through when all compared
// characters are equal.  Clobbers |left|, |right|, |length| and |scratch|.
void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch,
    Label* chars_not_equal,
    Label::Distance chars_not_equal_near) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ lea(left,
         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
  __ lea(right,
         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
  __ neg(length);
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ mov_b(scratch, Operand(left, index, times_1, 0));
  __ cmpb(scratch, Operand(right, index, times_1, 0));
  __ j(not_equal, chars_not_equal, chars_not_equal_near);
  // The add sets the zero flag when index wraps to 0, ending the loop.
  __ add(Operand(index), Immediate(1));
  __ j(not_zero, &loop);
}
6024
6025
// Generates the string comparison stub.  Takes two strings from the stack,
// short-circuits identical objects to EQUAL, compares flat ASCII strings
// inline and falls back to Runtime::kStringCompare for everything else.
// The result smi (-1, 0 or 1) is returned in eax.
void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: right string
  //  esp[8]: left string

  __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
  __ mov(eax, Operand(esp, 1 * kPointerSize));  // right

  // Identical objects compare equal without looking at the contents.
  Label not_same;
  __ cmp(edx, Operand(eax));
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&not_same);

  // Check that both objects are sequential ascii strings.
  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);

  // Compare flat ascii strings.
  // Drop arguments from the stack.
  // GenerateCompareFlatAsciiStrings returns with ret(0), so pop the two
  // arguments here while keeping the return address on top.
  __ pop(ecx);
  __ add(Operand(esp), Immediate(2 * kPointerSize));
  __ push(ecx);
  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
6063
6064
// Generates the compare IC fast path for two smi operands (edx, eax).
// Returns the comparison result as an integer in eax; misses to the
// generic handler if either operand is not a smi.
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  Label miss;
  // Both operands are smis iff the or of their tag bits is a smi
  // (kSmiTag == 0, tag is the low bit).
  __ mov(ecx, Operand(edx));
  __ or_(ecx, Operand(eax));
  __ JumpIfNotSmi(ecx, &miss, Label::kNear);

  if (GetCondition() == equal) {
    // For equality we do not care about the sign of the result.
    __ sub(eax, Operand(edx));
  } else {
    Label done;
    __ sub(edx, Operand(eax));
    __ j(no_overflow, &done, Label::kNear);
    // Correct sign of result in case of overflow.
    // not(x) == -x - 1: same sign as the true difference, and it cannot
    // itself overflow.
    __ not_(edx);
    __ bind(&done);
    __ mov(eax, edx);
  }
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}
6089
6090
// Generates the compare IC fast path for two heap number operands
// (edx, eax).  Uses SSE2/CMOV when available to compare the doubles inline
// and return -1, 0 or 1 in eax; otherwise (or for NaNs) defers to the
// generic CompareStub.  Misses if either operand is not a heap number.
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  Label generic_stub;
  Label unordered;
  Label miss;
  // If either operand is a smi the and of the tag bits is a smi; let the
  // generic stub deal with mixed smi/heap-number inputs.
  __ mov(ecx, Operand(edx));
  __ and_(ecx, Operand(eax));
  __ JumpIfSmi(ecx, &generic_stub, Label::kNear);

  __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);
  __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);

  // Inlining the double comparison and falling back to the general compare
  // stub if NaN is involved or SSE2 or CMOV is unsupported.
  if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
    CpuFeatures::Scope scope1(SSE2);
    CpuFeatures::Scope scope2(CMOV);

    // Load left and right operand
    __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
    __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));

    // Compare operands
    __ ucomisd(xmm0, xmm1);

    // Don't base result on EFLAGS when a NaN is involved.
    // ucomisd sets the parity flag for unordered (NaN) comparisons.
    __ j(parity_even, &unordered, Label::kNear);

    // Return a result of -1, 0, or 1, based on EFLAGS.
    // Performing mov, because xor would destroy the flag register.
    __ mov(eax, 0);  // equal
    __ mov(ecx, Immediate(Smi::FromInt(1)));
    __ cmov(above, eax, Operand(ecx));
    __ mov(ecx, Immediate(Smi::FromInt(-1)));
    __ cmov(below, eax, Operand(ecx));
    __ ret(0);

    __ bind(&unordered);
  }

  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
  __ bind(&generic_stub);
  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&miss);
  GenerateMiss(masm);
}
6141
6142
// Generates the compare IC fast path for two symbol operands (edx, eax).
// Symbols are canonicalized, so equality is pointer identity.  Only used
// for equality comparisons.  Misses if either operand is a smi or not a
// symbol.
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SYMBOLS);
  ASSERT(GetCondition() == equal);

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;

  // Check that both operands are heap objects.
  // The and of the tag bits is a smi iff at least one operand is a smi.
  Label miss;
  __ mov(tmp1, Operand(left));
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, Operand(right));
  __ JumpIfSmi(tmp1, &miss, Label::kNear);

  // Check that both operands are symbols.
  // Anding the instance types keeps kIsSymbolMask set only if both have it.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(tmp1, Operand(tmp2));
  __ test(tmp1, Immediate(kIsSymbolMask));
  __ j(zero, &miss, Label::kNear);

  // Symbols are compared by identity.
  Label done;
  __ cmp(left, Operand(right));
  // Make sure eax is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(eax));
  __ j(not_equal, &done, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ bind(&done);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}
6186
6187
// Generates the compare IC fast path for two string operands (edx, eax).
// Only used for equality comparisons.  Handles identical objects, symbol
// pairs (identity check suffices), and flat ASCII strings inline; falls
// back to Runtime::kStringEquals otherwise.  Misses if either operand is
// not a string.
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRINGS);
  ASSERT(GetCondition() == equal);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = edx;
  Register right = eax;
  Register tmp1 = ecx;
  Register tmp2 = ebx;
  Register tmp3 = edi;

  // Check that both operands are heap objects.
  __ mov(tmp1, Operand(left));
  STATIC_ASSERT(kSmiTag == 0);
  __ and_(tmp1, Operand(right));
  __ JumpIfSmi(tmp1, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  __ mov(tmp3, tmp1);
  STATIC_ASSERT(kNotStringTag != 0);
  __ or_(tmp3, Operand(tmp2));
  __ test(tmp3, Immediate(kIsNotStringMask));
  __ j(not_zero, &miss);

  // Fast check for identical strings.
  Label not_same;
  __ cmp(left, Operand(right));
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
  __ ret(0);

  // Handle not identical strings.
  __ bind(&not_same);

  // Check that both strings are symbols. If they are, we're done
  // because we already know they are not identical.
  Label do_compare;
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(tmp1, Operand(tmp2));
  __ test(tmp1, Immediate(kIsSymbolMask));
  __ j(zero, &do_compare, Label::kNear);
  // Make sure eax is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  // (A non-zero eax means NOT_EQUAL to the caller.)
  ASSERT(right.is(eax));
  __ ret(0);

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ bind(&do_compare);
  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);

  // Compare flat ASCII strings. Returns when done.
  StringCompareStub::GenerateFlatAsciiStringEquals(
      masm, left, right, tmp1, tmp2);

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ pop(tmp1);  // Return address.
  __ push(left);
  __ push(right);
  __ push(tmp1);
  __ TailCallRuntime(Runtime::kStringEquals, 2, 1);

  __ bind(&miss);
  GenerateMiss(masm);
}
6262
6263
// Generates the compare IC fast path for two JS object operands (edx, eax).
// Objects are compared by identity, so the subtraction yields zero exactly
// for equal references.  Only used for equality; misses if either operand
// is a smi or not a JS_OBJECT_TYPE.
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  Label miss;
  __ mov(ecx, Operand(edx));
  __ and_(ecx, Operand(eax));
  __ JumpIfSmi(ecx, &miss, Label::kNear);

  __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);
  __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
  __ j(not_equal, &miss, Label::kNear);

  ASSERT(GetCondition() == equal);
  __ sub(eax, Operand(edx));
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}
6283
6284
// Generates the compare IC miss handler: calls the CompareIC_Miss runtime
// entry (which returns the code object of a rewritten, more general stub in
// eax) and then tail-calls that new stub with the original operands intact
// in edx and eax.
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  // Save the registers.
  // The operands and return address are saved around the runtime call so
  // they can be restored before jumping to the rewritten stub.
  __ pop(ecx);
  __ push(edx);
  __ push(eax);
  __ push(ecx);

  // Call the runtime system in a fresh internal frame.
  ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
                                             masm->isolate());
  __ EnterInternalFrame();
  __ push(edx);
  __ push(eax);
  __ push(Immediate(Smi::FromInt(op_)));
  __ CallExternalReference(miss, 3);
  __ LeaveInternalFrame();

  // Compute the entry point of the rewritten stub.
  __ lea(edi, FieldOperand(eax, Code::kHeaderSize));

  // Restore registers.
  __ pop(ecx);
  __ pop(eax);
  __ pop(edx);
  __ push(ecx);

  // Do a tail call to the rewritten stub.
  __ jmp(Operand(edi));
}
6314
6315
6316// Helper function used to check that the dictionary doesn't contain
6317// the property. This function may return false negatives, so miss_label
6318// must always call a backup property check that is complete.
6319// This function is safe to call if the receiver has fast properties.
6320// Name must be a symbol and receiver must be a heap object.
// Emits kInlinedProbes unrolled dictionary probes that try to prove |name|
// is absent from |properties| (a StringDictionary), jumping to |done| on
// proof of absence and |miss| if the name is (or may be) present; the
// remaining probes are delegated to StringDictionaryLookupStub.  |r0| is
// clobbered.  Returns a failure MaybeObject if the stub code could not be
// created (propagated from TryCallStub).
MaybeObject* StringDictionaryLookupStub::GenerateNegativeLookup(
    MacroAssembler* masm,
    Label* miss,
    Label* done,
    Register properties,
    String* name,
    Register r0) {
  ASSERT(name->IsSymbol());

  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the null value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = r0;
    // Capacity is smi 2^n.
    // dec turns the power-of-two capacity into the probe mask
    // (works on the smi since the low tag bits are zero).
    __ mov(index, FieldOperand(properties, kCapacityOffset));
    __ dec(index);
    __ and_(Operand(index),
           Immediate(Smi::FromInt(name->Hash() +
                                   StringDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
    Register entity_name = r0;
    // Having undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    // times_half_pointer_size compensates for the index still being a smi.
    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                kElementsStartOffset - kHeapObjectTag));
    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
    __ j(equal, done);

    // Stop if found the property.
    __ cmp(entity_name, Handle<String>(name));
    __ j(equal, miss);

    // Check if the entry name is not a symbol.
    // Non-symbol keys force the slow path since they may equal |name|
    // without being the same object.
    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
    __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
              kIsSymbolMask);
    __ j(zero, miss);
  }

  // Fall back to the out-of-line stub for the remaining probes; it leaves
  // zero in r0 iff the name was not found.
  StringDictionaryLookupStub stub(properties,
                                  r0,
                                  r0,
                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
  __ push(Immediate(Handle<Object>(name)));
  __ push(Immediate(name->Hash()));
  MaybeObject* result = masm->TryCallStub(&stub);
  if (result->IsFailure()) return result;
  __ test(r0, Operand(r0));
  __ j(not_zero, miss);
  __ jmp(done);
  return result;
}
6380
6381
6382// Probe the string dictionary in the |elements| register. Jump to the
6383// |done| label if a property with the given name is found leaving the
6384// index into the dictionary in |r0|. Jump to the |miss| label
6385// otherwise.
// Emits kInlinedProbes unrolled probes of the string dictionary in
// |elements| looking for |name|, jumping to |done| with the entry index in
// |r0| on a hit; the remaining probes are delegated to
// StringDictionaryLookupStub, which jumps to |miss| on failure.  |r0| and
// |r1| are clobbered.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register r0,
                                                        Register r1) {
  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  // r1 = capacity - 1, the probe mask (capacity is a power of two).
  __ mov(r1, FieldOperand(elements, kCapacityOffset));
  __ shr(r1, kSmiTagSize);  // convert smi to int
  __ dec(r1);

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
    __ shr(r0, String::kHashShift);
    if (i > 0) {
      __ add(Operand(r0), Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(r0, Operand(r1));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3

    // Check if the key is identical to the name.
    __ cmp(name, Operand(elements,
                         r0,
                         times_4,
                         kElementsStartOffset - kHeapObjectTag));
    __ j(equal, done);
  }

  // Fall back to the out-of-line stub for the remaining probes; it leaves
  // zero in r1 iff the name was not found.
  StringDictionaryLookupStub stub(elements,
                                  r1,
                                  r0,
                                  POSITIVE_LOOKUP);
  __ push(name);
  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
  __ shr(r0, String::kHashShift);
  __ push(r0);
  __ CallStub(&stub);

  __ test(r1, Operand(r1));
  __ j(zero, miss);
  __ jmp(done);
}
6438
6439
// Emits the out-of-line slow path for a StringDictionary probe.  Performs up
// to (kTotalProbes - kInlinedProbes) additional quadratic probes after the
// inlined fast-path probes have failed.
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // Stack frame on entry:
  //  esp[0 * kPointerSize]: return address.
  //  esp[1 * kPointerSize]: key's hash.
  //  esp[2 * kPointerSize]: key.
  // Registers:
  //  dictionary_: StringDictionary to probe.
  //  result_: used as scratch.
  //  index_: will hold an index of entry if lookup is successful.
  //          might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non zero otherwise.

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  // result_ doubles as the scratch register; it is only given its final
  // 0/1 value on the exit paths below.
  Register scratch = result_;

  // Compute the capacity mask (capacity - 1) from the smi-tagged capacity.
  // Decrementing before untagging is correct for a positive smi 2c:
  // (2c - 1) >> 1 == c - 1.
  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
  __ dec(scratch);
  __ SmiUntag(scratch);
  // Keep the mask on the stack.  After this push the incoming arguments sit
  // at esp[2] (hash) and esp[3] (key); the loop below uses those offsets.
  __ push(scratch);

  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the null value).
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(scratch, Operand(esp, 2 * kPointerSize));  // Reload the hash.
    if (i > 0) {
      __ add(Operand(scratch),
             Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(esp, 0));  // Mask with capacity - 1.

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.

    // Having undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    // Load the key stored in the probed entry.
    __ mov(scratch, Operand(dictionary_,
                            index_,
                            times_pointer_size,
                            kElementsStartOffset - kHeapObjectTag));
    __ cmp(scratch, masm->isolate()->factory()->undefined_value());
    __ j(equal, &not_in_dictionary);

    // Stop if found the property.
    __ cmp(scratch, Operand(esp, 3 * kPointerSize));
    __ j(equal, &in_dictionary);

    // On the last probe there is no point checking: execution falls through
    // to maybe_in_dictionary after the loop anyway.
    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // If we hit a non symbol key during negative lookup
      // we have to bailout as this key might be equal to the
      // key we are looking for.

      // Check if the entry name is not a symbol.
      __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
      __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
                kIsSymbolMask);
      __ j(zero, &maybe_in_dictionary);  // Symbol bit clear: not a symbol.
    }
  }

  // Reached by falling through after all probes were exhausted, or via the
  // non-symbol bailout above.
  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result_, Immediate(0));
    __ Drop(1);                // Pop the saved capacity mask.
    __ ret(2 * kPointerSize);  // Pop hash and key arguments.
  }
  // NEGATIVE_LOOKUP falls through to in_dictionary: an inconclusive probe
  // must be reported as "found" so the caller takes the slow path.

  __ bind(&in_dictionary);
  __ mov(result_, Immediate(1));
  __ Drop(1);                // Pop the saved capacity mask.
  __ ret(2 * kPointerSize);  // Pop hash and key arguments.

  __ bind(&not_in_dictionary);
  __ mov(result_, Immediate(0));
  __ Drop(1);                // Pop the saved capacity mask.
  __ ret(2 * kPointerSize);  // Pop hash and key arguments.
}
6526
6527
6528#undef __
6529
6530} }  // namespace v8::internal
6531
6532#endif  // V8_TARGET_ARCH_IA32
6533