macro-assembler-x64.h revision 8b112d2025046f85ef7f6be087c6129c872ebad2
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "assembler.h"

namespace v8 {
namespace internal {

// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
  // No special flags.
  NO_ALLOCATION_FLAGS = 0,
  // Return the pointer to the allocated object already tagged as a heap
  // object.
  TAG_OBJECT = 1 << 0,
  // The content of the result register already contains the allocation top in
  // new space.
  RESULT_CONTAINS_TOP = 1 << 1
};
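
// The flags are bit values and may be combined. A sketch of a typical
// allocation call (the size constant and register choices here are
// illustrative, not prescriptive):
//   AllocateInNewSpace(HeapNumber::kSize, rax, rbx, no_reg, &gc_required,
//                      static_cast<AllocationFlags>(TAG_OBJECT |
//                                                   RESULT_CONTAINS_TOP));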

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee-save and is not used by the
// function calling convention.
static const Register kScratchRegister = { 10 };      // r10.
static const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
static const Register kRootRegister = { 13 };         // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
// The actual value of the root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
static const int kRootRegisterBias = 128;
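
// With the bias, roots near the start of the array can be reached with a
// short negative displacement. A sketch of what a root load effectively
// computes (see LoadRoot in macro-assembler-x64.cc for the authoritative
// code):
//   movq(destination,
//        Operand(kRootRegister,
//                (index << kPointerSizeLog2) - kRootRegisterBias));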

// Convenience for platform-independent signatures.
typedef Operand MemOperand;

// Forward declarations.
class JumpTarget;
class CallWrapper;

struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;
  ScaleFactor scale;
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such functions on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Prevent the use of the RootArray during the lifetime of this
  // scope object.
  class NoRootArrayScope BASE_EMBEDDED {
   public:
    explicit NoRootArrayScope(MacroAssembler* assembler)
        : variable_(&assembler->root_array_available_),
          old_value_(assembler->root_array_available_) {
      assembler->root_array_available_ = false;
    }
    ~NoRootArrayScope() {
      *variable_ = old_value_;
    }
   private:
    bool* variable_;
    bool old_value_;
  };
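
  // Usage sketch for the scope (the surrounding code is illustrative):
  //   { NoRootArrayScope no_roots(masm);
  //     // Code generated here must not rely on kRootRegister.
  //   }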

  // Operand pointing to an external reference.
  // May emit code to set up the scratch register. The operand is
  // only guaranteed to be correct as long as the scratch register
  // isn't changed.
  // If the operand is used more than once, use a scratch register
  // that is guaranteed not to be clobbered.
  Operand ExternalOperand(ExternalReference reference,
                          Register scratch = kScratchRegister);
  // Loads and stores the value of an external reference.
  // Special case code for load and store to take advantage of
  // load_rax/store_rax if possible/necessary.
  // For other operations, just use:
  //   Operand operand = ExternalOperand(extref);
  //   operation(operand, ..);
  void Load(Register destination, ExternalReference source);
  void Store(ExternalReference destination, Register source);
  // Loads the address of the external reference into the destination
  // register.
  void LoadAddress(Register destination, ExternalReference source);
  // Returns the size of the code generated by LoadAddress.
  // Used by CallSize(ExternalReference) to find the size of a call.
  int LoadAddressSize(ExternalReference source);

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void StoreRoot(Register source, Heap::RootListIndex index);
  // Load a root value where the index (or part of it) is variable.
  // The variable_offset register is added to the fixed_offset value
  // to get the index into the root-array.
  void LoadRootIndexed(Register destination,
                       Register variable_offset,
                       int fixed_offset);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // ---------------------------------------------------------------------------
  // GC Support

  // For page containing |object| mark region covering |addr| dirty.
  // RecordWriteHelper only works if the object is not in new
  // space.
  void RecordWriteHelper(Register object,
                         Register addr,
                         Register scratch);

  // Check if object is in new space. The condition cc can be equal or
  // not_equal. If it is equal, a jump will be done if the object is in new
  // space. The register scratch can be the object itself, but it will be
  // clobbered.
  template <typename LabelType>
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  LabelType* branch);

  // For page containing |object| mark region covering [object+offset]
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. If |offset| is zero, then the |scratch|
  // register contains the array index into the elements array
  // represented as an untagged 32-bit integer. All registers are
  // clobbered by the operation. RecordWrite filters out smis so it
  // does not update the write barrier if the value is a smi.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

  // For page containing |object| mark region covering [address]
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. All registers are clobbered by the
  // operation.  RecordWrite filters out smis so it does not update
  // the write barrier if the value is a smi.
  void RecordWrite(Register object,
                   Register address,
                   Register value);

  // For page containing |object| mark region covering [object+offset] dirty.
  // The value is known to not be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as an untagged 32-bit integer.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();
#endif

  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  //
  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);

  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
  // memory (not GCed) on the stack accessible via StackSpaceOperand.
  void EnterApiExitFrame(int arg_stack_space);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(bool save_doubles = false);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax (untouched).
  void LeaveApiExitFrame();

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { Pushad(); }
  void PopSafepointRegisters() { Popad(); }
  // Store the value in register src in the safepoint register stack
  // slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  void InitializeRootRegister() {
    ExternalReference roots_address =
        ExternalReference::roots_address(isolate());
    movq(kRootRegister, roots_address);
    addq(kRootRegister, Immediate(kRootRegisterBias));
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  CallWrapper* call_wrapper = NULL);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag,
                  CallWrapper* call_wrapper = NULL);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      CallWrapper* call_wrapper = NULL);

  void InvokeFunction(JSFunction* function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      CallWrapper* call_wrapper = NULL);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     CallWrapper* call_wrapper = NULL);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  void InitializeSmiConstantRegister() {
    movq(kSmiConstantRegister,
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
         RelocInfo::NONE);
  }

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
  // based on the value of the resulting smi.
  void Integer32ToSmi(Register dst, Register src);

  // Stores an integer32 value into a memory field that already holds a smi.
  void Integer32ToSmiField(const Operand& dst, Register src);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);
  void SmiToInteger32(Register dst, const Operand& src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);
  void SmiToInteger64(Register dst, const Operand& src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Divide a positive smi's integer value by a power of two.
  // Provides result as 32-bit integer value.
  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                           Register src,
                                           int power);

  // Perform the logical or of two smi values and return a smi value.
  // If either argument is not a smi, jump to on_not_smis and retain
  // the original values of source registers. The destination register
  // may be changed if it's not one of the source registers.
  template <typename LabelType>
  void SmiOrIfSmis(Register dst,
                   Register src1,
                   Register src2,
                   LabelType* on_not_smis);


  // Simple comparison of smis.  Both sides must be known smis to use these,
  // otherwise use Cmp.
  void SmiCompare(Register smi1, Register smi2);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(Register dst, const Operand& src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Compare the int32 in src register to the value of the smi stored at dst.
  void SmiCompareInteger32(const Operand& dst, Register src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. They return
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);
  Condition CheckSmi(const Operand& src);

  // Is the value a non-negative tagged smi.
  Condition CheckNonNegativeSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Are both values non-negative tagged smis.
  Condition CheckBothNonNegativeSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first,
                           Register second,
                           Register scratch = kScratchRegister);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Check whether src is a Smi, and set dst to zero if it is a smi,
  // and to one if it isn't.
  void CheckSmiToIndicator(Register dst, Register src);
  void CheckSmiToIndicator(Register dst, const Operand& src);

  // Test-and-jump functions. Typically combine a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  template <typename LabelType>
  void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);

  // Jump if the unsigned integer value cannot be represented by a smi.
  template <typename LabelType>
  void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);

  // Jump to label if the value is a tagged smi.
  template <typename LabelType>
  void JumpIfSmi(Register src, LabelType* on_smi);

  // Jump to label if the value is not a tagged smi.
  template <typename LabelType>
  void JumpIfNotSmi(Register src, LabelType* on_not_smi);

  // Jump to label if the value is not a non-negative tagged smi.
  template <typename LabelType>
  void JumpUnlessNonNegativeSmi(Register src, LabelType* on_not_smi);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  template <typename LabelType>
  void JumpIfSmiEqualsConstant(Register src,
                               Smi* constant,
                               LabelType* on_equals);

  // Jump if either or both registers are not smi values.
  template <typename LabelType>
  void JumpIfNotBothSmi(Register src1,
                        Register src2,
                        LabelType* on_not_both_smi);

  // Jump if either or both registers are not non-negative smi values.
  template <typename LabelType>
  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
                                    LabelType* on_not_both_smi);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  template <typename LabelType>
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         LabelType* on_not_smi_result);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(const Operand& dst, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  template <typename LabelType>
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      LabelType* on_not_smi_result);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done. Sets the N and Z flags
  // based on the value of the resulting integer.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  template <typename LabelType>
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      LabelType* on_not_smi_result);

  // Negating a smi can give a negative zero or too large positive value.
  // NOTICE: This operation jumps on success, not failure!
  template <typename LabelType>
  void SmiNeg(Register dst,
              Register src,
              LabelType* on_smi_result);

  // Adds smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  template <typename LabelType>
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              LabelType* on_not_smi_result);
  template <typename LabelType>
  void SmiAdd(Register dst,
              Register src1,
              const Operand& src2,
              LabelType* on_not_smi_result);

  void SmiAdd(Register dst,
              Register src1,
              Register src2);

  // Subtracts smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  template <typename LabelType>
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              LabelType* on_not_smi_result);

  void SmiSub(Register dst,
              Register src1,
              Register src2);

  template <typename LabelType>
  void SmiSub(Register dst,
              Register src1,
              const Operand& src2,
              LabelType* on_not_smi_result);

  void SmiSub(Register dst,
              Register src1,
              const Operand& src2);

  // Multiplies smi values and returns the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  template <typename LabelType>
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              LabelType* on_not_smi_result);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  template <typename LabelType>
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              LabelType* on_not_smi_result);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  template <typename LabelType>
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              LabelType* on_not_smi_result);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value);
  template <typename LabelType>
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    LabelType* on_not_smi_result);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  template <typename LabelType>
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            LabelType* on_not_smi_result);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  template <typename LabelType>
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    LabelType* on_not_smis);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
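
  // A sketch of scaled indexing with a SmiToIndex result (the register
  // choices are illustrative):
  //   SmiIndex index = SmiToIndex(rbx, rax, kPointerSizeLog2);
  //   movq(rcx, FieldOperand(rdx, index.reg, index.scale,
  //                          FixedArray::kHeaderSize));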

  // Add the value of a smi in memory to an int32 register.
  // Sets flags as a normal add.
  void AddSmiField(Register dst, const Operand& src);

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }

  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movq(dst, constant);
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);

  // ---------------------------------------------------------------------------
  // String macros.

  // If object is a string, its map is loaded into object_map.
  template <typename LabelType>
  void JumpIfNotString(Register object,
                       Register object_map,
                       LabelType* not_string);


  template <typename LabelType>
  void JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                           Register second_object,
                                           Register scratch1,
                                           Register scratch2,
                                           LabelType* on_not_both_flat_ascii);

  // Check whether the instance type represents a flat ascii string. Jump
  // to the label if not. If the instance type can be scratched, specify the
  // same register for both instance type and scratch.
  template <typename LabelType>
  void JumpIfInstanceTypeIsNotSequentialAscii(
      Register instance_type,
      Register scratch,
      LabelType* on_not_flat_ascii_string);

  template <typename LabelType>
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      LabelType* on_fail);

  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Smi* src);
  void Cmp(const Operand& dst, Smi* src);
  void Push(Handle<Object> source);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);

  void Call(Label* target) { call(target); }

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object, RelocInfo::Mode rmode);

  // The size of the code generated for different call instructions.
  int CallSize(Address destination, RelocInfo::Mode rmode) {
    return kCallInstructionLength;
  }
  int CallSize(ExternalReference ext);
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
  int CallSize(Register target) {
    // Opcode: REX_opt FF /2 m64
    return (target.high_bit() != 0) ? 3 : 2;
  }
  int CallSize(const Operand& target) {
    // Opcode: REX_opt FF /2 m64
    return (target.requires_rex() ? 2 : 1) + target.operand_size();
  }

  // Emit call to the code we are currently generating.
  void CallSelf() {
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    Call(self, RelocInfo::CODE_TARGET);
  }

  // Non-x64 instructions.
  // Push/pop all general purpose registers.
  // Does not push rsp/rbp nor any of the assembler's special purpose registers
  // (kScratchRegister, kSmiConstantRegister, kRootRegister).
  void Pushad();
  void Popad();
  // Sets the stack as after performing Popad, without actually loading the
  // registers.
  void Dropad();

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if the map of an object is equal to a specified map and
  // branch to label if not. Skip the smi check if not required
  // (object is known to be a heap object).
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                bool is_heap_object);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // FCmp compares and pops the two values on top of the FPU stack.
  // The flag results are similar to integer cmp, but require unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  // Abort execution if argument is not a number. Used in debug code.
  void AbortIfNotNumber(Register object);

  // Abort execution if argument is a smi. Used in debug code.
  void AbortIfSmi(Register object);

  // Abort execution if argument is not a smi. Used in debug code.
  void AbortIfNotSmi(Register object);
  void AbortIfNotSmi(const Operand& object);

  // Abort execution if argument is not a string. Used in debug code.
  void AbortIfNotString(Register object);

  // Abort execution if argument is not the root value with the given index.
  void AbortIfNotRootValue(Register src,
                           Heap::RootListIndex root_value_index,
                           const char* message);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain.  The return
  // address must be pushed before calling this helper.
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // Activate the top handler in the try handler chain and pass the
  // thrown value.
  void Throw(Register value);

  // Propagate an uncatchable exception out of the current JS stack.
  void ThrowUncatchable(UncatchableExceptionType type, Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. If the new space is exhausted control
  // continues at the gc_required label. The allocated object is returned in
  // result and the end of the new object is returned in result_end. The
  // register scratch can be passed as no_reg in which case an additional
  // object reference will be added to the reloc info. The returned pointers
  // in result and result_end have not yet been tagged as heap objects. If
  // the flags include RESULT_CONTAINS_TOP, the content of result is known to
  // be the allocation top on entry (it could be result_end from a previous
  // call to AllocateInNewSpace), and scratch should be no_reg as it is never
  // used in that case.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(int header_size,
                          ScaleFactor element_size,
                          Register element_count,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register result_end,
                          Register scratch,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);

  // Allocate a heap number in new space with undefined value. Returns
  // tagged pointer in result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateConsString(Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub);

  // Call a code stub and return the code object called.  Try to generate
  // the code if necessary.  Do not perform a GC but instead return a retry
  // after GC failure.
  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Tail call a code stub (jump) and return the code object called.  Try to
  // generate the code if necessary.  Do not perform a GC but instead return
  // a retry after GC failure.
  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments);

  // Call a runtime function and save the value of XMM registers.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id);

  // Call a runtime function, returning the CodeStub object called.
  // Try to generate the stub code if necessary.  Do not perform a GC
  // but instead return a retry after GC failure.
  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
                                              int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments);

  // Convenience function: Same as above, but takes the fid instead.
  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
                                              int num_arguments);

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
      const ExternalReference& ext, int num_arguments, int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  MUST_USE_RESULT MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
                                                  int num_arguments,
                                                  int result_size);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext, int result_size);

  // Jump to a runtime routine.
  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
                                          int result_size);

  // Prepares stack to put arguments (aligns and so on).
  // The Win64 calling convention requires putting the pointer to the return
  // value slot into rcx (rcx must be preserved until
  // TryCallApiFunctionAndReturn).
  // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
  // inside the exit frame (not GCed) accessible via StackSpaceOperand.
  void PrepareCallApiFunction(int arg_stack_space);

  // Calls an API function. Allocates HandleScope, extracts
  // returned value from handle and propagates exceptions.
  // Clobbers r14, r15, rbx and caller-save registers. Restores context.
  // On return removes stack_space * kPointerSize (GCed).
  MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
      ApiFunction* function, int stack_space);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // on the stack.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Calculate the number of stack slots to reserve for arguments when calling
  // a C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);
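
  // A sketch of a complete C call through these helpers. Which registers
  // carry the arguments depends on the target ABI (rdi and rsi are the
  // first two on non-Windows x64); some_c_function is a placeholder name:
  //   PrepareCallCFunction(2);
  //   movq(rdi, first_argument);
  //   movq(rsi, second_argument);
  //   CallCFunction(ExternalReference::some_c_function(isolate()), 2);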

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }

  // Copy length bytes from source to destination.
  // Uses a scratch register internally (if a low-eight register is free,
  // pass it; otherwise kScratchRegister will be used).
  // min_length is a lower bound on the value that length will have.
  // The algorithm has some special cases that might be omitted if the string
  // is known to always be long.
  void CopyBytes(Register destination,
                 Register source,
                 Register length,
                 int min_length = 0,
                 Register scratch = kScratchRegister);


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, const char* msg);

  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

 private:
  // Order general registers are pushed by Pushad:
  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
  static int kSafepointPushRegisterIndices[Register::kNumRegisters];
  static const int kNumSafepointSavedRegisters = 11;

  bool generating_stub_;
  bool allow_stub_calls_;
  bool root_array_available_;

  // Returns a register holding the smi value. The register MUST NOT be
  // modified. It may be the "smi 1 constant" register.
  Register GetSmiConstant(Smi* value);

  // Moves the smi value to the destination register.
  void LoadSmiConstant(Register dst, Smi* value);

  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  template <typename LabelType>
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      LabelType* done,
                      InvokeFlag flag,
                      CallWrapper* call_wrapper);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  void EnterExitFramePrologue(bool save_rax);

  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);

  void LeaveExitFrameEpilogue();

  // Allocation support helpers.
  // Loads the top of new-space into the result register.
  // If scratch is valid, the address of the new-space top is loaded into
  // scratch as well.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);
  // Update allocation top with value in result_end register.
  // If scratch is valid, it contains the address of the allocation top.
  void UpdateAllocationTopHelper(Register result_end, Register scratch);

  // Helper for PopHandleScope.  Allowed to perform a GC and returns
  // NULL if gc_allowed.  Does not perform a GC if !gc_allowed, and
  // possibly returns a failure object indicating an allocation failure.
  Object* PopHandleScopeHelper(Register saved,
                               Register scratch,
                               bool gc_allowed);


  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code) {
    return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
  }

  // Needs access to SafepointRegisterStackIndex for optimized frame
  // traversal.
  friend class OptimizedFrame;
};


// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated it
// causes an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};


// Helper class for generating code or data associated with the code
// right before or after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
class CallWrapper {
 public:
  CallWrapper() { }
  virtual ~CallWrapper() { }
  // Called just before emitting a call. Argument is the size of the generated
  // call code.
  virtual void BeforeCall(int call_size) = 0;
  // Called just after emitting a call, i.e., at the return site for the call.
  virtual void AfterCall() = 0;
};
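
// A minimal sketch of a concrete wrapper (hypothetical, for illustration):
//   class SafepointingCallWrapper : public CallWrapper {
//    public:
//     virtual void BeforeCall(int call_size) { /* note the call size */ }
//     virtual void AfterCall() { /* record a safepoint at the return site */ }
//   };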


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
static inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}


// Generate an Operand for loading an indexed field from an object.
static inline Operand FieldOperand(Register object,
                                   Register index,
                                   ScaleFactor scale,
                                   int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}
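
// Sketches of typical field accesses built on FieldOperand (the offsets are
// real V8 constants, the register choices are illustrative):
//   movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
//   movq(rcx, FieldOperand(rax, rdx, times_pointer_size,
//                          FixedArray::kHeaderSize));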


static inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}


static inline Operand GlobalObjectOperand() {
  return ContextOperand(rsi, Context::GLOBAL_INDEX);
}


// Provides access to exit frame stack space (not GCed).
static inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  return Operand(rsp, (index + kShadowSpace) * kPointerSize);
#else
  return Operand(rsp, index * kPointerSize);
#endif
}
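
// Sketch: after EnterExitFrame(2), the two reserved (non-GCed) slots can be
// addressed directly:
//   movq(StackSpaceOperand(0), rax);
//   movq(StackSpaceOperand(1), rbx);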



#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) {                                               \
    byte* x64_coverage_function =                                         \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd();                                                       \
    masm->pushad();                                                       \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY);          \
    masm->pop(rax);                                                       \
    masm->popad();                                                        \
    masm->popfd();                                                        \
  }                                                                       \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
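
// Code generators conventionally abbreviate the macro, e.g.:
//   #define __ ACCESS_MASM(masm_)
//   __ movq(rax, rbx);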

// -----------------------------------------------------------------------------
// Template implementations.

static int kSmiShift = kSmiTagSize + kSmiShiftSize;
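// On x64, kSmiTagSize is 1 and kSmiShiftSize is 31, so kSmiShift is 32 and
// a smi stores its integer value n as n << 32; e.g. Smi::FromInt(1) is the
// 64-bit value 0x0000000100000000.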
1273
1274
1275template <typename LabelType>
1276void MacroAssembler::SmiNeg(Register dst,
1277                            Register src,
1278                            LabelType* on_smi_result) {
1279  if (dst.is(src)) {
1280    ASSERT(!dst.is(kScratchRegister));
1281    movq(kScratchRegister, src);
1282    neg(dst);  // Low 32 bits are retained as zero by negation.
1283    // Test if result is zero or Smi::kMinValue.
1284    cmpq(dst, kScratchRegister);
1285    j(not_equal, on_smi_result);
1286    movq(src, kScratchRegister);
1287  } else {
1288    movq(dst, src);
1289    neg(dst);
1290    cmpq(dst, src);
1291    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
1292    j(not_equal, on_smi_result);
1293  }
1294}


template <typename LabelType>
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            LabelType* on_not_smi_result) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}


template <typename LabelType>
void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            LabelType* on_not_smi_result) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src1);
    addq(kScratchRegister, src2);
    j(overflow, on_not_smi_result);
    movq(dst, kScratchRegister);
  } else {
    ASSERT(!src2.AddressUsesRegister(dst));
    movq(dst, src1);
    addq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}


template <typename LabelType>
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            LabelType* on_not_smi_result) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  if (dst.is(src1)) {
    cmpq(dst, src2);
    j(overflow, on_not_smi_result);
    subq(dst, src2);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}
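
// Illustrative note: in the dst.is(src1) case above, cmpq serves as a
// flag-only subtraction. It performs the same ALU operation as subq and sets
// the overflow flag identically, but leaves dst unmodified, so both inputs
// are still intact if control transfers to on_not_smi_result.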


template <typename LabelType>
void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            LabelType* on_not_smi_result) {
  ASSERT_NOT_NULL(on_not_smi_result);
  if (dst.is(src1)) {
    movq(kScratchRegister, src2);
    cmpq(src1, kScratchRegister);
    j(overflow, on_not_smi_result);
    subq(src1, kScratchRegister);
  } else {
    movq(dst, src1);
    subq(dst, src2);
    j(overflow, on_not_smi_result);
  }
}


template <typename LabelType>
void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            LabelType* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    NearLabel failure, zero_correct_result;
    movq(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, &failure);

    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    NearLabel correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result);

    movq(dst, kScratchRegister);
    xor_(dst, src2);
    j(positive, &zero_correct_result);  // Result was positive zero.

    bind(&failure);  // Reused failure exit, restores src1.
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, on_not_smi_result);
    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    NearLabel correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result);
    // One of src1 and src2 is zero, so check whether the other one is
    // negative.
    movq(kScratchRegister, src1);
    xor_(kScratchRegister, src2);
    j(negative, on_not_smi_result);
    bind(&correct_result);
  }
}
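
// Illustrative note: a smi cannot represent JavaScript's -0, so a zero
// product needs extra care. The sign bit of src1 ^ src2 tells whether the
// operands had opposite signs; if they did (e.g. -5 * 0), the correct result
// is the heap number -0.0 and control goes to on_not_smi_result.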


template <typename LabelType>
void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       Smi* constant,
                                       LabelType* on_not_smi_result) {
  // Does not assume that src is a smi.
  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
  ASSERT_EQ(0, kSmiTag);
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));

  JumpIfNotSmi(src, on_not_smi_result);
  Register tmp = (dst.is(src) ? kScratchRegister : dst);
  LoadSmiConstant(tmp, constant);
  addq(tmp, src);
  j(overflow, on_not_smi_result);
  if (dst.is(src)) {
    movq(dst, tmp);
  }
}


template <typename LabelType>
void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    LabelType* on_not_smi_result) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));

    LoadSmiConstant(kScratchRegister, constant);
    addq(kScratchRegister, src);
    j(overflow, on_not_smi_result);
    movq(dst, kScratchRegister);
  } else {
    LoadSmiConstant(dst, constant);
    addq(dst, src);
    j(overflow, on_not_smi_result);
  }
}


template <typename LabelType>
void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    LabelType* on_not_smi_result) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test for non-negativity before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result);
      LoadSmiConstant(kScratchRegister, constant);
      subq(dst, kScratchRegister);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
      addq(kScratchRegister, dst);
      j(overflow, on_not_smi_result);
      movq(dst, kScratchRegister);
    }
  } else {
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test for non-negativity before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result);
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; it only
      // differs in the overflow flag, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addq(dst, src);
      j(overflow, on_not_smi_result);
    }
  }
}
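
// Illustrative note (assuming 32-bit smi payloads): Smi::kMinValue is special
// because -kMinValue overflows back to kMinValue, so "subtract by adding the
// negation" is unusable for it and it gets a dedicated sign check instead.
// For any other constant c, dst - c == dst + (-c) with identical overflow
// behaviour.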


template <typename LabelType>
void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            LabelType* on_not_smi_result) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testq(src2, src2);
  j(zero, on_not_smi_result);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with a negative zero test (negative zero only happens
  // when dividing zero by a negative number).

  // We overshoot a little and go to the slow case if we divide min-value
  // by any negative value, not just -1.
  NearLabel safe_div;
  testl(rax, Immediate(0x7fffffff));
  j(not_zero, &safe_div);
  testq(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result);
  } else {
    j(negative, on_not_smi_result);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    NearLabel smi_result;
    j(zero, &smi_result);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}
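
// Illustrative note: idivl raises a hardware #DE exception both for division
// by zero and for Smi::kMinValue / -1, whose quotient 2^31 does not fit in a
// signed 32-bit register, so both cases are filtered out before idivl runs.
// A division with a nonzero remainder (e.g. 7 / 2) is also rejected, since
// the exact result would not be a smi.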


template <typename LabelType>
void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            LabelType* on_not_smi_result) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));
  ASSERT(!src1.is(src2));

  testq(src2, src2);
  j(zero, on_not_smi_result);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  NearLabel safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div);
  // Retag inputs and go to the slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  jmp(on_not_smi_result);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  // Check for a negative zero result.  If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  NearLabel smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result);
  testq(src1, src1);
  j(negative, on_not_smi_result);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}
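
// Illustrative note: in JavaScript the remainder takes the sign of the
// dividend, so e.g. -4 % 2 must produce -0 rather than 0. A smi cannot
// represent -0, which is why a zero remainder from a negative dividend is
// sent to on_not_smi_result.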


template <typename LabelType>
void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movq(dst, src);
    if (shift_value == 0) {
      // Shifting by zero keeps the value, but a negative smi would be
      // reinterpreted as a large unsigned number, which is not a valid smi.
      testq(dst, dst);
      j(negative, on_not_smi_result);
    }
    shr(dst, Immediate(shift_value + kSmiShift));
    shl(dst, Immediate(kSmiShift));
  }
}


template <typename LabelType>
void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          LabelType* on_not_smi_result) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  // dst and src1 can be the same, because the one case that bails out
  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  orl(rcx, Immediate(kSmiShift));
  shr_cl(dst);  // Effective shift count is (rcx & 0x1f) + 32.
  shl(dst, Immediate(kSmiShift));
  testq(dst, dst);
  if (src1.is(rcx) || src2.is(rcx)) {
    NearLabel positive_result;
    j(positive, &positive_result);
    if (src1.is(rcx)) {
      movq(src1, kScratchRegister);
    } else {
      movq(src2, kScratchRegister);
    }
    jmp(on_not_smi_result);
    bind(&positive_result);
  } else {
    j(negative, on_not_smi_result);  // src2 was zero and src1 negative.
  }
}
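
// Illustrative note: or'ing kSmiShift (32) into rcx folds the untagging into
// the shift itself. shr_cl on a 64-bit operand masks its count to six bits,
// so the effective count is (count & 0x1f) + 32: the "+ 32" strips the smi
// tag bits while (count & 0x1f) matches JavaScript's mod-32 shift semantics,
// and the final shl(kSmiShift) retags the result.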


template <typename LabelType>
void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  LabelType* on_not_smis) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // The operands must not both be smis (at least one must be a heap object).
#ifdef DEBUG
  if (allow_stub_calls()) {  // Check contains a stub call.
    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
  }
#endif
  ASSERT_EQ(0, kSmiTag);
  ASSERT_EQ(0, Smi::FromInt(0));
  movl(kScratchRegister, Immediate(kSmiTagMask));
  and_(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  // The test is non-zero only if the tag bit is set in both operands, i.e.,
  // if neither operand is a smi.
  j(not_zero, on_not_smis);

  // Exactly one operand is a smi.
  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or
  // one.
  subq(kScratchRegister, Immediate(1));
  // If src1 is a smi, the scratch register is now all 1s; otherwise it is
  // all 0s.
  movq(dst, src1);
  xor_(dst, src2);
  and_(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2; otherwise it is zero.
  xor_(dst, src1);
  // If src1 is a smi, dst is now src2; otherwise it is src1, i.e., the non-smi.
}
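
// Illustrative walkthrough of the branch-free select above: if src1 is the
// smi, kScratchRegister becomes 0 - 1 = ~0 (all ones), so
//   dst = ((src1 ^ src2) & ~0) ^ src1 == src2;
// if src2 is the smi, kScratchRegister becomes 1 - 1 = 0, so
//   dst = ((src1 ^ src2) & 0) ^ src1 == src1.
// Either way dst receives the operand that is not a smi.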


template <typename LabelType>
void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
  ASSERT_EQ(0, kSmiTag);
  Condition smi = CheckSmi(src);
  j(smi, on_smi);
}


template <typename LabelType>
void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
  Condition smi = CheckSmi(src);
  j(NegateCondition(smi), on_not_smi);
}


template <typename LabelType>
void MacroAssembler::JumpUnlessNonNegativeSmi(
    Register src, LabelType* on_not_smi_or_negative) {
  Condition non_negative_smi = CheckNonNegativeSmi(src);
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative);
}


template <typename LabelType>
void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             Smi* constant,
                                             LabelType* on_equals) {
  SmiCompare(src, constant);
  j(equal, on_equals);
}


template <typename LabelType>
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                            LabelType* on_invalid) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid);
}


template <typename LabelType>
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                LabelType* on_invalid) {
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
  j(NegateCondition(is_valid), on_invalid);
}


template <typename LabelType>
void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      LabelType* on_not_both_smi) {
  Condition both_smi = CheckBothSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi);
}


template <typename LabelType>
void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
                                                  Register src2,
                                                  LabelType* on_not_both_smi) {
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
  j(NegateCondition(both_smi), on_not_both_smi);
}


template <typename LabelType>
void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                 LabelType* on_not_smis) {
  if (dst.is(src1) || dst.is(src2)) {
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    movq(kScratchRegister, src1);
    or_(kScratchRegister, src2);
    JumpIfNotSmi(kScratchRegister, on_not_smis);
    movq(dst, kScratchRegister);
  } else {
    movq(dst, src1);
    or_(dst, src2);
    JumpIfNotSmi(dst, on_not_smis);
  }
}


template <typename LabelType>
void MacroAssembler::JumpIfNotString(Register object,
                                     Register object_map,
                                     LabelType* not_string) {
  Condition is_smi = CheckSmi(object);
  j(is_smi, not_string);
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
  j(above_equal, not_string);
}


template <typename LabelType>
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
                                                         Register second_object,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         LabelType* on_fail) {
  // Check that neither object is a smi.
  Condition either_smi = CheckEitherSmi(first_object, second_object);
  j(either_smi, on_fail);

  // Load instance type for both strings.
  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));

  // Check that both are flat ascii strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail);
}
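
// Illustrative note: lea with a times_8 scale computes
// scratch1 + scratch2 * 8, i.e. it shifts scratch2's masked bits up by three
// and merges them with scratch1's. The ASSERT_EQ above guarantees the mask
// does not collide with itself when shifted by three, so the addition is an
// exact bit-interleave and a single cmpl validates both instance types at
// once.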


template <typename LabelType>
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
    Register instance_type,
    Register scratch,
    LabelType* failure) {
  if (!scratch.is(instance_type)) {
    movl(scratch, instance_type);
  }

  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;

  andl(scratch, Immediate(kFlatAsciiStringMask));
  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
  j(not_equal, failure);
}


template <typename LabelType>
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first_object_instance_type,
    Register second_object_instance_type,
    Register scratch1,
    Register scratch2,
    LabelType* on_fail) {
  // Load instance type for both strings.
  movq(scratch1, first_object_instance_type);
  movq(scratch2, second_object_instance_type);

  // Check that both are flat ascii strings.
  ASSERT(kNotStringTag != 0);
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;

  andl(scratch1, Immediate(kFlatAsciiStringMask));
  andl(scratch2, Immediate(kFlatAsciiStringMask));
  // Interleave the bits to check both scratch1 and scratch2 in one test.
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
  cmpl(scratch1,
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
  j(not_equal, on_fail);
}


template <typename LabelType>
void MacroAssembler::InNewSpace(Register object,
                                Register scratch,
                                Condition cc,
                                LabelType* branch) {
  if (Serializer::enabled()) {
    // Can't do arithmetic on external references if it might get serialized.
    // The mask isn't really an address.  We load it as an external reference in
    // case the size of the new space is different between the snapshot maker
    // and the running system.
    if (scratch.is(object)) {
      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
      and_(scratch, kScratchRegister);
    } else {
      movq(scratch, ExternalReference::new_space_mask(isolate()));
      and_(scratch, object);
    }
    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
    cmpq(scratch, kScratchRegister);
    j(cc, branch);
  } else {
    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
    intptr_t new_space_start =
        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
    if (scratch.is(object)) {
      addq(scratch, kScratchRegister);
    } else {
      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
    }
    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
    j(cc, branch);
  }
}
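
// Illustrative note (assuming new space occupies a naturally aligned,
// power-of-two sized region): "object is in new space" is the test
//   (object & mask) == start,   with mask == ~(size - 1).
// The non-serializer path rewrites this as ((object - start) & mask) == 0,
// which folds start into a single 64-bit immediate (-start) and lets the
// and_ instruction itself set the flags that j(cc, branch) consumes.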


template <typename LabelType>
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_register,
                                    LabelType* done,
                                    InvokeFlag flag,
                                    CallWrapper* call_wrapper) {
  bool definitely_matches = false;
  NearLabel invoke;
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      Set(rax, actual.immediate());
      if (expected.immediate() ==
              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        Set(rbx, expected.immediate());
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in a register, actual is an immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      cmpq(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke);
      ASSERT(expected.reg().is(rbx));
      Set(rax, actual.immediate());
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpq(expected.reg(), actual.reg());
      j(equal, &invoke);
      ASSERT(actual.reg().is(rax));
      ASSERT(expected.reg().is(rbx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (!code_constant.is_null()) {
      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_register.is(rdx)) {
      movq(rdx, code_register);
    }

    if (flag == CALL_FUNCTION) {
      if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(adaptor));
      Call(adaptor, RelocInfo::CODE_TARGET);
      if (call_wrapper != NULL) call_wrapper->AfterCall();
      jmp(done);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}
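
// Illustrative note: when the counts might differ, the prologue sets up the
// register contract that the ArgumentsAdaptorTrampoline expects: rax holds
// the actual argument count, rbx the expected count (see the ASSERTs above),
// and rdx the code object to invoke after adaptation.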


} }  // namespace v8::internal

#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_