1// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
6#define V8_X64_MACRO_ASSEMBLER_X64_H_
7
8#include "src/assembler.h"
9#include "src/bailout-reason.h"
10#include "src/base/flags.h"
11#include "src/frames.h"
12#include "src/globals.h"
13#include "src/x64/assembler-x64.h"
14#include "src/x64/frames-x64.h"
15
16namespace v8 {
17namespace internal {
18
19// Give alias names to registers for calling conventions.
20const Register kReturnRegister0 = {Register::kCode_rax};
21const Register kReturnRegister1 = {Register::kCode_rdx};
22const Register kReturnRegister2 = {Register::kCode_r8};
23const Register kJSFunctionRegister = {Register::kCode_rdi};
24const Register kContextRegister = {Register::kCode_rsi};
25const Register kAllocateSizeRegister = {Register::kCode_rdx};
26const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
27const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
28const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
29const Register kInterpreterDispatchTableRegister = {Register::kCode_r15};
30const Register kJavaScriptCallArgCountRegister = {Register::kCode_rax};
31const Register kJavaScriptCallNewTargetRegister = {Register::kCode_rdx};
32const Register kRuntimeCallFunctionRegister = {Register::kCode_rbx};
33const Register kRuntimeCallArgCountRegister = {Register::kCode_rax};
34
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
const Register kScratchRegister = {10};      // r10.
const XMMRegister kScratchDoubleReg = {15};  // xmm15.
const Register kRootRegister = {13};         // r13 (callee save).
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
const int kRootRegisterBias = 128;
44
45// Convenience for platform-independent signatures.
46typedef Operand MemOperand;
47
48enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
49enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
50enum PointersToHereCheck {
51  kPointersToHereMaybeInteresting,
52  kPointersToHereAreAlwaysInteresting
53};
54
55enum class SmiOperationConstraint {
56  kPreserveSourceRegister = 1 << 0,
57  kBailoutOnNoOverflow = 1 << 1,
58  kBailoutOnOverflow = 1 << 2
59};
60
61enum class ReturnAddressState { kOnStack, kNotOnStack };
62
63typedef base::Flags<SmiOperationConstraint> SmiOperationConstraints;
64
65DEFINE_OPERATORS_FOR_FLAGS(SmiOperationConstraints)
66
67#ifdef DEBUG
68bool AreAliased(Register reg1,
69                Register reg2,
70                Register reg3 = no_reg,
71                Register reg4 = no_reg,
72                Register reg5 = no_reg,
73                Register reg6 = no_reg,
74                Register reg7 = no_reg,
75                Register reg8 = no_reg);
76#endif
77
78// Forward declaration.
79class JumpTarget;
80
81struct SmiIndex {
82  SmiIndex(Register index_register, ScaleFactor scale)
83      : reg(index_register),
84        scale(scale) {}
85  Register reg;
86  ScaleFactor scale;
87};
88
89
90// MacroAssembler implements a collection of frequently used macros.
91class MacroAssembler: public Assembler {
92 public:
93  MacroAssembler(Isolate* isolate, void* buffer, int size,
94                 CodeObjectRequired create_code_object);
95
96  // Prevent the use of the RootArray during the lifetime of this
97  // scope object.
98  class NoRootArrayScope BASE_EMBEDDED {
99   public:
100    explicit NoRootArrayScope(MacroAssembler* assembler)
101        : variable_(&assembler->root_array_available_),
102          old_value_(assembler->root_array_available_) {
103      assembler->root_array_available_ = false;
104    }
105    ~NoRootArrayScope() {
106      *variable_ = old_value_;
107    }
108   private:
109    bool* variable_;
110    bool old_value_;
111  };
112
113  // Operand pointing to an external reference.
114  // May emit code to set up the scratch register. The operand is
115  // only guaranteed to be correct as long as the scratch register
116  // isn't changed.
117  // If the operand is used more than once, use a scratch register
118  // that is guaranteed not to be clobbered.
119  Operand ExternalOperand(ExternalReference reference,
120                          Register scratch = kScratchRegister);
121  // Loads and stores the value of an external reference.
122  // Special case code for load and store to take advantage of
123  // load_rax/store_rax if possible/necessary.
124  // For other operations, just use:
125  //   Operand operand = ExternalOperand(extref);
126  //   operation(operand, ..);
127  void Load(Register destination, ExternalReference source);
128  void Store(ExternalReference destination, Register source);
129  // Loads the address of the external reference into the destination
130  // register.
131  void LoadAddress(Register destination, ExternalReference source);
132  // Returns the size of the code generated by LoadAddress.
133  // Used by CallSize(ExternalReference) to find the size of a call.
134  int LoadAddressSize(ExternalReference source);
135  // Pushes the address of the external reference onto the stack.
136  void PushAddress(ExternalReference source);
137
  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  // Loads the root value at |index| into the memory operand |destination|.
  // Clobbers kScratchRegister, which is used to stage the value.
  void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
    LoadRoot(kScratchRegister, index);
    movp(destination, kScratchRegister);
  }
  void StoreRoot(Register source, Heap::RootListIndex index);
145  // Load a root value where the index (or part of it) is variable.
146  // The variable_offset register is added to the fixed_offset value
147  // to get the index into the root-array.
148  void LoadRootIndexed(Register destination,
149                       Register variable_offset,
150                       int fixed_offset);
151  void CompareRoot(Register with, Heap::RootListIndex index);
152  void CompareRoot(const Operand& with, Heap::RootListIndex index);
153  void PushRoot(Heap::RootListIndex index);
154
  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }
  // As above, but compares a memory operand against the root value.
  void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
                  Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
  // As above, but compares a memory operand against the root value.
  void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
181
182  // These functions do not arrange the registers in any particular order so
183  // they are not useful for calls that can cause a GC.  The caller can
184  // exclude up to 3 registers that do not need to be saved and restored.
185  void PushCallerSaved(SaveFPRegsMode fp_mode,
186                       Register exclusion1 = no_reg,
187                       Register exclusion2 = no_reg,
188                       Register exclusion3 = no_reg);
189  void PopCallerSaved(SaveFPRegsMode fp_mode,
190                      Register exclusion1 = no_reg,
191                      Register exclusion2 = no_reg,
192                      Register exclusion3 = no_reg);
193
194// ---------------------------------------------------------------------------
195// GC Support
196
197
198  enum RememberedSetFinalAction {
199    kReturnAtEnd,
200    kFallThroughAtEnd
201  };
202
203  // Record in the remembered set the fact that we have a pointer to new space
204  // at the address pointed to by the addr register.  Only works if addr is not
205  // in new space.
206  void RememberedSetHelper(Register object,  // Used for debug code.
207                           Register addr,
208                           Register scratch,
209                           SaveFPRegsMode save_fp,
210                           RememberedSetFinalAction and_then);
211
212  void CheckPageFlag(Register object,
213                     Register scratch,
214                     int mask,
215                     Condition cc,
216                     Label* condition_met,
217                     Label::Distance condition_met_distance = Label::kFar);
218
  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  // Delegates to InNewSpace, taking the branch on the |zero| condition.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, zero, branch, distance);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  // Delegates to InNewSpace, taking the branch on the |not_zero| condition.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_zero, branch, distance);
  }
236
237  // Check if an object has the black incremental marking color.  Also uses rcx!
238  void JumpIfBlack(Register object, Register bitmap_scratch,
239                   Register mask_scratch, Label* on_black,
240                   Label::Distance on_black_distance);
241
242  // Checks the color of an object.  If the object is white we jump to the
243  // incremental marker.
244  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
245                   Label* value_is_white, Label::Distance distance);
246
247  // Notify the garbage collector that we wrote a pointer into an object.
248  // |object| is the object being stored into, |value| is the object being
249  // stored.  value and scratch registers are clobbered by the operation.
250  // The offset is the offset from the start of the object, not the offset from
251  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
252  void RecordWriteField(
253      Register object,
254      int offset,
255      Register value,
256      Register scratch,
257      SaveFPRegsMode save_fp,
258      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
259      SmiCheck smi_check = INLINE_SMI_CHECK,
260      PointersToHereCheck pointers_to_here_check_for_value =
261          kPointersToHereMaybeInteresting);
262
263  // As above, but the offset has the tag presubtracted.  For use with
264  // Operand(reg, off).
265  void RecordWriteContextSlot(
266      Register context,
267      int offset,
268      Register value,
269      Register scratch,
270      SaveFPRegsMode save_fp,
271      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
272      SmiCheck smi_check = INLINE_SMI_CHECK,
273      PointersToHereCheck pointers_to_here_check_for_value =
274          kPointersToHereMaybeInteresting) {
275    RecordWriteField(context,
276                     offset + kHeapObjectTag,
277                     value,
278                     scratch,
279                     save_fp,
280                     remembered_set_action,
281                     smi_check,
282                     pointers_to_here_check_for_value);
283  }
284
285  // Notify the garbage collector that we wrote a pointer into a fixed array.
286  // |array| is the array being stored into, |value| is the
287  // object being stored.  |index| is the array index represented as a non-smi.
  // All registers are clobbered by the operation.  RecordWriteArray
289  // filters out smis so it does not update the write barrier if the
290  // value is a smi.
291  void RecordWriteArray(
292      Register array,
293      Register value,
294      Register index,
295      SaveFPRegsMode save_fp,
296      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
297      SmiCheck smi_check = INLINE_SMI_CHECK,
298      PointersToHereCheck pointers_to_here_check_for_value =
299          kPointersToHereMaybeInteresting);
300
301  // Notify the garbage collector that we wrote a code entry into a
302  // JSFunction. Only scratch is clobbered by the operation.
303  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
304                                 Register scratch);
305
306  void RecordWriteForMap(
307      Register object,
308      Register map,
309      Register dst,
310      SaveFPRegsMode save_fp);
311
312  // For page containing |object| mark region covering |address|
313  // dirty. |object| is the object being stored into, |value| is the
314  // object being stored. The address and value registers are clobbered by the
315  // operation.  RecordWrite filters out smis so it does not update
316  // the write barrier if the value is a smi.
317  void RecordWrite(
318      Register object,
319      Register address,
320      Register value,
321      SaveFPRegsMode save_fp,
322      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
323      SmiCheck smi_check = INLINE_SMI_CHECK,
324      PointersToHereCheck pointers_to_here_check_for_value =
325          kPointersToHereMaybeInteresting);
326
327  // Frame restart support.
328  void MaybeDropFrames();
329
330  // Generates function and stub prologue code.
331  void StubPrologue(StackFrame::Type type);
332  void Prologue(bool code_pre_aging);
333
334  // Enter specific kind of exit frame; either in normal or
335  // debug mode. Expects the number of arguments in register rax and
336  // sets up the number of arguments in register rdi and the pointer
337  // to the first argument in register rsi.
338  //
339  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
340  // accessible via StackSpaceOperand.
341  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false,
342                      StackFrame::Type frame_type = StackFrame::EXIT);
343
344  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
345  // memory (not GCed) on the stack accessible via StackSpaceOperand.
346  void EnterApiExitFrame(int arg_stack_space);
347
348  // Leave the current exit frame. Expects/provides the return value in
349  // register rax:rdx (untouched) and the pointer to the first
350  // argument in register rsi (if pop_arguments == true).
351  void LeaveExitFrame(bool save_doubles = false, bool pop_arguments = true);
352
353  // Leave the current exit frame. Expects/provides the return value in
354  // register rax (untouched).
355  void LeaveApiExitFrame(bool restore_context);
356
  // Push and pop the registers that can hold pointers.
  // Implemented in terms of Pushad()/Popad().
  void PushSafepointRegisters() { Pushad(); }
  void PopSafepointRegisters() { Popad(); }
  // Store the value in register src in the safepoint register stack
  // slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);
365
  // Loads the start address of the isolate's root array into kRootRegister
  // and adds kRootRegisterBias, so that negative 8-bit displacements from
  // kRootRegister can reach the beginning of the array.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    Move(kRootRegister, roots_array_start);
    addp(kRootRegister, Immediate(kRootRegisterBias));
  }
372
373  // ---------------------------------------------------------------------------
374  // JavaScript invokes
375
376  // Removes current frame and its arguments from the stack preserving
377  // the arguments and a return address pushed to the stack for the next call.
378  // |ra_state| defines whether return address is already pushed to stack or
379  // not. Both |callee_args_count| and |caller_args_count_reg| do not include
380  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
381  // is trashed.
382  void PrepareForTailCall(const ParameterCount& callee_args_count,
383                          Register caller_args_count_reg, Register scratch0,
384                          Register scratch1, ReturnAddressState ra_state);
385
386  // Invoke the JavaScript function code by either calling or jumping.
387  void InvokeFunctionCode(Register function, Register new_target,
388                          const ParameterCount& expected,
389                          const ParameterCount& actual, InvokeFlag flag,
390                          const CallWrapper& call_wrapper);
391
392  // On function call, call into the debugger if necessary.
393  void CheckDebugHook(Register fun, Register new_target,
394                      const ParameterCount& expected,
395                      const ParameterCount& actual);
396
397  // Invoke the JavaScript function in the given register. Changes the
398  // current context to the context in the function before invoking.
399  void InvokeFunction(Register function,
400                      Register new_target,
401                      const ParameterCount& actual,
402                      InvokeFlag flag,
403                      const CallWrapper& call_wrapper);
404
405  void InvokeFunction(Register function,
406                      Register new_target,
407                      const ParameterCount& expected,
408                      const ParameterCount& actual,
409                      InvokeFlag flag,
410                      const CallWrapper& call_wrapper);
411
412  void InvokeFunction(Handle<JSFunction> function,
413                      const ParameterCount& expected,
414                      const ParameterCount& actual,
415                      InvokeFlag flag,
416                      const CallWrapper& call_wrapper);
417
418  // ---------------------------------------------------------------------------
419  // Smi tagging, untagging and operations on tagged smis.
420
421  // Support for constant splitting.
422  bool IsUnsafeInt(const int32_t x);
423  void SafeMove(Register dst, Smi* src);
424  void SafePush(Smi* src);
425
426  // Conversions between tagged smi values and non-tagged integer values.
427
428  // Tag an integer value. The result must be known to be a valid smi value.
429  // Only uses the low 32 bits of the src register. Sets the N and Z flags
430  // based on the value of the resulting smi.
431  void Integer32ToSmi(Register dst, Register src);
432
433  // Stores an integer32 value into a memory field that already holds a smi.
434  void Integer32ToSmiField(const Operand& dst, Register src);
435
436  // Adds constant to src and tags the result as a smi.
437  // Result must be a valid smi.
438  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
439
440  // Convert smi to 32-bit integer. I.e., not sign extended into
441  // high 32 bits of destination.
442  void SmiToInteger32(Register dst, Register src);
443  void SmiToInteger32(Register dst, const Operand& src);
444
445  // Convert smi to 64-bit integer (sign extended if necessary).
446  void SmiToInteger64(Register dst, Register src);
447  void SmiToInteger64(Register dst, const Operand& src);
448
  // Convert smi to double.  Clobbers kScratchRegister, which holds the
  // untagged 32-bit integer value during the conversion.
  void SmiToDouble(XMMRegister dst, Register src) {
    SmiToInteger32(kScratchRegister, src);
    Cvtlsi2sd(dst, kScratchRegister);
  }
454
455  // Multiply a positive smi's integer value by a power of two.
456  // Provides result as 64-bit integer value.
457  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
458                                             Register src,
459                                             int power);
460
461  // Divide a positive smi's integer value by a power of two.
462  // Provides result as 32-bit integer value.
463  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
464                                           Register src,
465                                           int power);
466
467  // Perform the logical or of two smi values and return a smi value.
468  // If either argument is not a smi, jump to on_not_smis and retain
469  // the original values of source registers. The destination register
470  // may be changed if it's not one of the source registers.
471  void SmiOrIfSmis(Register dst,
472                   Register src1,
473                   Register src2,
474                   Label* on_not_smis,
475                   Label::Distance near_jump = Label::kFar);
476
477
478  // Simple comparison of smis.  Both sides must be known smis to use these,
479  // otherwise use Cmp.
480  void SmiCompare(Register smi1, Register smi2);
481  void SmiCompare(Register dst, Smi* src);
482  void SmiCompare(Register dst, const Operand& src);
483  void SmiCompare(const Operand& dst, Register src);
484  void SmiCompare(const Operand& dst, Smi* src);
485  // Compare the int32 in src register to the value of the smi stored at dst.
486  void SmiCompareInteger32(const Operand& dst, Register src);
487  // Sets sign and zero flags depending on value of smi in register.
488  void SmiTest(Register src);
489
490  // Functions performing a check on a known or potential smi. Returns
491  // a condition that is satisfied if the check is successful.
492
493  // Is the value a tagged smi.
494  Condition CheckSmi(Register src);
495  Condition CheckSmi(const Operand& src);
496
497  // Is the value a non-negative tagged smi.
498  Condition CheckNonNegativeSmi(Register src);
499
500  // Are both values tagged smis.
501  Condition CheckBothSmi(Register first, Register second);
502
503  // Are both values non-negative tagged smis.
504  Condition CheckBothNonNegativeSmi(Register first, Register second);
505
506  // Are either value a tagged smi.
507  Condition CheckEitherSmi(Register first,
508                           Register second,
509                           Register scratch = kScratchRegister);
510
  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
513  Condition CheckInteger32ValidSmiValue(Register src);
514
  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
517  Condition CheckUInteger32ValidSmiValue(Register src);
518
519  // Check whether src is a Smi, and set dst to zero if it is a smi,
520  // and to one if it isn't.
521  void CheckSmiToIndicator(Register dst, Register src);
522  void CheckSmiToIndicator(Register dst, const Operand& src);
523
524  // Test-and-jump functions. Typically combines a check function
525  // above with a conditional jump.
526
527  // Jump if the value can be represented by a smi.
528  void JumpIfValidSmiValue(Register src, Label* on_valid,
529                           Label::Distance near_jump = Label::kFar);
530
531  // Jump if the value cannot be represented by a smi.
532  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
533                              Label::Distance near_jump = Label::kFar);
534
535  // Jump if the unsigned integer value can be represented by a smi.
536  void JumpIfUIntValidSmiValue(Register src, Label* on_valid,
537                               Label::Distance near_jump = Label::kFar);
538
539  // Jump if the unsigned integer value cannot be represented by a smi.
540  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
541                                  Label::Distance near_jump = Label::kFar);
542
543  // Jump to label if the value is a tagged smi.
544  void JumpIfSmi(Register src,
545                 Label* on_smi,
546                 Label::Distance near_jump = Label::kFar);
547
548  // Jump to label if the value is not a tagged smi.
549  void JumpIfNotSmi(Register src,
550                    Label* on_not_smi,
551                    Label::Distance near_jump = Label::kFar);
552
553  // Jump to label if the value is not a tagged smi.
554  void JumpIfNotSmi(Operand src, Label* on_not_smi,
555                    Label::Distance near_jump = Label::kFar);
556
557  // Jump to label if the value is not a non-negative tagged smi.
558  void JumpUnlessNonNegativeSmi(Register src,
559                                Label* on_not_smi,
560                                Label::Distance near_jump = Label::kFar);
561
562  // Jump to label if the value, which must be a tagged smi, has value equal
563  // to the constant.
564  void JumpIfSmiEqualsConstant(Register src,
565                               Smi* constant,
566                               Label* on_equals,
567                               Label::Distance near_jump = Label::kFar);
568
569  // Jump if either or both register are not smi values.
570  void JumpIfNotBothSmi(Register src1,
571                        Register src2,
572                        Label* on_not_both_smi,
573                        Label::Distance near_jump = Label::kFar);
574
575  // Jump if either or both register are not non-negative smi values.
576  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
577                                    Label* on_not_both_smi,
578                                    Label::Distance near_jump = Label::kFar);
579
580  // Operations on tagged smi values.
581
582  // Smis represent a subset of integers. The subset is always equivalent to
583  // a two's complement interpretation of a fixed number of bits.
584
585  // Add an integer constant to a tagged smi, giving a tagged smi as result.
586  // No overflow testing on the result is done.
587  void SmiAddConstant(Register dst, Register src, Smi* constant);
588
589  // Add an integer constant to a tagged smi, giving a tagged smi as result.
590  // No overflow testing on the result is done.
591  void SmiAddConstant(const Operand& dst, Smi* constant);
592
593  // Add an integer constant to a tagged smi, giving a tagged smi as result,
594  // or jumping to a label if the result cannot be represented by a smi.
595  void SmiAddConstant(Register dst, Register src, Smi* constant,
596                      SmiOperationConstraints constraints, Label* bailout_label,
597                      Label::Distance near_jump = Label::kFar);
598
599  // Subtract an integer constant from a tagged smi, giving a tagged smi as
600  // result. No testing on the result is done. Sets the N and Z flags
601  // based on the value of the resulting integer.
602  void SmiSubConstant(Register dst, Register src, Smi* constant);
603
604  // Subtract an integer constant from a tagged smi, giving a tagged smi as
605  // result, or jumping to a label if the result cannot be represented by a smi.
606  void SmiSubConstant(Register dst, Register src, Smi* constant,
607                      SmiOperationConstraints constraints, Label* bailout_label,
608                      Label::Distance near_jump = Label::kFar);
609
610  // Negating a smi can give a negative zero or too large positive value.
611  // NOTICE: This operation jumps on success, not failure!
612  void SmiNeg(Register dst,
613              Register src,
614              Label* on_smi_result,
615              Label::Distance near_jump = Label::kFar);
616
617  // Adds smi values and return the result as a smi.
618  // If dst is src1, then src1 will be destroyed if the operation is
619  // successful, otherwise kept intact.
620  void SmiAdd(Register dst,
621              Register src1,
622              Register src2,
623              Label* on_not_smi_result,
624              Label::Distance near_jump = Label::kFar);
625  void SmiAdd(Register dst,
626              Register src1,
627              const Operand& src2,
628              Label* on_not_smi_result,
629              Label::Distance near_jump = Label::kFar);
630
631  void SmiAdd(Register dst,
632              Register src1,
633              Register src2);
634
635  // Subtracts smi values and return the result as a smi.
636  // If dst is src1, then src1 will be destroyed if the operation is
637  // successful, otherwise kept intact.
638  void SmiSub(Register dst,
639              Register src1,
640              Register src2,
641              Label* on_not_smi_result,
642              Label::Distance near_jump = Label::kFar);
643  void SmiSub(Register dst,
644              Register src1,
645              const Operand& src2,
646              Label* on_not_smi_result,
647              Label::Distance near_jump = Label::kFar);
648
649  void SmiSub(Register dst,
650              Register src1,
651              Register src2);
652
653  void SmiSub(Register dst,
654              Register src1,
655              const Operand& src2);
656
657  // Multiplies smi values and return the result as a smi,
658  // if possible.
659  // If dst is src1, then src1 will be destroyed, even if
660  // the operation is unsuccessful.
661  void SmiMul(Register dst,
662              Register src1,
663              Register src2,
664              Label* on_not_smi_result,
665              Label::Distance near_jump = Label::kFar);
666
667  // Divides one smi by another and returns the quotient.
668  // Clobbers rax and rdx registers.
669  void SmiDiv(Register dst,
670              Register src1,
671              Register src2,
672              Label* on_not_smi_result,
673              Label::Distance near_jump = Label::kFar);
674
675  // Divides one smi by another and returns the remainder.
676  // Clobbers rax and rdx registers.
677  void SmiMod(Register dst,
678              Register src1,
679              Register src2,
680              Label* on_not_smi_result,
681              Label::Distance near_jump = Label::kFar);
682
683  // Bitwise operations.
684  void SmiNot(Register dst, Register src);
685  void SmiAnd(Register dst, Register src1, Register src2);
686  void SmiOr(Register dst, Register src1, Register src2);
687  void SmiXor(Register dst, Register src1, Register src2);
688  void SmiAndConstant(Register dst, Register src1, Smi* constant);
689  void SmiOrConstant(Register dst, Register src1, Smi* constant);
690  void SmiXorConstant(Register dst, Register src1, Smi* constant);
691
692  void SmiShiftLeftConstant(Register dst,
693                            Register src,
694                            int shift_value,
695                            Label* on_not_smi_result = NULL,
696                            Label::Distance near_jump = Label::kFar);
697  void SmiShiftLogicalRightConstant(Register dst,
698                                    Register src,
699                                    int shift_value,
700                                    Label* on_not_smi_result,
701                                    Label::Distance near_jump = Label::kFar);
702  void SmiShiftArithmeticRightConstant(Register dst,
703                                       Register src,
704                                       int shift_value);
705
706  // Shifts a smi value to the left, and returns the result if that is a smi.
707  // Uses and clobbers rcx, so dst may not be rcx.
708  void SmiShiftLeft(Register dst,
709                    Register src1,
710                    Register src2,
711                    Label* on_not_smi_result = NULL,
712                    Label::Distance near_jump = Label::kFar);
713  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
715  // Uses and clobbers rcx, so dst may not be rcx.
716  void SmiShiftLogicalRight(Register dst,
717                            Register src1,
718                            Register src2,
719                            Label* on_not_smi_result,
720                            Label::Distance near_jump = Label::kFar);
721  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
723  // be a valid smi value, since it's numerically smaller than the
724  // original.
725  // Uses and clobbers rcx, so dst may not be rcx.
726  void SmiShiftArithmeticRight(Register dst,
727                               Register src1,
728                               Register src2);
729
730  // Specialized operations
731
732  // Select the non-smi register of two registers where exactly one is a
733  // smi. If neither are smis, jump to the failure label.
734  void SelectNonSmi(Register dst,
735                    Register src1,
736                    Register src2,
737                    Label* on_not_smis,
738                    Label::Distance near_jump = Label::kFar);
739
740  // Converts, if necessary, a smi to a combination of number and
741  // multiplier to be used as a scaled index.
742  // The src register contains a *positive* smi value. The shift is the
743  // power of two to multiply the index value by (e.g.
744  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
745  // The returned index register may be either src or dst, depending
746  // on what is most efficient. If src and dst are different registers,
747  // src is always unchanged.
748  SmiIndex SmiToIndex(Register dst, Register src, int shift);
749
750  // Converts a positive smi to a negative index.
751  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
752
753  // Add the value of a smi in memory to an int32 register.
754  // Sets flags as a normal add.
755  void AddSmiField(Register dst, const Operand& src);
756
757  // Basic Smi operations.
  // Loads the Smi constant |source| into |dst|.
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }
761
  // Stores the Smi constant |source| to the memory operand |dst|,
  // materializing it in a register first via GetSmiConstant.
  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movp(dst, constant);
  }
766
767  void Push(Smi* smi);
768
769  // Save away a raw integer with pointer size on the stack as two integers
770  // masquerading as smis so that the garbage collector skips visiting them.
771  void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
772  // Reconstruct a raw integer with pointer size from two integers masquerading
773  // as smis on the top of stack.
774  void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);
775
776  void Test(const Operand& dst, Smi* source);
777
778
779  // ---------------------------------------------------------------------------
780  // String macros.
781
782  // If object is a string, its map is loaded into object_map.
783  void JumpIfNotString(Register object,
784                       Register object_map,
785                       Label* not_string,
786                       Label::Distance near_jump = Label::kFar);
787
788
789  void JumpIfNotBothSequentialOneByteStrings(
790      Register first_object, Register second_object, Register scratch1,
791      Register scratch2, Label* on_not_both_flat_one_byte,
792      Label::Distance near_jump = Label::kFar);
793
794  // Check whether the instance type represents a flat one-byte string. Jump
795  // to the label if not. If the instance type can be scratched specify same
796  // register for both instance type and scratch.
797  void JumpIfInstanceTypeIsNotSequentialOneByte(
798      Register instance_type, Register scratch,
799      Label* on_not_flat_one_byte_string,
800      Label::Distance near_jump = Label::kFar);
801
802  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
803      Register first_object_instance_type, Register second_object_instance_type,
804      Register scratch1, Register scratch2, Label* on_fail,
805      Label::Distance near_jump = Label::kFar);
806
807  void EmitSeqStringSetCharCheck(Register string,
808                                 Register index,
809                                 Register value,
810                                 uint32_t encoding_mask);
811
812  // Checks if the given register or operand is a unique name
813  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
814                                       Label::Distance distance = Label::kFar);
815  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
816                                       Label::Distance distance = Label::kFar);
817
818  // ---------------------------------------------------------------------------
819  // Macro instructions.
820
821  // Load/store with specific representation.
822  void Load(Register dst, const Operand& src, Representation r);
823  void Store(const Operand& dst, Register src, Representation r);
824
825  // Load a register with a long value as efficiently as possible.
826  void Set(Register dst, int64_t x);
827  void Set(const Operand& dst, intptr_t x);
828
829  void Cvtss2sd(XMMRegister dst, XMMRegister src);
830  void Cvtss2sd(XMMRegister dst, const Operand& src);
831  void Cvtsd2ss(XMMRegister dst, XMMRegister src);
832  void Cvtsd2ss(XMMRegister dst, const Operand& src);
833
834  // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
835  // hinders register renaming and makes dependence chains longer. So we use
836  // xorpd to clear the dst register before cvtsi2sd to solve this issue.
837  void Cvtlsi2sd(XMMRegister dst, Register src);
838  void Cvtlsi2sd(XMMRegister dst, const Operand& src);
839
840  void Cvtlsi2ss(XMMRegister dst, Register src);
841  void Cvtlsi2ss(XMMRegister dst, const Operand& src);
842  void Cvtqsi2ss(XMMRegister dst, Register src);
843  void Cvtqsi2ss(XMMRegister dst, const Operand& src);
844
845  void Cvtqsi2sd(XMMRegister dst, Register src);
846  void Cvtqsi2sd(XMMRegister dst, const Operand& src);
847
848  void Cvtqui2ss(XMMRegister dst, Register src, Register tmp);
849  void Cvtqui2sd(XMMRegister dst, Register src, Register tmp);
850
851  void Cvtsd2si(Register dst, XMMRegister src);
852
853  void Cvttss2si(Register dst, XMMRegister src);
854  void Cvttss2si(Register dst, const Operand& src);
855  void Cvttsd2si(Register dst, XMMRegister src);
856  void Cvttsd2si(Register dst, const Operand& src);
857  void Cvttss2siq(Register dst, XMMRegister src);
858  void Cvttss2siq(Register dst, const Operand& src);
859  void Cvttsd2siq(Register dst, XMMRegister src);
860  void Cvttsd2siq(Register dst, const Operand& src);
861
862  // Move if the registers are not identical.
863  void Move(Register target, Register source);
864
865  // TestBit and Load SharedFunctionInfo special field.
866  void TestBitSharedFunctionInfoSpecialField(Register base,
867                                             int offset,
868                                             int bit_index);
869  void LoadSharedFunctionInfoSpecialField(Register dst,
870                                          Register base,
871                                          int offset);
872
873  // Handle support
874  void Move(Register dst, Handle<Object> source);
875  void Move(const Operand& dst, Handle<Object> source);
876  void Cmp(Register dst, Handle<Object> source);
877  void Cmp(const Operand& dst, Handle<Object> source);
878  void Cmp(Register dst, Smi* src);
879  void Cmp(const Operand& dst, Smi* src);
880  void Push(Handle<Object> source);
881
882  // Load a heap object and handle the case of new-space objects by
883  // indirecting via a global cell.
884  void MoveHeapObject(Register result, Handle<Object> object);
885
886  // Load a global cell into a register.
887  void LoadGlobalCell(Register dst, Handle<Cell> cell);
888
889  // Compare the given value and the value of weak cell.
890  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
891
892  void GetWeakValue(Register value, Handle<WeakCell> cell);
893
894  // Load the value of the weak cell in the value register. Branch to the given
895  // miss label if the weak cell was cleared.
896  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
897
  // Emit code that loads the |parameter_index|'th parameter from the stack to
  // the register according to the CallInterfaceDescriptor definition.
  // |sp_to_ra_offset_in_words| specifies the number of words pushed
  // below the caller's sp (on x64 it's at least the return address).
  // Not implemented on x64 (see UNIMPLEMENTED below); only legal to reach
  // for descriptors that pass their last arguments on the stack.
  template <class Descriptor>
  void LoadParameterFromStack(
      Register reg, typename Descriptor::ParameterIndices parameter_index,
      int sp_to_ra_offset_in_words = 1) {
    DCHECK(Descriptor::kPassLastArgsOnStack);
    UNIMPLEMENTED();
  }
909
910  // Emit code to discard a non-negative number of pointer-sized elements
911  // from the stack, clobbering only the rsp register.
912  void Drop(int stack_elements);
913  // Emit code to discard a positive number of pointer-sized elements
914  // from the stack under the return address which remains on the top,
915  // clobbering the rsp register.
916  void DropUnderReturnAddress(int stack_elements,
917                              Register scratch = kScratchRegister);
918
  void Call(Label* target) { call(target); }
  // Pointer-sized pushes/pops of registers, memory operands and immediates.
  void Push(Register src);
  void Push(const Operand& src);
  void PushQuad(const Operand& src);
  void Push(Immediate value);
  void PushImm32(int32_t imm32);
  void Pop(Register dst);
  void Pop(const Operand& dst);
  void PopQuad(const Operand& dst);
  // Save/restore the return address sitting on top of the stack.
  void PushReturnAddressFrom(Register src) { pushq(src); }
  void PopReturnAddressTo(Register dst) { popq(dst); }
  // Loads the address of |ext| into |dst|, recording EXTERNAL_REFERENCE
  // relocation info for it.
  void Move(Register dst, ExternalReference ext) {
    movp(dst, reinterpret_cast<void*>(ext.address()),
         RelocInfo::EXTERNAL_REFERENCE);
  }
934
  // Loads a pointer into a register with a relocation mode.
  void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
    // This method must not be used with heap object references. The stored
    // address is not GC safe. Use the handle version instead.
    // Modes up to LAST_GCED_ENUM are reserved for GC-visited pointers, hence
    // the strict inequality.
    DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
    movp(dst, ptr, rmode);
  }
942
  // Loads the location of the heap object referenced by |value| into |dst|,
  // recording |rmode| relocation info. Only valid for heap objects with a
  // real relocation mode (see the DCHECKs below).
  void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
    AllowDeferredHandleDereference using_raw_address;
    DCHECK(!RelocInfo::IsNone(rmode));
    DCHECK(value->IsHeapObject());
    movp(dst, reinterpret_cast<void*>(value.location()), rmode);
  }
949
  void Move(XMMRegister dst, uint32_t src);
  void Move(XMMRegister dst, uint64_t src);
  // Bit-exact float/double moves, routed through the integer overloads via
  // their raw bit representations.
  void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
  void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }
954
// Defines a wrapper |macro_name| that emits the VEX-encoded three-operand
// form v|name| (with dst doubling as the first source) when AVX is
// supported, and falls back to the legacy SSE two-operand form otherwise.
#define AVX_OP2_WITH_TYPE(macro_name, name, src_type) \
  void macro_name(XMMRegister dst, src_type src) {    \
    if (CpuFeatures::IsSupported(AVX)) {              \
      CpuFeatureScope scope(this, AVX);               \
      v##name(dst, dst, src);                         \
    } else {                                          \
      name(dst, src);                                 \
    }                                                 \
  }
#define AVX_OP2_X(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, XMMRegister)
#define AVX_OP2_O(macro_name, name) \
  AVX_OP2_WITH_TYPE(macro_name, name, const Operand&)
#define AVX_OP2_XO(macro_name, name) \
  AVX_OP2_X(macro_name, name)        \
  AVX_OP2_O(macro_name, name)

  // AVX/SSE wrappers taking either an XMM register or a memory operand
  // source (XO), an XMM register only (X), or an immediate shift count.
  AVX_OP2_XO(Addsd, addsd)
  AVX_OP2_XO(Subsd, subsd)
  AVX_OP2_XO(Mulsd, mulsd)
  AVX_OP2_XO(Divss, divss)
  AVX_OP2_XO(Divsd, divsd)
  AVX_OP2_XO(Andps, andps)
  AVX_OP2_XO(Andpd, andpd)
  AVX_OP2_XO(Orpd, orpd)
  AVX_OP2_XO(Xorpd, xorpd)
  AVX_OP2_XO(Cmpeqps, cmpeqps)
  AVX_OP2_XO(Cmpltps, cmpltps)
  AVX_OP2_XO(Cmpleps, cmpleps)
  AVX_OP2_XO(Cmpneqps, cmpneqps)
  AVX_OP2_XO(Cmpnltps, cmpnltps)
  AVX_OP2_XO(Cmpnleps, cmpnleps)
  AVX_OP2_XO(Cmpeqpd, cmpeqpd)
  AVX_OP2_XO(Cmpltpd, cmpltpd)
  AVX_OP2_XO(Cmplepd, cmplepd)
  AVX_OP2_XO(Cmpneqpd, cmpneqpd)
  AVX_OP2_XO(Cmpnltpd, cmpnltpd)
  AVX_OP2_XO(Cmpnlepd, cmpnlepd)
  AVX_OP2_X(Pcmpeqd, pcmpeqd)
  AVX_OP2_WITH_TYPE(Psllq, psllq, byte)
  AVX_OP2_WITH_TYPE(Psrlq, psrlq, byte)

#undef AVX_OP2_O
#undef AVX_OP2_X
#undef AVX_OP2_XO
#undef AVX_OP2_WITH_TYPE
1001
1002  void Movsd(XMMRegister dst, XMMRegister src);
1003  void Movsd(XMMRegister dst, const Operand& src);
1004  void Movsd(const Operand& dst, XMMRegister src);
1005  void Movss(XMMRegister dst, XMMRegister src);
1006  void Movss(XMMRegister dst, const Operand& src);
1007  void Movss(const Operand& dst, XMMRegister src);
1008
1009  void Movd(XMMRegister dst, Register src);
1010  void Movd(XMMRegister dst, const Operand& src);
1011  void Movd(Register dst, XMMRegister src);
1012  void Movq(XMMRegister dst, Register src);
1013  void Movq(Register dst, XMMRegister src);
1014
1015  void Movaps(XMMRegister dst, XMMRegister src);
1016  void Movups(XMMRegister dst, XMMRegister src);
1017  void Movups(XMMRegister dst, const Operand& src);
1018  void Movups(const Operand& dst, XMMRegister src);
1019  void Movmskps(Register dst, XMMRegister src);
1020  void Movapd(XMMRegister dst, XMMRegister src);
1021  void Movupd(XMMRegister dst, const Operand& src);
1022  void Movupd(const Operand& dst, XMMRegister src);
1023  void Movmskpd(Register dst, XMMRegister src);
1024
1025  void Xorps(XMMRegister dst, XMMRegister src);
1026  void Xorps(XMMRegister dst, const Operand& src);
1027
1028  void Roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
1029  void Roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
1030  void Sqrtsd(XMMRegister dst, XMMRegister src);
1031  void Sqrtsd(XMMRegister dst, const Operand& src);
1032
1033  void Ucomiss(XMMRegister src1, XMMRegister src2);
1034  void Ucomiss(XMMRegister src1, const Operand& src2);
1035  void Ucomisd(XMMRegister src1, XMMRegister src2);
1036  void Ucomisd(XMMRegister src1, const Operand& src2);
1037
1038  // ---------------------------------------------------------------------------
1039  // SIMD macros.
1040  void Absps(XMMRegister dst);
1041  void Negps(XMMRegister dst);
1042  void Abspd(XMMRegister dst);
1043  void Negpd(XMMRegister dst);
1044
1045  // Control Flow
1046  void Jump(Address destination, RelocInfo::Mode rmode);
1047  void Jump(ExternalReference ext);
1048  void Jump(const Operand& op);
1049  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
1050
1051  void Call(Address destination, RelocInfo::Mode rmode);
1052  void Call(ExternalReference ext);
1053  void Call(const Operand& op);
1054  void Call(Handle<Code> code_object,
1055            RelocInfo::Mode rmode,
1056            TypeFeedbackId ast_id = TypeFeedbackId::None());
1057
  // The size of the code generated for different call instructions.
  int CallSize(Address destination) {
    // Calls to raw addresses use the full fixed-length call sequence.
    return kCallSequenceLength;
  }
  int CallSize(ExternalReference ext);
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
  int CallSize(Register target) {
    // Opcode: REX_opt FF /2 m64
    // Registers with a set high bit (r8-r15) need the one-byte REX prefix.
    return (target.high_bit() != 0) ? 3 : 2;
  }
  int CallSize(const Operand& target) {
    // Opcode: REX_opt FF /2 m64
    // Opcode byte, optional REX prefix, plus the operand's own encoding size.
    return (target.requires_rex() ? 2 : 1) + target.operand_size();
  }
1075
1076  // Non-SSE2 instructions.
1077  void Pextrd(Register dst, XMMRegister src, int8_t imm8);
1078  void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
1079  void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
1080
1081  void Lzcntq(Register dst, Register src);
1082  void Lzcntq(Register dst, const Operand& src);
1083
1084  void Lzcntl(Register dst, Register src);
1085  void Lzcntl(Register dst, const Operand& src);
1086
1087  void Tzcntq(Register dst, Register src);
1088  void Tzcntq(Register dst, const Operand& src);
1089
1090  void Tzcntl(Register dst, Register src);
1091  void Tzcntl(Register dst, const Operand& src);
1092
1093  void Popcntl(Register dst, Register src);
1094  void Popcntl(Register dst, const Operand& src);
1095
1096  void Popcntq(Register dst, Register src);
1097  void Popcntq(Register dst, const Operand& src);
1098
1099  // Non-x64 instructions.
1100  // Push/pop all general purpose registers.
1101  // Does not push rsp/rbp nor any of the assembler's special purpose registers
1102  // (kScratchRegister, kRootRegister).
1103  void Pushad();
1104  void Popad();
1105  // Sets the stack as after performing Popad, without actually loading the
1106  // registers.
1107  void Dropad();
1108
1109  // Compare object type for heap object.
1110  // Always use unsigned comparisons: above and below, not less and greater.
1111  // Incoming register is heap_object and outgoing register is map.
1112  // They may be the same register, and may be kScratchRegister.
1113  void CmpObjectType(Register heap_object, InstanceType type, Register map);
1114
1115  // Compare instance type for map.
1116  // Always use unsigned comparisons: above and below, not less and greater.
1117  void CmpInstanceType(Register map, InstanceType type);
1118
1119  // Compare an object's map with the specified map.
1120  void CompareMap(Register obj, Handle<Map> map);
1121
1122  // Check if the map of an object is equal to a specified map and branch to
1123  // label if not. Skip the smi check if not required (object is known to be a
1124  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1125  // against maps that are ElementsKind transition maps of the specified map.
1126  void CheckMap(Register obj,
1127                Handle<Map> map,
1128                Label* fail,
1129                SmiCheckType smi_check_type);
1130
1131  // Check if the map of an object is equal to a specified weak map and branch
1132  // to a specified target if equal. Skip the smi check if not required
1133  // (object is known to be a heap object)
1134  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
1135                       Handle<WeakCell> cell, Handle<Code> success,
1136                       SmiCheckType smi_check_type);
1137
1138  // Check if the object in register heap_object is a string. Afterwards the
1139  // register map contains the object map and the register instance_type
1140  // contains the instance_type. The registers map and instance_type can be the
1141  // same in which case it contains the instance type afterwards. Either of the
1142  // registers map and instance_type can be the same as heap_object.
1143  Condition IsObjectStringType(Register heap_object,
1144                               Register map,
1145                               Register instance_type);
1146
1147  // Check if the object in register heap_object is a name. Afterwards the
1148  // register map contains the object map and the register instance_type
1149  // contains the instance_type. The registers map and instance_type can be the
1150  // same in which case it contains the instance type afterwards. Either of the
1151  // registers map and instance_type can be the same as heap_object.
1152  Condition IsObjectNameType(Register heap_object,
1153                             Register map,
1154                             Register instance_type);
1155
1156  // FCmp compares and pops the two values on top of the FPU stack.
1157  // The flag results are similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
1159  void FCmp();
1160
1161  void ClampUint8(Register reg);
1162
1163  void ClampDoubleToUint8(XMMRegister input_reg,
1164                          XMMRegister temp_xmm_reg,
1165                          Register result_reg);
1166
1167  void SlowTruncateToI(Register result_reg, Register input_reg,
1168      int offset = HeapNumber::kValueOffset - kHeapObjectTag);
1169
1170  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
1171  void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
1172
1173  void DoubleToI(Register result_reg, XMMRegister input_reg,
1174                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
1175                 Label* lost_precision, Label* is_nan, Label* minus_zero,
1176                 Label::Distance dst = Label::kFar);
1177
1178  void LoadUint32(XMMRegister dst, Register src);
1179
1180  void LoadInstanceDescriptors(Register map, Register descriptors);
1181  void EnumLength(Register dst, Register map);
1182  void NumberOfOwnDescriptors(Register dst, Register map);
1183  void LoadAccessor(Register dst, Register holder, int accessor_index,
1184                    AccessorComponent accessor);
1185
1186  template<typename Field>
1187  void DecodeField(Register reg) {
1188    static const int shift = Field::kShift;
1189    static const int mask = Field::kMask >> Field::kShift;
1190    if (shift != 0) {
1191      shrp(reg, Immediate(shift));
1192    }
1193    andp(reg, Immediate(mask));
1194  }
1195
  // Extracts the bit field described by |Field| from |reg| and leaves the
  // result in |reg| encoded as a Smi.
  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    if (SmiValuesAre32Bits()) {
      // Mask the field in place, then shift it up into the Smi payload bits.
      andp(reg, Immediate(Field::kMask));
      shlp(reg, Immediate(kSmiShift - Field::kShift));
    } else {
      static const int shift = Field::kShift;
      // Pre-shift the mask so it applies after the field has been aligned
      // with the Smi payload position.
      static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
      DCHECK(SmiValuesAre31Bits());
      DCHECK(kSmiShift == kSmiTagSize);
      // The shifted mask must not touch the sign bit.
      DCHECK((mask & 0x80000000u) == 0);
      // Align the field with the Smi payload position, then mask. An
      // arithmetic right shift keeps the DCHECKed sign bit intact.
      if (shift < kSmiShift) {
        shlp(reg, Immediate(kSmiShift - shift));
      } else if (shift > kSmiShift) {
        sarp(reg, Immediate(shift - kSmiShift));
      }
      andp(reg, Immediate(mask));
    }
  }
1215
1216  // Abort execution if argument is not a number, enabled via --debug-code.
1217  void AssertNumber(Register object);
1218  void AssertNotNumber(Register object);
1219
1220  // Abort execution if argument is a smi, enabled via --debug-code.
1221  void AssertNotSmi(Register object);
1222
1223  // Abort execution if argument is not a smi, enabled via --debug-code.
1224  void AssertSmi(Register object);
1225  void AssertSmi(const Operand& object);
1226
1227  // Abort execution if a 64 bit register containing a 32 bit payload does not
1228  // have zeros in the top 32 bits, enabled via --debug-code.
1229  void AssertZeroExtended(Register reg);
1230
1231  // Abort execution if argument is not a string, enabled via --debug-code.
1232  void AssertString(Register object);
1233
1234  // Abort execution if argument is not a name, enabled via --debug-code.
1235  void AssertName(Register object);
1236
1237  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
1238  void AssertFunction(Register object);
1239
1240  // Abort execution if argument is not a JSBoundFunction,
1241  // enabled via --debug-code.
1242  void AssertBoundFunction(Register object);
1243
1244  // Abort execution if argument is not a JSGeneratorObject,
1245  // enabled via --debug-code.
1246  void AssertGeneratorObject(Register object);
1247
1248  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
1249  void AssertReceiver(Register object);
1250
1251  // Abort execution if argument is not undefined or an AllocationSite, enabled
1252  // via --debug-code.
1253  void AssertUndefinedOrAllocationSite(Register object);
1254
1255  // Abort execution if argument is not the root value with the given index,
1256  // enabled via --debug-code.
1257  void AssertRootValue(Register src,
1258                       Heap::RootListIndex root_value_index,
1259                       BailoutReason reason);
1260
1261  // ---------------------------------------------------------------------------
1262  // Exception handling
1263
1264  // Push a new stack handler and link it into stack handler chain.
1265  void PushStackHandler();
1266
1267  // Unlink the stack handler on top of the stack from the stack handler chain.
1268  void PopStackHandler();
1269
1270  // ---------------------------------------------------------------------------
1271  // Inline caching support
1272
1273  void GetNumberHash(Register r0, Register scratch);
1274
1275  // ---------------------------------------------------------------------------
1276  // Allocation support
1277
1278  // Allocate an object in new space or old space. If the given space
1279  // is exhausted control continues at the gc_required label. The allocated
1280  // object is returned in result and end of the new object is returned in
1281  // result_end. The register scratch can be passed as no_reg in which case
1282  // an additional object reference will be added to the reloc info. The
1283  // returned pointers in result and result_end have not yet been tagged as
1284  // heap objects. If result_contains_top_on_entry is true the content of
1285  // result is known to be the allocation top on entry (could be result_end
1286  // from a previous call). If result_contains_top_on_entry is true scratch
1287  // should be no_reg as it is never used.
1288  void Allocate(int object_size,
1289                Register result,
1290                Register result_end,
1291                Register scratch,
1292                Label* gc_required,
1293                AllocationFlags flags);
1294
1295  void Allocate(int header_size,
1296                ScaleFactor element_size,
1297                Register element_count,
1298                Register result,
1299                Register result_end,
1300                Register scratch,
1301                Label* gc_required,
1302                AllocationFlags flags);
1303
1304  void Allocate(Register object_size,
1305                Register result,
1306                Register result_end,
1307                Register scratch,
1308                Label* gc_required,
1309                AllocationFlags flags);
1310
1311  // FastAllocate is right now only used for folded allocations. It just
1312  // increments the top pointer without checking against limit. This can only
1313  // be done if it was proved earlier that the allocation will succeed.
1314  void FastAllocate(int object_size, Register result, Register result_end,
1315                    AllocationFlags flags);
1316
1317  void FastAllocate(Register object_size, Register result, Register result_end,
1318                    AllocationFlags flags);
1319
1320  // Allocate a heap number in new space with undefined value. Returns
1321  // tagged pointer in result register, or jumps to gc_required if new
1322  // space is full.
1323  void AllocateHeapNumber(Register result,
1324                          Register scratch,
1325                          Label* gc_required,
1326                          MutableMode mode = IMMUTABLE);
1327
1328  // Allocate and initialize a JSValue wrapper with the specified {constructor}
1329  // and {value}.
1330  void AllocateJSValue(Register result, Register constructor, Register value,
1331                       Register scratch, Label* gc_required);
1332
1333  // ---------------------------------------------------------------------------
1334  // Support functions.
1335
1336  // Check if result is zero and op is negative.
1337  void NegativeZeroTest(Register result, Register op, Label* then_label);
1338
1339  // Check if result is zero and op is negative in code using jump targets.
1340  void NegativeZeroTest(CodeGenerator* cgen,
1341                        Register result,
1342                        Register op,
1343                        JumpTarget* then_target);
1344
1345  // Check if result is zero and any of op1 and op2 are negative.
1346  // Register scratch is destroyed, and it must be different from op2.
1347  void NegativeZeroTest(Register result, Register op1, Register op2,
1348                        Register scratch, Label* then_label);
1349
1350  // Machine code version of Map::GetConstructor().
1351  // |temp| holds |result|'s map when done.
1352  void GetMapConstructor(Register result, Register map, Register temp);
1353
1354  // Find the function context up the context chain.
1355  void LoadContext(Register dst, int context_chain_length);
1356
  // Load the global object from the current context.
  void LoadGlobalObject(Register dst) {
    // The global object is stored in the native context's EXTENSION_INDEX
    // slot.
    LoadNativeContextSlot(Context::EXTENSION_INDEX, dst);
  }
1361
  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    // The global proxy is stored in the native context's GLOBAL_PROXY_INDEX
    // slot.
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }
1366
1367  // Load the native context slot with the current index.
1368  void LoadNativeContextSlot(int index, Register dst);
1369
1370  // Load the initial map from the global function. The registers
1371  // function and map can be the same.
1372  void LoadGlobalFunctionInitialMap(Register function, Register map);
1373
1374  // ---------------------------------------------------------------------------
1375  // Runtime calls
1376
1377  // Call a code stub.
1378  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
1379
1380  // Tail call a code stub (jump).
1381  void TailCallStub(CodeStub* stub);
1382
1383  // Return from a code stub after popping its arguments.
1384  void StubReturn(int argc);
1385
1386  // Call a runtime routine.
1387  void CallRuntime(const Runtime::Function* f,
1388                   int num_arguments,
1389                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1390
1391  // Call a runtime function and save the value of XMM registers.
1392  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
1393    const Runtime::Function* function = Runtime::FunctionForId(fid);
1394    CallRuntime(function, function->nargs, kSaveFPRegs);
1395  }
1396
  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes an explicit argument
  // count rather than using the function's declared nargs.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }
1409
  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine via its external reference.  If
  // |builtin_exit_frame| is true, a builtin exit frame is set up first
  // (see the out-of-line definition for details).
  void JumpToExternalReference(const ExternalReference& ext,
                               bool builtin_exit_frame = false);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // on the stack.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Calculate the number of stack slots to reserve for arguments when calling a
  // C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);
1450
  // Returns the handle that is patched with this code object on installation
  // (see code_object_ below).  Must not be called before the handle has been
  // initialized.
  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }
1455
  // Initialize fields with filler values.  Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|.  At
  // the end of the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);


  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged, the result is in rdx, and rax gets clobbered.
  void TruncatingDiv(Register dividend, int32_t divisor);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason);

  // Aborts if |elements| is not a fast-elements backing store
  // (see the out-of-line definition for the exact checks).
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Check that the stack is aligned.
  void CheckStackAlignment();
1492
  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  // Whether |stub| may be called from the current code; defined out of line.
  inline bool AllowThisStubCall(CodeStub* stub);

  // Maps |reg| to its slot index in the safepoint register stack layout.
  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }
1503
  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadFeedbackVector(Register vector);

  // Activation support: enter/leave a stack frame of the given type.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

  // Expects object in rax and returns map with validated enum cache
  // in rax.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to equal.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);
1528
 private:
  // Order general registers are pushed by Pushad.
  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
  static const int kNumSafepointSavedRegisters = 12;
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  // Backing state for generating_stub()/has_frame() above.
  bool generating_stub_;
  bool has_frame_;
  bool root_array_available_;

  // Returns a register holding the smi value. The register MUST NOT be
  // modified. It may be the "smi 1 constant" register.
  Register GetSmiConstant(Smi* value);

  // Offset of |other| from the root register's base address.
  int64_t RootRegisterDelta(ExternalReference other);

  // Moves the smi value to the destination register.
  void LoadSmiConstant(Register dst, Smi* value);

  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      Label::Distance near_jump,
                      const CallWrapper& call_wrapper);

  void EnterExitFramePrologue(bool save_rax, StackFrame::Type frame_type);

  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);

  void LeaveExitFrameEpilogue(bool restore_context);

  // Allocation support helpers.
  // Loads the top of new-space into the result register.
  // Otherwise the address of the new-space top is loaded into scratch (if
  // scratch is valid), and the new-space top is loaded into result.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);

  void MakeSureDoubleAlignedHelper(Register result,
                                   Register scratch,
                                   Label* gc_required,
                                   AllocationFlags flags);

  // Update allocation top with value in result_end register.
  // If scratch is valid, it contains the address of the allocation top.
  void UpdateAllocationTopHelper(Register result_end,
                                 Register scratch,
                                 AllocationFlags flags);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* branch,
                  Label::Distance distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the position of the first bit.  Uses rcx as scratch and
  // leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  // Converts a register code to its safepoint stack slot index (registers
  // are pushed in kSafepointPushRegisterIndices order, so the index counts
  // from the top of the pushed block).
  static int SafepointRegisterStackIndex(int reg_code) {
    return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
  }

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
1612};
1613
1614
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated it
// causes an assertion to fail.
class CodePatcher {
 public:
  // Starts patching |size| bytes at |address|; the destructor (defined out
  // of line) finalizes the patch.
  CodePatcher(Isolate* isolate, byte* address, int size);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
1633
1634
1635// -----------------------------------------------------------------------------
1636// Static helper functions.
1637
1638// Generate an Operand for loading a field from an object.
1639inline Operand FieldOperand(Register object, int offset) {
1640  return Operand(object, offset - kHeapObjectTag);
1641}
1642
1643
1644// Generate an Operand for loading an indexed field from an object.
1645inline Operand FieldOperand(Register object,
1646                            Register index,
1647                            ScaleFactor scale,
1648                            int offset) {
1649  return Operand(object, index, scale, offset - kHeapObjectTag);
1650}
1651
1652
1653inline Operand ContextOperand(Register context, int index) {
1654  return Operand(context, Context::SlotOffset(index));
1655}
1656
1657
1658inline Operand ContextOperand(Register context, Register index) {
1659  return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
1660}
1661
1662
// Operand addressing the native-context slot of the current context, which
// is held in rsi (the context register).
inline Operand NativeContextOperand() {
  return ContextOperand(rsi, Context::NATIVE_CONTEXT_INDEX);
}
1666
1667
1668// Provides access to exit frame stack space (not GCed).
1669inline Operand StackSpaceOperand(int index) {
1670#ifdef _WIN64
1671  const int kShaddowSpace = 4;
1672  return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
1673#else
1674  return Operand(rsp, index * kPointerSize);
1675#endif
1676}
1677
1678
// Generate an Operand addressing the return-address slot located |disp|
// bytes from the stack pointer.
inline Operand StackOperandForReturnAddress(int32_t disp) {
  return Operand(rsp, disp);
}
1682
1683#define ACCESS_MASM(masm) masm->
1684
1685}  // namespace internal
1686}  // namespace v8
1687
1688#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
1689