// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
#define V8_X64_MACRO_ASSEMBLER_X64_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee-saved and isn't used by the
// function calling convention.
const Register kScratchRegister = { 10 };      // r10.
const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
const Register kRootRegister = { 13 };         // r13 (callee save).
// Value of smi in kSmiConstantRegister.
const int kSmiConstantRegisterValue = 1;
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
const int kRootRegisterBias = 128;

// Convenience for platform-independent signatures.
typedef Operand MemOperand;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};

enum SmiOperationConstraint {
  PRESERVE_SOURCE_REGISTER,
  BAILOUT_ON_NO_OVERFLOW,
  BAILOUT_ON_OVERFLOW,
  NUMBER_OF_CONSTRAINTS
};

STATIC_ASSERT(NUMBER_OF_CONSTRAINTS <= 8);

class SmiOperationExecutionMode : public EnumSet<SmiOperationConstraint, byte> {
 public:
  SmiOperationExecutionMode() : EnumSet<SmiOperationConstraint, byte>(0) { }
  explicit SmiOperationExecutionMode(byte bits)
      : EnumSet<SmiOperationConstraint, byte>(bits) { }
};
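
// A minimal sketch of how a mode is assembled from individual constraints and
// passed to one of the checked smi operations declared below; the registers,
// constant and bailout label are arbitrary illustrations, not prescribed usage.
//   SmiOperationExecutionMode mode;
//   mode.Add(PRESERVE_SOURCE_REGISTER);
//   mode.Add(BAILOUT_ON_OVERFLOW);
//   masm->SmiAddConstant(rdx, rcx, Smi::FromInt(1), mode, &bailout);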

#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3 = no_reg,
                Register reg4 = no_reg,
                Register reg5 = no_reg,
                Register reg6 = no_reg,
                Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif

// Forward declaration.
class JumpTarget;

struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;
  ScaleFactor scale;
};


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such a function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Prevent the use of the RootArray during the lifetime of this
  // scope object.
  class NoRootArrayScope BASE_EMBEDDED {
   public:
    explicit NoRootArrayScope(MacroAssembler* assembler)
        : variable_(&assembler->root_array_available_),
          old_value_(assembler->root_array_available_) {
      assembler->root_array_available_ = false;
    }
    ~NoRootArrayScope() {
      *variable_ = old_value_;
    }
   private:
    bool* variable_;
    bool old_value_;
  };

  // Operand pointing to an external reference.
  // May emit code to set up the scratch register. The operand is
  // only guaranteed to be correct as long as the scratch register
  // isn't changed.
  // If the operand is used more than once, use a scratch register
  // that is guaranteed not to be clobbered.
  Operand ExternalOperand(ExternalReference reference,
                          Register scratch = kScratchRegister);
  // Loads and stores the value of an external reference.
  // Special case code for load and store to take advantage of
  // load_rax/store_rax if possible/necessary.
  // For other operations, just use:
  //   Operand operand = ExternalOperand(extref);
  //   operation(operand, ..);
  void Load(Register destination, ExternalReference source);
  void Store(ExternalReference destination, Register source);
  // Loads the address of the external reference into the destination
  // register.
  void LoadAddress(Register destination, ExternalReference source);
  // Returns the size of the code generated by LoadAddress.
  // Used by CallSize(ExternalReference) to find the size of a call.
  int LoadAddressSize(ExternalReference source);
  // Pushes the address of the external reference onto the stack.
  void PushAddress(ExternalReference source);

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void StoreRoot(Register source, Heap::RootListIndex index);
  // Load a root value where the index (or part of it) is variable.
  // The variable_offset register is added to the fixed_offset value
  // to get the index into the root-array.
  void LoadRootIndexed(Register destination,
                       Register variable_offset,
                       int fixed_offset);
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // These functions do not arrange the registers in any particular order so
  // they are not useful for calls that can cause a GC.  The caller can
  // exclude up to 3 registers that do not need to be saved and restored.
  void PushCallerSaved(SaveFPRegsMode fp_mode,
                       Register exclusion1 = no_reg,
                       Register exclusion2 = no_reg,
                       Register exclusion3 = no_reg);
  void PopCallerSaved(SaveFPRegsMode fp_mode,
                      Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
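
  // Illustrative sketch (register choices are arbitrary): assuming the result
  // is produced in rax, exclude it from the save set so it survives the pop.
  //   __ PushCallerSaved(kSaveFPRegs, rax);
  //   ... code that may clobber caller-saved registers ...
  //   __ PopCallerSaved(kSaveFPRegs, rax);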

// ---------------------------------------------------------------------------
// GC Support


  enum RememberedSetFinalAction {
    kReturnAtEnd,
    kFallThroughAtEnd
  };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr,
                           Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object,
                     Register scratch,
                     int mask,
                     Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  void CheckMapDeprecated(Handle<Map> map,
                          Register scratch,
                          Label* if_deprecated);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_equal, branch, distance);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, equal, branch, distance);
  }

  // Check if an object has the black incremental marking color.  Also uses rcx!
  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black,
                   Label::Distance on_black_distance = Label::kFar);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object,
                        Label::Distance not_data_object_distance);

  // Checks the color of an object.  If the object is already grey or black
  // then we just fall through, since it is already live.  If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through.  For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Label* object_is_white_and_not_data,
                      Label::Distance distance);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);
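
  // Illustrative sketch (the field and registers are arbitrary): store rax
  // into a field of the object in rbx, then emit the write barrier for it.
  //   __ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rax);
  //   __ RecordWriteField(rbx, JSObject::kPropertiesOffset, rax, rcx,
  //                       kDontSaveFPRegs);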

  // As above, but the offset has the tag presubtracted.  For use with
  // Operand(reg, off).
  void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     save_fp,
                     remembered_set_action,
                     smi_check,
                     pointers_to_here_check_for_value);
  }

  // Notify the garbage collector that we wrote a pointer into a fixed array.
  // |array| is the array being stored into, |value| is the
  // object being stored.  |index| is the array index represented as a non-smi.
  // All registers are clobbered by the operation.  RecordWriteArray
  // filters out smis so it does not update the write barrier if the
  // value is a smi.
  void RecordWriteArray(
      Register array,
      Register value,
      Register index,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  void RecordWriteForMap(
      Register object,
      Register map,
      Register dst,
      SaveFPRegsMode save_fp);

  // For page containing |object| mark region covering |address|
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. The address and value registers are clobbered by the
  // operation.  RecordWrite filters out smis so it does not update
  // the write barrier if the value is a smi.
  void RecordWrite(
      Register object,
      Register address,
      Register value,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();

  // Generates function and stub prologue code.
  void StubPrologue();
  void Prologue(bool code_pre_aging);

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  //
  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);

  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
  // memory (not GCed) on the stack accessible via StackSpaceOperand.
  void EnterApiExitFrame(int arg_stack_space);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(bool save_doubles = false);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax (untouched).
  void LeaveApiExitFrame(bool restore_context);

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { Pushad(); }
  void PopSafepointRegisters() { Popad(); }
  // Store the value in register src in the safepoint register stack
  // slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    Move(kRootRegister, roots_array_start);
    addp(kRootRegister, Immediate(kRootRegisterBias));
  }

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);


  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Support for constant splitting.
  bool IsUnsafeInt(const int32_t x);
  void SafeMove(Register dst, Smi* src);
  void SafePush(Smi* src);

  void InitializeSmiConstantRegister() {
    Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
         Assembler::RelocInfoNone());
  }

  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
  // based on the value of the resulting smi.
  void Integer32ToSmi(Register dst, Register src);

  // Stores an integer32 value into a memory field that already holds a smi.
  void Integer32ToSmiField(const Operand& dst, Register src);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);
  void SmiToInteger32(Register dst, const Operand& src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);
  void SmiToInteger64(Register dst, const Operand& src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Divide a positive smi's integer value by a power of two.
  // Provides result as 32-bit integer value.
  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                           Register src,
                                           int power);

  // Perform the logical or of two smi values and return a smi value.
  // If either argument is not a smi, jump to on_not_smis and retain
  // the original values of source registers. The destination register
  // may be changed if it's not one of the source registers.
  void SmiOrIfSmis(Register dst,
                   Register src1,
                   Register src2,
                   Label* on_not_smis,
                   Label::Distance near_jump = Label::kFar);


  // Simple comparison of smis.  Both sides must be known smis to use these,
  // otherwise use Cmp.
  void SmiCompare(Register smi1, Register smi2);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(Register dst, const Operand& src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Compare the int32 in src register to the value of the smi stored at dst.
  void SmiCompareInteger32(const Operand& dst, Register src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);

  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);
  Condition CheckSmi(const Operand& src);

  // Is the value a non-negative tagged smi.
  Condition CheckNonNegativeSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Are both values non-negative tagged smis.
  Condition CheckBothNonNegativeSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first,
                           Register second,
                           Register scratch = kScratchRegister);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Check whether src is a Smi, and set dst to zero if it is a smi,
  // and to one if it isn't.
  void CheckSmiToIndicator(Register dst, Register src);
  void CheckSmiToIndicator(Register dst, const Operand& src);

  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value can be represented by a smi.
  void JumpIfValidSmiValue(Register src, Label* on_valid,
                           Label::Distance near_jump = Label::kFar);

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
                              Label::Distance near_jump = Label::kFar);

  // Jump if the unsigned integer value can be represented by a smi.
  void JumpIfUIntValidSmiValue(Register src, Label* on_valid,
                               Label::Distance near_jump = Label::kFar);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
                                  Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src,
                 Label* on_smi,
                 Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src,
                    Label* on_not_smi,
                    Label::Distance near_jump = Label::kFar);
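
  // Illustrative sketch (registers and label are arbitrary): take a slow path
  // unless rax holds a smi, then untag it.
  //   Label slow;
  //   __ JumpIfNotSmi(rax, &slow, Label::kNear);
  //   __ SmiToInteger32(rax, rax);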

  // Jump to label if the value is not a non-negative tagged smi.
  void JumpUnlessNonNegativeSmi(Register src,
                                Label* on_not_smi,
                                Label::Distance near_jump = Label::kFar);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src,
                               Smi* constant,
                               Label* on_equals,
                               Label::Distance near_jump = Label::kFar);

  // Jump if either or both registers are not smi values.
  void JumpIfNotBothSmi(Register src1,
                        Register src2,
                        Label* on_not_both_smi,
                        Label::Distance near_jump = Label::kFar);

  // Jump if either or both registers are not non-negative smi values.
  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
                                    Label* on_not_both_smi,
                                    Label::Distance near_jump = Label::kFar);

  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(const Operand& dst, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      SmiOperationExecutionMode mode,
                      Label* bailout_label,
                      Label::Distance near_jump = Label::kFar);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done. Sets the N and Z flags
  // based on the value of the resulting integer.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      SmiOperationExecutionMode mode,
                      Label* bailout_label,
                      Label::Distance near_jump = Label::kFar);

  // Negating a smi can give a negative zero or too large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Adds smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed if the operation is
  // successful, otherwise kept intact.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  void SmiAdd(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiAdd(Register dst,
              Register src1,
              Register src2);
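
  // Illustrative sketch (registers and label are arbitrary): add two smis,
  // falling back to a slow path if the result does not fit in a smi.
  //   Label not_smi_result;
  //   __ SmiAdd(rdx, rdx, rbx, &not_smi_result, Label::kNear);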

  // Subtracts smi values and returns the result as a smi.
  // If dst is src1, then src1 will be destroyed if the operation is
  // successful, otherwise kept intact.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  void SmiSub(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiSub(Register dst,
              Register src1,
              Register src2);

  void SmiSub(Register dst,
              Register src1,
              const Operand& src2);

  // Multiplies smi values and returns the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value,
                            Label* on_not_smi_result = NULL,
                            Label::Distance near_jump = Label::kFar);
  void SmiShiftLogicalRightConstant(Register dst,
                                    Register src,
                                    int shift_value,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump = Label::kFar);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smi_result = NULL,
                    Label::Distance near_jump = Label::kFar);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump = Label::kFar);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);

  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis,
                    Label::Distance near_jump = Label::kFar);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
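
  // Illustrative sketch (registers are arbitrary): load an element of the
  // FixedArray in rcx, indexed by the smi in rbx (which may be clobbered).
  //   SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
  //   masm->movp(rax, FieldOperand(rcx, index.reg, index.scale,
  //                                FixedArray::kHeaderSize));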

  // Add the value of a smi in memory to an int32 register.
  // Sets flags as a normal add.
  void AddSmiField(Register dst, const Operand& src);

  // Basic Smi operations.
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }

  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movp(dst, constant);
  }

  void Push(Smi* smi);

  // Save away a raw integer with pointer size on the stack as two integers
  // masquerading as smis so that the garbage collector skips visiting them.
  void PushRegisterAsTwoSmis(Register src, Register scratch = kScratchRegister);
  // Reconstruct a raw integer with pointer size from two integers masquerading
  // as smis on the top of stack.
  void PopRegisterAsTwoSmis(Register dst, Register scratch = kScratchRegister);

  void Test(const Operand& dst, Smi* source);


  // ---------------------------------------------------------------------------
  // String macros.

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* not_found);

  // If object is a string, its map is loaded into object_map.
  void JumpIfNotString(Register object,
                       Register object_map,
                       Label* not_string,
                       Label::Distance near_jump = Label::kFar);


  void JumpIfNotBothSequentialOneByteStrings(
      Register first_object, Register second_object, Register scratch1,
      Register scratch2, Label* on_not_both_flat_one_byte,
      Label::Distance near_jump = Label::kFar);

  // Check whether the instance type represents a flat one-byte string. Jump
  // to the label if not. If the instance type can be scratched, specify the
  // same register for both instance type and scratch.
  void JumpIfInstanceTypeIsNotSequentialOneByte(
      Register instance_type, Register scratch,
      Label* on_not_flat_one_byte_string,
      Label::Distance near_jump = Label::kFar);

  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
      Register first_object_instance_type, Register second_object_instance_type,
      Register scratch1, Register scratch2, Label* on_fail,
      Label::Distance near_jump = Label::kFar);

  void EmitSeqStringSetCharCheck(Register string,
                                 Register index,
                                 Register value,
                                 uint32_t encoding_mask);

  // Checks if the given register or operand is a unique name.
  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar);
  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar);

  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load/store with specific representation.
  void Load(Register dst, const Operand& src, Representation r);
  void Store(const Operand& dst, Register src, Representation r);

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, intptr_t x);

  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
  // register, which hinders register renaming and makes dependence chains
  // longer. So we use xorps to clear the dst register before cvtsi2sd.
  void Cvtlsi2sd(XMMRegister dst, Register src);
  void Cvtlsi2sd(XMMRegister dst, const Operand& src);

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // TestBit and Load SharedFunctionInfo special field.
  void TestBitSharedFunctionInfoSpecialField(Register base,
                                             int offset,
                                             int bit_index);
  void LoadSharedFunctionInfoSpecialField(Register dst,
                                          Register base,
                                          int offset);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Smi* src);
  void Cmp(const Operand& dst, Smi* src);
  void Push(Handle<Object> source);

  // Load a heap object and handle the case of new-space objects by
  // indirecting via a global cell.
  void MoveHeapObject(Register result, Handle<Object> object);

  // Load a global cell into a register.
  void LoadGlobalCell(Register dst, Handle<Cell> cell);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);
  // Emit code to discard a positive number of pointer-sized elements
  // from the stack under the return address which remains on the top,
  // clobbering the rsp register.
  void DropUnderReturnAddress(int stack_elements,
                              Register scratch = kScratchRegister);

  void Call(Label* target) { call(target); }
  void Push(Register src);
  void Push(const Operand& src);
  void PushQuad(const Operand& src);
  void Push(Immediate value);
  void PushImm32(int32_t imm32);
  void Pop(Register dst);
  void Pop(const Operand& dst);
  void PopQuad(const Operand& dst);
  void PushReturnAddressFrom(Register src) { pushq(src); }
  void PopReturnAddressTo(Register dst) { popq(dst); }
  void Move(Register dst, ExternalReference ext) {
    movp(dst, reinterpret_cast<void*>(ext.address()),
         RelocInfo::EXTERNAL_REFERENCE);
  }

  // Loads a pointer into a register with a relocation mode.
  void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
    // This method must not be used with heap object references. The stored
    // address is not GC safe. Use the handle version instead.
    DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
    movp(dst, ptr, rmode);
  }

  void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
    AllowDeferredHandleDereference using_raw_address;
    DCHECK(!RelocInfo::IsNone(rmode));
    DCHECK(value->IsHeapObject());
    DCHECK(!isolate()->heap()->InNewSpace(*value));
    movp(dst, reinterpret_cast<void*>(value.location()), rmode);
  }

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(const Operand& op);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(const Operand& op);
  void Call(Handle<Code> code_object,
            RelocInfo::Mode rmode,
            TypeFeedbackId ast_id = TypeFeedbackId::None());

  // The size of the code generated for different call instructions.
  int CallSize(Address destination) {
    return kCallSequenceLength;
  }
  int CallSize(ExternalReference ext);
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
  int CallSize(Register target) {
    // Opcode: REX_opt FF /2 m64
    return (target.high_bit() != 0) ? 3 : 2;
  }
  int CallSize(const Operand& target) {
    // Opcode: REX_opt FF /2 m64
    return (target.requires_rex() ? 2 : 1) + target.operand_size();
  }

  // Emit call to the code we are currently generating.
  void CallSelf() {
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    Call(self, RelocInfo::CODE_TARGET);
  }

  // Non-x64 instructions.
  // Push/pop all general purpose registers.
  // Does not push rsp/rbp nor any of the assembler's special purpose registers
  // (kScratchRegister, kSmiConstantRegister, kRootRegister).
  void Pushad();
  void Popad();
  // Sets the stack as after performing Popad, without actually loading the
  // registers.
  void Dropad();

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);
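
  // Illustrative sketch (registers and label are arbitrary): branch away if
  // the heap object in rax is not a JSFunction.
  //   Label not_function;
  //   __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx);
  //   __ j(not_equal, &not_function);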

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Label* fail,
                         Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements.  Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements.  Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Label* fail,
                            Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by index in
  // the FastDoubleElements array elements, otherwise jump to fail.  Note that
  // index must not be smi-tagged.
  void StoreNumberToDoubleElements(Register maybe_number,
                                   Register elements,
                                   Register index,
                                   XMMRegister xmm_scratch,
                                   Label* fail,
                                   int elements_offset = 0);

  // Compare an object's map with the specified map.
  void CompareMap(Register obj, Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Register unused,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // Check if the object in register heap_object is a name. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectNameType(Register heap_object,
                             Register map,
                             Register instance_type);

  // FCmp compares and pops the two values on top of the FPU stack.
  // The flag results are similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  void ClampUint8(Register reg);

  void ClampDoubleToUint8(XMMRegister input_reg,
                          XMMRegister temp_xmm_reg,
                          Register result_reg);

  void SlowTruncateToI(Register result_reg, Register input_reg,
      int offset = HeapNumber::kValueOffset - kHeapObjectTag);

  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
  void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);

  void DoubleToI(Register result_reg, XMMRegister input_reg,
                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
                 Label* lost_precision, Label* is_nan, Label* minus_zero,
                 Label::Distance dst = Label::kFar);

  void LoadUint32(XMMRegister dst, Register src);

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);

  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> Field::kShift;
    if (shift != 0) {
      shrp(reg, Immediate(shift));
    }
    andp(reg, Immediate(mask));
  }
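
  // Illustrative sketch (the field type is an arbitrary example): extract the
  // elements kind from a map's bit field 2 already loaded into rcx.
  //   __ DecodeField<Map::ElementsKindBits>(rcx);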

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    if (SmiValuesAre32Bits()) {
      andp(reg, Immediate(Field::kMask));
      shlp(reg, Immediate(kSmiShift - Field::kShift));
    } else {
      static const int shift = Field::kShift;
      static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
      DCHECK(SmiValuesAre31Bits());
      DCHECK(kSmiShift == kSmiTagSize);
      DCHECK((mask & 0x80000000u) == 0);
      if (shift < kSmiShift) {
        shlp(reg, Immediate(kSmiShift - shift));
      } else if (shift > kSmiShift) {
        sarp(reg, Immediate(shift - kSmiShift));
      }
      andp(reg, Immediate(mask));
    }
  }

  // Abort execution if argument is not a number, enabled via --debug-code.
  void AssertNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);
  void AssertSmi(const Operand& object);

  // Abort execution if a 64-bit register containing a 32-bit payload does not
  // have zeros in the top 32 bits, enabled via --debug-code.
  void AssertZeroExtended(Register reg);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // Abort execution if argument is not the root value with the given index,
  // enabled via --debug-code.
  void AssertRootValue(Register src,
                       Heap::RootListIndex root_value_index,
                       BailoutReason reason);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link it into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // Activate the top handler in the try handler chain and pass the
  // thrown value.
  void Throw(Register value);

  // Propagate an uncatchable exception out of the current JS stack.
  void ThrowUncatchable(Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  void GetNumberHash(Register r0, Register scratch);

  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register r0,
                                Register r1,
                                Register r2,
                                Register result);


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old pointer space. If the given space
  // is exhausted control continues at the gc_required label. The allocated
  // object is returned in result and end of the new object is returned in
  // result_end. The register scratch can be passed as no_reg in which case
  // an additional object reference will be added to the reloc info. The
  // returned pointers in result and result_end have not yet been tagged as
  // heap objects. If result_contains_top_on_entry is true the content of
  // result is known to be the allocation top on entry (could be result_end
  // from a previous call). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void Allocate(int object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(int header_size,
                ScaleFactor element_size,
                Register element_count,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  void Allocate(Register object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);
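
  // Illustrative sketch (size, registers and flags are arbitrary): allocate a
  // fixed-size object in new space, jumping to a slow path on failure.
  //   Label gc_required;
  //   __ Allocate(JSValue::kSize, rax, rbx, rdi, &gc_required, TAG_OBJECT);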
1177
1178  // Undo allocation in new space. The object passed and objects allocated after
1179  // it will no longer be allocated. Make sure that no pointers are left to the
1180  // object(s) no longer allocated as they would be invalid when allocation is
1181  // un-done.
1182  void UndoAllocationInNewSpace(Register object);
1183
1184  // Allocate a heap number in new space with undefined value. Returns
1185  // tagged pointer in result register, or jumps to gc_required if new
1186  // space is full.
1187  void AllocateHeapNumber(Register result,
1188                          Register scratch,
1189                          Label* gc_required,
1190                          MutableMode mode = IMMUTABLE);
1191
1192  // Allocate a sequential string. All the header fields of the string object
1193  // are initialized.
1194  void AllocateTwoByteString(Register result,
1195                             Register length,
1196                             Register scratch1,
1197                             Register scratch2,
1198                             Register scratch3,
1199                             Label* gc_required);
1200  void AllocateOneByteString(Register result, Register length,
1201                             Register scratch1, Register scratch2,
1202                             Register scratch3, Label* gc_required);
1203
1204  // Allocate a raw cons string object. Only the map field of the result is
1205  // initialized.
1206  void AllocateTwoByteConsString(Register result,
1207                          Register scratch1,
1208                          Register scratch2,
1209                          Label* gc_required);
1210  void AllocateOneByteConsString(Register result, Register scratch1,
1211                                 Register scratch2, Label* gc_required);
1212
1213  // Allocate a raw sliced string object. Only the map field of the result is
1214  // initialized.
1215  void AllocateTwoByteSlicedString(Register result,
1216                            Register scratch1,
1217                            Register scratch2,
1218                            Label* gc_required);
1219  void AllocateOneByteSlicedString(Register result, Register scratch1,
1220                                   Register scratch2, Label* gc_required);
1221
1222  // ---------------------------------------------------------------------------
1223  // Support functions.
1224
1225  // Check if result is zero and op is negative.
1226  void NegativeZeroTest(Register result, Register op, Label* then_label);
1227
1228  // Check if result is zero and op is negative in code using jump targets.
1229  void NegativeZeroTest(CodeGenerator* cgen,
1230                        Register result,
1231                        Register op,
1232                        JumpTarget* then_target);
1233
1234  // Check if result is zero and any of op1 and op2 are negative.
1235  // Register scratch is destroyed, and it must be different from op2.
1236  void NegativeZeroTest(Register result, Register op1, Register op2,
1237                        Register scratch, Label* then_label);
1238
1239  // Try to get function prototype of a function and puts the value in
1240  // the result register. Checks that the function really is a
1241  // function and jumps to the miss label if the fast checks fail. The
1242  // function register will be untouched; the other register may be
1243  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Call a runtime function and save the value of XMM registers.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
  }
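
  // A hedged usage sketch (not from this file): with the usual
  // __ == ACCESS_MASM(masm) shorthand and the runtime arguments (here none)
  // already pushed, a runtime call is typically emitted as
  //   __ CallRuntime(Runtime::kStackGuard, 0);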

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext, int result_size);

  // Prepares the stack for putting arguments (aligns it and so on).  The
  // WIN64 calling convention requires the pointer to the return value slot to
  // be put into rcx (rcx must be preserved until CallApiFunctionAndReturn).
  // Saves context (rsi).  Clobbers rax.  Allocates arg_stack_space *
  // kPointerSize inside the exit frame (not GCed), accessible via
  // StackSpaceOperand.
  void PrepareCallApiFunction(int arg_stack_space);

  // Calls an API function.  Allocates HandleScope, extracts the returned
  // value from the handle, and propagates exceptions.  Clobbers r14, r15, rbx
  // and caller-save registers.  Restores context.  On return, removes
  // stack_space * kPointerSize (GCed).
  void CallApiFunctionAndReturn(Register function_address,
                                ExternalReference thunk_ref,
                                Register thunk_last_arg,
                                int stack_space,
                                Operand return_value_operand,
                                Operand* context_restore_operand);
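
  // A hedged sketch of the expected pairing (the names kApiStackSpace,
  // api_function_address, thunk_ref, thunk_arg, kStackSpace and
  // return_value_operand are illustrative placeholders):
  //   __ PrepareCallApiFunction(kApiStackSpace);
  //   ... write the outgoing arguments via StackSpaceOperand(0), (1), ... ...
  //   __ CallApiFunctionAndReturn(api_function_address, thunk_ref, thunk_arg,
  //                               kStackSpace, return_value_operand, NULL);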

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // on the stack.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Calculate the number of stack slots to reserve for arguments when calling
  // a C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);
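
  // A hedged sketch of the usual C-call sequence, with |ext| standing in for
  // some two-argument ExternalReference:
  //   __ PrepareCallCFunction(2);
  //   ... move the two arguments into the platform's argument registers ...
  //   __ CallCFunction(ext, 2);
  // ArgumentStackSlotsForCFunctionCall(2) would report four slots on Windows
  // (the register shadow area) and zero elsewhere, since both arguments
  // travel in registers.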

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Copy length bytes from source to destination.
  // Uses a scratch register internally (pass a low-eight register if one is
  // free; otherwise kScratchRegister will be used).
  // min_length is a lower bound on the value that length will have.
  // The algorithm has some special cases that might be omitted if the string
  // is known to always be long.
  void CopyBytes(Register destination,
                 Register source,
                 Register length,
                 int min_length = 0,
                 Register scratch = kScratchRegister);

  // Initialize fields with filler values.  Fields starting at |start_offset|
  // up to, but not including, |end_offset| are overwritten with the value in
  // |filler|.  At the end of the loop, |start_offset| takes the value of
  // |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);
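
  // A hedged usage sketch (register choices are illustrative): fill a range
  // of fields with the undefined value, assuming rcx holds the start address
  // and rdi the end address:
  //   __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
  //   __ InitializeFieldsWithFiller(rcx, rdi, rdx);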


  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged, the result is in rdx, and rax gets clobbered.
  void TruncatingDiv(Register dividend, int32_t divisor);
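
  // A hedged usage sketch: with the dividend in a register other than rax or
  // rdx,
  //   __ TruncatingDiv(rsi, 3);
  // leaves rsi unchanged, puts the quotient in rdx, and clobbers rax.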

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason);

  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason msg);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  // Expects object in rax and returns map with validated enum cache
  // in rax.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Register null_value,
                      Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, condition flags are set to equal.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
                                         Register scratch_reg,
                                         Label* memento_found) {
    Label no_memento_found;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    j(equal, memento_found);
    bind(&no_memento_found);
  }

  // Jumps to found label if a prototype map has dictionary elements.
  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
                                        Register scratch1, Label* found);

 private:
  // Order in which general registers are pushed by Pushad:
  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
  static const int kNumSafepointSavedRegisters = 11;
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  bool generating_stub_;
  bool has_frame_;
  bool root_array_available_;

  // Returns a register holding the smi value. The register MUST NOT be
  // modified. It may be the "smi 1 constant" register.
  Register GetSmiConstant(Smi* value);

  int64_t RootRegisterDelta(ExternalReference other);

  // Moves the smi value to the destination register.
  void LoadSmiConstant(Register dst, Smi* value);

  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_register,
                      Label* done,
                      bool* definitely_mismatches,
                      InvokeFlag flag,
                      Label::Distance near_jump = Label::kFar,
                      const CallWrapper& call_wrapper = NullCallWrapper());

  void EnterExitFramePrologue(bool save_rax);

  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);

  void LeaveExitFrameEpilogue(bool restore_context);

  // Allocation support helpers.
  // Loads the top of new-space into the result register.
  // If scratch is valid, the address of the new-space top is loaded into
  // scratch as well, and the new-space top itself is loaded into result.
  void LoadAllocationTopHelper(Register result,
                               Register scratch,
                               AllocationFlags flags);

  void MakeSureDoubleAlignedHelper(Register result,
                                   Register scratch,
                                   Label* gc_required,
                                   AllocationFlags flags);

  // Update allocation top with value in result_end register.
  // If scratch is valid, it contains the address of the allocation top.
  void UpdateAllocationTopHelper(Register result_end,
                                 Register scratch,
                                 AllocationFlags flags);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* branch,
                  Label::Distance distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the mask for the first mark bit.  Uses rcx as scratch
  // and leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register mask_reg);

  // Helper for throwing exceptions.  Compute a handler address and jump to
  // it.  See the implementation for register usage.
  void JumpToHandlerEntry();

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code) {
    return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
  }
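
  // For illustration with made-up numbers (a hedged example, not the actual
  // table contents): if a register's push index were 0 and
  // kNumSafepointRegisters were 16, its safepoint stack index would be
  // 16 - 0 - 1 = 15, i.e. the register pushed first by Pushad sits deepest in
  // the safepoint frame.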

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};


// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation.  When using the code patcher,
// exactly the number of bytes specified must be emitted.  It is not legal to
// emit relocation information.  If any of these constraints is violated, an
// assertion fails.
class CodePatcher {
 public:
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Expected patch size in bytes.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
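
// A hedged usage sketch (the address and size are illustrative): overwrite
// two bytes at a known code address with int3 breakpoints:
//   CodePatcher patcher(address, 2);
//   patcher.masm()->int3();
//   patcher.masm()->int3();
// The destructor asserts that exactly the requested number of bytes was
// emitted.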


// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}
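
// A hedged usage sketch: load an object's map through a tagged pointer held
// in rax (HeapObject::kMapOffset is the usual header field offset):
//   __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
// The kHeapObjectTag adjustment above is what makes a tagged pointer usable
// as a plain base address.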


// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object,
                            Register index,
                            ScaleFactor scale,
                            int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}
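
// A hedged usage sketch: load an element of a FixedArray whose tagged pointer
// is in rbx, with the untagged element index in rcx:
//   __ movp(rax, FieldOperand(rbx, rcx, times_pointer_size,
//                             FixedArray::kHeaderSize));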


inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}


inline Operand GlobalObjectOperand() {
  return ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX);
}
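
// A hedged usage sketch: with the context in rsi, the global object can be
// loaded either way:
//   __ movp(rax, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
//   __ movp(rax, GlobalObjectOperand());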


// Provides access to exit frame stack space (not GCed).
inline Operand StackSpaceOperand(int index) {
#ifdef _WIN64
  const int kShadowSpace = 4;
  return Operand(rsp, (index + kShadowSpace) * kPointerSize);
#else
  return Operand(rsp, index * kPointerSize);
#endif
}
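
// For illustration (using the constants above): on Win64 the first four slots
// are the callee's register shadow space, so StackSpaceOperand(0) resolves to
// Operand(rsp, 4 * kPointerSize); on other platforms it resolves to
// Operand(rsp, 0).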


inline Operand StackOperandForReturnAddress(int32_t disp) {
  return Operand(rsp, disp);
}


#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) {                                                  \
    Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
    masm->pushfq();                                                          \
    masm->Pushad();                                                          \
    masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));            \
    masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE);        \
    masm->Pop(rax);                                                          \
    masm->Popad();                                                           \
    masm->popfq();                                                           \
  }                                                                          \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

} }  // namespace v8::internal

#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_