macro-assembler-x64.h revision 3fb3ca8c7ca439d408449a395897395c0faae8d1
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29#define V8_X64_MACRO_ASSEMBLER_X64_H_
30
31#include "assembler.h"
32#include "v8globals.h"
33
34namespace v8 {
35namespace internal {
36
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
  // No special flags.
  NO_ALLOCATION_FLAGS = 0,
  // Return the pointer to the allocated object, already tagged as a heap
  // object.
  TAG_OBJECT = 1 << 0,
  // The content of the result register already contains the allocation top in
  // new space.
  RESULT_CONTAINS_TOP = 1 << 1
};
47
48
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = { 10 };      // r10.
static const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
static const Register kRootRegister = { 13 };         // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
static const int kRootRegisterBias = 128;

// Convenience for platform-independent signatures.
typedef Operand MemOperand;

// Forward declaration.
class JumpTarget;
66
// A (register, scale) pair for use in scaled-index addressing, as produced
// by SmiToIndex and SmiToNegativeIndex below.
struct SmiIndex {
  SmiIndex(Register index_register, ScaleFactor scale)
      : reg(index_register),
        scale(scale) {}
  Register reg;       // Register holding the (possibly converted) index.
  ScaleFactor scale;  // Scale factor to apply to reg when addressing.
};
74
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such functions on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);
83
84  // Prevent the use of the RootArray during the lifetime of this
85  // scope object.
86  class NoRootArrayScope BASE_EMBEDDED {
87   public:
88    explicit NoRootArrayScope(MacroAssembler* assembler)
89        : variable_(&assembler->root_array_available_),
90          old_value_(assembler->root_array_available_) {
91      assembler->root_array_available_ = false;
92    }
93    ~NoRootArrayScope() {
94      *variable_ = old_value_;
95    }
96   private:
97    bool* variable_;
98    bool old_value_;
99  };
100
  // Operand pointing to an external reference.
  // May emit code to set up the scratch register. The operand is
  // only guaranteed to be correct as long as the scratch register
  // isn't changed.
  // If the operand is used more than once, use a scratch register
  // that is guaranteed not to be clobbered.
  Operand ExternalOperand(ExternalReference reference,
                          Register scratch = kScratchRegister);
  // Loads and stores the value of an external reference.
  // Special case code for load and store to take advantage of
  // load_rax/store_rax if possible/necessary.
  // For other operations, just use:
  //   Operand operand = ExternalOperand(extref);
  //   operation(operand, ..);
  void Load(Register destination, ExternalReference source);
  void Store(ExternalReference destination, Register source);
  // Loads the address of the external reference into the destination
  // register.
  void LoadAddress(Register destination, ExternalReference source);
  // Returns the size of the code generated by LoadAddress.
  // Used by CallSize(ExternalReference) to find the size of a call.
  int LoadAddressSize(ExternalReference source);

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void StoreRoot(Register source, Heap::RootListIndex index);
  // Load a root value where the index (or part of it) is variable.
  // The variable_offset register is added to the fixed_offset value
  // to get the index into the root-array.
  void LoadRootIndexed(Register destination,
                       Register variable_offset,
                       int fixed_offset);
  // Compare a register or memory operand against a root-array entry.
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);
  // Push the root-array entry at the given index onto the stack.
  void PushRoot(Heap::RootListIndex index);
136
  // ---------------------------------------------------------------------------
  // GC Support

  // For page containing |object| mark region covering |addr| dirty.
  // RecordWriteHelper only works if the object is not in new
  // space.
  void RecordWriteHelper(Register object,
                         Register addr,
                         Register scratch);

  // Check if object is in new space. The condition cc can be equal or
  // not_equal. If it is equal a jump will be done if the object is in new
  // space. The register scratch can be object itself, but it will be clobbered.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cc,
                  Label* branch,
                  Label::Distance near_jump = Label::kFar);

  // For page containing |object| mark region covering [object+offset]
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. If |offset| is zero, then the |scratch|
  // register contains the array index into the elements array
  // represented as an untagged 32-bit integer. All registers are
  // clobbered by the operation. RecordWrite filters out smis so it
  // does not update the write barrier if the value is a smi.
  void RecordWrite(Register object,
                   int offset,
                   Register value,
                   Register scratch);

  // For page containing |object| mark region covering [address]
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. All registers are clobbered by the
  // operation.  RecordWrite filters out smis so it does not update
  // the write barrier if the value is a smi.
  void RecordWrite(Register object,
                   Register address,
                   Register value);

  // For page containing |object| mark region covering [object+offset] dirty.
  // The value is known to not be a smi.
  // object is the object being stored into, value is the object being stored.
  // If offset is zero, then the scratch register contains the array index into
  // the elements array represented as an untagged 32-bit integer.
  // All registers are clobbered by the operation.
  void RecordWriteNonSmi(Register object,
                         int offset,
                         Register value,
                         Register scratch);
187
#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  // Emit a debug-break sequence (only available in debugger-support builds).
  void DebugBreak();
#endif
194
  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter specific kind of exit frame; either in normal or
  // debug mode. Expects the number of arguments in register rax and
  // sets up the number of arguments in register rdi and the pointer
  // to the first argument in register rsi.
  //
  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
  // accessible via StackSpaceOperand.
  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);

  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
  // memory (not GCed) on the stack accessible via StackSpaceOperand.
  void EnterApiExitFrame(int arg_stack_space);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax:rdx (untouched) and the pointer to the first
  // argument in register rsi.
  void LeaveExitFrame(bool save_doubles = false);

  // Leave the current exit frame. Expects/provides the return value in
  // register rax (untouched).
  void LeaveApiExitFrame();

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { Pushad(); }
  void PopSafepointRegisters() { Popad(); }
  // Store the value in register src in the safepoint register stack
  // slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  // Load dst from the safepoint register stack slot for register src.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);
233
  // Load kRootRegister with the address of the root array, offset by
  // kRootRegisterBias so that negative 8-bit displacements reach more
  // of the array (see the kRootRegisterBias comment above).
  void InitializeRootRegister() {
    ExternalReference roots_address =
        ExternalReference::roots_address(isolate());
    movq(kRootRegister, roots_address);
    addq(kRootRegister, Immediate(kRootRegisterBias));
  }
240
  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Set up call kind marking in rcx. The method takes rcx as an
  // explicit first parameter to make the code more readable at the
  // call sites.
  void SetCallKind(Register dst, CallKind kind);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InvokeFunction(JSFunction* function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  // Store the code object for the given builtin in the target register.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
290
291
  // ---------------------------------------------------------------------------
  // Smi tagging, untagging and operations on tagged smis.

  // Load kSmiConstantRegister with the tagged smi for
  // kSmiConstantRegisterValue.
  void InitializeSmiConstantRegister() {
    movq(kSmiConstantRegister,
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
         RelocInfo::NONE);
  }
300
  // Conversions between tagged smi values and non-tagged integer values.

  // Tag an integer value. The result must be known to be a valid smi value.
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
  // based on the value of the resulting smi.
  void Integer32ToSmi(Register dst, Register src);

  // Stores an integer32 value into a memory field that already holds a smi.
  void Integer32ToSmiField(const Operand& dst, Register src);

  // Adds constant to src and tags the result as a smi.
  // Result must be a valid smi.
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);

  // Convert smi to 32-bit integer. I.e., not sign extended into
  // high 32 bits of destination.
  void SmiToInteger32(Register dst, Register src);
  void SmiToInteger32(Register dst, const Operand& src);

  // Convert smi to 64-bit integer (sign extended if necessary).
  void SmiToInteger64(Register dst, Register src);
  void SmiToInteger64(Register dst, const Operand& src);

  // Multiply a positive smi's integer value by a power of two.
  // Provides result as 64-bit integer value.
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                             Register src,
                                             int power);

  // Divide a positive smi's integer value by a power of two.
  // Provides result as 32-bit integer value.
  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                           Register src,
                                           int power);

  // Perform the logical or of two smi values and return a smi value.
  // If either argument is not a smi, jump to on_not_smis and retain
  // the original values of source registers. The destination register
  // may be changed if it's not one of the source registers.
  void SmiOrIfSmis(Register dst,
                   Register src1,
                   Register src2,
                   Label* on_not_smis,
                   Label::Distance near_jump = Label::kFar);


  // Simple comparison of smis.  Both sides must be known smis to use these,
  // otherwise use Cmp.
  void SmiCompare(Register smi1, Register smi2);
  void SmiCompare(Register dst, Smi* src);
  void SmiCompare(Register dst, const Operand& src);
  void SmiCompare(const Operand& dst, Register src);
  void SmiCompare(const Operand& dst, Smi* src);
  // Compare the int32 in src register to the value of the smi stored at dst.
  void SmiCompareInteger32(const Operand& dst, Register src);
  // Sets sign and zero flags depending on value of smi in register.
  void SmiTest(Register src);
358
  // Functions performing a check on a known or potential smi. Returns
  // a condition that is satisfied if the check is successful.

  // Is the value a tagged smi.
  Condition CheckSmi(Register src);
  Condition CheckSmi(const Operand& src);

  // Is the value a non-negative tagged smi.
  Condition CheckNonNegativeSmi(Register src);

  // Are both values tagged smis.
  Condition CheckBothSmi(Register first, Register second);

  // Are both values non-negative tagged smis.
  Condition CheckBothNonNegativeSmi(Register first, Register second);

  // Is either value a tagged smi.
  Condition CheckEitherSmi(Register first,
                           Register second,
                           Register scratch = kScratchRegister);

  // Is the value the minimum smi value (since we are using
  // two's complement numbers, negating the value is known to yield
  // a non-smi value).
  Condition CheckIsMinSmi(Register src);

  // Checks whether a 32-bit integer value is valid for conversion
  // to a smi.
  Condition CheckInteger32ValidSmiValue(Register src);

  // Checks whether a 32-bit unsigned integer value is valid for
  // conversion to a smi.
  Condition CheckUInteger32ValidSmiValue(Register src);

  // Check whether src is a Smi, and set dst to zero if it is a smi,
  // and to one if it isn't.
  void CheckSmiToIndicator(Register dst, Register src);
  void CheckSmiToIndicator(Register dst, const Operand& src);
397
  // Test-and-jump functions. Typically combines a check function
  // above with a conditional jump.

  // Jump if the value cannot be represented by a smi.
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
                              Label::Distance near_jump = Label::kFar);

  // Jump if the unsigned integer value cannot be represented by a smi.
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
                                  Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is a tagged smi.
  void JumpIfSmi(Register src,
                 Label* on_smi,
                 Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is not a tagged smi.
  void JumpIfNotSmi(Register src,
                    Label* on_not_smi,
                    Label::Distance near_jump = Label::kFar);

  // Jump to label if the value is not a non-negative tagged smi.
  void JumpUnlessNonNegativeSmi(Register src,
                                Label* on_not_smi,
                                Label::Distance near_jump = Label::kFar);

  // Jump to label if the value, which must be a tagged smi, has value equal
  // to the constant.
  void JumpIfSmiEqualsConstant(Register src,
                               Smi* constant,
                               Label* on_equals,
                               Label::Distance near_jump = Label::kFar);

  // Jump if either or both registers are not smi values.
  void JumpIfNotBothSmi(Register src1,
                        Register src2,
                        Label* on_not_both_smi,
                        Label::Distance near_jump = Label::kFar);

  // Jump if either or both registers are not non-negative smi values.
  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
                                    Label* on_not_both_smi,
                                    Label::Distance near_jump = Label::kFar);
441
  // Operations on tagged smi values.

  // Smis represent a subset of integers. The subset is always equivalent to
  // a two's complement interpretation of a fixed number of bits.

  // Optimistically adds an integer constant to a supposed smi.
  // If the src is not a smi, or the result is not a smi, jump to
  // the label.
  void SmiTryAddConstant(Register dst,
                         Register src,
                         Smi* constant,
                         Label* on_not_smi_result,
                         Label::Distance near_jump = Label::kFar);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(Register dst, Register src, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result.
  // No overflow testing on the result is done.
  void SmiAddConstant(const Operand& dst, Smi* constant);

  // Add an integer constant to a tagged smi, giving a tagged smi as result,
  // or jumping to a label if the result cannot be represented by a smi.
  void SmiAddConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result,
                      Label::Distance near_jump = Label::kFar);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result. No testing on the result is done. Sets the N and Z flags
  // based on the value of the resulting integer.
  void SmiSubConstant(Register dst, Register src, Smi* constant);

  // Subtract an integer constant from a tagged smi, giving a tagged smi as
  // result, or jumping to a label if the result cannot be represented by a smi.
  void SmiSubConstant(Register dst,
                      Register src,
                      Smi* constant,
                      Label* on_not_smi_result,
                      Label::Distance near_jump = Label::kFar);

  // Negating a smi can give a negative zero or too large positive value.
  // NOTICE: This operation jumps on success, not failure!
  void SmiNeg(Register dst,
              Register src,
              Label* on_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Adds smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiAdd(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);
  void SmiAdd(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiAdd(Register dst,
              Register src1,
              Register src2);

  // Subtracts smi values and return the result as a smi.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiSub(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiSub(Register dst,
              Register src1,
              Register src2);

  void SmiSub(Register dst,
              Register src1,
              const Operand& src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  void SmiSub(Register dst,
              Register src1,
              const Operand& src2);

  // Multiplies smi values and return the result as a smi,
  // if possible.
  // If dst is src1, then src1 will be destroyed, even if
  // the operation is unsuccessful.
  void SmiMul(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Divides one smi by another and returns the quotient.
  // Clobbers rax and rdx registers.
  void SmiDiv(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Divides one smi by another and returns the remainder.
  // Clobbers rax and rdx registers.
  void SmiMod(Register dst,
              Register src1,
              Register src2,
              Label* on_not_smi_result,
              Label::Distance near_jump = Label::kFar);

  // Bitwise operations.
  void SmiNot(Register dst, Register src);
  void SmiAnd(Register dst, Register src1, Register src2);
  void SmiOr(Register dst, Register src1, Register src2);
  void SmiXor(Register dst, Register src1, Register src2);
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
  void SmiXorConstant(Register dst, Register src1, Smi* constant);

  void SmiShiftLeftConstant(Register dst,
                            Register src,
                            int shift_value);
  void SmiShiftLogicalRightConstant(Register dst,
                                  Register src,
                                  int shift_value,
                                  Label* on_not_smi_result,
                                  Label::Distance near_jump = Label::kFar);
  void SmiShiftArithmeticRightConstant(Register dst,
                                       Register src,
                                       int shift_value);

  // Shifts a smi value to the left, and returns the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLeft(Register dst,
                    Register src1,
                    Register src2);
  // Shifts a smi value to the right, shifting in zero bits at the top, and
  // returns the unsigned interpretation of the result if that is a smi.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftLogicalRight(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump = Label::kFar);
  // Shifts a smi value to the right, sign extending the top, and
  // returns the signed interpretation of the result. That will always
  // be a valid smi value, since it's numerically smaller than the
  // original.
  // Uses and clobbers rcx, so dst may not be rcx.
  void SmiShiftArithmeticRight(Register dst,
                               Register src1,
                               Register src2);
601
  // Specialized operations

  // Select the non-smi register of two registers where exactly one is a
  // smi. If neither are smis, jump to the failure label.
  void SelectNonSmi(Register dst,
                    Register src1,
                    Register src2,
                    Label* on_not_smis,
                    Label::Distance near_jump = Label::kFar);

  // Converts, if necessary, a smi to a combination of number and
  // multiplier to be used as a scaled index.
  // The src register contains a *positive* smi value. The shift is the
  // power of two to multiply the index value by (e.g.
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
  // The returned index register may be either src or dst, depending
  // on what is most efficient. If src and dst are different registers,
  // src is always unchanged.
  SmiIndex SmiToIndex(Register dst, Register src, int shift);

  // Converts a positive smi to a negative index.
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);

  // Add the value of a smi in memory to an int32 register.
  // Sets flags as a normal add.
  void AddSmiField(Register dst, const Operand& src);
628
  // Basic Smi operations.
  // Load a smi constant into a register.
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }

  // Store a smi constant to memory (staged through a register supplied
  // by GetSmiConstant).
  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movq(dst, constant);
  }

  void Push(Smi* smi);
  void Test(const Operand& dst, Smi* source);
641
  // ---------------------------------------------------------------------------
  // String macros.

  // If object is a string, its map is loaded into object_map.
  void JumpIfNotString(Register object,
                       Register object_map,
                       Label* not_string,
                       Label::Distance near_jump = Label::kFar);


  void JumpIfNotBothSequentialAsciiStrings(
      Register first_object,
      Register second_object,
      Register scratch1,
      Register scratch2,
      Label* on_not_both_flat_ascii,
      Label::Distance near_jump = Label::kFar);

  // Check whether the instance type represents a flat ascii string. Jump to the
  // label if not. If the instance type can be scratched specify same register
  // for both instance type and scratch.
  void JumpIfInstanceTypeIsNotSequentialAscii(
      Register instance_type,
      Register scratch,
      Label* on_not_flat_ascii_string,
      Label::Distance near_jump = Label::kFar);

  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* on_fail,
      Label::Distance near_jump = Label::kFar);
676
  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Smi* src);
  void Cmp(const Operand& dst, Smi* src);
  void Push(Handle<Object> source);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);
699
  // Call a label within the code we are generating.
  void Call(Label* target) { call(target); }

  // Control Flow
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object,
            RelocInfo::Mode rmode,
            unsigned ast_id = kNoASTId);
712
  // The size of the code generated for different call instructions.
  int CallSize(Address destination, RelocInfo::Mode rmode) {
    return kCallInstructionLength;
  }
  // Size of a call to an external reference (depends on LoadAddressSize).
  int CallSize(ExternalReference ext);
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
722  int CallSize(Register target) {
723    // Opcode: REX_opt FF /2 m64
724    return (target.high_bit() != 0) ? 3 : 2;
725  }
726  int CallSize(const Operand& target) {
727    // Opcode: REX_opt FF /2 m64
728    return (target.requires_rex() ? 2 : 1) + target.operand_size();
729  }
730
731  // Emit call to the code we are currently generating.
732  void CallSelf() {
733    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
734    Call(self, RelocInfo::CODE_TARGET);
735  }
736
737  // Non-x64 instructions.
738  // Push/pop all general purpose registers.
739  // Does not push rsp/rbp nor any of the assembler's special purpose registers
740  // (kScratchRegister, kSmiConstantRegister, kRootRegister).
741  void Pushad();
742  void Popad();
743  // Sets the stack as after performing Popad, without actually loading the
744  // registers.
745  void Dropad();
746
747  // Compare object type for heap object.
748  // Always use unsigned comparisons: above and below, not less and greater.
749  // Incoming register is heap_object and outgoing register is map.
750  // They may be the same register, and may be kScratchRegister.
751  void CmpObjectType(Register heap_object, InstanceType type, Register map);
752
753  // Compare instance type for map.
754  // Always use unsigned comparisons: above and below, not less and greater.
755  void CmpInstanceType(Register map, InstanceType type);
756
757  // Check if a map for a JSObject indicates that the object has fast elements.
758  // Jump to the specified label if it does not.
759  void CheckFastElements(Register map,
760                         Label* fail,
761                         Label::Distance distance = Label::kFar);
762
763  // Check if the map of an object is equal to a specified map and
764  // branch to label if not. Skip the smi check if not required
765  // (object is known to be a heap object)
766  void CheckMap(Register obj,
767                Handle<Map> map,
768                Label* fail,
769                SmiCheckType smi_check_type);
770
771  // Check if the map of an object is equal to a specified map and branch to a
772  // specified target if equal. Skip the smi check if not required (object is
773  // known to be a heap object)
774  void DispatchMap(Register obj,
775                   Handle<Map> map,
776                   Handle<Code> success,
777                   SmiCheckType smi_check_type);
778
779  // Check if the object in register heap_object is a string. Afterwards the
780  // register map contains the object map and the register instance_type
781  // contains the instance_type. The registers map and instance_type can be the
782  // same in which case it contains the instance type afterwards. Either of the
783  // registers map and instance_type can be the same as heap_object.
784  Condition IsObjectStringType(Register heap_object,
785                               Register map,
786                               Register instance_type);
787
788  // FCmp compares and pops the two values on top of the FPU stack.
  // The flag results are similar to integer cmp, but require unsigned
  // jcc instructions (ja, jae, jb, jbe, je, and jz).
791  void FCmp();
792
793  void ClampUint8(Register reg);
794
795  void ClampDoubleToUint8(XMMRegister input_reg,
796                          XMMRegister temp_xmm_reg,
797                          Register result_reg,
798                          Register temp_reg);
799
800  void LoadInstanceDescriptors(Register map, Register descriptors);
801
802  // Abort execution if argument is not a number. Used in debug code.
803  void AbortIfNotNumber(Register object);
804
805  // Abort execution if argument is a smi. Used in debug code.
806  void AbortIfSmi(Register object);
807
808  // Abort execution if argument is not a smi. Used in debug code.
809  void AbortIfNotSmi(Register object);
810  void AbortIfNotSmi(const Operand& object);
811
812  // Abort execution if argument is a string. Used in debug code.
813  void AbortIfNotString(Register object);
814
815  // Abort execution if argument is not the root value with the given index.
816  void AbortIfNotRootValue(Register src,
817                           Heap::RootListIndex root_value_index,
818                           const char* message);
819
820  // ---------------------------------------------------------------------------
821  // Exception handling
822
823  // Push a new try handler and link into try handler chain.  The return
824  // address must be pushed before calling this helper.
825  void PushTryHandler(CodeLocation try_location, HandlerType type);
826
827  // Unlink the stack handler on top of the stack from the try handler chain.
828  void PopTryHandler();
829
  // Activate the top handler in the try handler chain and pass the
831  // thrown value.
832  void Throw(Register value);
833
834  // Propagate an uncatchable exception out of the current JS stack.
835  void ThrowUncatchable(UncatchableExceptionType type, Register value);
836
837  // ---------------------------------------------------------------------------
838  // Inline caching support
839
840  // Generate code for checking access rights - used for security checks
841  // on access to global objects across environments. The holder register
842  // is left untouched, but the scratch register and kScratchRegister,
843  // which must be different, are clobbered.
844  void CheckAccessGlobalProxy(Register holder_reg,
845                              Register scratch,
846                              Label* miss);
847
848
849  void LoadFromNumberDictionary(Label* miss,
850                                Register elements,
851                                Register key,
852                                Register r0,
853                                Register r1,
854                                Register r2,
855                                Register result);
856
857
858  // ---------------------------------------------------------------------------
859  // Allocation support
860
861  // Allocate an object in new space. If the new space is exhausted control
862  // continues at the gc_required label. The allocated object is returned in
863  // result and end of the new object is returned in result_end. The register
864  // scratch can be passed as no_reg in which case an additional object
865  // reference will be added to the reloc info. The returned pointers in result
866  // and result_end have not yet been tagged as heap objects. If
867  // result_contains_top_on_entry is true the content of result is known to be
868  // the allocation top on entry (could be result_end from a previous call to
869  // AllocateInNewSpace). If result_contains_top_on_entry is true scratch
870  // should be no_reg as it is never used.
871  void AllocateInNewSpace(int object_size,
872                          Register result,
873                          Register result_end,
874                          Register scratch,
875                          Label* gc_required,
876                          AllocationFlags flags);
877
878  void AllocateInNewSpace(int header_size,
879                          ScaleFactor element_size,
880                          Register element_count,
881                          Register result,
882                          Register result_end,
883                          Register scratch,
884                          Label* gc_required,
885                          AllocationFlags flags);
886
887  void AllocateInNewSpace(Register object_size,
888                          Register result,
889                          Register result_end,
890                          Register scratch,
891                          Label* gc_required,
892                          AllocationFlags flags);
893
894  // Undo allocation in new space. The object passed and objects allocated after
895  // it will no longer be allocated. Make sure that no pointers are left to the
896  // object(s) no longer allocated as they would be invalid when allocation is
897  // un-done.
898  void UndoAllocationInNewSpace(Register object);
899
900  // Allocate a heap number in new space with undefined value. Returns
901  // tagged pointer in result register, or jumps to gc_required if new
902  // space is full.
903  void AllocateHeapNumber(Register result,
904                          Register scratch,
905                          Label* gc_required);
906
907  // Allocate a sequential string. All the header fields of the string object
908  // are initialized.
909  void AllocateTwoByteString(Register result,
910                             Register length,
911                             Register scratch1,
912                             Register scratch2,
913                             Register scratch3,
914                             Label* gc_required);
915  void AllocateAsciiString(Register result,
916                           Register length,
917                           Register scratch1,
918                           Register scratch2,
919                           Register scratch3,
920                           Label* gc_required);
921
922  // Allocate a raw cons string object. Only the map field of the result is
923  // initialized.
924  void AllocateConsString(Register result,
925                          Register scratch1,
926                          Register scratch2,
927                          Label* gc_required);
928  void AllocateAsciiConsString(Register result,
929                               Register scratch1,
930                               Register scratch2,
931                               Label* gc_required);
932
933  // ---------------------------------------------------------------------------
934  // Support functions.
935
936  // Check if result is zero and op is negative.
937  void NegativeZeroTest(Register result, Register op, Label* then_label);
938
939  // Check if result is zero and op is negative in code using jump targets.
940  void NegativeZeroTest(CodeGenerator* cgen,
941                        Register result,
942                        Register op,
943                        JumpTarget* then_target);
944
945  // Check if result is zero and any of op1 and op2 are negative.
946  // Register scratch is destroyed, and it must be different from op2.
947  void NegativeZeroTest(Register result, Register op1, Register op2,
948                        Register scratch, Label* then_label);
949
950  // Try to get function prototype of a function and puts the value in
951  // the result register. Checks that the function really is a
952  // function and jumps to the miss label if the fast checks fail. The
953  // function register will be untouched; the other register may be
954  // clobbered.
955  void TryGetFunctionPrototype(Register function,
956                               Register result,
957                               Label* miss);
958
959  // Generates code for reporting that an illegal operation has
960  // occurred.
961  void IllegalOperation(int num_arguments);
962
963  // Picks out an array index from the hash field.
964  // Register use:
965  //   hash - holds the index's hash. Clobbered.
966  //   index - holds the overwritten index on exit.
967  void IndexFromHash(Register hash, Register index);
968
969  // Find the function context up the context chain.
970  void LoadContext(Register dst, int context_chain_length);
971
972  // Load the global function with the given index.
973  void LoadGlobalFunction(int index, Register function);
974
975  // Load the initial map from the global function. The registers
976  // function and map can be the same.
977  void LoadGlobalFunctionInitialMap(Register function, Register map);
978
979  // ---------------------------------------------------------------------------
980  // Runtime calls
981
982  // Call a code stub.
983  void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
984
985  // Call a code stub and return the code object called.  Try to generate
986  // the code if necessary.  Do not perform a GC but instead return a retry
987  // after GC failure.
988  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub);
989
990  // Tail call a code stub (jump).
991  void TailCallStub(CodeStub* stub);
992
993  // Tail call a code stub (jump) and return the code object called.  Try to
994  // generate the code if necessary.  Do not perform a GC but instead return
995  // a retry after GC failure.
996  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub);
997
998  // Return from a code stub after popping its arguments.
999  void StubReturn(int argc);
1000
1001  // Call a runtime routine.
1002  void CallRuntime(const Runtime::Function* f, int num_arguments);
1003
1004  // Call a runtime function and save the value of XMM registers.
1005  void CallRuntimeSaveDoubles(Runtime::FunctionId id);
1006
1007  // Call a runtime function, returning the CodeStub object called.
1008  // Try to generate the stub code if necessary.  Do not perform a GC
1009  // but instead return a retry after GC failure.
1010  MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
1011                                              int num_arguments);
1012
1013  // Convenience function: Same as above, but takes the fid instead.
1014  void CallRuntime(Runtime::FunctionId id, int num_arguments);
1015
1016  // Convenience function: Same as above, but takes the fid instead.
1017  MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::FunctionId id,
1018                                              int num_arguments);
1019
1020  // Convenience function: call an external reference.
1021  void CallExternalReference(const ExternalReference& ext,
1022                             int num_arguments);
1023
1024  // Tail call of a runtime routine (jump).
1025  // Like JumpToExternalReference, but also takes care of passing the number
1026  // of parameters.
1027  void TailCallExternalReference(const ExternalReference& ext,
1028                                 int num_arguments,
1029                                 int result_size);
1030
1031  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
1032      const ExternalReference& ext, int num_arguments, int result_size);
1033
1034  // Convenience function: tail call a runtime routine (jump).
1035  void TailCallRuntime(Runtime::FunctionId fid,
1036                       int num_arguments,
1037                       int result_size);
1038
1039  MUST_USE_RESULT  MaybeObject* TryTailCallRuntime(Runtime::FunctionId fid,
1040                                                   int num_arguments,
1041                                                   int result_size);
1042
1043  // Jump to a runtime routine.
1044  void JumpToExternalReference(const ExternalReference& ext, int result_size);
1045
1046  // Jump to a runtime routine.
1047  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext,
1048                                          int result_size);
1049
1050  // Prepares stack to put arguments (aligns and so on).
  // The WIN64 calling convention requires putting the pointer to the return
  // value slot into rcx (rcx must be preserved until
  // TryCallApiFunctionAndReturn).
1053  // Saves context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
1054  // inside the exit frame (not GCed) accessible via StackSpaceOperand.
1055  void PrepareCallApiFunction(int arg_stack_space);
1056
1057  // Calls an API function. Allocates HandleScope, extracts
1058  // returned value from handle and propagates exceptions.
1059  // Clobbers r14, r15, rbx and caller-save registers. Restores context.
1060  // On return removes stack_space * kPointerSize (GCed).
1061  MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
1062      ApiFunction* function, int stack_space);
1063
1064  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
1066  // etc., not pushed. The argument count assumes all arguments are word sized.
1067  // The number of slots reserved for arguments depends on platform. On Windows
1068  // stack slots are reserved for the arguments passed in registers. On other
1069  // platforms stack slots are only reserved for the arguments actually passed
1070  // on the stack.
1071  void PrepareCallCFunction(int num_arguments);
1072
1073  // Calls a C function and cleans up the space for arguments allocated
1074  // by PrepareCallCFunction. The called function is not allowed to trigger a
1075  // garbage collection, since that might move the code and invalidate the
1076  // return address (unless this is somehow accounted for by the called
1077  // function).
1078  void CallCFunction(ExternalReference function, int num_arguments);
1079  void CallCFunction(Register function, int num_arguments);
1080
1081  // Calculate the number of stack slots to reserve for arguments when calling a
1082  // C function.
1083  int ArgumentStackSlotsForCFunctionCall(int num_arguments);
1084
1085  // ---------------------------------------------------------------------------
1086  // Utilities
1087
1088  void Ret();
1089
1090  // Return and drop arguments from stack, where the number of arguments
1091  // may be bigger than 2^16 - 1.  Requires a scratch register.
1092  void Ret(int bytes_dropped, Register scratch);
1093
  // Returns the handle that is patched with the code object on
  // installation; must not be called before that handle is initialized.
  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }
1098
1099  // Copy length bytes from source to destination.
1100  // Uses scratch register internally (if you have a low-eight register
1101  // free, do use it, otherwise kScratchRegister will be used).
1102  // The min_length is a minimum limit on the value that length will have.
1103  // The algorithm has some special cases that might be omitted if the string
1104  // is known to always be long.
1105  void CopyBytes(Register destination,
1106                 Register source,
1107                 Register length,
1108                 int min_length = 0,
1109                 Register scratch = kScratchRegister);
1110
1111
1112  // ---------------------------------------------------------------------------
1113  // StatsCounter support
1114
1115  void SetCounter(StatsCounter* counter, int value);
1116  void IncrementCounter(StatsCounter* counter, int value);
1117  void DecrementCounter(StatsCounter* counter, int value);
1118
1119
1120  // ---------------------------------------------------------------------------
1121  // Debugging
1122
1123  // Calls Abort(msg) if the condition cc is not satisfied.
1124  // Use --debug_code to enable.
1125  void Assert(Condition cc, const char* msg);
1126
1127  void AssertFastElements(Register elements);
1128
1129  // Like Assert(), but always enabled.
1130  void Check(Condition cc, const char* msg);
1131
1132  // Print a message to stdout and abort execution.
1133  void Abort(const char* msg);
1134
1135  // Check that the stack is aligned.
1136  void CheckStackAlignment();
1137
1138  // Verify restrictions about code generated in stubs.
  // True while code for a stub is being generated.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  // Controls whether the generated code is allowed to call stubs.
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }
1143
  // Returns the safepoint stack slot index for the given register,
  // derived from the order registers are pushed by Pushad.
  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }
1147
1148 private:
1149  // Order general registers are pushed by Pushad.
1150  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
1151  static int kSafepointPushRegisterIndices[Register::kNumRegisters];
1152  static const int kNumSafepointSavedRegisters = 11;
1153  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1154
1155  bool generating_stub_;
1156  bool allow_stub_calls_;
1157  bool root_array_available_;
1158
1159  // Returns a register holding the smi value. The register MUST NOT be
1160  // modified. It may be the "smi 1 constant" register.
1161  Register GetSmiConstant(Smi* value);
1162
1163  // Moves the smi value to the destination register.
1164  void LoadSmiConstant(Register dst, Smi* value);
1165
1166  // This handle will be patched with the code object on installation.
1167  Handle<Object> code_object_;
1168
1169  // Helper functions for generating invokes.
1170  void InvokePrologue(const ParameterCount& expected,
1171                      const ParameterCount& actual,
1172                      Handle<Code> code_constant,
1173                      Register code_register,
1174                      Label* done,
1175                      InvokeFlag flag,
1176                      Label::Distance near_jump = Label::kFar,
1177                      const CallWrapper& call_wrapper = NullCallWrapper(),
1178                      CallKind call_kind = CALL_AS_METHOD);
1179
1180  // Activation support.
1181  void EnterFrame(StackFrame::Type type);
1182  void LeaveFrame(StackFrame::Type type);
1183
1184  void EnterExitFramePrologue(bool save_rax);
1185
1186  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
1187  // accessible via StackSpaceOperand.
1188  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
1189
1190  void LeaveExitFrameEpilogue();
1191
1192  // Allocation support helpers.
1193  // Loads the top of new-space into the result register.
1194  // Otherwise the address of the new-space top is loaded into scratch (if
1195  // scratch is valid), and the new-space top is loaded into result.
1196  void LoadAllocationTopHelper(Register result,
1197                               Register scratch,
1198                               AllocationFlags flags);
1199  // Update allocation top with value in result_end register.
1200  // If scratch is valid, it contains the address of the allocation top.
1201  void UpdateAllocationTopHelper(Register result_end, Register scratch);
1202
1203  // Helper for PopHandleScope.  Allowed to perform a GC and returns
1204  // NULL if gc_allowed.  Does not perform a GC if !gc_allowed, and
1205  // possibly returns a failure object indicating an allocation failure.
1206  Object* PopHandleScopeHelper(Register saved,
1207                               Register scratch,
1208                               bool gc_allowed);
1209
1210
1211  // Compute memory operands for safepoint stack slots.
1212  Operand SafepointRegisterSlot(Register reg);
1213  static int SafepointRegisterStackIndex(int reg_code) {
1214    return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
1215  }
1216
1217  // Needs access to SafepointRegisterStackIndex for optimized frame
1218  // traversal.
1219  friend class OptimizedFrame;
1220};
1221
1222
1223// The code patcher is used to patch (typically) small parts of code e.g. for
1224// debugging and other types of instrumentation. When using the code patcher
1225// the exact number of bytes specified must be emitted. Is not legal to emit
1226// relocation information. If any of these constraints are violated it causes
1227// an assertion.
class CodePatcher {
 public:
  // Begin patching exactly `size` bytes of code at `address`.
  CodePatcher(byte* address, int size);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
1241
1242
1243// -----------------------------------------------------------------------------
1244// Static helper functions.
1245
1246// Generate an Operand for loading a field from an object.
1247static inline Operand FieldOperand(Register object, int offset) {
1248  return Operand(object, offset - kHeapObjectTag);
1249}
1250
1251
1252// Generate an Operand for loading an indexed field from an object.
1253static inline Operand FieldOperand(Register object,
1254                                   Register index,
1255                                   ScaleFactor scale,
1256                                   int offset) {
1257  return Operand(object, index, scale, offset - kHeapObjectTag);
1258}
1259
1260
1261static inline Operand ContextOperand(Register context, int index) {
1262  return Operand(context, Context::SlotOffset(index));
1263}
1264
1265
// Operand for the global object slot of the current context (held in rsi).
static inline Operand GlobalObjectOperand() {
  return ContextOperand(rsi, Context::GLOBAL_INDEX);
}
1269
1270
1271// Provides access to exit frame stack space (not GCed).
1272static inline Operand StackSpaceOperand(int index) {
1273#ifdef _WIN64
1274  const int kShaddowSpace = 4;
1275  return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
1276#else
1277  return Operand(rsp, index * kPointerSize);
1278#endif
1279}
1280
1281
1282
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
// When coverage is enabled, every ACCESS_MASM expansion first emits a
// runtime call that logs the C++ source location generating the code.
// NOTE(review): pushfd/pushad/popad/popfd are ia32 mnemonics, and
// reinterpret_cast<int> truncates a 64-bit pointer; this block appears to
// be inherited from the ia32 macro assembler — confirm before enabling
// GENERATED_CODE_COVERAGE on x64.
#define ACCESS_MASM(masm) {                                               \
    byte* x64_coverage_function =                                         \
        reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
    masm->pushfd();                                                       \
    masm->pushad();                                                       \
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));         \
    masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY);          \
    masm->pop(rax);                                                       \
    masm->popad();                                                        \
    masm->popfd();                                                        \
  }                                                                       \
  masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
1303
1304} }  // namespace v8::internal
1305
1306#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_
1307