// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h"
#include "src/globals.h"

namespace v8 {
namespace internal {

// Give alias names to registers for calling conventions.
const Register kReturnRegister0 = {Register::kCode_eax};
const Register kReturnRegister1 = {Register::kCode_edx};
const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
const Register kAllocateSizeRegister = {Register::kCode_edx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
const Register kInterpreterDispatchTableRegister = {Register::kCode_esi};
const Register kJavaScriptCallArgCountRegister = {Register::kCode_eax};
const Register kJavaScriptCallNewTargetRegister = {Register::kCode_edx};
const Register kRuntimeCallFunctionRegister = {Register::kCode_ebx};
const Register kRuntimeCallArgCountRegister = {Register::kCode_eax};

// Convenience for platform-independent signatures.  We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum PointersToHereCheck {
  kPointersToHereMaybeInteresting,
  kPointersToHereAreAlwaysInteresting
};

enum RegisterValueType { REGISTER_VALUE_IS_SMI, REGISTER_VALUE_IS_INT32 };

enum class ReturnAddressState { kOnStack, kNotOnStack };

#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
                Register reg4 = no_reg, Register reg5 = no_reg,
                Register reg6 = no_reg, Register reg7 = no_reg,
                Register reg8 = no_reg);
#endif

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object);

  void Load(Register dst, const Operand& src, Representation r);
  void Store(Register src, const Operand& dst, Representation r);

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int32_t x) {
    if (x == 0) {
      xor_(dst, dst);
    } else {
      mov(dst, Immediate(x));
    }
  }
  void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
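
  // Usage sketch (illustrative; assumes a MacroAssembler* |masm| inside a
  // code-generation function). Set picks the shorter xor encoding for zero
  // and a mov otherwise:
  //
  //   masm->Set(eax, 0);                   // emits: xor eax, eax
  //   masm->Set(ebx, 1234);                // emits: mov ebx, 1234
  //   masm->Set(Operand(esp, 0), 42);      // emits: mov [esp], 42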

  // Operations on roots in the root-array.
  void LoadRoot(Register destination, Heap::RootListIndex index);
  void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
  void CompareRoot(Register with, Register scratch, Heap::RootListIndex index);
  // These methods can only be used with constant roots (i.e. non-writable
  // and not in new space).
  void CompareRoot(Register with, Heap::RootListIndex index);
  void CompareRoot(const Operand& with, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index);

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }
  void JumpIfRoot(const Operand& with, Heap::RootListIndex index,
                  Label* if_equal,
                  Label::Distance if_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(equal, if_equal, if_equal_distance);
  }

  // Compare the object in a register to a value and jump if they are not
  // equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
  void JumpIfNotRoot(const Operand& with, Heap::RootListIndex index,
                     Label* if_not_equal,
                     Label::Distance if_not_equal_distance = Label::kFar) {
    CompareRoot(with, index);
    j(not_equal, if_not_equal, if_not_equal_distance);
  }
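
  // Usage sketch (illustrative; assumes a MacroAssembler* |masm| and a label
  // owned by the surrounding code-generation function):
  //
  //   Label is_undefined;
  //   masm->JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &is_undefined);
  //   // ... eax is known not to be the undefined sentinel here ...
  //   masm->bind(&is_undefined);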

  // These functions do not arrange the registers in any particular order so
  // they are not useful for calls that can cause a GC.  The caller can
  // exclude up to 3 registers that do not need to be saved and restored.
  void PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                       Register exclusion2 = no_reg,
                       Register exclusion3 = no_reg);
  void PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);

  // ---------------------------------------------------------------------------
  // GC Support
  enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };

  // Record in the remembered set the fact that we have a pointer to new space
  // at the address pointed to by the addr register.  Only works if addr is not
  // in new space.
  void RememberedSetHelper(Register object,  // Used for debug code.
                           Register addr, Register scratch,
                           SaveFPRegsMode save_fp,
                           RememberedSetFinalAction and_then);

  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met,
                     Label::Distance condition_met_distance = Label::kFar);

  void CheckPageFlagForMap(
      Handle<Map> map, int mask, Condition cc, Label* condition_met,
      Label::Distance condition_met_distance = Label::kFar);

  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch,
                           Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, zero, branch, distance);
  }

  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object, Register scratch, Label* branch,
                        Label::Distance distance = Label::kFar) {
    InNewSpace(object, scratch, not_zero, branch, distance);
  }

  // Check if an object has a given incremental marking color.  Also uses ecx!
  void HasColor(Register object, Register scratch0, Register scratch1,
                Label* has_color, Label::Distance has_color_distance,
                int first_bit, int second_bit);

  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                   Label* on_black,
                   Label::Distance on_black_distance = Label::kFar);

  // Checks the color of an object.  If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Label* value_is_white, Label::Distance distance);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored.  value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // As above, but the offset has the tag presubtracted.  For use with
  // Operand(reg, off).
  void RecordWriteContextSlot(
      Register context, int offset, Register value, Register scratch,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting) {
    RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
                     remembered_set_action, smi_check,
                     pointers_to_here_check_for_value);
  }
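
  // Usage sketch (illustrative; the register choices and |kPayloadOffset| are
  // hypothetical). A store of a heap pointer into an object field is normally
  // followed by the write barrier for that field:
  //
  //   masm->mov(FieldOperand(edx, kPayloadOffset), eax);
  //   masm->RecordWriteField(edx, kPayloadOffset, eax, ecx, kDontSaveFPRegs);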

  // Notify the garbage collector that we wrote a pointer into a fixed array.
  // |array| is the array being stored into, |value| is the
  // object being stored.  |index| is the array index represented as a
  // Smi. All registers are clobbered by the operation. RecordWriteArray
  // filters out smis so it does not update the write barrier if the
  // value is a smi.
  void RecordWriteArray(
      Register array, Register value, Register index, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // For page containing |object| mark region covering |address|
  // dirty. |object| is the object being stored into, |value| is the
  // object being stored. The address and value registers are clobbered by the
  // operation. RecordWrite filters out smis so it does not update the
  // write barrier if the value is a smi.
  void RecordWrite(
      Register object, Register address, Register value, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PointersToHereCheck pointers_to_here_check_for_value =
          kPointersToHereMaybeInteresting);

  // Notify the garbage collector that we wrote a code entry into a
  // JSFunction. Only scratch is clobbered by the operation.
  void RecordWriteCodeEntryField(Register js_function, Register code_entry,
                                 Register scratch);

  // For page containing |object| mark the region covering the object's map
  // dirty. |object| is the object being stored into, |map| is the Map object
  // that was stored.
  void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
                         Register scratch2, SaveFPRegsMode save_fp);

  // Frame restart support
  void MaybeDropFrames();

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue(bool code_pre_aging);

  // Enter specific kind of exit frame. Expects the number of
  // arguments in register eax and sets up the number of arguments in
  // register edi and the pointer to the first argument in register
  // esi.
  void EnterExitFrame(int argc, bool save_doubles, StackFrame::Type frame_type);

  void EnterApiExitFrame(int argc);

  // Leave the current exit frame. Expects the return value in
  // register eax:edx (untouched) and the pointer to the first
  // argument in register esi (if pop_arguments == true).
  void LeaveExitFrame(bool save_doubles, bool pop_arguments = true);

  // Leave the current exit frame. Expects the return value in
  // register eax (untouched).
  void LeaveApiExitFrame(bool restore_context);

  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);

  // Push and pop the registers that can hold pointers.
  void PushSafepointRegisters() { pushad(); }
  void PopSafepointRegisters() { popad(); }
  // Store the value in register/immediate src in the safepoint
  // register stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register dst, Register src);
  void StoreToSafepointRegisterSlot(Register dst, Immediate src);
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Nop, because ia32 does not have a root register.
  void InitializeRootRegister() {}

  void LoadHeapObject(Register result, Handle<HeapObject> object);
  void CmpHeapObject(Register reg, Handle<HeapObject> object);
  void PushHeapObject(Handle<HeapObject> object);

  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    } else {
      Move(result, Immediate(object));
    }
  }

  void CmpObject(Register reg, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (object->IsHeapObject()) {
      CmpHeapObject(reg, Handle<HeapObject>::cast(object));
    } else {
      cmp(reg, Immediate(object));
    }
  }

  // Compare the given value and the value of weak cell.
  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);

  void GetWeakValue(Register value, Handle<WeakCell> cell);

  // Load the value of the weak cell in the value register. Branch to the given
  // miss label if the weak cell was cleared.
  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Removes current frame and its arguments from the stack preserving
  // the arguments and a return address pushed to the stack for the next call.
  // |ra_state| defines whether return address is already pushed to stack or
  // not. Both |callee_args_count| and |caller_args_count_reg| do not include
  // receiver. |callee_args_count| is not modified, |caller_args_count_reg|
  // is trashed. |number_of_temp_values_after_return_address| specifies
  // the number of words pushed to the stack after the return address. This is
  // to allow "allocation" of scratch registers that this function requires
  // by saving their values on the stack.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1, ReturnAddressState ra_state,
                          int number_of_temp_values_after_return_address);

  // Invoke the JavaScript function code by either calling or jumping.

  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag,
                          const CallWrapper& call_wrapper);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  void InvokeFunction(Handle<JSFunction> function,
                      const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag,
                      const CallWrapper& call_wrapper);

  // Expression support
  // The cvtsi2sd instruction only writes to the low 64 bits of the dst
  // register, which hinders register renaming and makes dependence chains
  // longer. So we use xorps to clear the dst register before cvtsi2sd to
  // solve this issue.
  void Cvtsi2sd(XMMRegister dst, Register src) { Cvtsi2sd(dst, Operand(src)); }
  void Cvtsi2sd(XMMRegister dst, const Operand& src);

  void Cvtui2ss(XMMRegister dst, Register src, Register tmp);

  void ShlPair(Register high, Register low, uint8_t imm8);
  void ShlPair_cl(Register high, Register low);
  void ShrPair(Register high, Register low, uint8_t imm8);
  void ShrPair_cl(Register high, Register src);
  void SarPair(Register high, Register low, uint8_t imm8);
  void SarPair_cl(Register high, Register low);

  // Support for constant splitting.
  bool IsUnsafeImmediate(const Immediate& x);
  void SafeMove(Register dst, const Immediate& x);
  void SafePush(const Immediate& x);

  // Compare object type for heap object.
  // Incoming register is heap_object and outgoing register is map.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  void CmpInstanceType(Register map, InstanceType type);

  // Compare an object's map with the specified map.
  void CompareMap(Register obj, Handle<Map> map);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj, Handle<Map> map, Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified weak map and branch
  // to a specified target if equal. Skip the smi check if not required
  // (object is known to be a heap object)
  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
                       Handle<WeakCell> cell, Handle<Code> success,
                       SmiCheckType smi_check_type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object, Register map,
                               Register instance_type);

  // Check if the object in register heap_object is a name. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectNameType(Register heap_object, Register map,
                             Register instance_type);
  // FCmp is similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, and jz).
  void FCmp();

  void ClampUint8(Register reg);

  void ClampDoubleToUint8(XMMRegister input_reg, XMMRegister scratch_reg,
                          Register result_reg);

  void SlowTruncateToI(Register result_reg, Register input_reg,
      int offset = HeapNumber::kValueOffset - kHeapObjectTag);

  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
  void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);

  void DoubleToI(Register result_reg, XMMRegister input_reg,
                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
                 Label* lost_precision, Label* is_nan, Label* minus_zero,
                 Label::Distance dst = Label::kFar);

  // Smi tagging support.
  void SmiTag(Register reg) {
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize == 1);
    add(reg, reg);
  }
  void SmiUntag(Register reg) {
    sar(reg, kSmiTagSize);
  }

  // Modifies the register even if it does not contain a Smi!
  void SmiUntag(Register reg, Label* is_smi) {
    STATIC_ASSERT(kSmiTagSize == 1);
    sar(reg, kSmiTagSize);
    STATIC_ASSERT(kSmiTag == 0);
    j(not_carry, is_smi);
  }
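
  // Illustrative sketch of the tagging scheme: with kSmiTag == 0 and
  // kSmiTagSize == 1, a smi is the integer value shifted left by one bit, so
  // SmiTag is an add of the register to itself and SmiUntag is an arithmetic
  // shift right (assumes a MacroAssembler* |masm|):
  //
  //   masm->mov(eax, Immediate(42));
  //   masm->SmiTag(eax);    // eax == 84, i.e. 42 << 1
  //   masm->SmiUntag(eax);  // eax == 42 again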

  void LoadUint32(XMMRegister dst, Register src) {
    LoadUint32(dst, Operand(src));
  }
  void LoadUint32(XMMRegister dst, const Operand& src);

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the operand is a smi.
  inline void JumpIfSmi(Operand value, Label* smi_label,
                        Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(zero, smi_label, distance);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label,
                           Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(not_zero, not_smi_label, distance);
  }
  // Jump if the operand is not a smi.
  inline void JumpIfNotSmi(Operand value, Label* smi_label,
                           Label::Distance distance = Label::kFar) {
    test(value, Immediate(kSmiTagMask));
    j(not_zero, smi_label, distance);
  }
  // Jump if the value cannot be represented by a smi.
  inline void JumpIfNotValidSmiValue(Register value, Register scratch,
                                     Label* on_invalid,
                                     Label::Distance distance = Label::kFar) {
    mov(scratch, value);
    add(scratch, Immediate(0x40000000U));
    j(sign, on_invalid, distance);
  }

  // Jump if the unsigned integer value cannot be represented by a smi.
  inline void JumpIfUIntNotValidSmiValue(
      Register value, Label* on_invalid,
      Label::Distance distance = Label::kFar) {
    cmp(value, Immediate(0x40000000U));
    j(above_equal, on_invalid, distance);
  }
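
  // Informal note on the range checks above: ia32 smis have 31 payload bits,
  // so valid untagged values lie in [-2^30, 2^30 - 1]. Adding 0x40000000
  // maps that range onto [0, 2^31 - 1]; anything outside it overflows into
  // the sign bit, which is why a single j(sign, ...) suffices. For example,
  // 0x40000000 (one past the largest valid value) + 0x40000000 == 0x80000000,
  // whose sign bit is set. For unsigned inputs a plain compare against
  // 0x40000000 is enough.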

  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  void LoadAccessor(Register dst, Register holder, int accessor_index,
                    AccessorComponent accessor);

  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = Field::kMask >> Field::kShift;
    if (shift != 0) {
      sar(reg, shift);
    }
    and_(reg, Immediate(mask));
  }

  template<typename Field>
  void DecodeFieldToSmi(Register reg) {
    static const int shift = Field::kShift;
    static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
    STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
    STATIC_ASSERT(kSmiTag == 0);
    if (shift < kSmiTagSize) {
      shl(reg, kSmiTagSize - shift);
    } else if (shift > kSmiTagSize) {
      sar(reg, shift - kSmiTagSize);
    }
    and_(reg, Immediate(mask));
  }
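
  // Usage sketch (illustrative; |HypotheticalField| and |kEncodedOffset| are
  // made-up names standing for any BitField-style shift/mask description):
  //
  //   using HypotheticalField = BitField<int, 3, 4>;   // shift 3, 4 bits wide
  //   masm->mov(eax, FieldOperand(ebx, kEncodedOffset));  // load encoded word
  //   masm->DecodeField<HypotheticalField>(eax);  // eax = (eax >> 3) & 0xF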

  void LoadPowerOf2(XMMRegister dst, Register scratch, int power);

  // Abort execution if argument is not a number, enabled via --debug-code.
  void AssertNumber(Register object);
  void AssertNotNumber(Register object);

  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject,
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not a JSReceiver, enabled via --debug-code.
  void AssertReceiver(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new stack handler and link it into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Inline caching support

  void GetNumberHash(Register r0, Register scratch);

  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old space. If the given space is
  // exhausted, control continues at the gc_required label. The allocated
  // object is returned in result and the end of the new object is returned in
  // result_end. The register scratch can be passed as no_reg in which case
  // an additional object reference will be added to the reloc info. The
  // returned pointers in result and result_end have not yet been tagged as
  // heap objects. If result_contains_top_on_entry is true, the content of
  // result is known to be the allocation top on entry (could be result_end
  // from a previous call). If result_contains_top_on_entry is true, scratch
  // should be no_reg as it is never used.
  void Allocate(int object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  void Allocate(int header_size, ScaleFactor element_size,
                Register element_count, RegisterValueType element_count_type,
                Register result, Register result_end, Register scratch,
                Label* gc_required, AllocationFlags flags);

  void Allocate(Register object_size, Register result, Register result_end,
                Register scratch, Label* gc_required, AllocationFlags flags);

  // FastAllocate is right now only used for folded allocations. It just
  // increments the top pointer without checking against limit. This can only
  // be done if it was proved earlier that the allocation will succeed.
  void FastAllocate(int object_size, Register result, Register result_end,
                    AllocationFlags flags);
  void FastAllocate(Register object_size, Register result, Register result_end,
                    AllocationFlags flags);

  // Allocate a heap number in new space with undefined value. The
  // register scratch2 can be passed as no_reg; the others must be
  // valid registers. Returns tagged pointer in result register, or
  // jumps to gc_required if new space is full.
  void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
                          Label* gc_required, MutableMode mode = IMMUTABLE);

  // Allocate and initialize a JSValue wrapper with the specified {constructor}
  // and {value}.
  void AllocateJSValue(Register result, Register constructor, Register value,
                       Register scratch, Label* gc_required);

  // Initialize fields with filler values.  Fields starting at |current_address|
  // not including |end_address| are overwritten with the value in |filler|.  At
  // the end of the loop, |current_address| takes the value of |end_address|.
  void InitializeFieldsWithFiller(Register current_address,
                                  Register end_address, Register filler);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Check a boolean-bit of a Smi field.
  void BooleanBitTest(Register object, int field_offset, int bit_index);

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Machine code version of Map::GetConstructor().
  // |temp| holds |result|'s map when done.
  void GetMapConstructor(Register result, Register map, Register temp);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.  Generate the code if necessary.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Tail call a code stub (jump).  Generate the code if necessary.
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }
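
  // Usage sketch (illustrative; Runtime::kStackGuard is just an arbitrary
  // runtime function id). The fid-only overload looks up the argument count
  // itself:
  //
  //   masm->CallRuntime(Runtime::kStackGuard);
  //   // ...is equivalent to:
  //   masm->CallRuntime(Runtime::FunctionForId(Runtime::kStackGuard),
  //                     Runtime::FunctionForId(Runtime::kStackGuard)->nargs);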

  // Convenience function: call an external reference.
  void CallExternalReference(ExternalReference ref, int num_arguments);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in esp[0], esp[4],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_arguments, Register scratch);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
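
  // Usage sketch (illustrative; |ref| and the argument values are
  // hypothetical). A C call is always preceded by PrepareCallCFunction so
  // that the stack is aligned and the argument slots exist:
  //
  //   masm->PrepareCallCFunction(2, ecx);              // ecx used as scratch
  //   masm->mov(Operand(esp, 0 * kPointerSize), eax);  // first argument
  //   masm->mov(Operand(esp, 1 * kPointerSize), edx);  // second argument
  //   masm->CallCFunction(ref, 2);                     // ref: ExternalReference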

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext,
                               bool builtin_exit_frame = false);

  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  // Emit code that loads the |parameter_index|'th parameter from the stack to
  // the register according to the CallInterfaceDescriptor definition.
  // |sp_to_ra_offset_in_words| specifies the number of words pushed below the
  // caller's sp (on ia32 it's at least the return address).
  template <class Descriptor>
  void LoadParameterFromStack(
      Register reg, typename Descriptor::ParameterIndices parameter_index,
      int sp_to_ra_offset_in_words = 1) {
    DCHECK(Descriptor::kPassLastArgsOnStack);
    DCHECK_LT(parameter_index, Descriptor::kParameterCount);
    DCHECK_LE(Descriptor::kParameterCount - Descriptor::kStackArgumentsCount,
              parameter_index);
    int offset = (Descriptor::kParameterCount - parameter_index - 1 +
                  sp_to_ra_offset_in_words) *
                 kPointerSize;
    mov(reg, Operand(esp, offset));
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the esp register.
  void Drop(int element_count);

  void Call(Label* target) { call(target); }
  void Call(Handle<Code> target, RelocInfo::Mode rmode,
            TypeFeedbackId id = TypeFeedbackId::None()) {
    call(target, rmode, id);
  }
  void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
  void Push(Register src) { push(src); }
  void Push(const Operand& src) { push(src); }
  void Push(Immediate value) { push(value); }
  void Pop(Register dst) { pop(dst); }
  void Pop(const Operand& dst) { pop(dst); }
  void PushReturnAddressFrom(Register src) { push(src); }
  void PopReturnAddressTo(Register dst) { pop(dst); }

  // Non-SSE2 instructions.
  void Pextrd(Register dst, XMMRegister src, int8_t imm8);
  void Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
    Pinsrd(dst, Operand(src), imm8);
  }
  void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);

  void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
  void Lzcnt(Register dst, const Operand& src);

  void Tzcnt(Register dst, Register src) { Tzcnt(dst, Operand(src)); }
  void Tzcnt(Register dst, const Operand& src);

  void Popcnt(Register dst, Register src) { Popcnt(dst, Operand(src)); }
  void Popcnt(Register dst, const Operand& src);

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // Move a constant into a destination using the most efficient encoding.
  void Move(Register dst, const Immediate& x);
  void Move(const Operand& dst, const Immediate& x);

  // Move an immediate into an XMM register.
  void Move(XMMRegister dst, uint32_t src);
  void Move(XMMRegister dst, uint64_t src);
  void Move(XMMRegister dst, float src) { Move(dst, bit_cast<uint32_t>(src)); }
  void Move(XMMRegister dst, double src) { Move(dst, bit_cast<uint64_t>(src)); }

  void Move(Register dst, Handle<Object> handle) { LoadObject(dst, handle); }
  void Move(Register dst, Smi* source) { Move(dst, Immediate(source)); }

  // Push a handle value.
  void Push(Handle<Object> handle) { push(Immediate(handle)); }
  void Push(Smi* smi) { Push(Immediate(smi)); }

  Handle<Object> CodeObject() {
    DCHECK(!code_object_.is_null());
    return code_object_;
  }

  // Emit code for a truncating division by a constant. The dividend register is
  // unchanged, the result is in edx, and eax gets clobbered.
  void TruncatingDiv(Register dividend, int32_t divisor);
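
  // Usage sketch (illustrative): divide the value in ecx by 3 with truncation.
  // ecx is left unchanged, eax is clobbered, and the quotient ends up in edx:
  //
  //   masm->TruncatingDiv(ecx, 3);   // edx = ecx / 3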

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value);
  void IncrementCounter(StatsCounter* counter, int value);
  void DecrementCounter(StatsCounter* counter, int value);
  void IncrementCounter(Condition cc, StatsCounter* counter, int value);
  void DecrementCounter(Condition cc, StatsCounter* counter, int value);

  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cc is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cc, BailoutReason reason);

  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cc, BailoutReason reason);

  // Print a message to stdout and abort execution.
  void Abort(BailoutReason reason);

  // Check that the stack is aligned.
  void CheckStackAlignment();

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);

  // ---------------------------------------------------------------------------
  // String utilities.

  // Checks if both objects are sequential one-byte strings, and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialOneByteStrings(
      Register object1, Register object2, Register scratch1, Register scratch2,
      Label* on_not_flat_one_byte_strings);

  // Checks if the given register or operand is a unique name
  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar) {
    JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
  }

  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
                                       Label::Distance distance = Label::kFar);

  void EmitSeqStringSetCharCheck(Register string, Register index,
                                 Register value, uint32_t encoding_mask);

  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }

  // Load the type feedback vector from a JavaScript frame.
  void EmitLoadFeedbackVector(Register vector);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
  void LeaveFrame(StackFrame::Type type);

  void EnterBuiltinFrame(Register context, Register target, Register argc);
  void LeaveBuiltinFrame(Register context, Register target, Register argc);

  // Expects object in eax and returns map with validated enum cache
  // in eax.  Assumes that any other register can be used as a scratch.
  void CheckEnumCache(Label* call_runtime);

  // AllocationMemento support. Arrays may have an associated
  // AllocationMemento object that can be checked for in order to pretransition
  // to another type.
  // On entry, receiver_reg should point to the array object.
  // scratch_reg gets clobbered.
  // If allocation info is present, conditional code is set to equal.
  void TestJSArrayForAllocationMemento(Register receiver_reg,
                                       Register scratch_reg,
                                       Label* no_memento_found);

 private:
  bool generating_stub_;
  bool has_frame_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag,
                      Label::Distance done_distance,
                      const CallWrapper& call_wrapper);

  void EnterExitFramePrologue(StackFrame::Type frame_type);
  void EnterExitFrameEpilogue(int argc, bool save_doubles);

  void LeaveExitFrameEpilogue(bool restore_context);

  // Allocation support helpers.
  void LoadAllocationTopHelper(Register result, Register scratch,
                               AllocationFlags flags);

  void UpdateAllocationTopHelper(Register result_end, Register scratch,
                                 AllocationFlags flags);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch, Condition cc,
                  Label* condition_met,
                  Label::Distance condition_met_distance = Label::kFar);

  // Helper for finding the mark bits for an address.  Afterwards, the
  // bitmap register points at the word with the mark bits and the mask
  // register holds the mask for the first bit.  Uses ecx as scratch and
  // leaves addr_reg unchanged.
  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
                          Register mask_reg);

  // Compute memory operands for safepoint stack slots.
  Operand SafepointRegisterSlot(Register reg);
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// The code patcher is used to patch (typically) small parts of code, e.g. for
// debugging and other types of instrumentation. When using the code patcher,
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated it
// causes an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(Isolate* isolate, byte* address, int size);
  ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;        // The address of the code being patched.
  int size_;             // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
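
// Usage sketch (illustrative; |patch_address| is hypothetical). The patcher
// must emit exactly the number of bytes it was constructed with:
//
//   {
//     CodePatcher patcher(isolate, patch_address, 2);
//     patcher.masm()->nop();  // first patched byte
//     patcher.masm()->nop();  // second patched byte
//   }  // destructor checks that exactly the requested bytes were emitted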

// -----------------------------------------------------------------------------
// Static helper functions.

// Generate an Operand for loading a field from an object.
inline Operand FieldOperand(Register object, int offset) {
  return Operand(object, offset - kHeapObjectTag);
}

// Generate an Operand for loading an indexed field from an object.
inline Operand FieldOperand(Register object, Register index, ScaleFactor scale,
                            int offset) {
  return Operand(object, index, scale, offset - kHeapObjectTag);
}

inline Operand FixedArrayElementOperand(Register array, Register index_as_smi,
                                        int additional_offset = 0) {
  int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize;
  return FieldOperand(array, index_as_smi, times_half_pointer_size, offset);
}
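
// Usage sketch (illustrative; the register assignments are arbitrary).
// FieldOperand compensates for the heap-object tag, so loading an object's
// map and a FixedArray element indexed by a smi looks like:
//
//   masm->mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));  // eax: object
//   masm->mov(edx, FixedArrayElementOperand(edi, ecx));  // edi: array,
//                                                        // ecx: smi index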

inline Operand ContextOperand(Register context, int index) {
  return Operand(context, Context::SlotOffset(index));
}

inline Operand ContextOperand(Register context, Register index) {
  return Operand(context, index, times_pointer_size, Context::SlotOffset(0));
}

inline Operand NativeContextOperand() {
  return ContextOperand(esi, Context::NATIVE_CONTEXT_INDEX);
}

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_IA32_MACRO_ASSEMBLER_IA32_H_
