macro-assembler-arm.h revision 257744e915dfc84d6d07a6b2accf8402d9ffc708
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_

#include "assembler.h"
#include "v8globals.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}


static inline Operand SmiUntagOperand(Register object) {
  return Operand(object, ASR, kSmiTagSize);
}
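
// Usage sketch (illustrative; the register choices and field are arbitrary):
// FieldMemOperand compensates for the heap-object tag bit when addressing a
// field, and SmiUntagOperand lets an instruction consume a smi directly as
// its untagged integer value.
//
//   __ ldr(r0, FieldMemOperand(r1, HeapObject::kMapOffset));
//   __ add(r2, r3, SmiUntagOperand(r4));  // r2 = r3 + (r4 >> kSmiTagSize)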



// Give alias names to registers
const Register cp = { 8 };  // JavaScript context pointer
const Register roots = { 10 };  // Roots array pointer.

// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
  // No special flags.
  NO_ALLOCATION_FLAGS = 0,
  // Return the pointer to the allocated object already tagged as a heap object.
  TAG_OBJECT = 1 << 0,
  // The content of the result register already contains the allocation top in
  // new space.
  RESULT_CONTAINS_TOP = 1 << 1,
  // The requested size of the space to allocate is specified in words
  // instead of bytes.
  SIZE_IN_WORDS = 1 << 2
};
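
// Flags can be combined; for example (illustrative), an allocation whose size
// is given in words and whose result should come back tagged can pass
//
//   static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)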


// Flags used for the ObjectToDoubleVFPRegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non-smi.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities, branch to the non-number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};


// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
 public:
  // The isolate parameter can be NULL if the macro assembler should
  // not use isolate-dependent functionality. In this case, it's the
  // responsibility of the caller to never invoke such a function on the
  // macro assembler.
  MacroAssembler(Isolate* isolate, void* buffer, int size);

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target, Condition cond = al);
  void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
  static int CallSize(Register target, Condition cond = al);
  void Call(Register target, Condition cond = al);
  static int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
  void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
  static int CallSize(Handle<Code> code,
                      RelocInfo::Mode rmode,
                      Condition cond = al);
  void Call(Handle<Code> code,
            RelocInfo::Mode rmode,
            Condition cond = al);
  void CallWithAstId(Handle<Code> code,
                     RelocInfo::Mode rmode,
                     unsigned ast_id,
                     Condition cond = al);
  void Ret(Condition cond = al);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count, Condition cond = al);

  void Ret(int drop, Condition cond = al);

  // Swap two registers.  If the scratch register is omitted then a slightly
  // less efficient form using eor instead of mov is emitted.
  void Swap(Register reg1,
            Register reg2,
            Register scratch = no_reg,
            Condition cond = al);


  void And(Register dst, Register src1, const Operand& src2,
           Condition cond = al);
  void Ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  void Sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);
  // The scratch register is not used for ARMv7.
  // scratch can be the same register as src (in which case it is trashed), but
  // not the same as dst.
  void Bfi(Register dst,
           Register src,
           Register scratch,
           int lsb,
           int width,
           Condition cond = al);
  void Bfc(Register dst, int lsb, int width, Condition cond = al);
  void Usat(Register dst, int satpos, const Operand& src,
            Condition cond = al);

  void Call(Label* target);

  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Handle<Object> value);
  void Move(Register dst, Register src);
  void Move(DoubleRegister dst, DoubleRegister src);

  // Jumps to the label at the index given by the Smi in "index".
  void SmiJumpTable(Register index, Vector<Label*> targets);
  // Load an object from the root table.
  void LoadRoot(Register destination,
                Heap::RootListIndex index,
                Condition cond = al);
  // Store an object to the root table.
  void StoreRoot(Register source,
                 Heap::RootListIndex index,
                 Condition cond = al);
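
  // For example (illustrative), the undefined value can be materialized with
  //
  //   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);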


  // Check if object is in new space.
  // scratch can be object itself, but it will be clobbered.
  void InNewSpace(Register object,
                  Register scratch,
                  Condition cond,  // eq for new space, ne otherwise
                  Label* branch);


  // For the page containing |object| mark the region covering [address]
  // dirty. The object address must be in the first 8K of an allocated page.
  void RecordWriteHelper(Register object,
                         Register address,
                         Register scratch);

  // For the page containing |object| mark the region covering
  // [object+offset] dirty. The object address must be in the first 8K
  // of an allocated page.  The 'scratch' registers are used in the
  // implementation and all 3 registers are clobbered by the
  // operation, as well as the ip register. RecordWrite updates the
  // write barrier even when storing smis.
  void RecordWrite(Register object,
                   Operand offset,
                   Register scratch0,
                   Register scratch1);

  // For the page containing |object| mark the region covering
  // [address] dirty. The object address must be in the first 8K of an
  // allocated page.  All 3 registers are clobbered by the operation,
  // as well as the ip register. RecordWrite updates the write barrier
  // even when storing smis.
  void RecordWrite(Register object,
                   Register address,
                   Register scratch);

  // Push two registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Condition cond = al) {
    ASSERT(!src1.is(src2));
    if (src1.code() > src2.code()) {
      stm(db_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
    }
  }

  // Push three registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, cond);
    }
  }

  // Push four registers.  Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2,
            Register src3, Register src4, Condition cond = al) {
    ASSERT(!src1.is(src2));
    ASSERT(!src2.is(src3));
    ASSERT(!src1.is(src3));
    ASSERT(!src1.is(src4));
    ASSERT(!src2.is(src4));
    ASSERT(!src3.is(src4));
    if (src1.code() > src2.code()) {
      if (src2.code() > src3.code()) {
        if (src3.code() > src4.code()) {
          stm(db_w,
              sp,
              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
              cond);
        } else {
          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
        }
      } else {
        stm(db_w, sp, src1.bit() | src2.bit(), cond);
        Push(src3, src4, cond);
      }
    } else {
      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
      Push(src2, src3, src4, cond);
    }
  }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Condition cond = al) {
    ASSERT(!src1.is(src2));
    if (src1.code() > src2.code()) {
      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
    } else {
      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
    }
  }
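
  // For instance (illustrative), Push(r1, r2, r3) stores r1 in the highest of
  // the three new stack slots and r3 in the lowest, so
  //
  //   __ Push(r1, r2, r3);   // stack (from sp): r3, r2, r1, ...
  //   __ Pop(r2, r3);        // restores r3 then r2; r1's slot is now at sp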

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();
  void PushSafepointRegistersAndDoubles();
  void PopSafepointRegistersAndDoubles();
  // Store value in register src in the safepoint stack slot for
  // register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst);
  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
  // Load the value of the src register from its safepoint stack slot
  // into register dst.
  void LoadFromSafepointRegisterSlot(Register dst, Register src);

  // Load two consecutive registers with two consecutive memory locations.
  void Ldrd(Register dst1,
            Register dst2,
            const MemOperand& src,
            Condition cond = al);

  // Store two consecutive registers to two consecutive memory locations.
  void Strd(Register src1,
            Register src2,
            const MemOperand& dst,
            Condition cond = al);

  // Clear specified FPSCR bits.
  void ClearFPSCRBits(const uint32_t bits_to_clear,
                      const Register scratch,
                      const Condition cond = al);

  // Compare double values and move the result to the normal condition flags.
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const DwVfpRegister src2,
                             const Condition cond = al);
  void VFPCompareAndSetFlags(const DwVfpRegister src1,
                             const double src2,
                             const Condition cond = al);

  // Compare double values and then load the fpscr flags to a register.
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const DwVfpRegister src2,
                              const Register fpscr_flags,
                              const Condition cond = al);
  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
                              const double src2,
                              const Register fpscr_flags,
                              const Condition cond = al);


  // ---------------------------------------------------------------------------
  // Activation frames

  void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
  void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }

  void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
  void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }

  // Enter exit frame.
  // stack_space - extra stack space, used for alignment before call to C.
  void EnterExitFrame(bool save_doubles, int stack_space = 0);

  // Leave the current exit frame. Expects the return value in r0.
  // Expects the number of values to remove, which were pushed prior to the
  // exit frame, in a register (or no_reg if there is nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count);

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void LoadContext(Register dst, int context_chain_length);

  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers function
  // and map can be the same register, in which case function is overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Set up call kind marking in the dst register. The method takes dst as an
  // explicit first parameter to make the code more readable at the
  // call sites.
  void SetCallKind(Register dst, CallKind kind);

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeCode(Register code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  InvokeFlag flag,
                  const CallWrapper& call_wrapper,
                  CallKind call_kind);

  void InvokeCode(Handle<Code> code,
                  const ParameterCount& expected,
                  const ParameterCount& actual,
                  RelocInfo::Mode rmode,
                  InvokeFlag flag,
                  CallKind call_kind);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  void InvokeFunction(JSFunction* function,
                      const ParameterCount& actual,
                      InvokeFlag flag,
                      CallKind call_kind);

  void IsObjectJSObjectType(Register heap_object,
                            Register map,
                            Register scratch,
                            Label* fail);

  void IsInstanceJSObjectType(Register map,
                              Register scratch,
                              Label* fail);

  void IsObjectJSStringType(Register object,
                            Register scratch,
                            Label* fail);

#ifdef ENABLE_DEBUGGER_SUPPORT
  // ---------------------------------------------------------------------------
  // Debugger Support

  void DebugBreak();
#endif

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link into try handler chain.
  // The return address must be passed in register lr.
  // On exit, r0 contains TOS (code slot).
  void PushTryHandler(CodeLocation try_location, HandlerType type);

  // Unlink the stack handler on top of the stack from the try handler chain.
  // Must preserve the result register.
  void PopTryHandler();

  // Passes the thrown value (in r0) to the handler at the top of the try
  // handler chain.
  void Throw(Register value);

  // Propagates an uncatchable exception to the top of the current JS stack's
  // handler chain.
  void ThrowUncatchable(UncatchableExceptionType type, Register value);

  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, whereas both scratch registers are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }

  // Check if the given instruction is a 'type' marker.
  // I.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
  // These instructions are generated to mark special locations in the code,
  // like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }


  static inline int GetCodeMarker(Instr instr) {
    int dst_reg_offset = 12;
    int dst_mask = 0xf << dst_reg_offset;
    int src_mask = 0xf;
    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
    int src_reg = instr & src_mask;
    uint32_t non_register_mask = ~(dst_mask | src_mask);
    uint32_t mov_mask = al | 13 << 21;

    // Return <n> if we have a mov rn, rn, else return -1.
    int type = ((instr & non_register_mask) == mov_mask) &&
               (dst_reg == src_reg) &&
               (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
                   ? src_reg
                   : -1;
    ASSERT((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }
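
  // As a concrete (illustrative) example: a marker emitted as nop(2) is the
  // instruction mov r2, r2 (0xe1a02002). Masking out the two register fields
  // leaves exactly mov_mask (al condition plus the mov opcode, 13 << 21), and
  // dst_reg == src_reg == 2 is then returned as the marker type.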


  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space. The object_size is specified
  // either in bytes or in words if the allocation flag SIZE_IN_WORDS
  // is passed. If the new space is exhausted control continues at the
  // gc_required label. The allocated object is returned in result. If
  // the TAG_OBJECT flag is passed the result is tagged as a heap object.
  // All registers are clobbered also when control continues at the
  // gc_required label.
  void AllocateInNewSpace(int object_size,
                          Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required,
                          AllocationFlags flags);
  void AllocateInNewSpace(Register object_size,
                          Register result,
                          Register scratch1,
                          Register scratch2,
                          Label* gc_required,
                          AllocationFlags flags);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. The caller must make sure that no pointers
  // are left to the object(s) no longer allocated as they would be invalid when
  // allocation is undone.
  void UndoAllocationInNewSpace(Register object, Register scratch);


  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);
  void AllocateTwoByteConsString(Register result,
                                 Register length,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register length,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // Allocates a heap number or jumps to the gc_required label if the young
  // space is full and a scavenge is needed. All registers are clobbered also
  // when control continues at the gc_required label.
  void AllocateHeapNumber(Register result,
                          Register scratch1,
                          Register scratch2,
                          Register heap_number_map,
                          Label* gc_required);
  void AllocateHeapNumberWithValue(Register result,
                                   DwVfpRegister value,
                                   Register scratch1,
                                   Register scratch2,
                                   Register heap_number_map,
                                   Label* gc_required);

  // Copies a fixed number of fields of heap objects from src to dst.
  void CopyFields(Register dst, Register src, RegList temps, int field_count);

  // Copies a number of bytes from src to dst. All registers are clobbered. On
  // exit src and dst will point to the place just after where the last byte was
  // read or written and length will be zero.
  void CopyBytes(Register src,
                 Register dst,
                 Register length,
                 Register scratch);

  // ---------------------------------------------------------------------------
  // Support functions.

  // Try to get the function prototype of a function and put the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other registers may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Register scratch,
                               Label* miss);

  // Compare object type for heap object.  heap_object contains a non-Smi
  // whose object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register).  It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  void CompareObjectType(Register heap_object,
                         Register map,
                         Register type_reg,
                         InstanceType type);
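
  // A typical use (illustrative) checks an object's instance type and branches
  // on the resulting flags:
  //
  //   __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
  //   __ b(ne, &miss);   // r2 now holds the map, r3 the instance type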

  // Compare instance type in a map.  map contains a valid map object whose
  // object type should be compared with the given type.  This both
  // sets the flags and leaves the object type in the type_reg register.  It
  // leaves the map in the map register unless the map register is the same
  // register as type_reg.
  void CompareInstanceType(Register map,
                           Register type_reg,
                           InstanceType type);


  // Check if the map of an object is equal to a specified map (either
  // given directly or as an index into the root list) and branch to
  // label if not. Skip the smi check if not required (object is known
  // to be a heap object).
  void CheckMap(Register obj,
                Register scratch,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);


  void CheckMap(Register obj,
                Register scratch,
                Heap::RootListIndex index,
                Label* fail,
                SmiCheckType smi_check_type);


  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Register scratch,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);


  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);


  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type) {
    ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    tst(type, Operand(kIsNotStringMask));
    ASSERT_EQ(0, kStringTag);
    return eq;
  }
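
  // Call sites (illustrative) branch on the returned condition, e.g.
  //
  //   Condition is_string = IsObjectStringType(r0, r1);
  //   b(is_string, &on_string);   // r1 now holds the instance type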


  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);

  // Get the number of least significant bits from a register.
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);

  // Uses VFP instructions to convert a Smi to a double.
  void IntegerToDoubleConversionWithVFP3(Register inReg,
                                         Register outHighReg,
                                         Register outLowReg);

  // Load the value of a number object into a VFP double register. If the object
  // is not a number a jump to the label not_number is performed and the VFP
  // double register is unchanged.
  void ObjectToDoubleVFPRegister(
      Register object,
      DwVfpRegister value,
      Register scratch1,
      Register scratch2,
      Register heap_number_map,
      SwVfpRegister scratch3,
      Label* not_number,
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);

  // Load the value of a smi object into a VFP double register. The register
  // scratch1 can be the same register as smi in which case smi will hold the
  // untagged value afterwards.
  void SmiToDoubleVFPRegister(Register smi,
                              DwVfpRegister value,
                              Register scratch1,
                              SwVfpRegister scratch2);

  // Convert the HeapNumber pointed to by source to a 32-bit signed integer in
  // dest. If the HeapNumber does not fit into a 32-bit signed integer branch
  // to the not_int32 label. If VFP3 is available double_scratch is used but
  // not scratch2.
  void ConvertToInt32(Register source,
                      Register dest,
                      Register scratch,
                      Register scratch2,
                      DwVfpRegister double_scratch,
                      Label *not_int32);

  // Truncates a double using a specific rounding mode.
  // Clears the z flag (ne condition) if an overflow occurs.
  // If checking for an inexact conversion is requested, the z flag is also
  // cleared if the conversion was inexact, i.e. if the double value could not
  // be converted exactly to a 32-bit integer.
  void EmitVFPTruncate(VFPRoundingMode rounding_mode,
                       SwVfpRegister result,
                       DwVfpRegister double_input,
                       Register scratch1,
                       Register scratch2,
                       CheckForInexactConversion check
                           = kDontCheckForInexactConversion);

  // Helper for EmitECMATruncate.
  // This will truncate a floating-point value outside of the signed 32-bit
  // integer range to a 32-bit signed integer.
  // Expects the double value loaded in input_high and input_low.
  // Exits with the answer in 'result'.
  // Note that this code does not work for values in the signed 32-bit range!
  void EmitOutOfInt32RangeTruncate(Register result,
                                   Register input_high,
                                   Register input_low,
                                   Register scratch);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
  // Exits with 'result' holding the answer and all other registers clobbered.
  void EmitECMATruncate(Register result,
                        DwVfpRegister double_input,
                        SwVfpRegister single_scratch,
                        Register scratch,
                        Register scratch2,
                        Register scratch3);

  // Count leading zeros in a 32 bit word.  On ARMv5 and later it uses the clz
  // instruction.  On pre-ARMv5 hardware this routine gives the wrong answer
  // for 0 (31 instead of 32).  Source and scratch can be the same in which case
  // the source is clobbered.  Source and zeros can also be the same in which
  // case scratch should be a different register.
  void CountLeadingZeros(Register zeros,
                         Register source,
                         Register scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, Condition cond = al);

  // Call a code stub and return the code object called.  Try to generate
  // the code if necessary.  Do not perform a GC but instead return a retry
  // after GC failure.
  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Tail call a code stub (jump) and return the code object called.  Try to
  // generate the code if necessary.  Do not perform a GC but instead return
  // a retry after GC failure.
  MUST_USE_RESULT MaybeObject* TryTailCallStub(CodeStub* stub,
                                               Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments);
  void CallRuntimeSaveDoubles(Runtime::FunctionId id);

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments);

  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Tail call of a runtime routine (jump). Try to generate the code if
  // necessary. Do not perform a GC but instead return a retry after GC
  // failure.
  MUST_USE_RESULT MaybeObject* TryTailCallExternalReference(
      const ExternalReference& ext, int num_arguments, int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);

  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments,
                            int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments,
                            Register scratch);

  // There are two ways of passing double arguments on ARM, depending on
  // whether soft or hard floating point ABI is used. These functions
  // abstract parameter passing for the three different ways we call
  // C functions from generated code.
  void SetCallCDoubleArguments(DoubleRegister dreg);
  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, Register scratch, int num_arguments);
  void CallCFunction(ExternalReference function,
                     int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, Register scratch,
                     int num_reg_arguments,
                     int num_double_arguments);

  void GetCFunctionDoubleResult(const DoubleRegister dst);
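
  // A rough calling sequence (illustrative; ext_function stands for a
  // hypothetical ExternalReference to a C function taking one double and
  // returning a double):
  //
  //   __ PrepareCallCFunction(0, 1, scratch);   // 0 core args, 1 double arg
  //   __ SetCallCDoubleArguments(d0);
  //   __ CallCFunction(ext_function, 0, 1);
  //   __ GetCFunctionDoubleResult(d2);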

  // Calls an API function. Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions. Restores context.
  // stack_space - space to be unwound on exit (includes the call JS
  // arguments space and the additional space allocated for the fast call).
  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
                                           int stack_space);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin);

  MaybeObject* TryJumpToExternalReference(const ExternalReference& ext);

  // Invoke specified builtin JavaScript function. Adds an entry to
  // the unresolved list if the name does not resolve.
  void InvokeBuiltin(Builtins::JavaScript id,
                     InvokeFlag flag,
                     const CallWrapper& call_wrapper = NullCallWrapper());

  // Store the code object for the given builtin in the target register and
  // set up the function in r1.
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);

  // Store the function for the given builtin in the target register.
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);

  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }


  // ---------------------------------------------------------------------------
  // StatsCounter support

  void SetCounter(StatsCounter* counter, int value,
                  Register scratch1, Register scratch2);
  void IncrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value,
                        Register scratch1, Register scratch2);


  // ---------------------------------------------------------------------------
  // Debugging

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, const char* msg);
  void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
  void AssertFastElements(Register elements);

  // Like Assert(), but always enabled.
  void Check(Condition cond, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);

  // Verify restrictions about code generated in stubs.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }

  // EABI variant for double arguments in use.
  bool use_eabi_hardfloat() {
#if USE_EABI_HARDFLOAT
    return true;
#else
    return false;
#endif
  }

  // ---------------------------------------------------------------------------
  // Number utilities

  // Check whether the value of reg is a power of two and not zero. If not,
  // control jumps to the not_power_of_two_or_zero label. If reg is a power of
  // two the register scratch contains the value of (reg - 1) when control
  // falls through.
  void JumpIfNotPowerOfTwoOrZero(Register reg,
                                 Register scratch,
                                 Label* not_power_of_two_or_zero);
  // Check whether the value of reg is a power of two and not zero.
  // Control falls through if it is, with scratch containing the mask
  // value (reg - 1).
  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
  // strictly positive but not a power of two.
  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                       Register scratch,
                                       Label* zero_and_neg,
                                       Label* not_power_of_two);

  // ---------------------------------------------------------------------------
  // Smi utilities

  void SmiTag(Register reg, SBit s = LeaveCC) {
    add(reg, reg, Operand(reg), s);
  }
  void SmiTag(Register dst, Register src, SBit s = LeaveCC) {
    add(dst, src, Operand(src), s);
  }

  // Try to convert int32 to smi. If the value is too large, preserve
  // the original value and jump to not_a_smi. Destroys scratch and
  // sets flags.
  void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
    mov(scratch, reg);
    SmiTag(scratch, SetCC);
    b(vs, not_a_smi);
    mov(reg, scratch);
  }

  void SmiUntag(Register reg, SBit s = LeaveCC) {
    mov(reg, Operand(reg, ASR, kSmiTagSize), s);
  }
  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
    mov(dst, Operand(src, ASR, kSmiTagSize), s);
  }
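
  // Smi tagging here is a left shift by kSmiTagSize (1): adding a register to
  // itself doubles it, which assumes kSmiTag == 0 and kSmiTagSize == 1.
  // For example (illustrative):
  //
  //   mov(r0, Operand(5));
  //   SmiTag(r0);      // r0 == 10, i.e. 5 << kSmiTagSize with a clear tag bit
  //   SmiUntag(r0);    // r0 == 5 again (arithmetic shift right)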

  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(eq, smi_label);
  }
  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    tst(value, Operand(kSmiTagMask));
    b(ne, not_smi_label);
  }
  // Jump if either of the registers contains a non-smi.
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, or is not a smi, respectively.
  // Used in debug code.
  void AbortIfSmi(Register object);
  void AbortIfNotSmi(Register object);

  // Abort execution if argument is not a string. Used in debug code.
  void AbortIfNotString(Register object);

  // Abort execution if argument is not the root value with the given index.
  void AbortIfNotRootValue(Register src,
                           Heap::RootListIndex root_value_index,
                           const char* message);

  // ---------------------------------------------------------------------------
  // HeapNumber utilities

  void JumpIfNotHeapNumber(Register object,
                           Register heap_number_map,
                           Register scratch,
                           Label* on_not_heap_number);

  // ---------------------------------------------------------------------------
  // String utilities

  // Checks if both objects are sequential ASCII strings and jumps to label
  // if either is not. Assumes that neither object is a smi.
  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
                                                  Register object2,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* failure);

  // Checks if both objects are sequential ASCII strings and jumps to label
  // if either is not.
  void JumpIfNotBothSequentialAsciiStrings(Register first,
                                           Register second,
                                           Register scratch1,
                                           Register scratch2,
                                           Label* not_flat_ascii_strings);

  // Checks if both instance types are sequential ASCII strings and jumps to
  // label if either is not.
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* failure);

  // Check if instance type is sequential ASCII string and jump to label if
  // it is not.
  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                              Register scratch,
                                              Label* failure);


  // ---------------------------------------------------------------------------
  // Patching helpers.

  // Get the location of a relocated constant (its address in the constant pool)
  // from its load site.
  void GetRelocatedValueLocation(Register ldr_location,
                                 Register result);


  void ClampUint8(Register output_reg, Register input_reg);

  void ClampDoubleToUint8(Register result_reg,
                          DoubleRegister input_reg,
                          DoubleRegister temp_double_reg);


  void LoadInstanceDescriptors(Register map, Register descriptors);

 private:
  void CallCFunctionHelper(Register function,
                           ExternalReference function_reference,
                           Register scratch,
                           int num_reg_arguments,
                           int num_double_arguments);

  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
  static int CallSize(intptr_t target,
                      RelocInfo::Mode rmode,
                      Condition cond = al);
  void Call(intptr_t target,
            RelocInfo::Mode rmode,
            Condition cond = al);

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual,
                      Handle<Code> code_constant,
                      Register code_reg,
                      Label* done,
                      InvokeFlag flag,
                      const CallWrapper& call_wrapper,
                      CallKind call_kind);

  // Activation support.
  void EnterFrame(StackFrame::Type type);
  void LeaveFrame(StackFrame::Type type);

  void InitializeNewString(Register string,
                           Register length,
                           Heap::RootListIndex map_index,
                           Register scratch1,
                           Register scratch2);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);
  MemOperand SafepointRegisterSlot(Register reg);
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);

  bool generating_stub_;
  bool allow_stub_calls_;
  // This handle will be patched with the code object on installation.
  Handle<Object> code_object_;

  // Needs access to SafepointRegisterStackIndex for optimized frame
  // traversal.
  friend class OptimizedFrame;
};


#ifdef ENABLE_DEBUGGER_SUPPORT
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
 public:
  CodePatcher(byte* address, int instructions);
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

  // Emit an instruction directly.
  void Emit(Instr instr);

  // Emit an address directly.
  void Emit(Address addr);

  // Emit the condition part of an instruction leaving the rest of the current
  // instruction unchanged.
  void EmitCondition(Condition cond);

 private:
  byte* address_;  // The address of the code being patched.
  int instructions_;  // Number of instructions of the expected patch size.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
#endif  // ENABLE_DEBUGGER_SUPPORT


// -----------------------------------------------------------------------------
// Static helper functions.

static MemOperand ContextOperand(Register context, int index) {
  return MemOperand(context, Context::SlotOffset(index));
}


static inline MemOperand GlobalObjectOperand() {
  return ContextOperand(cp, Context::GLOBAL_INDEX);
}
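
// Usage sketch (illustrative): cp holds the current JavaScript context, so
//
//   __ ldr(r0, GlobalObjectOperand());
//
// loads the global object, and is equivalent to
// ldr(r0, ContextOperand(cp, Context::GLOBAL_INDEX)).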


#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif


} }  // namespace v8::internal

#endif  // V8_ARM_MACRO_ASSEMBLER_ARM_H_