// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_
#define VIXL_A64_MACRO_ASSEMBLER_A64_H_

#include "globals-vixl.h"
#include "a64/assembler-a64.h"
#include "a64/debugger-a64.h"


#define LS_MACRO_LIST(V)                                      \
  V(Ldrb, Register&, rt, LDRB_w)                              \
  V(Strb, Register&, rt, STRB_w)                              \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w)  \
  V(Ldrh, Register&, rt, LDRH_w)                              \
  V(Strh, Register&, rt, STRH_w)                              \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w)  \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                     \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                    \
  V(Ldrsw, Register&, rt, LDRSW_x)

namespace vixl {

enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those; the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};


enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };

class MacroAssembler : public Assembler {
 public:
  MacroAssembler(byte * buffer, unsigned buffer_size)
      : Assembler(buffer, buffer_size),
#ifdef DEBUG
        allow_macro_instructions_(true),
#endif
        sp_(sp), tmp_list_(ip0, ip1), fptmp_list_(d31) {}

  // Logical macros.
  void And(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Ands(const Register& rd,
            const Register& rn,
            const Operand& operand);
  void Bic(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Bics(const Register& rd,
            const Register& rn,
            const Operand& operand);
  void Orr(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Orn(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Eor(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Eon(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Tst(const Register& rn, const Operand& operand);
  void LogicalMacro(const Register& rd,
                    const Register& rn,
                    const Operand& operand,
                    LogicalOp op);

  // Add and sub macros.
  void Add(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Adds(const Register& rd,
            const Register& rn,
            const Operand& operand);
  void Sub(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Subs(const Register& rd,
            const Register& rn,
            const Operand& operand);
  void Cmn(const Register& rn, const Operand& operand);
  void Cmp(const Register& rn, const Operand& operand);
  void Neg(const Register& rd,
           const Operand& operand);
  void Negs(const Register& rd,
            const Operand& operand);

  void AddSubMacro(const Register& rd,
                   const Register& rn,
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);

  // Add/sub with carry macros.
  void Adc(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Adcs(const Register& rd,
            const Register& rn,
            const Operand& operand);
  void Sbc(const Register& rd,
           const Register& rn,
           const Operand& operand);
  void Sbcs(const Register& rd,
            const Register& rn,
            const Operand& operand);
  void Ngc(const Register& rd,
           const Operand& operand);
  void Ngcs(const Register& rd,
            const Operand& operand);
  void AddSubWithCarryMacro(const Register& rd,
                            const Register& rn,
                            const Operand& operand,
                            FlagsUpdate S,
                            AddSubWithCarryOp op);

  // Move macros.
  void Mov(const Register& rd, uint64_t imm);
  void Mov(const Register& rd,
           const Operand& operand,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
  void Mvn(const Register& rd, uint64_t imm) {
    Mov(rd, (rd.size() == kXRegSize) ? ~imm : (~imm & kWRegMask));
  }
  void Mvn(const Register& rd, const Operand& operand);
  bool IsImmMovz(uint64_t imm, unsigned reg_size);
  bool IsImmMovn(uint64_t imm, unsigned reg_size);
  unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);

  // Conditional macros.
  void Ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);
  void Ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
  void Csel(const Register& rd,
            const Register& rn,
            const Operand& operand,
            Condition cond);

  // Load/store macros.
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (sp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
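  //
  // Illustrative usage (a sketch only; it assumes a MacroAssembler named
  // 'masm' and that the current stack pointer is sp):
  //
  //   masm.Push(x0, x1);    // Equivalent to Push(x0); Push(x1);
  //   ...
  //   masm.Pop(x1, x0);     // Mirror the Push order to restore both values.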
  void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
  void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
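  //
  // Illustrative usage (a sketch only; the register list is just an example,
  // treating RegList as a bit mask indexed by register code):
  //
  //   RegList saved = (UINT64_C(1) << 19) | (UINT64_C(1) << 20);  // x19, x20.
  //   masm.PushXRegList(saved);
  //   ...
  //   masm.PopXRegList(saved);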
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSize);
  }
  void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSize);
  }
  void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSize);
  }
  void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSize);
  }
  inline void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
  }
  inline void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
  }
  inline void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
  }
  inline void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
  }

  // Push the specified register 'count' times.
  void PushMultipleTimes(int count, Register src);

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Poke(const Register& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Peek(const Register& dst, const Operand& offset);

  // Claim or drop stack space without actually accessing memory.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  void Claim(const Operand& size);
  void Drop(const Operand& size);
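  //
  // Illustrative usage of Poke, Peek, Claim and Drop (a sketch only; it
  // assumes sp is the current stack pointer, so sizes stay 16-byte aligned):
  //
  //   masm.Claim(16);
  //   masm.Poke(x0, 0);    // Store x0 at [StackPointer()].
  //   ...
  //   masm.Peek(x0, 0);    // Load the value back into x0.
  //   masm.Drop(16);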

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PopCalleeSavedRegisters();
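  //
  // Illustrative usage (a sketch only): a generated function that clobbers
  // callee-saved registers might be bracketed as follows, with sp as the
  // current stack pointer:
  //
  //   masm.PushCalleeSavedRegisters();
  //   ...                                // Function body.
  //   masm.PopCalleeSavedRegisters();
  //   masm.Ret();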

  // Remaining instructions are simple pass-through calls to the assembler.
  void Adr(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    adr(rd, label);
  }
  void Asr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    asr(rd, rn, shift);
  }
  void Asr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    asrv(rd, rn, rm);
  }

  // Branch type inversion relies on these relations.
  VIXL_STATIC_ASSERT((reg_zero      == (reg_not_zero ^ 1)) &&
                     (reg_bit_clear == (reg_bit_set ^ 1)) &&
                     (always        == (never ^ 1)));

  BranchType InvertBranchType(BranchType type) {
    if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
      return static_cast<BranchType>(
          InvertCondition(static_cast<Condition>(type)));
    } else {
      return static_cast<BranchType>(type ^ 1);
    }
  }

  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  void B(Label* label) {
    b(label);
  }
  void B(Label* label, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    b(label, cond);
  }
  void B(Condition cond, Label* label) {
    B(label, cond);
  }
  void Bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    bfi(rd, rn, lsb, width);
  }
  void Bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    bfxil(rd, rn, lsb, width);
  }
  void Bind(Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    bind(label);
  }
  void Bl(Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    bl(label);
  }
  void Blr(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    blr(xn);
  }
  void Br(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    br(xn);
  }
  void Brk(int code = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    brk(code);
  }
  void Cbnz(const Register& rt, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    cbnz(rt, label);
  }
  void Cbz(const Register& rt, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    cbz(rt, label);
  }
  void Cinc(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    cinc(rd, rn, cond);
  }
  void Cinv(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    cinv(rd, rn, cond);
  }
  void Cls(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    cls(rd, rn);
  }
  void Clz(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    clz(rd, rn);
  }
  void Cneg(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    cneg(rd, rn, cond);
  }
  void Cset(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    cset(rd, cond);
  }
  void Csetm(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    csetm(rd, cond);
  }
  void Csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    csinc(rd, rn, rm, cond);
  }
  void Csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    csinv(rd, rn, rm, cond);
  }
  void Csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    csneg(rd, rn, rm, cond);
  }
  void Dmb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    dmb(domain, type);
  }
  void Dsb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    dsb(domain, type);
  }
  void Extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    extr(rd, rn, rm, lsb);
  }
  void Fabs(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    fabs(fd, fn);
  }
  void Fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fadd(fd, fn, fm);
  }
  void Fccmp(const FPRegister& fn,
             const FPRegister& fm,
             StatusFlags nzcv,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    fccmp(fn, fm, nzcv, cond);
  }
  void Fcmp(const FPRegister& fn, const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fcmp(fn, fm);
  }
  void Fcmp(const FPRegister& fn, double value);
  void Fcsel(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    fcsel(fd, fn, fm, cond);
  }
  void Fcvt(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    fcvt(fd, fn);
  }
  void Fcvtas(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtas(rd, fn);
  }
  void Fcvtau(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtau(rd, fn);
  }
  void Fcvtms(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtms(rd, fn);
  }
  void Fcvtmu(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtmu(rd, fn);
  }
  void Fcvtns(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtns(rd, fn);
  }
  void Fcvtnu(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtnu(rd, fn);
  }
  void Fcvtzs(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtzs(rd, fn);
  }
  void Fcvtzu(const Register& rd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fcvtzu(rd, fn);
  }
  void Fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fdiv(fd, fn, fm);
  }
  void Fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fmax(fd, fn, fm);
  }
  void Fmaxnm(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fmaxnm(fd, fn, fm);
  }
  void Fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fmin(fd, fn, fm);
  }
  void Fminnm(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fminnm(fd, fn, fm);
  }
  void Fmov(FPRegister fd, FPRegister fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    // Only emit an instruction if fd and fn are different, and they are both D
    // registers. fmov(s0, s0) is not a no-op because it clears the top word of
    // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
    // the top of q0, but FPRegister does not currently support Q registers.
    if (!fd.Is(fn) || !fd.Is64Bits()) {
      fmov(fd, fn);
    }
  }
  void Fmov(FPRegister fd, Register rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    fmov(fd, rn);
  }
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  void Fmov(FPRegister fd, double imm);
  void Fmov(FPRegister fd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template<typename T>
  void Fmov(FPRegister fd, T imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    Fmov(fd, static_cast<double>(imm));
  }
  void Fmov(Register rd, FPRegister fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    fmov(rd, fn);
  }
  void Fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fmul(fd, fn, fm);
  }
  void Fmadd(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa) {
    VIXL_ASSERT(allow_macro_instructions_);
    fmadd(fd, fn, fm, fa);
  }
  void Fmsub(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa) {
    VIXL_ASSERT(allow_macro_instructions_);
    fmsub(fd, fn, fm, fa);
  }
  void Fnmadd(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa) {
    VIXL_ASSERT(allow_macro_instructions_);
    fnmadd(fd, fn, fm, fa);
  }
  void Fnmsub(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa) {
    VIXL_ASSERT(allow_macro_instructions_);
    fnmsub(fd, fn, fm, fa);
  }
  void Fneg(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    fneg(fd, fn);
  }
  void Frinta(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    frinta(fd, fn);
  }
  void Frintm(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    frintm(fd, fn);
  }
  void Frintn(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    frintn(fd, fn);
  }
  void Frintz(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    frintz(fd, fn);
  }
  void Fsqrt(const FPRegister& fd, const FPRegister& fn) {
    VIXL_ASSERT(allow_macro_instructions_);
    fsqrt(fd, fn);
  }
  void Fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm) {
    VIXL_ASSERT(allow_macro_instructions_);
    fsub(fd, fn, fm);
  }
  void Hint(SystemHint code) {
    VIXL_ASSERT(allow_macro_instructions_);
    hint(code);
  }
  void Hlt(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    hlt(code);
  }
  void Isb() {
    VIXL_ASSERT(allow_macro_instructions_);
    isb();
  }
  void Ldnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    ldnp(rt, rt2, src);
  }
  void Ldp(const CPURegister& rt,
           const CPURegister& rt2,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    ldp(rt, rt2, src);
  }
  void Ldpsw(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    ldpsw(rt, rt2, src);
  }
  // Provide both double and float interfaces for FP immediate loads, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of ft. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  void Ldr(const FPRegister& ft, double imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    if (ft.Is64Bits()) {
      ldr(ft, imm);
    } else {
      ldr(ft, static_cast<float>(imm));
    }
  }
  void Ldr(const FPRegister& ft, float imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    if (ft.Is32Bits()) {
      ldr(ft, imm);
    } else {
      ldr(ft, static_cast<double>(imm));
    }
  }
  void Ldr(const Register& rt, uint64_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    ldr(rt, imm);
  }
  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    lsl(rd, rn, shift);
  }
  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    lslv(rd, rn, rm);
  }
  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    lsr(rd, rn, shift);
  }
  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    lsrv(rd, rn, rm);
  }
  void Madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    madd(rd, rn, rm, ra);
  }
  void Mneg(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    mneg(rd, rn, rm);
  }
  void Mov(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    mov(rd, rn);
  }
  void Movk(const Register& rd, uint64_t imm, int shift = -1) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    movk(rd, imm, shift);
  }
  void Mrs(const Register& rt, SystemRegister sysreg) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    mrs(rt, sysreg);
  }
  void Msr(SystemRegister sysreg, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    msr(sysreg, rt);
  }
  void Msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    msub(rd, rn, rm, ra);
  }
  void Mul(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    mul(rd, rn, rm);
  }
  void Nop() {
    VIXL_ASSERT(allow_macro_instructions_);
    nop();
  }
  void Rbit(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    rbit(rd, rn);
  }
  void Ret(const Register& xn = lr) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    ret(xn);
  }
  void Rev(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    rev(rd, rn);
  }
  void Rev16(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    rev16(rd, rn);
  }
  void Rev32(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    rev32(rd, rn);
  }
  void Ror(const Register& rd, const Register& rs, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rs.IsZero());
    ror(rd, rs, shift);
  }
  void Ror(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    rorv(rd, rn, rm);
  }
  void Sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    sbfiz(rd, rn, lsb, width);
  }
  void Sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    sbfx(rd, rn, lsb, width);
  }
  void Scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    scvtf(fd, rn, fbits);
  }
  void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    sdiv(rd, rn, rm);
  }
  void Smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    smaddl(rd, rn, rm, ra);
  }
  void Smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    smsubl(rd, rn, rm, ra);
  }
  void Smull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    smull(rd, rn, rm);
  }
  void Smulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    smulh(xd, xn, xm);
  }
  void Stnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    stnp(rt, rt2, dst);
  }
  void Stp(const CPURegister& rt,
           const CPURegister& rt2,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    stp(rt, rt2, dst);
  }
  void Sxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    sxtb(rd, rn);
  }
  void Sxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    sxth(rd, rn);
  }
  void Sxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    sxtw(rd, rn);
  }
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    tbnz(rt, bit_pos, label);
  }
  void Tbz(const Register& rt, unsigned bit_pos, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    tbz(rt, bit_pos, label);
  }
  void Ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    ubfiz(rd, rn, lsb, width);
  }
  void Ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    ubfx(rd, rn, lsb, width);
  }
  void Ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    ucvtf(fd, rn, fbits);
  }
  void Udiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    udiv(rd, rn, rm);
  }
  void Umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    umaddl(rd, rn, rm, ra);
  }
  void Umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    umsubl(rd, rn, rm, ra);
  }
  void Unreachable() {
    VIXL_ASSERT(allow_macro_instructions_);
#ifdef USE_SIMULATOR
    hlt(kUnreachableOpcode);
#else
    // Branch to 0 to generate a segfault.
    // lr - kInstructionSize is the address of the offending instruction.
    blr(xzr);
#endif
  }
  void Uxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    uxtb(rd, rn);
  }
  void Uxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    uxth(rd, rn);
  }
  void Uxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    uxtw(rd, rn);
  }

  // Push the system stack pointer (sp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed.
  //
  // This method asserts that StackPointer() is not sp, since the call does
  // not make sense in that context.
  //
  // TODO: This method can only accept values of 'space' that can be encoded in
  // one instruction. Refer to the implementation for details.
  void BumpSystemStackPointer(const Operand& space);

#ifdef DEBUG
  void SetAllowMacroInstructions(bool value) {
    allow_macro_instructions_ = value;
  }

  bool AllowMacroInstructions() const {
    return allow_macro_instructions_;
  }
#endif

  // Set the current stack pointer, but don't generate any code.
  void SetStackPointer(const Register& stack_pointer) {
    VIXL_ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  const Register& StackPointer() const {
    return sp_;
  }
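  //
  // Illustrative usage (a sketch only; the choice of x28 is just an example):
  //
  //   masm.SetStackPointer(x28);   // Use x28 for subsequent stack operations.
  //   masm.Push(x0, x1);           // Accesses memory below x28; sp is also
  //                                // moved as the ABI requires.
  //   ...
  //   masm.Pop(x1, x0);
  //   masm.SetStackPointer(sp);    // Switch back to the default, sp.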

  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }

  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // At the moment it is only possible to print the value of sp if it is the
  // current stack pointer. Otherwise, the MacroAssembler will automatically
  // update sp on every push (using BumpSystemStackPointer), so determining its
  // value is difficult.
  //
  // Format placeholders that refer to more than one argument, or to a specific
  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
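  //
  // Illustrative usage (a sketch only; the format string and registers are
  // just examples, chosen to match the placeholder rules described above):
  //
  //   masm.Printf("count: %d, ratio: %f\n", w0, d0);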
  void Printf(const char * format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char * format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Trace control when running the debug simulator.
  //
  // For example:
  //
  // __ Trace(LOG_REGS, TRACE_ENABLE);
  // Will add register values to the trace if they are not already being logged.
  //
  // __ Trace(LOG_DISASM, TRACE_DISABLE);
  // Will stop logging disassembly. It has no effect if the disassembly wasn't
  // already being logged.
  void Trace(TraceParameters parameters, TraceCommand command);

  // Log the requested data independently of what is being traced.
  //
  // For example:
  //
  // __ Log(LOG_FLAGS)
  // Will output the flags.
  void Log(TraceParameters parameters);

  // Enable or disable instrumentation when an Instrument visitor is attached to
  // the simulator.
  void EnableInstrumentation();
  void DisableInstrumentation();

  // Add a marker to the instrumentation data produced by an Instrument visitor.
  // The name is a two-character string that will be attached to the marker in
  // the output data.
  void AnnotateInstrumentation(const char* marker_name);

 private:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together setup code for a large block of
  // registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);

  // Perform necessary maintenance operations before a push or pop.
  //
  // Note that size is per register, and is specified in bytes.
  void PrepareForPush(int count, int size);
  void PrepareForPop(int count, int size);

#ifdef DEBUG
  // Tell whether any of the macro instructions can be used. When false, the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_;
#endif

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;
};


// Use this scope when you need a one-to-one mapping between methods and
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
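//
// Illustrative usage (a sketch only): exactly two instructions are emitted
// below, and only raw assembler methods (lower-case mnemonics) are used
// inside the scope:
//
//   {
//     InstructionAccurateScope scope(&masm, 2);
//     masm.add(x0, x0, x1);
//     masm.sub(x2, x2, x3);
//   }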
class InstructionAccurateScope {
 public:
  explicit InstructionAccurateScope(MacroAssembler* masm)
      : masm_(masm) {
    masm_->BlockLiteralPool();
#ifdef DEBUG
    size_ = 0;
    old_allow_macro_instructions_ = masm_->AllowMacroInstructions();
    masm_->SetAllowMacroInstructions(false);
#endif
  }

  InstructionAccurateScope(MacroAssembler* masm, int count)
      : masm_(masm) {
    USE(count);
    masm_->BlockLiteralPool();
#ifdef DEBUG
    size_ = count * kInstructionSize;
    masm_->bind(&start_);
    old_allow_macro_instructions_ = masm_->AllowMacroInstructions();
    masm_->SetAllowMacroInstructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    masm_->ReleaseLiteralPool();
#ifdef DEBUG
    if (start_.IsBound()) {
      VIXL_ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    masm_->SetAllowMacroInstructions(old_allow_macro_instructions_);
#endif
  }

 private:
  MacroAssembler* masm_;
#ifdef DEBUG
  uint64_t size_;
  Label start_;
  bool old_allow_macro_instructions_;
#endif
};


// This scope utility allows scratch registers to be managed safely. The
// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the MacroAssembler's lists will be restored to their
// original state, even if the lists were modified by some other means.
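//
// Illustrative usage (a sketch only):
//
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();   // Taken from masm.TmpList().
//     masm.Mov(scratch, 0x1234567890);
//     ...
//   }   // 'scratch' is returned to the pool when the scope ends.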
class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : available_(masm->TmpList()),
        availablefp_(masm->FPTmpList()),
        old_available_(available_->list()),
        old_availablefp_(availablefp_->list()) {
    VIXL_ASSERT(available_->type() == CPURegister::kRegister);
    VIXL_ASSERT(availablefp_->type() == CPURegister::kFPRegister);
  }


  ~UseScratchRegisterScope();


  bool IsAvailable(const CPURegister& reg) const;


  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }


  Register AcquireSameSizeAs(const Register& reg);
  FPRegister AcquireSameSizeAs(const FPRegister& reg);


  // Explicitly release an acquired (or excluded) register, putting it back in
  // the appropriate temps list.
  void Release(const CPURegister& reg);


  // Make the specified registers available as scratch registers for the
  // duration of this scope.
  void Include(const CPURegList& list);
  void Include(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Include(const FPRegister& reg1,
               const FPRegister& reg2 = NoFPReg,
               const FPRegister& reg3 = NoFPReg,
               const FPRegister& reg4 = NoFPReg);


  // Make sure that the specified registers are not available in this scope.
  // This can be used to prevent helper functions from using sensitive
  // registers, for example.
  void Exclude(const CPURegList& list);
  void Exclude(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Exclude(const FPRegister& reg1,
               const FPRegister& reg2 = NoFPReg,
               const FPRegister& reg3 = NoFPReg,
               const FPRegister& reg4 = NoFPReg);
  void Exclude(const CPURegister& reg1,
               const CPURegister& reg2 = NoCPUReg,
               const CPURegister& reg3 = NoCPUReg,
               const CPURegister& reg4 = NoCPUReg);


  // Prevent any scratch registers from being used in this scope.
  void ExcludeAll();


 private:
  static CPURegister AcquireNextAvailable(CPURegList* available);

  static void ReleaseByCode(CPURegList* available, int code);

  static void ReleaseByRegList(CPURegList* available,
                               RegList regs);

  static void IncludeByRegList(CPURegList* available,
                               RegList exclude);

  static void ExcludeByRegList(CPURegList* available,
                               RegList exclude);

  // Available scratch registers.
  CPURegList* available_;     // kRegister
  CPURegList* availablefp_;   // kFPRegister

  // The state of the available lists at the start of this scope.
  RegList old_available_;     // kRegister
  RegList old_availablefp_;   // kFPRegister
};


}  // namespace vixl

#endif  // VIXL_A64_MACRO_ASSEMBLER_A64_H_