macro-assembler-aarch64.h revision 9fbd11bbc6a56071f455df28e08854a848f46c3b
1// Copyright 2015, VIXL authors
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are met:
6//
7//   * Redistributions of source code must retain the above copyright notice,
8//     this list of conditions and the following disclaimer.
9//   * Redistributions in binary form must reproduce the above copyright notice,
10//     this list of conditions and the following disclaimer in the documentation
11//     and/or other materials provided with the distribution.
12//   * Neither the name of ARM Limited nor the names of its contributors may be
13//     used to endorse or promote products derived from this software without
14//     specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27#ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
28#define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
29
30#include <algorithm>
31#include <limits>
32
33#include "globals-vixl.h"
34
35#include "aarch64/assembler-aarch64.h"
36#include "aarch64/debugger-aarch64.h"
37#include "aarch64/instrument-aarch64.h"
38#include "aarch64/simulator-aarch64.h"
39
40
41#define LS_MACRO_LIST(V)                                     \
42  V(Ldrb, Register&, rt, LDRB_w)                             \
43  V(Strb, Register&, rt, STRB_w)                             \
44  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
45  V(Ldrh, Register&, rt, LDRH_w)                             \
46  V(Strh, Register&, rt, STRH_w)                             \
47  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
48  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
49  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
50  V(Ldrsw, Register&, rt, LDRSW_x)
51
52
53#define LSPAIR_MACRO_LIST(V)                             \
54  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
55  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
56  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
57
58namespace vixl {
59namespace aarch64 {
60
61// Forward declarations
62class MacroAssembler;
63class UseScratchRegisterScope;
64
65class Pool {
66 public:
67  explicit Pool(MacroAssembler* masm)
68      : checkpoint_(kNoCheckpointRequired), masm_(masm) {
69    Reset();
70  }
71
72  void Reset() {
73    checkpoint_ = kNoCheckpointRequired;
74    monitor_ = 0;
75  }
76
77  void Block() { monitor_++; }
78  void Release();
79  bool IsBlocked() const { return monitor_ != 0; }
80
81  static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX;
82
83  void SetNextCheckpoint(ptrdiff_t checkpoint);
84  ptrdiff_t GetCheckpoint() const { return checkpoint_; }
85  VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) {
86    return GetCheckpoint();
87  }
88
89  enum EmitOption { kBranchRequired, kNoBranchRequired };
90
91 protected:
92  // Next buffer offset at which a check is required for this pool.
93  ptrdiff_t checkpoint_;
94  // Indicates whether the emission of this pool is blocked.
95  int monitor_;
96  // The MacroAssembler using this pool.
97  MacroAssembler* masm_;
98};
99
100
101class LiteralPool : public Pool {
102 public:
103  explicit LiteralPool(MacroAssembler* masm);
104  ~LiteralPool();
105  void Reset();
106
107  void AddEntry(RawLiteral* literal);
108  bool IsEmpty() const { return entries_.empty(); }
109  size_t GetSize() const;
110  VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); }
111
112  size_t GetMaxSize() const;
113  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }
114
115  size_t GetOtherPoolsMaxSize() const;
116  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
117    return GetOtherPoolsMaxSize();
118  }
119
120  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
121  void Emit(EmitOption option = kNoBranchRequired);
122
123  void SetNextRecommendedCheckpoint(ptrdiff_t offset);
124  ptrdiff_t GetNextRecommendedCheckpoint();
125  VIXL_DEPRECATED("GetNextRecommendedCheckpoint",
126                  ptrdiff_t NextRecommendedCheckpoint()) {
127    return GetNextRecommendedCheckpoint();
128  }
129
130  void UpdateFirstUse(ptrdiff_t use_position);
131
132  void DeleteOnDestruction(RawLiteral* literal) {
133    deleted_on_destruction_.push_back(literal);
134  }
135
136  // Recommended, not exact, since the pool can be blocked for short periods.
137  static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes;
138
139 private:
140  std::vector<RawLiteral*> entries_;
141  size_t size_;
142  ptrdiff_t first_use_;
143  // The parent class `Pool` provides a `checkpoint_`, which is the buffer
144  // offset before which a check *must* occur. This recommended checkpoint
145  // indicates when we would like to start emitting the constant pool. The
146  // MacroAssembler can, but does not have to, check the buffer when the
147  // checkpoint is reached.
148  ptrdiff_t recommended_checkpoint_;
149
150  std::vector<RawLiteral*> deleted_on_destruction_;
151};
152
153
154inline size_t LiteralPool::GetSize() const {
155  // Account for the pool header.
156  return size_ + kInstructionSize;
157}
158
159
160inline size_t LiteralPool::GetMaxSize() const {
161  // Account for the potential branch over the pool.
162  return GetSize() + kInstructionSize;
163}
164
165
166inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() {
167  return first_use_ + kRecommendedLiteralPoolRange;
168}
169
170
171class VeneerPool : public Pool {
172 public:
173  explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {}
174
175  void Reset();
176
177  void Block() { monitor_++; }
178  void Release();
179  bool IsBlocked() const { return monitor_ != 0; }
180  bool IsEmpty() const { return unresolved_branches_.IsEmpty(); }
181
182  class BranchInfo {
183   public:
184    BranchInfo()
185        : max_reachable_pc_(0),
186          pc_offset_(0),
187          label_(NULL),
188          branch_type_(UnknownBranchType) {}
189    BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type)
190        : pc_offset_(offset), label_(label), branch_type_(branch_type) {
191      max_reachable_pc_ =
192          pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_);
193    }
194
195    static bool IsValidComparison(const BranchInfo& branch_1,
196                                  const BranchInfo& branch_2) {
197      // BranchInfo objects are always compared against other objects with
198      // the same branch type.
199      if (branch_1.branch_type_ != branch_2.branch_type_) {
200        return false;
201      }
202      // Since we should never have two branch infos with the same offset, it
203      // might seem that we should check that the offsets are different.
204      // However, the operators may also be used to *search* for a branch info
205      // in the set.
206      bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_);
207      return (!same_offsets ||
208              ((branch_1.label_ == branch_2.label_) &&
209               (branch_1.max_reachable_pc_ == branch_2.max_reachable_pc_)));
210    }
211
212    // We must provide comparison operators to work with InvalSet.
213    bool operator==(const BranchInfo& other) const {
214      VIXL_ASSERT(IsValidComparison(*this, other));
215      return pc_offset_ == other.pc_offset_;
216    }
217    bool operator<(const BranchInfo& other) const {
218      VIXL_ASSERT(IsValidComparison(*this, other));
219      return pc_offset_ < other.pc_offset_;
220    }
221    bool operator<=(const BranchInfo& other) const {
222      VIXL_ASSERT(IsValidComparison(*this, other));
223      return pc_offset_ <= other.pc_offset_;
224    }
225    bool operator>(const BranchInfo& other) const {
226      VIXL_ASSERT(IsValidComparison(*this, other));
227      return pc_offset_ > other.pc_offset_;
228    }
229
230    // Maximum position reachable by the branch using a positive branch offset.
231    ptrdiff_t max_reachable_pc_;
232    // Offset of the branch in the code generation buffer.
233    ptrdiff_t pc_offset_;
234    // The label branched to.
235    Label* label_;
236    ImmBranchType branch_type_;
237  };
238
239  bool BranchTypeUsesVeneers(ImmBranchType type) {
240    return (type != UnknownBranchType) && (type != UncondBranchType);
241  }
242
243  void RegisterUnresolvedBranch(ptrdiff_t branch_pos,
244                                Label* label,
245                                ImmBranchType branch_type);
246  void DeleteUnresolvedBranchInfoForLabel(Label* label);
247
248  bool ShouldEmitVeneer(int64_t max_reachable_pc, size_t amount);
249  bool ShouldEmitVeneers(size_t amount) {
250    return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount);
251  }
252
253  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
254  void Emit(EmitOption option, size_t margin);
255
256  // The code size generated for a veneer. Currently one branch instruction.
257  // This is for code size checking purposes, and can be extended in the future
258  // for example, if we decide to add nops between the veneers.
259  static const int kVeneerCodeSize = 1 * kInstructionSize;
260  // The maximum size of code other than veneers that can be generated when
261  // emitting a veneer pool. Currently there can be an additional branch to jump
262  // over the pool.
263  static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize;
264
265  void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); }
266
267  int GetNumberOfPotentialVeneers() const {
268    return static_cast<int>(unresolved_branches_.GetSize());
269  }
270  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
271                  int NumberOfPotentialVeneers() const) {
272    return GetNumberOfPotentialVeneers();
273  }
274
275  size_t GetMaxSize() const {
276    return kPoolNonVeneerCodeSize +
277           unresolved_branches_.GetSize() * kVeneerCodeSize;
278  }
279  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }
280
281  size_t GetOtherPoolsMaxSize() const;
282  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
283    return GetOtherPoolsMaxSize();
284  }
285
286  static const int kNPreallocatedInfos = 4;
287  static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX;
288  static const size_t kReclaimFrom = 128;
289  static const size_t kReclaimFactor = 16;
290
291 private:
292  typedef InvalSet<BranchInfo,
293                   kNPreallocatedInfos,
294                   ptrdiff_t,
295                   kInvalidOffset,
296                   kReclaimFrom,
297                   kReclaimFactor> BranchInfoTypedSetBase;
298  typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase;
299
300  class BranchInfoTypedSet : public BranchInfoTypedSetBase {
301   public:
302    BranchInfoTypedSet() : BranchInfoTypedSetBase() {}
303
304    ptrdiff_t GetFirstLimit() {
305      if (empty()) {
306        return kInvalidOffset;
307      }
308      return GetMinElementKey();
309    }
310    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
311      return GetFirstLimit();
312    }
313  };
314
315  class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase {
316   public:
317    BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {}
318    explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set)
319        : BranchInfoTypedSetIterBase(typed_set) {}
320
321    // TODO: Remove these and use the STL-like interface instead.
322    using BranchInfoTypedSetIterBase::Advance;
323    using BranchInfoTypedSetIterBase::Current;
324  };
325
326  class BranchInfoSet {
327   public:
328    void insert(BranchInfo branch_info) {
329      ImmBranchType type = branch_info.branch_type_;
330      VIXL_ASSERT(IsValidBranchType(type));
331      typed_set_[BranchIndexFromType(type)].insert(branch_info);
332    }
333
334    void erase(BranchInfo branch_info) {
335      if (IsValidBranchType(branch_info.branch_type_)) {
336        int index =
337            BranchInfoSet::BranchIndexFromType(branch_info.branch_type_);
338        typed_set_[index].erase(branch_info);
339      }
340    }
341
342    size_t GetSize() const {
343      size_t res = 0;
344      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
345        res += typed_set_[i].size();
346      }
347      return res;
348    }
349    VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); }
350
351    bool IsEmpty() const {
352      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
353        if (!typed_set_[i].empty()) {
354          return false;
355        }
356      }
357      return true;
358    }
359    VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); }
360
361    ptrdiff_t GetFirstLimit() {
362      ptrdiff_t res = kInvalidOffset;
363      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
364        res = std::min(res, typed_set_[i].GetFirstLimit());
365      }
366      return res;
367    }
368    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
369      return GetFirstLimit();
370    }
371
372    void Reset() {
373      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
374        typed_set_[i].clear();
375      }
376    }
377
378    static ImmBranchType BranchTypeFromIndex(int index) {
379      switch (index) {
380        case 0:
381          return CondBranchType;
382        case 1:
383          return CompareBranchType;
384        case 2:
385          return TestBranchType;
386        default:
387          VIXL_UNREACHABLE();
388          return UnknownBranchType;
389      }
390    }
391    static int BranchIndexFromType(ImmBranchType branch_type) {
392      switch (branch_type) {
393        case CondBranchType:
394          return 0;
395        case CompareBranchType:
396          return 1;
397        case TestBranchType:
398          return 2;
399        default:
400          VIXL_UNREACHABLE();
401          return 0;
402      }
403    }
404
405    bool IsValidBranchType(ImmBranchType branch_type) {
406      return (branch_type != UnknownBranchType) &&
407             (branch_type != UncondBranchType);
408    }
409
410   private:
411    static const int kNumberOfTrackedBranchTypes = 3;
412    BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes];
413
414    friend class VeneerPool;
415    friend class BranchInfoSetIterator;
416  };
417
418  class BranchInfoSetIterator {
419   public:
420    explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) {
421      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
422        new (&sub_iterator_[i])
423            BranchInfoTypedSetIterator(&(set_->typed_set_[i]));
424      }
425    }
426
427    VeneerPool::BranchInfo* Current() {
428      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
429        if (!sub_iterator_[i].Done()) {
430          return sub_iterator_[i].Current();
431        }
432      }
433      VIXL_UNREACHABLE();
434      return NULL;
435    }
436
437    void Advance() {
438      VIXL_ASSERT(!Done());
439      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
440        if (!sub_iterator_[i].Done()) {
441          sub_iterator_[i].Advance();
442          return;
443        }
444      }
445      VIXL_UNREACHABLE();
446    }
447
448    bool Done() const {
449      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
450        if (!sub_iterator_[i].Done()) return false;
451      }
452      return true;
453    }
454
455    void AdvanceToNextType() {
456      VIXL_ASSERT(!Done());
457      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
458        if (!sub_iterator_[i].Done()) {
459          sub_iterator_[i].Finish();
460          return;
461        }
462      }
463      VIXL_UNREACHABLE();
464    }
465
466    void DeleteCurrentAndAdvance() {
467      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
468        if (!sub_iterator_[i].Done()) {
469          sub_iterator_[i].DeleteCurrentAndAdvance();
470          return;
471        }
472      }
473    }
474
475   private:
476    BranchInfoSet* set_;
477    BranchInfoTypedSetIterator
478        sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes];
479  };
480
481  ptrdiff_t GetNextCheckPoint() {
482    if (unresolved_branches_.IsEmpty()) {
483      return kNoCheckpointRequired;
484    } else {
485      return unresolved_branches_.GetFirstLimit();
486    }
487  }
488  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
489    return GetNextCheckPoint();
490  }
491
492  // Information about unresolved (forward) branches.
493  BranchInfoSet unresolved_branches_;
494};
495
496
497// This scope has the following purposes:
498//  * Acquire/Release the underlying assembler's code buffer.
499//     * This is mandatory before emitting.
500//  * Emit the literal or veneer pools if necessary before emitting the
501//    macro-instruction.
502//  * Ensure there is enough space to emit the macro-instruction.
503class EmissionCheckScope : public CodeBufferCheckScope {
504 public:
505  EmissionCheckScope(MacroAssembler* masm, size_t size);
506  ~EmissionCheckScope();
507
508 protected:
509  MacroAssembler* masm_;
510};
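// For illustration, a sketch of how an EmissionCheckScope can be used around
// raw Assembler calls; `masm` and the registers are placeholders assumed to
// exist in the caller:
//
//   {
//     EmissionCheckScope guard(&masm, 2 * kInstructionSize);
//     // Pools have been emitted if needed, and the buffer is guaranteed to
//     // have room for the two instructions below.
//     masm.add(x0, x0, 1);
//     masm.sub(x1, x1, 1);
//   }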
511
512
513// Helper for common emission checks.
514// The macro-instruction maps to a single instruction.
515class SingleEmissionCheckScope : public EmissionCheckScope {
516 public:
517  explicit SingleEmissionCheckScope(MacroAssembler* masm)
518      : EmissionCheckScope(masm, kInstructionSize) {}
519};
520
521
522// The macro-instruction is a "typical" macro-instruction. Typical macro-
523// instructions only emit a few instructions, 'a few' being defined as 8 here.
524class MacroEmissionCheckScope : public EmissionCheckScope {
525 public:
526  explicit MacroEmissionCheckScope(MacroAssembler* masm)
527      : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}
528
529 private:
530  static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
531};
532
533
534enum BranchType {
535  // Copies of architectural conditions.
536  // The associated conditions can be used in place of these; the code will
537  // take care of reinterpreting them with the correct type.
538  integer_eq = eq,
539  integer_ne = ne,
540  integer_hs = hs,
541  integer_lo = lo,
542  integer_mi = mi,
543  integer_pl = pl,
544  integer_vs = vs,
545  integer_vc = vc,
546  integer_hi = hi,
547  integer_ls = ls,
548  integer_ge = ge,
549  integer_lt = lt,
550  integer_gt = gt,
551  integer_le = le,
552  integer_al = al,
553  integer_nv = nv,
554
555  // These two are *different* from the architectural codes al and nv.
556  // 'always' is used to generate unconditional branches.
557  // 'never' is used to not generate a branch (generally as the inverse
558  // branch type of 'always').
559  always,
560  never,
561  // cbz and cbnz
562  reg_zero,
563  reg_not_zero,
564  // tbz and tbnz
565  reg_bit_clear,
566  reg_bit_set,
567
568  // Aliases.
569  kBranchTypeFirstCondition = eq,
570  kBranchTypeLastCondition = nv,
571  kBranchTypeFirstUsingReg = reg_zero,
572  kBranchTypeFirstUsingBit = reg_bit_clear
573};
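// For illustration, a sketch of how these branch types are typically used with
// the generic `B(Label*, BranchType, ...)` macro-instruction declared in
// MacroAssembler below; `masm`, `done` and the registers are placeholders:
//
//   masm.B(&done, integer_eq);          // Emits b.eq, like B(&done, eq).
//   masm.B(&done, reg_zero, x0);        // Emits cbz x0, <done>.
//   masm.B(&done, reg_bit_set, x1, 3);  // Emits tbnz x1, #3, <done>.
//   masm.B(&done, always);              // Emits an unconditional branch.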
574
575
576enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
577
578
579class MacroAssembler : public Assembler {
580 public:
581  explicit MacroAssembler(
582      PositionIndependentCodeOption pic = PositionIndependentCode);
583  MacroAssembler(size_t capacity,
584                 PositionIndependentCodeOption pic = PositionIndependentCode);
585  MacroAssembler(byte* buffer,
586                 size_t capacity,
587                 PositionIndependentCodeOption pic = PositionIndependentCode);
588  ~MacroAssembler();
589
590  // Start generating code from the beginning of the buffer, discarding any code
591  // and data that has already been emitted into the buffer.
592  //
593  // In order to avoid any accidental transfer of state, Reset ASSERTs that the
594  // constant pool is not blocked.
595  void Reset();
596
597  // Finalize a code buffer of generated instructions. This function must be
598  // called before executing or copying code from the buffer.
599  void FinalizeCode();
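  // For illustration, a minimal end-to-end sketch; the names and the constant
  // are placeholders:
  //
  //   MacroAssembler masm;
  //   masm.Mov(x0, 42);
  //   masm.Ret();
  //   masm.FinalizeCode();
  //   // The buffer now holds finalized code that can be copied or executed.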
600
601
602  // Constant generation helpers.
603  // These functions return the number of instructions required to move the
604  // immediate into the destination register. Also, if the masm pointer is
605  // non-null, it generates the code to do so.
606  // The two features are implemented using one function to avoid duplication of
607  // the logic.
608  // The function can be used to evaluate the cost of synthesizing an
609  // instruction using 'mov immediate' instructions. A user might prefer loading
610  // a constant using the literal pool instead of using multiple 'mov immediate'
611  // instructions.
612  static int MoveImmediateHelper(MacroAssembler* masm,
613                                 const Register& rd,
614                                 uint64_t imm);
615  static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
616                                          const Register& dst,
617                                          int64_t imm);
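  // For illustration, a sketch of using the helper above to decide between
  // mov-immediate synthesis and a literal-pool load; `masm`, `x0` and the
  // constant are placeholders:
  //
  //   uint64_t imm = UINT64_C(0x1234567890abcdef);
  //   // A NULL masm pointer only counts instructions; no code is generated.
  //   int cost = MacroAssembler::MoveImmediateHelper(NULL, x0, imm);
  //   if (cost <= 2) {
  //     masm.Mov(x0, imm);  // Synthesise with mov/movk.
  //   } else {
  //     masm.Ldr(x0, imm);  // Load from the literal pool instead.
  //   }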
618
619
620  // Logical macros.
621  void And(const Register& rd, const Register& rn, const Operand& operand);
622  void Ands(const Register& rd, const Register& rn, const Operand& operand);
623  void Bic(const Register& rd, const Register& rn, const Operand& operand);
624  void Bics(const Register& rd, const Register& rn, const Operand& operand);
625  void Orr(const Register& rd, const Register& rn, const Operand& operand);
626  void Orn(const Register& rd, const Register& rn, const Operand& operand);
627  void Eor(const Register& rd, const Register& rn, const Operand& operand);
628  void Eon(const Register& rd, const Register& rn, const Operand& operand);
629  void Tst(const Register& rn, const Operand& operand);
630  void LogicalMacro(const Register& rd,
631                    const Register& rn,
632                    const Operand& operand,
633                    LogicalOp op);
634
635  // Add and sub macros.
636  void Add(const Register& rd,
637           const Register& rn,
638           const Operand& operand,
639           FlagsUpdate S = LeaveFlags);
640  void Adds(const Register& rd, const Register& rn, const Operand& operand);
641  void Sub(const Register& rd,
642           const Register& rn,
643           const Operand& operand,
644           FlagsUpdate S = LeaveFlags);
645  void Subs(const Register& rd, const Register& rn, const Operand& operand);
646  void Cmn(const Register& rn, const Operand& operand);
647  void Cmp(const Register& rn, const Operand& operand);
648  void Neg(const Register& rd, const Operand& operand);
649  void Negs(const Register& rd, const Operand& operand);
650
651  void AddSubMacro(const Register& rd,
652                   const Register& rn,
653                   const Operand& operand,
654                   FlagsUpdate S,
655                   AddSubOp op);
656
657  // Add/sub with carry macros.
658  void Adc(const Register& rd, const Register& rn, const Operand& operand);
659  void Adcs(const Register& rd, const Register& rn, const Operand& operand);
660  void Sbc(const Register& rd, const Register& rn, const Operand& operand);
661  void Sbcs(const Register& rd, const Register& rn, const Operand& operand);
662  void Ngc(const Register& rd, const Operand& operand);
663  void Ngcs(const Register& rd, const Operand& operand);
664  void AddSubWithCarryMacro(const Register& rd,
665                            const Register& rn,
666                            const Operand& operand,
667                            FlagsUpdate S,
668                            AddSubWithCarryOp op);
669
670  // Move macros.
671  void Mov(const Register& rd, uint64_t imm);
672  void Mov(const Register& rd,
673           const Operand& operand,
674           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
675  void Mvn(const Register& rd, uint64_t imm) {
676    Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask));
677  }
678  void Mvn(const Register& rd, const Operand& operand);
679
680  // Try to move an immediate into the destination register in a single
681  // instruction. Returns true for success, and updates the contents of dst.
682  // Returns false otherwise.
683  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
684
685  // Move an immediate into register dst, and return an Operand object for
686  // use with a subsequent instruction that accepts a shift. The value moved
687  // into dst is not necessarily equal to imm; it may have had a shifting
688  // operation applied to it that will be subsequently undone by the shift
689  // applied in the Operand.
690  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
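  // For illustration, a sketch of the two helpers above; `masm` and the
  // registers are placeholders, and the immediates are arbitrary:
  //
  //   if (!masm.TryOneInstrMoveImmediate(x2, 0x12345678)) {
  //     // More than one instruction is needed; fall back to a full Mov.
  //     masm.Mov(x2, 0x12345678);
  //   }
  //   // Materialise a shifted immediate so that the shift is folded into the
  //   // Add operand rather than pre-applied to the temporary register.
  //   Operand shifted = masm.MoveImmediateForShiftedOp(x2, INT64_C(0x3f0000));
  //   masm.Add(x0, x1, shifted);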
691
692  void Move(const GenericOperand& dst, const GenericOperand& src);
693
694  // Synthesises the address represented by a MemOperand into a register.
695  void ComputeAddress(const Register& dst, const MemOperand& mem_op);
696
697  // Conditional macros.
698  void Ccmp(const Register& rn,
699            const Operand& operand,
700            StatusFlags nzcv,
701            Condition cond);
702  void Ccmn(const Register& rn,
703            const Operand& operand,
704            StatusFlags nzcv,
705            Condition cond);
706  void ConditionalCompareMacro(const Register& rn,
707                               const Operand& operand,
708                               StatusFlags nzcv,
709                               Condition cond,
710                               ConditionalCompareOp op);
711
712  // On return, the boolean values pointed to will indicate whether `left` and
713  // `right` should be synthesised in a temporary register.
714  static void GetCselSynthesisInformation(const Register& rd,
715                                          const Operand& left,
716                                          const Operand& right,
717                                          bool* should_synthesise_left,
718                                          bool* should_synthesise_right) {
719    // Note that the helper does not need to look at the condition.
720    CselHelper(NULL,
721               rd,
722               left,
723               right,
724               eq,
725               should_synthesise_left,
726               should_synthesise_right);
727  }
728
729  void Csel(const Register& rd,
730            const Operand& left,
731            const Operand& right,
732            Condition cond) {
733    CselHelper(this, rd, left, right, cond);
734  }
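  // For illustration, a sketch of how the two helpers above fit together;
  // `masm` and the registers are placeholders:
  //
  //   bool synth_left, synth_right;
  //   MacroAssembler::GetCselSynthesisInformation(x0, Operand(x1), Operand(42),
  //                                               &synth_left, &synth_right);
  //   // The two flags report whether each operand would need a temporary
  //   // register, so the caller can plan scratch-register usage before
  //   // emitting the conditional select.
  //   masm.Csel(x0, x1, 42, eq);  // x0 = (eq) ? x1 : 42.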
735
736// Load/store macros.
737#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
738  void FN(const REGTYPE REG, const MemOperand& addr);
739  LS_MACRO_LIST(DECLARE_FUNCTION)
740#undef DECLARE_FUNCTION
741
742  void LoadStoreMacro(const CPURegister& rt,
743                      const MemOperand& addr,
744                      LoadStoreOp op);
745
746#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
747  void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
748  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
749#undef DECLARE_FUNCTION
750
751  void LoadStorePairMacro(const CPURegister& rt,
752                          const CPURegister& rt2,
753                          const MemOperand& addr,
754                          LoadStorePairOp op);
755
756  void Prfm(PrefetchOperation op, const MemOperand& addr);
757
758  // Push or pop up to 4 registers of the same width to or from the stack,
759  // using the current stack pointer as set by SetStackPointer.
760  //
761  // If an argument register is 'NoReg', all further arguments are also assumed
762  // to be 'NoReg', and are thus not pushed or popped.
763  //
764  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
765  // to "Push(a); Push(b);".
766  //
767  // It is valid to push the same register more than once, and there is no
768  // restriction on the order in which registers are specified.
769  //
770  // It is not valid to pop into the same register more than once in one
771  // operation, not even into the zero register.
772  //
773  // If the current stack pointer (as set by SetStackPointer) is sp, then it
774  // must be aligned to 16 bytes on entry and the total size of the specified
775  // registers must also be a multiple of 16 bytes.
776  //
777  // Even if the current stack pointer is not the system stack pointer (sp),
778  // Push (and derived methods) will still modify the system stack pointer in
779  // order to comply with ABI rules about accessing memory below the system
780  // stack pointer.
781  //
782  // Other than the registers passed into Pop, the stack pointer and (possibly)
783  // the system stack pointer, these methods do not modify any other registers.
784  void Push(const CPURegister& src0,
785            const CPURegister& src1 = NoReg,
786            const CPURegister& src2 = NoReg,
787            const CPURegister& src3 = NoReg);
788  void Pop(const CPURegister& dst0,
789           const CPURegister& dst1 = NoReg,
790           const CPURegister& dst2 = NoReg,
791           const CPURegister& dst3 = NoReg);
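  // For illustration, a matched Push/Pop sequence; `masm` and the registers
  // are placeholders, and four X registers keep the total size a multiple of
  // 16 bytes in case the current stack pointer is sp:
  //
  //   masm.Push(x0, x1, x2, x3);  // Equivalent to Push(x0); Push(x1); ...
  //   ...
  //   masm.Pop(x3, x2, x1, x0);   // Restore the registers in reverse order.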
792
793  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
794  // specifies the registers that are to be pushed or popped. Higher-numbered
795  // registers are associated with higher memory addresses (as in the A32 push
796  // and pop instructions).
797  //
798  // (Push|Pop)SizeRegList allow you to specify the register size as a
799  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
800  // supported.
801  //
802  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
803  void PushCPURegList(CPURegList registers);
804  void PopCPURegList(CPURegList registers);
805
806  void PushSizeRegList(
807      RegList registers,
808      unsigned reg_size,
809      CPURegister::RegisterType type = CPURegister::kRegister) {
810    PushCPURegList(CPURegList(type, reg_size, registers));
811  }
812  void PopSizeRegList(RegList registers,
813                      unsigned reg_size,
814                      CPURegister::RegisterType type = CPURegister::kRegister) {
815    PopCPURegList(CPURegList(type, reg_size, registers));
816  }
817  void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); }
818  void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); }
819  void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); }
820  void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); }
821  void PushDRegList(RegList regs) {
822    PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
823  }
824  void PopDRegList(RegList regs) {
825    PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
826  }
827  void PushSRegList(RegList regs) {
828    PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
829  }
830  void PopSRegList(RegList regs) {
831    PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
832  }
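  // For illustration, a sketch of the list-based forms; `masm` is a
  // placeholder and the register list is an arbitrary example (bit i of a
  // RegList represents register i):
  //
  //   RegList saved = (UINT64_C(1) << 19) | (UINT64_C(1) << 20) |
  //                   (UINT64_C(1) << 21) | (UINT64_C(1) << 22);
  //   masm.PushXRegList(saved);
  //   ...
  //   masm.PopXRegList(saved);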
833
834  // Push the specified register 'count' times.
835  void PushMultipleTimes(int count, Register src);
836
837  // Poke 'src' onto the stack. The offset is in bytes.
838  //
839  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
840  // must be aligned to 16 bytes.
841  void Poke(const Register& src, const Operand& offset);
842
843  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
844  //
845  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
846  // must be aligned to 16 bytes.
847  void Peek(const Register& dst, const Operand& offset);
848
849  // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
850  // specifies the registers that are to be peeked at or poked. Higher-numbered
851  // registers are associated with higher memory addresses.
852  //
853  // (Peek|Poke)SizeRegList allow you to specify the register size as a
854  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
855  // supported.
856  //
857  // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
858  void PeekCPURegList(CPURegList registers, int64_t offset) {
859    LoadCPURegList(registers, MemOperand(StackPointer(), offset));
860  }
861  void PokeCPURegList(CPURegList registers, int64_t offset) {
862    StoreCPURegList(registers, MemOperand(StackPointer(), offset));
863  }
864
865  void PeekSizeRegList(
866      RegList registers,
867      int64_t offset,
868      unsigned reg_size,
869      CPURegister::RegisterType type = CPURegister::kRegister) {
870    PeekCPURegList(CPURegList(type, reg_size, registers), offset);
871  }
872  void PokeSizeRegList(
873      RegList registers,
874      int64_t offset,
875      unsigned reg_size,
876      CPURegister::RegisterType type = CPURegister::kRegister) {
877    PokeCPURegList(CPURegList(type, reg_size, registers), offset);
878  }
879  void PeekXRegList(RegList regs, int64_t offset) {
880    PeekSizeRegList(regs, offset, kXRegSize);
881  }
882  void PokeXRegList(RegList regs, int64_t offset) {
883    PokeSizeRegList(regs, offset, kXRegSize);
884  }
885  void PeekWRegList(RegList regs, int64_t offset) {
886    PeekSizeRegList(regs, offset, kWRegSize);
887  }
888  void PokeWRegList(RegList regs, int64_t offset) {
889    PokeSizeRegList(regs, offset, kWRegSize);
890  }
891  void PeekDRegList(RegList regs, int64_t offset) {
892    PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
893  }
894  void PokeDRegList(RegList regs, int64_t offset) {
895    PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
896  }
897  void PeekSRegList(RegList regs, int64_t offset) {
898    PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
899  }
900  void PokeSRegList(RegList regs, int64_t offset) {
901    PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
902  }
903
904
905  // Claim or drop stack space without actually accessing memory.
906  //
907  // If the current stack pointer (as set by SetStackPointer) is sp, then it
908  // must be aligned to 16 bytes and the size claimed or dropped must be a
909  // multiple of 16 bytes.
910  void Claim(const Operand& size);
911  void Drop(const Operand& size);
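  // For illustration, a sketch combining Claim/Drop with Poke/Peek; `masm` and
  // the registers are placeholders, and a 16-byte claim keeps sp aligned:
  //
  //   masm.Claim(16);    // Reserve 16 bytes without touching memory.
  //   masm.Poke(x0, 0);  // Store x0 at [sp].
  //   masm.Poke(x1, 8);  // Store x1 at [sp + 8].
  //   ...
  //   masm.Peek(x1, 8);  // Reload the values.
  //   masm.Peek(x0, 0);
  //   masm.Drop(16);     // Release the reserved space.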
912
913  // Preserve the callee-saved registers (as defined by AAPCS64).
914  //
915  // Higher-numbered registers are pushed before lower-numbered registers, and
916  // thus get higher addresses.
917  // Floating-point registers are pushed before general-purpose registers, and
918  // thus get higher addresses.
919  //
920  // This method must not be called unless StackPointer() is sp, and it is
921  // aligned to 16 bytes.
922  void PushCalleeSavedRegisters();
923
924  // Restore the callee-saved registers (as defined by AAPCS64).
925  //
926  // Higher-numbered registers are popped after lower-numbered registers, and
927  // thus come from higher addresses.
928  // Floating-point registers are popped after general-purpose registers, and
929  // thus come from higher addresses.
930  //
931  // This method must not be called unless StackPointer() is sp, and it is
932  // aligned to 16 bytes.
933  void PopCalleeSavedRegisters();
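  // For illustration, a typical prologue/epilogue sketch using the helpers
  // above; `masm` is a placeholder and the stack pointer is assumed to be sp:
  //
  //   masm.PushCalleeSavedRegisters();
  //   // ... generated function body, free to clobber the callee-saved
  //   // registers ...
  //   masm.PopCalleeSavedRegisters();
  //   masm.Ret();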
934
935  void LoadCPURegList(CPURegList registers, const MemOperand& src);
936  void StoreCPURegList(CPURegList registers, const MemOperand& dst);
937
938  // Remaining instructions are simple pass-through calls to the assembler.
939  void Adr(const Register& rd, Label* label) {
940    VIXL_ASSERT(allow_macro_instructions_);
941    VIXL_ASSERT(!rd.IsZero());
942    SingleEmissionCheckScope guard(this);
943    adr(rd, label);
944  }
945  void Adrp(const Register& rd, Label* label) {
946    VIXL_ASSERT(allow_macro_instructions_);
947    VIXL_ASSERT(!rd.IsZero());
948    SingleEmissionCheckScope guard(this);
949    adrp(rd, label);
950  }
951  void Asr(const Register& rd, const Register& rn, unsigned shift) {
952    VIXL_ASSERT(allow_macro_instructions_);
953    VIXL_ASSERT(!rd.IsZero());
954    VIXL_ASSERT(!rn.IsZero());
955    SingleEmissionCheckScope guard(this);
956    asr(rd, rn, shift);
957  }
958  void Asr(const Register& rd, const Register& rn, const Register& rm) {
959    VIXL_ASSERT(allow_macro_instructions_);
960    VIXL_ASSERT(!rd.IsZero());
961    VIXL_ASSERT(!rn.IsZero());
962    VIXL_ASSERT(!rm.IsZero());
963    SingleEmissionCheckScope guard(this);
964    asrv(rd, rn, rm);
965  }
966
967  // Branch type inversion relies on these relations.
968  VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
969                     (reg_bit_clear == (reg_bit_set ^ 1)) &&
970                     (always == (never ^ 1)));
971
972  BranchType InvertBranchType(BranchType type) {
973    if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
974      return static_cast<BranchType>(
975          InvertCondition(static_cast<Condition>(type)));
976    } else {
977      return static_cast<BranchType>(type ^ 1);
978    }
979  }
980
981  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
982
983  void B(Label* label);
984  void B(Label* label, Condition cond);
985  void B(Condition cond, Label* label) { B(label, cond); }
986  void Bfm(const Register& rd,
987           const Register& rn,
988           unsigned immr,
989           unsigned imms) {
990    VIXL_ASSERT(allow_macro_instructions_);
991    VIXL_ASSERT(!rd.IsZero());
992    VIXL_ASSERT(!rn.IsZero());
993    SingleEmissionCheckScope guard(this);
994    bfm(rd, rn, immr, imms);
995  }
996  void Bfi(const Register& rd,
997           const Register& rn,
998           unsigned lsb,
999           unsigned width) {
1000    VIXL_ASSERT(allow_macro_instructions_);
1001    VIXL_ASSERT(!rd.IsZero());
1002    VIXL_ASSERT(!rn.IsZero());
1003    SingleEmissionCheckScope guard(this);
1004    bfi(rd, rn, lsb, width);
1005  }
1006  void Bfxil(const Register& rd,
1007             const Register& rn,
1008             unsigned lsb,
1009             unsigned width) {
1010    VIXL_ASSERT(allow_macro_instructions_);
1011    VIXL_ASSERT(!rd.IsZero());
1012    VIXL_ASSERT(!rn.IsZero());
1013    SingleEmissionCheckScope guard(this);
1014    bfxil(rd, rn, lsb, width);
1015  }
1016  void Bind(Label* label);
1017  // Bind a label to a specified offset from the start of the buffer.
1018  void BindToOffset(Label* label, ptrdiff_t offset);
1019  void Bl(Label* label) {
1020    VIXL_ASSERT(allow_macro_instructions_);
1021    SingleEmissionCheckScope guard(this);
1022    bl(label);
1023  }
1024  void Blr(const Register& xn) {
1025    VIXL_ASSERT(allow_macro_instructions_);
1026    VIXL_ASSERT(!xn.IsZero());
1027    SingleEmissionCheckScope guard(this);
1028    blr(xn);
1029  }
1030  void Br(const Register& xn) {
1031    VIXL_ASSERT(allow_macro_instructions_);
1032    VIXL_ASSERT(!xn.IsZero());
1033    SingleEmissionCheckScope guard(this);
1034    br(xn);
1035  }
1036  void Brk(int code = 0) {
1037    VIXL_ASSERT(allow_macro_instructions_);
1038    SingleEmissionCheckScope guard(this);
1039    brk(code);
1040  }
1041  void Cbnz(const Register& rt, Label* label);
1042  void Cbz(const Register& rt, Label* label);
1043  void Cinc(const Register& rd, const Register& rn, Condition cond) {
1044    VIXL_ASSERT(allow_macro_instructions_);
1045    VIXL_ASSERT(!rd.IsZero());
1046    VIXL_ASSERT(!rn.IsZero());
1047    SingleEmissionCheckScope guard(this);
1048    cinc(rd, rn, cond);
1049  }
1050  void Cinv(const Register& rd, const Register& rn, Condition cond) {
1051    VIXL_ASSERT(allow_macro_instructions_);
1052    VIXL_ASSERT(!rd.IsZero());
1053    VIXL_ASSERT(!rn.IsZero());
1054    SingleEmissionCheckScope guard(this);
1055    cinv(rd, rn, cond);
1056  }
1057  void Clrex() {
1058    VIXL_ASSERT(allow_macro_instructions_);
1059    SingleEmissionCheckScope guard(this);
1060    clrex();
1061  }
1062  void Cls(const Register& rd, const Register& rn) {
1063    VIXL_ASSERT(allow_macro_instructions_);
1064    VIXL_ASSERT(!rd.IsZero());
1065    VIXL_ASSERT(!rn.IsZero());
1066    SingleEmissionCheckScope guard(this);
1067    cls(rd, rn);
1068  }
1069  void Clz(const Register& rd, const Register& rn) {
1070    VIXL_ASSERT(allow_macro_instructions_);
1071    VIXL_ASSERT(!rd.IsZero());
1072    VIXL_ASSERT(!rn.IsZero());
1073    SingleEmissionCheckScope guard(this);
1074    clz(rd, rn);
1075  }
1076  void Cneg(const Register& rd, const Register& rn, Condition cond) {
1077    VIXL_ASSERT(allow_macro_instructions_);
1078    VIXL_ASSERT(!rd.IsZero());
1079    VIXL_ASSERT(!rn.IsZero());
1080    SingleEmissionCheckScope guard(this);
1081    cneg(rd, rn, cond);
1082  }
1083  void Cset(const Register& rd, Condition cond) {
1084    VIXL_ASSERT(allow_macro_instructions_);
1085    VIXL_ASSERT(!rd.IsZero());
1086    SingleEmissionCheckScope guard(this);
1087    cset(rd, cond);
1088  }
1089  void Csetm(const Register& rd, Condition cond) {
1090    VIXL_ASSERT(allow_macro_instructions_);
1091    VIXL_ASSERT(!rd.IsZero());
1092    SingleEmissionCheckScope guard(this);
1093    csetm(rd, cond);
1094  }
1095  void Csinc(const Register& rd,
1096             const Register& rn,
1097             const Register& rm,
1098             Condition cond) {
1099    VIXL_ASSERT(allow_macro_instructions_);
1100    VIXL_ASSERT(!rd.IsZero());
1101    VIXL_ASSERT(!rn.IsZero());
1102    VIXL_ASSERT(!rm.IsZero());
1103    VIXL_ASSERT((cond != al) && (cond != nv));
1104    SingleEmissionCheckScope guard(this);
1105    csinc(rd, rn, rm, cond);
1106  }
1107  void Csinv(const Register& rd,
1108             const Register& rn,
1109             const Register& rm,
1110             Condition cond) {
1111    VIXL_ASSERT(allow_macro_instructions_);
1112    VIXL_ASSERT(!rd.IsZero());
1113    VIXL_ASSERT(!rn.IsZero());
1114    VIXL_ASSERT(!rm.IsZero());
1115    VIXL_ASSERT((cond != al) && (cond != nv));
1116    SingleEmissionCheckScope guard(this);
1117    csinv(rd, rn, rm, cond);
1118  }
1119  void Csneg(const Register& rd,
1120             const Register& rn,
1121             const Register& rm,
1122             Condition cond) {
1123    VIXL_ASSERT(allow_macro_instructions_);
1124    VIXL_ASSERT(!rd.IsZero());
1125    VIXL_ASSERT(!rn.IsZero());
1126    VIXL_ASSERT(!rm.IsZero());
1127    VIXL_ASSERT((cond != al) && (cond != nv));
1128    SingleEmissionCheckScope guard(this);
1129    csneg(rd, rn, rm, cond);
1130  }
1131  void Dmb(BarrierDomain domain, BarrierType type) {
1132    VIXL_ASSERT(allow_macro_instructions_);
1133    SingleEmissionCheckScope guard(this);
1134    dmb(domain, type);
1135  }
1136  void Dsb(BarrierDomain domain, BarrierType type) {
1137    VIXL_ASSERT(allow_macro_instructions_);
1138    SingleEmissionCheckScope guard(this);
1139    dsb(domain, type);
1140  }
1141  void Extr(const Register& rd,
1142            const Register& rn,
1143            const Register& rm,
1144            unsigned lsb) {
1145    VIXL_ASSERT(allow_macro_instructions_);
1146    VIXL_ASSERT(!rd.IsZero());
1147    VIXL_ASSERT(!rn.IsZero());
1148    VIXL_ASSERT(!rm.IsZero());
1149    SingleEmissionCheckScope guard(this);
1150    extr(rd, rn, rm, lsb);
1151  }
1152  void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1153    VIXL_ASSERT(allow_macro_instructions_);
1154    SingleEmissionCheckScope guard(this);
1155    fadd(vd, vn, vm);
1156  }
1157  void Fccmp(const VRegister& vn,
1158             const VRegister& vm,
1159             StatusFlags nzcv,
1160             Condition cond,
1161             FPTrapFlags trap = DisableTrap) {
1162    VIXL_ASSERT(allow_macro_instructions_);
1163    VIXL_ASSERT((cond != al) && (cond != nv));
1164    SingleEmissionCheckScope guard(this);
1165    FPCCompareMacro(vn, vm, nzcv, cond, trap);
1166  }
1167  void Fccmpe(const VRegister& vn,
1168              const VRegister& vm,
1169              StatusFlags nzcv,
1170              Condition cond) {
1171    Fccmp(vn, vm, nzcv, cond, EnableTrap);
1172  }
1173  void Fcmp(const VRegister& vn,
1174            const VRegister& vm,
1175            FPTrapFlags trap = DisableTrap) {
1176    VIXL_ASSERT(allow_macro_instructions_);
1177    SingleEmissionCheckScope guard(this);
1178    FPCompareMacro(vn, vm, trap);
1179  }
1180  void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap);
1181  void Fcmpe(const VRegister& vn, double value);
1182  void Fcmpe(const VRegister& vn, const VRegister& vm) {
1183    Fcmp(vn, vm, EnableTrap);
1184  }
1185  void Fcsel(const VRegister& vd,
1186             const VRegister& vn,
1187             const VRegister& vm,
1188             Condition cond) {
1189    VIXL_ASSERT(allow_macro_instructions_);
1190    VIXL_ASSERT((cond != al) && (cond != nv));
1191    SingleEmissionCheckScope guard(this);
1192    fcsel(vd, vn, vm, cond);
1193  }
1194  void Fcvt(const VRegister& vd, const VRegister& vn) {
1195    VIXL_ASSERT(allow_macro_instructions_);
1196    SingleEmissionCheckScope guard(this);
1197    fcvt(vd, vn);
1198  }
1199  void Fcvtl(const VRegister& vd, const VRegister& vn) {
1200    VIXL_ASSERT(allow_macro_instructions_);
1201    SingleEmissionCheckScope guard(this);
1202    fcvtl(vd, vn);
1203  }
1204  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
1205    VIXL_ASSERT(allow_macro_instructions_);
1206    SingleEmissionCheckScope guard(this);
1207    fcvtl2(vd, vn);
1208  }
1209  void Fcvtn(const VRegister& vd, const VRegister& vn) {
1210    VIXL_ASSERT(allow_macro_instructions_);
1211    SingleEmissionCheckScope guard(this);
1212    fcvtn(vd, vn);
1213  }
1214  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
1215    VIXL_ASSERT(allow_macro_instructions_);
1216    SingleEmissionCheckScope guard(this);
1217    fcvtn2(vd, vn);
1218  }
1219  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
1220    VIXL_ASSERT(allow_macro_instructions_);
1221    SingleEmissionCheckScope guard(this);
1222    fcvtxn(vd, vn);
1223  }
1224  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
1225    VIXL_ASSERT(allow_macro_instructions_);
1226    SingleEmissionCheckScope guard(this);
1227    fcvtxn2(vd, vn);
1228  }
1229  void Fcvtas(const Register& rd, const VRegister& vn) {
1230    VIXL_ASSERT(allow_macro_instructions_);
1231    VIXL_ASSERT(!rd.IsZero());
1232    SingleEmissionCheckScope guard(this);
1233    fcvtas(rd, vn);
1234  }
1235  void Fcvtau(const Register& rd, const VRegister& vn) {
1236    VIXL_ASSERT(allow_macro_instructions_);
1237    VIXL_ASSERT(!rd.IsZero());
1238    SingleEmissionCheckScope guard(this);
1239    fcvtau(rd, vn);
1240  }
1241  void Fcvtms(const Register& rd, const VRegister& vn) {
1242    VIXL_ASSERT(allow_macro_instructions_);
1243    VIXL_ASSERT(!rd.IsZero());
1244    SingleEmissionCheckScope guard(this);
1245    fcvtms(rd, vn);
1246  }
1247  void Fcvtmu(const Register& rd, const VRegister& vn) {
1248    VIXL_ASSERT(allow_macro_instructions_);
1249    VIXL_ASSERT(!rd.IsZero());
1250    SingleEmissionCheckScope guard(this);
1251    fcvtmu(rd, vn);
1252  }
1253  void Fcvtns(const Register& rd, const VRegister& vn) {
1254    VIXL_ASSERT(allow_macro_instructions_);
1255    VIXL_ASSERT(!rd.IsZero());
1256    SingleEmissionCheckScope guard(this);
1257    fcvtns(rd, vn);
1258  }
1259  void Fcvtnu(const Register& rd, const VRegister& vn) {
1260    VIXL_ASSERT(allow_macro_instructions_);
1261    VIXL_ASSERT(!rd.IsZero());
1262    SingleEmissionCheckScope guard(this);
1263    fcvtnu(rd, vn);
1264  }
1265  void Fcvtps(const Register& rd, const VRegister& vn) {
1266    VIXL_ASSERT(allow_macro_instructions_);
1267    VIXL_ASSERT(!rd.IsZero());
1268    SingleEmissionCheckScope guard(this);
1269    fcvtps(rd, vn);
1270  }
1271  void Fcvtpu(const Register& rd, const VRegister& vn) {
1272    VIXL_ASSERT(allow_macro_instructions_);
1273    VIXL_ASSERT(!rd.IsZero());
1274    SingleEmissionCheckScope guard(this);
1275    fcvtpu(rd, vn);
1276  }
1277  void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
1278    VIXL_ASSERT(allow_macro_instructions_);
1279    VIXL_ASSERT(!rd.IsZero());
1280    SingleEmissionCheckScope guard(this);
1281    fcvtzs(rd, vn, fbits);
1282  }
1283  void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
1284    VIXL_ASSERT(allow_macro_instructions_);
1285    VIXL_ASSERT(!rd.IsZero());
1286    SingleEmissionCheckScope guard(this);
1287    fcvtzu(rd, vn, fbits);
1288  }
1289  void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1290    VIXL_ASSERT(allow_macro_instructions_);
1291    SingleEmissionCheckScope guard(this);
1292    fdiv(vd, vn, vm);
1293  }
1294  void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1295    VIXL_ASSERT(allow_macro_instructions_);
1296    SingleEmissionCheckScope guard(this);
1297    fmax(vd, vn, vm);
1298  }
1299  void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1300    VIXL_ASSERT(allow_macro_instructions_);
1301    SingleEmissionCheckScope guard(this);
1302    fmaxnm(vd, vn, vm);
1303  }
1304  void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1305    VIXL_ASSERT(allow_macro_instructions_);
1306    SingleEmissionCheckScope guard(this);
1307    fmin(vd, vn, vm);
1308  }
1309  void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1310    VIXL_ASSERT(allow_macro_instructions_);
1311    SingleEmissionCheckScope guard(this);
1312    fminnm(vd, vn, vm);
1313  }
1314  void Fmov(VRegister vd, VRegister vn) {
1315    VIXL_ASSERT(allow_macro_instructions_);
1316    SingleEmissionCheckScope guard(this);
1317    // The move can only be omitted when vd and vn are the same D register.
1318    // fmov(s0, s0) is not a no-op because it clears the top word of
1319    // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
1320    // the top of q0, but VRegister does not currently support Q registers.
1321    if (!vd.Is(vn) || !vd.Is64Bits()) {
1322      fmov(vd, vn);
1323    }
1324  }
1325  void Fmov(VRegister vd, Register rn) {
1326    VIXL_ASSERT(allow_macro_instructions_);
1327    VIXL_ASSERT(!rn.IsZero());
1328    SingleEmissionCheckScope guard(this);
1329    fmov(vd, rn);
1330  }
1331  void Fmov(const VRegister& vd, int index, const Register& rn) {
1332    VIXL_ASSERT(allow_macro_instructions_);
1333    SingleEmissionCheckScope guard(this);
1334    fmov(vd, index, rn);
1335  }
1336  void Fmov(const Register& rd, const VRegister& vn, int index) {
1337    VIXL_ASSERT(allow_macro_instructions_);
1338    SingleEmissionCheckScope guard(this);
1339    fmov(rd, vn, index);
1340  }
1341
1342  // Provide explicit double and float interfaces for FP immediate moves, rather
1343  // than relying on implicit C++ casts. This allows signalling NaNs to be
1344  // preserved when the immediate matches the format of vd. Most systems convert
1345  // signalling NaNs to quiet NaNs when converting between float and double.
1346  void Fmov(VRegister vd, double imm);
1347  void Fmov(VRegister vd, float imm);
1348  // Provide a template to allow other types to be converted automatically.
1349  template <typename T>
1350  void Fmov(VRegister vd, T imm) {
1351    VIXL_ASSERT(allow_macro_instructions_);
1352    Fmov(vd, static_cast<double>(imm));
1353  }
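  // For illustration, the distinct overloads above let the literal type select
  // the encoded format; `masm` is a placeholder:
  //
  //   masm.Fmov(d0, 1.0);   // Double immediate into a D register.
  //   masm.Fmov(s1, 1.0f);  // Float immediate into an S register; the float
  //                         // overload avoids a float->double->float round
  //                         // trip that could quieten a signalling NaN.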
1354  void Fmov(Register rd, VRegister vn) {
1355    VIXL_ASSERT(allow_macro_instructions_);
1356    VIXL_ASSERT(!rd.IsZero());
1357    SingleEmissionCheckScope guard(this);
1358    fmov(rd, vn);
1359  }
1360  void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1361    VIXL_ASSERT(allow_macro_instructions_);
1362    SingleEmissionCheckScope guard(this);
1363    fmul(vd, vn, vm);
1364  }
1365  void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1366    VIXL_ASSERT(allow_macro_instructions_);
1367    SingleEmissionCheckScope guard(this);
1368    fnmul(vd, vn, vm);
1369  }
1370  void Fmadd(const VRegister& vd,
1371             const VRegister& vn,
1372             const VRegister& vm,
1373             const VRegister& va) {
1374    VIXL_ASSERT(allow_macro_instructions_);
1375    SingleEmissionCheckScope guard(this);
1376    fmadd(vd, vn, vm, va);
1377  }
1378  void Fmsub(const VRegister& vd,
1379             const VRegister& vn,
1380             const VRegister& vm,
1381             const VRegister& va) {
1382    VIXL_ASSERT(allow_macro_instructions_);
1383    SingleEmissionCheckScope guard(this);
1384    fmsub(vd, vn, vm, va);
1385  }
1386  void Fnmadd(const VRegister& vd,
1387              const VRegister& vn,
1388              const VRegister& vm,
1389              const VRegister& va) {
1390    VIXL_ASSERT(allow_macro_instructions_);
1391    SingleEmissionCheckScope guard(this);
1392    fnmadd(vd, vn, vm, va);
1393  }
1394  void Fnmsub(const VRegister& vd,
1395              const VRegister& vn,
1396              const VRegister& vm,
1397              const VRegister& va) {
1398    VIXL_ASSERT(allow_macro_instructions_);
1399    SingleEmissionCheckScope guard(this);
1400    fnmsub(vd, vn, vm, va);
1401  }
1402  void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1403    VIXL_ASSERT(allow_macro_instructions_);
1404    SingleEmissionCheckScope guard(this);
1405    fsub(vd, vn, vm);
1406  }
1407  void Hint(SystemHint code) {
1408    VIXL_ASSERT(allow_macro_instructions_);
1409    SingleEmissionCheckScope guard(this);
1410    hint(code);
1411  }
1412  void Hlt(int code) {
1413    VIXL_ASSERT(allow_macro_instructions_);
1414    SingleEmissionCheckScope guard(this);
1415    hlt(code);
1416  }
1417  void Isb() {
1418    VIXL_ASSERT(allow_macro_instructions_);
1419    SingleEmissionCheckScope guard(this);
1420    isb();
1421  }
1422  void Ldar(const Register& rt, const MemOperand& src) {
1423    VIXL_ASSERT(allow_macro_instructions_);
1424    SingleEmissionCheckScope guard(this);
1425    ldar(rt, src);
1426  }
1427  void Ldarb(const Register& rt, const MemOperand& src) {
1428    VIXL_ASSERT(allow_macro_instructions_);
1429    SingleEmissionCheckScope guard(this);
1430    ldarb(rt, src);
1431  }
1432  void Ldarh(const Register& rt, const MemOperand& src) {
1433    VIXL_ASSERT(allow_macro_instructions_);
1434    SingleEmissionCheckScope guard(this);
1435    ldarh(rt, src);
1436  }
1437  void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
1438    VIXL_ASSERT(allow_macro_instructions_);
1439    VIXL_ASSERT(!rt.Aliases(rt2));
1440    SingleEmissionCheckScope guard(this);
1441    ldaxp(rt, rt2, src);
1442  }
1443  void Ldaxr(const Register& rt, const MemOperand& src) {
1444    VIXL_ASSERT(allow_macro_instructions_);
1445    SingleEmissionCheckScope guard(this);
1446    ldaxr(rt, src);
1447  }
1448  void Ldaxrb(const Register& rt, const MemOperand& src) {
1449    VIXL_ASSERT(allow_macro_instructions_);
1450    SingleEmissionCheckScope guard(this);
1451    ldaxrb(rt, src);
1452  }
1453  void Ldaxrh(const Register& rt, const MemOperand& src) {
1454    VIXL_ASSERT(allow_macro_instructions_);
1455    SingleEmissionCheckScope guard(this);
1456    ldaxrh(rt, src);
1457  }
1458  void Ldnp(const CPURegister& rt,
1459            const CPURegister& rt2,
1460            const MemOperand& src) {
1461    VIXL_ASSERT(allow_macro_instructions_);
1462    SingleEmissionCheckScope guard(this);
1463    ldnp(rt, rt2, src);
1464  }
1465  // Provide both double and float interfaces for FP immediate loads, rather
1466  // than relying on implicit C++ casts. This allows signalling NaNs to be
1467  // preserved when the immediate matches the format of vt. Most systems convert
1468  // signalling NaNs to quiet NaNs when converting between float and double.
1469  void Ldr(const VRegister& vt, double imm) {
1470    VIXL_ASSERT(allow_macro_instructions_);
1471    SingleEmissionCheckScope guard(this);
1472    RawLiteral* literal;
1473    if (vt.IsD()) {
1474      literal = new Literal<double>(imm,
1475                                    &literal_pool_,
1476                                    RawLiteral::kDeletedOnPlacementByPool);
1477    } else {
1478      literal = new Literal<float>(static_cast<float>(imm),
1479                                   &literal_pool_,
1480                                   RawLiteral::kDeletedOnPlacementByPool);
1481    }
1482    ldr(vt, literal);
1483  }
1484  void Ldr(const VRegister& vt, float imm) {
1485    VIXL_ASSERT(allow_macro_instructions_);
1486    SingleEmissionCheckScope guard(this);
1487    RawLiteral* literal;
1488    if (vt.IsS()) {
1489      literal = new Literal<float>(imm,
1490                                   &literal_pool_,
1491                                   RawLiteral::kDeletedOnPlacementByPool);
1492    } else {
1493      literal = new Literal<double>(static_cast<double>(imm),
1494                                    &literal_pool_,
1495                                    RawLiteral::kDeletedOnPlacementByPool);
1496    }
1497    ldr(vt, literal);
1498  }
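  // Usage sketch for the FP-immediate Ldr overloads above (illustrative only;
  // `masm` is assumed to be a MacroAssembler and `__` shorthand for `masm.`):
  //   __ Ldr(d0, 1.0);    // Double literal, placed in the literal pool.
  //   __ Ldr(s1, 2.5f);   // Float literal; no double<->float conversion.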
1499  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
1500    VIXL_ASSERT(allow_macro_instructions_);
1501    VIXL_ASSERT(vt.IsQ());
1502    SingleEmissionCheckScope guard(this);
1503    ldr(vt,
1504        new Literal<uint64_t>(high64,
1505                              low64,
1506                              &literal_pool_,
1507                              RawLiteral::kDeletedOnPlacementByPool));
1508  }
1509  void Ldr(const Register& rt, uint64_t imm) {
1510    VIXL_ASSERT(allow_macro_instructions_);
1511    VIXL_ASSERT(!rt.IsZero());
1512    SingleEmissionCheckScope guard(this);
1513    RawLiteral* literal;
1514    if (rt.Is64Bits()) {
1515      literal = new Literal<uint64_t>(imm,
1516                                      &literal_pool_,
1517                                      RawLiteral::kDeletedOnPlacementByPool);
1518    } else {
1519      VIXL_ASSERT(rt.Is32Bits());
1520      VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
1521      literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
1522                                      &literal_pool_,
1523                                      RawLiteral::kDeletedOnPlacementByPool);
1524    }
1525    ldr(rt, literal);
1526  }
1527  void Ldrsw(const Register& rt, uint32_t imm) {
1528    VIXL_ASSERT(allow_macro_instructions_);
1529    VIXL_ASSERT(!rt.IsZero());
1530    SingleEmissionCheckScope guard(this);
1531    ldrsw(rt,
1532          new Literal<uint32_t>(imm,
1533                                &literal_pool_,
1534                                RawLiteral::kDeletedOnPlacementByPool));
1535  }
1536  void Ldr(const CPURegister& rt, RawLiteral* literal) {
1537    VIXL_ASSERT(allow_macro_instructions_);
1538    SingleEmissionCheckScope guard(this);
1539    ldr(rt, literal);
1540  }
1541  void Ldrsw(const Register& rt, RawLiteral* literal) {
1542    VIXL_ASSERT(allow_macro_instructions_);
1543    SingleEmissionCheckScope guard(this);
1544    ldrsw(rt, literal);
1545  }
1546  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
1547    VIXL_ASSERT(allow_macro_instructions_);
1548    VIXL_ASSERT(!rt.Aliases(rt2));
1549    SingleEmissionCheckScope guard(this);
1550    ldxp(rt, rt2, src);
1551  }
1552  void Ldxr(const Register& rt, const MemOperand& src) {
1553    VIXL_ASSERT(allow_macro_instructions_);
1554    SingleEmissionCheckScope guard(this);
1555    ldxr(rt, src);
1556  }
1557  void Ldxrb(const Register& rt, const MemOperand& src) {
1558    VIXL_ASSERT(allow_macro_instructions_);
1559    SingleEmissionCheckScope guard(this);
1560    ldxrb(rt, src);
1561  }
1562  void Ldxrh(const Register& rt, const MemOperand& src) {
1563    VIXL_ASSERT(allow_macro_instructions_);
1564    SingleEmissionCheckScope guard(this);
1565    ldxrh(rt, src);
1566  }
1567  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
1568    VIXL_ASSERT(allow_macro_instructions_);
1569    VIXL_ASSERT(!rd.IsZero());
1570    VIXL_ASSERT(!rn.IsZero());
1571    SingleEmissionCheckScope guard(this);
1572    lsl(rd, rn, shift);
1573  }
1574  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
1575    VIXL_ASSERT(allow_macro_instructions_);
1576    VIXL_ASSERT(!rd.IsZero());
1577    VIXL_ASSERT(!rn.IsZero());
1578    VIXL_ASSERT(!rm.IsZero());
1579    SingleEmissionCheckScope guard(this);
1580    lslv(rd, rn, rm);
1581  }
1582  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
1583    VIXL_ASSERT(allow_macro_instructions_);
1584    VIXL_ASSERT(!rd.IsZero());
1585    VIXL_ASSERT(!rn.IsZero());
1586    SingleEmissionCheckScope guard(this);
1587    lsr(rd, rn, shift);
1588  }
1589  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
1590    VIXL_ASSERT(allow_macro_instructions_);
1591    VIXL_ASSERT(!rd.IsZero());
1592    VIXL_ASSERT(!rn.IsZero());
1593    VIXL_ASSERT(!rm.IsZero());
1594    SingleEmissionCheckScope guard(this);
1595    lsrv(rd, rn, rm);
1596  }
1597  void Madd(const Register& rd,
1598            const Register& rn,
1599            const Register& rm,
1600            const Register& ra) {
1601    VIXL_ASSERT(allow_macro_instructions_);
1602    VIXL_ASSERT(!rd.IsZero());
1603    VIXL_ASSERT(!rn.IsZero());
1604    VIXL_ASSERT(!rm.IsZero());
1605    VIXL_ASSERT(!ra.IsZero());
1606    SingleEmissionCheckScope guard(this);
1607    madd(rd, rn, rm, ra);
1608  }
1609  void Mneg(const Register& rd, const Register& rn, const Register& rm) {
1610    VIXL_ASSERT(allow_macro_instructions_);
1611    VIXL_ASSERT(!rd.IsZero());
1612    VIXL_ASSERT(!rn.IsZero());
1613    VIXL_ASSERT(!rm.IsZero());
1614    SingleEmissionCheckScope guard(this);
1615    mneg(rd, rn, rm);
1616  }
1617  void Mov(const Register& rd, const Register& rn) {
1618    VIXL_ASSERT(allow_macro_instructions_);
1619    SingleEmissionCheckScope guard(this);
1620    mov(rd, rn);
1621  }
1622  void Movk(const Register& rd, uint64_t imm, int shift = -1) {
1623    VIXL_ASSERT(allow_macro_instructions_);
1624    VIXL_ASSERT(!rd.IsZero());
1625    SingleEmissionCheckScope guard(this);
1626    movk(rd, imm, shift);
1627  }
1628  void Mrs(const Register& rt, SystemRegister sysreg) {
1629    VIXL_ASSERT(allow_macro_instructions_);
1630    VIXL_ASSERT(!rt.IsZero());
1631    SingleEmissionCheckScope guard(this);
1632    mrs(rt, sysreg);
1633  }
1634  void Msr(SystemRegister sysreg, const Register& rt) {
1635    VIXL_ASSERT(allow_macro_instructions_);
1636    VIXL_ASSERT(!rt.IsZero());
1637    SingleEmissionCheckScope guard(this);
1638    msr(sysreg, rt);
1639  }
1640  void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
1641    VIXL_ASSERT(allow_macro_instructions_);
1642    SingleEmissionCheckScope guard(this);
1643    sys(op1, crn, crm, op2, rt);
1644  }
1645  void Dc(DataCacheOp op, const Register& rt) {
1646    VIXL_ASSERT(allow_macro_instructions_);
1647    SingleEmissionCheckScope guard(this);
1648    dc(op, rt);
1649  }
1650  void Ic(InstructionCacheOp op, const Register& rt) {
1651    VIXL_ASSERT(allow_macro_instructions_);
1652    SingleEmissionCheckScope guard(this);
1653    ic(op, rt);
1654  }
1655  void Msub(const Register& rd,
1656            const Register& rn,
1657            const Register& rm,
1658            const Register& ra) {
1659    VIXL_ASSERT(allow_macro_instructions_);
1660    VIXL_ASSERT(!rd.IsZero());
1661    VIXL_ASSERT(!rn.IsZero());
1662    VIXL_ASSERT(!rm.IsZero());
1663    VIXL_ASSERT(!ra.IsZero());
1664    SingleEmissionCheckScope guard(this);
1665    msub(rd, rn, rm, ra);
1666  }
1667  void Mul(const Register& rd, const Register& rn, const Register& rm) {
1668    VIXL_ASSERT(allow_macro_instructions_);
1669    VIXL_ASSERT(!rd.IsZero());
1670    VIXL_ASSERT(!rn.IsZero());
1671    VIXL_ASSERT(!rm.IsZero());
1672    SingleEmissionCheckScope guard(this);
1673    mul(rd, rn, rm);
1674  }
1675  void Nop() {
1676    VIXL_ASSERT(allow_macro_instructions_);
1677    SingleEmissionCheckScope guard(this);
1678    nop();
1679  }
1680  void Rbit(const Register& rd, const Register& rn) {
1681    VIXL_ASSERT(allow_macro_instructions_);
1682    VIXL_ASSERT(!rd.IsZero());
1683    VIXL_ASSERT(!rn.IsZero());
1684    SingleEmissionCheckScope guard(this);
1685    rbit(rd, rn);
1686  }
1687  void Ret(const Register& xn = lr) {
1688    VIXL_ASSERT(allow_macro_instructions_);
1689    VIXL_ASSERT(!xn.IsZero());
1690    SingleEmissionCheckScope guard(this);
1691    ret(xn);
1692  }
1693  void Rev(const Register& rd, const Register& rn) {
1694    VIXL_ASSERT(allow_macro_instructions_);
1695    VIXL_ASSERT(!rd.IsZero());
1696    VIXL_ASSERT(!rn.IsZero());
1697    SingleEmissionCheckScope guard(this);
1698    rev(rd, rn);
1699  }
1700  void Rev16(const Register& rd, const Register& rn) {
1701    VIXL_ASSERT(allow_macro_instructions_);
1702    VIXL_ASSERT(!rd.IsZero());
1703    VIXL_ASSERT(!rn.IsZero());
1704    SingleEmissionCheckScope guard(this);
1705    rev16(rd, rn);
1706  }
1707  void Rev32(const Register& rd, const Register& rn) {
1708    VIXL_ASSERT(allow_macro_instructions_);
1709    VIXL_ASSERT(!rd.IsZero());
1710    VIXL_ASSERT(!rn.IsZero());
1711    SingleEmissionCheckScope guard(this);
1712    rev32(rd, rn);
1713  }
1714  void Ror(const Register& rd, const Register& rs, unsigned shift) {
1715    VIXL_ASSERT(allow_macro_instructions_);
1716    VIXL_ASSERT(!rd.IsZero());
1717    VIXL_ASSERT(!rs.IsZero());
1718    SingleEmissionCheckScope guard(this);
1719    ror(rd, rs, shift);
1720  }
1721  void Ror(const Register& rd, const Register& rn, const Register& rm) {
1722    VIXL_ASSERT(allow_macro_instructions_);
1723    VIXL_ASSERT(!rd.IsZero());
1724    VIXL_ASSERT(!rn.IsZero());
1725    VIXL_ASSERT(!rm.IsZero());
1726    SingleEmissionCheckScope guard(this);
1727    rorv(rd, rn, rm);
1728  }
1729  void Sbfiz(const Register& rd,
1730             const Register& rn,
1731             unsigned lsb,
1732             unsigned width) {
1733    VIXL_ASSERT(allow_macro_instructions_);
1734    VIXL_ASSERT(!rd.IsZero());
1735    VIXL_ASSERT(!rn.IsZero());
1736    SingleEmissionCheckScope guard(this);
1737    sbfiz(rd, rn, lsb, width);
1738  }
1739  void Sbfm(const Register& rd,
1740            const Register& rn,
1741            unsigned immr,
1742            unsigned imms) {
1743    VIXL_ASSERT(allow_macro_instructions_);
1744    VIXL_ASSERT(!rd.IsZero());
1745    VIXL_ASSERT(!rn.IsZero());
1746    SingleEmissionCheckScope guard(this);
1747    sbfm(rd, rn, immr, imms);
1748  }
1749  void Sbfx(const Register& rd,
1750            const Register& rn,
1751            unsigned lsb,
1752            unsigned width) {
1753    VIXL_ASSERT(allow_macro_instructions_);
1754    VIXL_ASSERT(!rd.IsZero());
1755    VIXL_ASSERT(!rn.IsZero());
1756    SingleEmissionCheckScope guard(this);
1757    sbfx(rd, rn, lsb, width);
1758  }
1759  void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
1760    VIXL_ASSERT(allow_macro_instructions_);
1761    VIXL_ASSERT(!rn.IsZero());
1762    SingleEmissionCheckScope guard(this);
1763    scvtf(vd, rn, fbits);
1764  }
1765  void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
1766    VIXL_ASSERT(allow_macro_instructions_);
1767    VIXL_ASSERT(!rd.IsZero());
1768    VIXL_ASSERT(!rn.IsZero());
1769    VIXL_ASSERT(!rm.IsZero());
1770    SingleEmissionCheckScope guard(this);
1771    sdiv(rd, rn, rm);
1772  }
1773  void Smaddl(const Register& rd,
1774              const Register& rn,
1775              const Register& rm,
1776              const Register& ra) {
1777    VIXL_ASSERT(allow_macro_instructions_);
1778    VIXL_ASSERT(!rd.IsZero());
1779    VIXL_ASSERT(!rn.IsZero());
1780    VIXL_ASSERT(!rm.IsZero());
1781    VIXL_ASSERT(!ra.IsZero());
1782    SingleEmissionCheckScope guard(this);
1783    smaddl(rd, rn, rm, ra);
1784  }
1785  void Smsubl(const Register& rd,
1786              const Register& rn,
1787              const Register& rm,
1788              const Register& ra) {
1789    VIXL_ASSERT(allow_macro_instructions_);
1790    VIXL_ASSERT(!rd.IsZero());
1791    VIXL_ASSERT(!rn.IsZero());
1792    VIXL_ASSERT(!rm.IsZero());
1793    VIXL_ASSERT(!ra.IsZero());
1794    SingleEmissionCheckScope guard(this);
1795    smsubl(rd, rn, rm, ra);
1796  }
1797  void Smull(const Register& rd, const Register& rn, const Register& rm) {
1798    VIXL_ASSERT(allow_macro_instructions_);
1799    VIXL_ASSERT(!rd.IsZero());
1800    VIXL_ASSERT(!rn.IsZero());
1801    VIXL_ASSERT(!rm.IsZero());
1802    SingleEmissionCheckScope guard(this);
1803    smull(rd, rn, rm);
1804  }
1805  void Smulh(const Register& xd, const Register& xn, const Register& xm) {
1806    VIXL_ASSERT(allow_macro_instructions_);
1807    VIXL_ASSERT(!xd.IsZero());
1808    VIXL_ASSERT(!xn.IsZero());
1809    VIXL_ASSERT(!xm.IsZero());
1810    SingleEmissionCheckScope guard(this);
1811    smulh(xd, xn, xm);
1812  }
1813  void Stlr(const Register& rt, const MemOperand& dst) {
1814    VIXL_ASSERT(allow_macro_instructions_);
1815    SingleEmissionCheckScope guard(this);
1816    stlr(rt, dst);
1817  }
1818  void Stlrb(const Register& rt, const MemOperand& dst) {
1819    VIXL_ASSERT(allow_macro_instructions_);
1820    SingleEmissionCheckScope guard(this);
1821    stlrb(rt, dst);
1822  }
1823  void Stlrh(const Register& rt, const MemOperand& dst) {
1824    VIXL_ASSERT(allow_macro_instructions_);
1825    SingleEmissionCheckScope guard(this);
1826    stlrh(rt, dst);
1827  }
1828  void Stlxp(const Register& rs,
1829             const Register& rt,
1830             const Register& rt2,
1831             const MemOperand& dst) {
1832    VIXL_ASSERT(allow_macro_instructions_);
1833    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1834    VIXL_ASSERT(!rs.Aliases(rt));
1835    VIXL_ASSERT(!rs.Aliases(rt2));
1836    SingleEmissionCheckScope guard(this);
1837    stlxp(rs, rt, rt2, dst);
1838  }
1839  void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
1840    VIXL_ASSERT(allow_macro_instructions_);
1841    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1842    VIXL_ASSERT(!rs.Aliases(rt));
1843    SingleEmissionCheckScope guard(this);
1844    stlxr(rs, rt, dst);
1845  }
1846  void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
1847    VIXL_ASSERT(allow_macro_instructions_);
1848    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1849    VIXL_ASSERT(!rs.Aliases(rt));
1850    SingleEmissionCheckScope guard(this);
1851    stlxrb(rs, rt, dst);
1852  }
1853  void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
1854    VIXL_ASSERT(allow_macro_instructions_);
1855    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1856    VIXL_ASSERT(!rs.Aliases(rt));
1857    SingleEmissionCheckScope guard(this);
1858    stlxrh(rs, rt, dst);
1859  }
1860  void Stnp(const CPURegister& rt,
1861            const CPURegister& rt2,
1862            const MemOperand& dst) {
1863    VIXL_ASSERT(allow_macro_instructions_);
1864    SingleEmissionCheckScope guard(this);
1865    stnp(rt, rt2, dst);
1866  }
1867  void Stxp(const Register& rs,
1868            const Register& rt,
1869            const Register& rt2,
1870            const MemOperand& dst) {
1871    VIXL_ASSERT(allow_macro_instructions_);
1872    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1873    VIXL_ASSERT(!rs.Aliases(rt));
1874    VIXL_ASSERT(!rs.Aliases(rt2));
1875    SingleEmissionCheckScope guard(this);
1876    stxp(rs, rt, rt2, dst);
1877  }
1878  void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
1879    VIXL_ASSERT(allow_macro_instructions_);
1880    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1881    VIXL_ASSERT(!rs.Aliases(rt));
1882    SingleEmissionCheckScope guard(this);
1883    stxr(rs, rt, dst);
1884  }
1885  void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
1886    VIXL_ASSERT(allow_macro_instructions_);
1887    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1888    VIXL_ASSERT(!rs.Aliases(rt));
1889    SingleEmissionCheckScope guard(this);
1890    stxrb(rs, rt, dst);
1891  }
1892  void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
1893    VIXL_ASSERT(allow_macro_instructions_);
1894    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1895    VIXL_ASSERT(!rs.Aliases(rt));
1896    SingleEmissionCheckScope guard(this);
1897    stxrh(rs, rt, dst);
1898  }
1899  void Svc(int code) {
1900    VIXL_ASSERT(allow_macro_instructions_);
1901    SingleEmissionCheckScope guard(this);
1902    svc(code);
1903  }
1904  void Sxtb(const Register& rd, const Register& rn) {
1905    VIXL_ASSERT(allow_macro_instructions_);
1906    VIXL_ASSERT(!rd.IsZero());
1907    VIXL_ASSERT(!rn.IsZero());
1908    SingleEmissionCheckScope guard(this);
1909    sxtb(rd, rn);
1910  }
1911  void Sxth(const Register& rd, const Register& rn) {
1912    VIXL_ASSERT(allow_macro_instructions_);
1913    VIXL_ASSERT(!rd.IsZero());
1914    VIXL_ASSERT(!rn.IsZero());
1915    SingleEmissionCheckScope guard(this);
1916    sxth(rd, rn);
1917  }
1918  void Sxtw(const Register& rd, const Register& rn) {
1919    VIXL_ASSERT(allow_macro_instructions_);
1920    VIXL_ASSERT(!rd.IsZero());
1921    VIXL_ASSERT(!rn.IsZero());
1922    SingleEmissionCheckScope guard(this);
1923    sxtw(rd, rn);
1924  }
1925  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1926    VIXL_ASSERT(allow_macro_instructions_);
1927    SingleEmissionCheckScope guard(this);
1928    tbl(vd, vn, vm);
1929  }
1930  void Tbl(const VRegister& vd,
1931           const VRegister& vn,
1932           const VRegister& vn2,
1933           const VRegister& vm) {
1934    VIXL_ASSERT(allow_macro_instructions_);
1935    SingleEmissionCheckScope guard(this);
1936    tbl(vd, vn, vn2, vm);
1937  }
1938  void Tbl(const VRegister& vd,
1939           const VRegister& vn,
1940           const VRegister& vn2,
1941           const VRegister& vn3,
1942           const VRegister& vm) {
1943    VIXL_ASSERT(allow_macro_instructions_);
1944    SingleEmissionCheckScope guard(this);
1945    tbl(vd, vn, vn2, vn3, vm);
1946  }
1947  void Tbl(const VRegister& vd,
1948           const VRegister& vn,
1949           const VRegister& vn2,
1950           const VRegister& vn3,
1951           const VRegister& vn4,
1952           const VRegister& vm) {
1953    VIXL_ASSERT(allow_macro_instructions_);
1954    SingleEmissionCheckScope guard(this);
1955    tbl(vd, vn, vn2, vn3, vn4, vm);
1956  }
1957  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1958    VIXL_ASSERT(allow_macro_instructions_);
1959    SingleEmissionCheckScope guard(this);
1960    tbx(vd, vn, vm);
1961  }
1962  void Tbx(const VRegister& vd,
1963           const VRegister& vn,
1964           const VRegister& vn2,
1965           const VRegister& vm) {
1966    VIXL_ASSERT(allow_macro_instructions_);
1967    SingleEmissionCheckScope guard(this);
1968    tbx(vd, vn, vn2, vm);
1969  }
1970  void Tbx(const VRegister& vd,
1971           const VRegister& vn,
1972           const VRegister& vn2,
1973           const VRegister& vn3,
1974           const VRegister& vm) {
1975    VIXL_ASSERT(allow_macro_instructions_);
1976    SingleEmissionCheckScope guard(this);
1977    tbx(vd, vn, vn2, vn3, vm);
1978  }
1979  void Tbx(const VRegister& vd,
1980           const VRegister& vn,
1981           const VRegister& vn2,
1982           const VRegister& vn3,
1983           const VRegister& vn4,
1984           const VRegister& vm) {
1985    VIXL_ASSERT(allow_macro_instructions_);
1986    SingleEmissionCheckScope guard(this);
1987    tbx(vd, vn, vn2, vn3, vn4, vm);
1988  }
1989  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
1990  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
1991  void Ubfiz(const Register& rd,
1992             const Register& rn,
1993             unsigned lsb,
1994             unsigned width) {
1995    VIXL_ASSERT(allow_macro_instructions_);
1996    VIXL_ASSERT(!rd.IsZero());
1997    VIXL_ASSERT(!rn.IsZero());
1998    SingleEmissionCheckScope guard(this);
1999    ubfiz(rd, rn, lsb, width);
2000  }
2001  void Ubfm(const Register& rd,
2002            const Register& rn,
2003            unsigned immr,
2004            unsigned imms) {
2005    VIXL_ASSERT(allow_macro_instructions_);
2006    VIXL_ASSERT(!rd.IsZero());
2007    VIXL_ASSERT(!rn.IsZero());
2008    SingleEmissionCheckScope guard(this);
2009    ubfm(rd, rn, immr, imms);
2010  }
2011  void Ubfx(const Register& rd,
2012            const Register& rn,
2013            unsigned lsb,
2014            unsigned width) {
2015    VIXL_ASSERT(allow_macro_instructions_);
2016    VIXL_ASSERT(!rd.IsZero());
2017    VIXL_ASSERT(!rn.IsZero());
2018    SingleEmissionCheckScope guard(this);
2019    ubfx(rd, rn, lsb, width);
2020  }
2021  void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
2022    VIXL_ASSERT(allow_macro_instructions_);
2023    VIXL_ASSERT(!rn.IsZero());
2024    SingleEmissionCheckScope guard(this);
2025    ucvtf(vd, rn, fbits);
2026  }
2027  void Udiv(const Register& rd, const Register& rn, const Register& rm) {
2028    VIXL_ASSERT(allow_macro_instructions_);
2029    VIXL_ASSERT(!rd.IsZero());
2030    VIXL_ASSERT(!rn.IsZero());
2031    VIXL_ASSERT(!rm.IsZero());
2032    SingleEmissionCheckScope guard(this);
2033    udiv(rd, rn, rm);
2034  }
2035  void Umaddl(const Register& rd,
2036              const Register& rn,
2037              const Register& rm,
2038              const Register& ra) {
2039    VIXL_ASSERT(allow_macro_instructions_);
2040    VIXL_ASSERT(!rd.IsZero());
2041    VIXL_ASSERT(!rn.IsZero());
2042    VIXL_ASSERT(!rm.IsZero());
2043    VIXL_ASSERT(!ra.IsZero());
2044    SingleEmissionCheckScope guard(this);
2045    umaddl(rd, rn, rm, ra);
2046  }
2047  void Umull(const Register& rd, const Register& rn, const Register& rm) {
2048    VIXL_ASSERT(allow_macro_instructions_);
2049    VIXL_ASSERT(!rd.IsZero());
2050    VIXL_ASSERT(!rn.IsZero());
2051    VIXL_ASSERT(!rm.IsZero());
2052    SingleEmissionCheckScope guard(this);
2053    umull(rd, rn, rm);
2054  }
2055  void Umulh(const Register& xd, const Register& xn, const Register& xm) {
2056    VIXL_ASSERT(allow_macro_instructions_);
2057    VIXL_ASSERT(!xd.IsZero());
2058    VIXL_ASSERT(!xn.IsZero());
2059    VIXL_ASSERT(!xm.IsZero());
2060    SingleEmissionCheckScope guard(this);
2061    umulh(xd, xn, xm);
2062  }
2063  void Umsubl(const Register& rd,
2064              const Register& rn,
2065              const Register& rm,
2066              const Register& ra) {
2067    VIXL_ASSERT(allow_macro_instructions_);
2068    VIXL_ASSERT(!rd.IsZero());
2069    VIXL_ASSERT(!rn.IsZero());
2070    VIXL_ASSERT(!rm.IsZero());
2071    VIXL_ASSERT(!ra.IsZero());
2072    SingleEmissionCheckScope guard(this);
2073    umsubl(rd, rn, rm, ra);
2074  }
2075  void Unreachable() {
2076    VIXL_ASSERT(allow_macro_instructions_);
2077    SingleEmissionCheckScope guard(this);
2078    if (generate_simulator_code_) {
2079      hlt(kUnreachableOpcode);
2080    } else {
2081      // Branch to 0 to generate a segfault.
2082      // lr - kInstructionSize is the address of the offending instruction.
2083      blr(xzr);
2084    }
2085  }
2086  void Uxtb(const Register& rd, const Register& rn) {
2087    VIXL_ASSERT(allow_macro_instructions_);
2088    VIXL_ASSERT(!rd.IsZero());
2089    VIXL_ASSERT(!rn.IsZero());
2090    SingleEmissionCheckScope guard(this);
2091    uxtb(rd, rn);
2092  }
2093  void Uxth(const Register& rd, const Register& rn) {
2094    VIXL_ASSERT(allow_macro_instructions_);
2095    VIXL_ASSERT(!rd.IsZero());
2096    VIXL_ASSERT(!rn.IsZero());
2097    SingleEmissionCheckScope guard(this);
2098    uxth(rd, rn);
2099  }
2100  void Uxtw(const Register& rd, const Register& rn) {
2101    VIXL_ASSERT(allow_macro_instructions_);
2102    VIXL_ASSERT(!rd.IsZero());
2103    VIXL_ASSERT(!rn.IsZero());
2104    SingleEmissionCheckScope guard(this);
2105    uxtw(rd, rn);
2106  }
2107
2108// NEON 3 vector register instructions.
2109#define NEON_3VREG_MACRO_LIST(V) \
2110  V(add, Add)                    \
2111  V(addhn, Addhn)                \
2112  V(addhn2, Addhn2)              \
2113  V(addp, Addp)                  \
2114  V(and_, And)                   \
2115  V(bic, Bic)                    \
2116  V(bif, Bif)                    \
2117  V(bit, Bit)                    \
2118  V(bsl, Bsl)                    \
2119  V(cmeq, Cmeq)                  \
2120  V(cmge, Cmge)                  \
2121  V(cmgt, Cmgt)                  \
2122  V(cmhi, Cmhi)                  \
2123  V(cmhs, Cmhs)                  \
2124  V(cmtst, Cmtst)                \
2125  V(eor, Eor)                    \
2126  V(fabd, Fabd)                  \
2127  V(facge, Facge)                \
2128  V(facgt, Facgt)                \
2129  V(faddp, Faddp)                \
2130  V(fcmeq, Fcmeq)                \
2131  V(fcmge, Fcmge)                \
2132  V(fcmgt, Fcmgt)                \
2133  V(fmaxnmp, Fmaxnmp)            \
2134  V(fmaxp, Fmaxp)                \
2135  V(fminnmp, Fminnmp)            \
2136  V(fminp, Fminp)                \
2137  V(fmla, Fmla)                  \
2138  V(fmls, Fmls)                  \
2139  V(fmulx, Fmulx)                \
2140  V(frecps, Frecps)              \
2141  V(frsqrts, Frsqrts)            \
2142  V(mla, Mla)                    \
2143  V(mls, Mls)                    \
2144  V(mul, Mul)                    \
2145  V(orn, Orn)                    \
2146  V(orr, Orr)                    \
2147  V(pmul, Pmul)                  \
2148  V(pmull, Pmull)                \
2149  V(pmull2, Pmull2)              \
2150  V(raddhn, Raddhn)              \
2151  V(raddhn2, Raddhn2)            \
2152  V(rsubhn, Rsubhn)              \
2153  V(rsubhn2, Rsubhn2)            \
2154  V(saba, Saba)                  \
2155  V(sabal, Sabal)                \
2156  V(sabal2, Sabal2)              \
2157  V(sabd, Sabd)                  \
2158  V(sabdl, Sabdl)                \
2159  V(sabdl2, Sabdl2)              \
2160  V(saddl, Saddl)                \
2161  V(saddl2, Saddl2)              \
2162  V(saddw, Saddw)                \
2163  V(saddw2, Saddw2)              \
2164  V(shadd, Shadd)                \
2165  V(shsub, Shsub)                \
2166  V(smax, Smax)                  \
2167  V(smaxp, Smaxp)                \
2168  V(smin, Smin)                  \
2169  V(sminp, Sminp)                \
2170  V(smlal, Smlal)                \
2171  V(smlal2, Smlal2)              \
2172  V(smlsl, Smlsl)                \
2173  V(smlsl2, Smlsl2)              \
2174  V(smull, Smull)                \
2175  V(smull2, Smull2)              \
2176  V(sqadd, Sqadd)                \
2177  V(sqdmlal, Sqdmlal)            \
2178  V(sqdmlal2, Sqdmlal2)          \
2179  V(sqdmlsl, Sqdmlsl)            \
2180  V(sqdmlsl2, Sqdmlsl2)          \
2181  V(sqdmulh, Sqdmulh)            \
2182  V(sqdmull, Sqdmull)            \
2183  V(sqdmull2, Sqdmull2)          \
2184  V(sqrdmulh, Sqrdmulh)          \
2185  V(sqrshl, Sqrshl)              \
2186  V(sqshl, Sqshl)                \
2187  V(sqsub, Sqsub)                \
2188  V(srhadd, Srhadd)              \
2189  V(srshl, Srshl)                \
2190  V(sshl, Sshl)                  \
2191  V(ssubl, Ssubl)                \
2192  V(ssubl2, Ssubl2)              \
2193  V(ssubw, Ssubw)                \
2194  V(ssubw2, Ssubw2)              \
2195  V(sub, Sub)                    \
2196  V(subhn, Subhn)                \
2197  V(subhn2, Subhn2)              \
2198  V(trn1, Trn1)                  \
2199  V(trn2, Trn2)                  \
2200  V(uaba, Uaba)                  \
2201  V(uabal, Uabal)                \
2202  V(uabal2, Uabal2)              \
2203  V(uabd, Uabd)                  \
2204  V(uabdl, Uabdl)                \
2205  V(uabdl2, Uabdl2)              \
2206  V(uaddl, Uaddl)                \
2207  V(uaddl2, Uaddl2)              \
2208  V(uaddw, Uaddw)                \
2209  V(uaddw2, Uaddw2)              \
2210  V(uhadd, Uhadd)                \
2211  V(uhsub, Uhsub)                \
2212  V(umax, Umax)                  \
2213  V(umaxp, Umaxp)                \
2214  V(umin, Umin)                  \
2215  V(uminp, Uminp)                \
2216  V(umlal, Umlal)                \
2217  V(umlal2, Umlal2)              \
2218  V(umlsl, Umlsl)                \
2219  V(umlsl2, Umlsl2)              \
2220  V(umull, Umull)                \
2221  V(umull2, Umull2)              \
2222  V(uqadd, Uqadd)                \
2223  V(uqrshl, Uqrshl)              \
2224  V(uqshl, Uqshl)                \
2225  V(uqsub, Uqsub)                \
2226  V(urhadd, Urhadd)              \
2227  V(urshl, Urshl)                \
2228  V(ushl, Ushl)                  \
2229  V(usubl, Usubl)                \
2230  V(usubl2, Usubl2)              \
2231  V(usubw, Usubw)                \
2232  V(usubw2, Usubw2)              \
2233  V(uzp1, Uzp1)                  \
2234  V(uzp2, Uzp2)                  \
2235  V(zip1, Zip1)                  \
2236  V(zip2, Zip2)
2237
2238#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
2239  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
2240    VIXL_ASSERT(allow_macro_instructions_);                                  \
2241    SingleEmissionCheckScope guard(this);                                    \
2242    ASM(vd, vn, vm);                                                         \
2243  }
2244  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2245#undef DEFINE_MACRO_ASM_FUNC
2246
2247// NEON 2 vector register instructions.
2248#define NEON_2VREG_MACRO_LIST(V) \
2249  V(abs, Abs)                    \
2250  V(addp, Addp)                  \
2251  V(addv, Addv)                  \
2252  V(cls, Cls)                    \
2253  V(clz, Clz)                    \
2254  V(cnt, Cnt)                    \
2255  V(fabs, Fabs)                  \
2256  V(faddp, Faddp)                \
2257  V(fcvtas, Fcvtas)              \
2258  V(fcvtau, Fcvtau)              \
2259  V(fcvtms, Fcvtms)              \
2260  V(fcvtmu, Fcvtmu)              \
2261  V(fcvtns, Fcvtns)              \
2262  V(fcvtnu, Fcvtnu)              \
2263  V(fcvtps, Fcvtps)              \
2264  V(fcvtpu, Fcvtpu)              \
2265  V(fmaxnmp, Fmaxnmp)            \
2266  V(fmaxnmv, Fmaxnmv)            \
2267  V(fmaxp, Fmaxp)                \
2268  V(fmaxv, Fmaxv)                \
2269  V(fminnmp, Fminnmp)            \
2270  V(fminnmv, Fminnmv)            \
2271  V(fminp, Fminp)                \
2272  V(fminv, Fminv)                \
2273  V(fneg, Fneg)                  \
2274  V(frecpe, Frecpe)              \
2275  V(frecpx, Frecpx)              \
2276  V(frinta, Frinta)              \
2277  V(frinti, Frinti)              \
2278  V(frintm, Frintm)              \
2279  V(frintn, Frintn)              \
2280  V(frintp, Frintp)              \
2281  V(frintx, Frintx)              \
2282  V(frintz, Frintz)              \
2283  V(frsqrte, Frsqrte)            \
2284  V(fsqrt, Fsqrt)                \
2285  V(mov, Mov)                    \
2286  V(mvn, Mvn)                    \
2287  V(neg, Neg)                    \
2288  V(not_, Not)                   \
2289  V(rbit, Rbit)                  \
2290  V(rev16, Rev16)                \
2291  V(rev32, Rev32)                \
2292  V(rev64, Rev64)                \
2293  V(sadalp, Sadalp)              \
2294  V(saddlp, Saddlp)              \
2295  V(saddlv, Saddlv)              \
2296  V(smaxv, Smaxv)                \
2297  V(sminv, Sminv)                \
2298  V(sqabs, Sqabs)                \
2299  V(sqneg, Sqneg)                \
2300  V(sqxtn, Sqxtn)                \
2301  V(sqxtn2, Sqxtn2)              \
2302  V(sqxtun, Sqxtun)              \
2303  V(sqxtun2, Sqxtun2)            \
2304  V(suqadd, Suqadd)              \
2305  V(sxtl, Sxtl)                  \
2306  V(sxtl2, Sxtl2)                \
2307  V(uadalp, Uadalp)              \
2308  V(uaddlp, Uaddlp)              \
2309  V(uaddlv, Uaddlv)              \
2310  V(umaxv, Umaxv)                \
2311  V(uminv, Uminv)                \
2312  V(uqxtn, Uqxtn)                \
2313  V(uqxtn2, Uqxtn2)              \
2314  V(urecpe, Urecpe)              \
2315  V(ursqrte, Ursqrte)            \
2316  V(usqadd, Usqadd)              \
2317  V(uxtl, Uxtl)                  \
2318  V(uxtl2, Uxtl2)                \
2319  V(xtn, Xtn)                    \
2320  V(xtn2, Xtn2)
2321
2322#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
2323  void MASM(const VRegister& vd, const VRegister& vn) { \
2324    VIXL_ASSERT(allow_macro_instructions_);             \
2325    SingleEmissionCheckScope guard(this);               \
2326    ASM(vd, vn);                                        \
2327  }
2328  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2329#undef DEFINE_MACRO_ASM_FUNC
2330
2331// NEON 2 vector register with immediate instructions.
2332#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
2333  V(fcmeq, Fcmeq)                      \
2334  V(fcmge, Fcmge)                      \
2335  V(fcmgt, Fcmgt)                      \
2336  V(fcmle, Fcmle)                      \
2337  V(fcmlt, Fcmlt)
2338
2339#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
2340  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
2341    VIXL_ASSERT(allow_macro_instructions_);                         \
2342    SingleEmissionCheckScope guard(this);                           \
2343    ASM(vd, vn, imm);                                               \
2344  }
2345  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2346#undef DEFINE_MACRO_ASM_FUNC
2347
2348// NEON by element instructions.
2349#define NEON_BYELEMENT_MACRO_LIST(V) \
2350  V(fmul, Fmul)                      \
2351  V(fmla, Fmla)                      \
2352  V(fmls, Fmls)                      \
2353  V(fmulx, Fmulx)                    \
2354  V(mul, Mul)                        \
2355  V(mla, Mla)                        \
2356  V(mls, Mls)                        \
2357  V(sqdmulh, Sqdmulh)                \
2358  V(sqrdmulh, Sqrdmulh)              \
2359  V(sqdmull, Sqdmull)                \
2360  V(sqdmull2, Sqdmull2)              \
2361  V(sqdmlal, Sqdmlal)                \
2362  V(sqdmlal2, Sqdmlal2)              \
2363  V(sqdmlsl, Sqdmlsl)                \
2364  V(sqdmlsl2, Sqdmlsl2)              \
2365  V(smull, Smull)                    \
2366  V(smull2, Smull2)                  \
2367  V(smlal, Smlal)                    \
2368  V(smlal2, Smlal2)                  \
2369  V(smlsl, Smlsl)                    \
2370  V(smlsl2, Smlsl2)                  \
2371  V(umull, Umull)                    \
2372  V(umull2, Umull2)                  \
2373  V(umlal, Umlal)                    \
2374  V(umlal2, Umlal2)                  \
2375  V(umlsl, Umlsl)                    \
2376  V(umlsl2, Umlsl2)
2377
2378#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
2379  void MASM(const VRegister& vd,            \
2380            const VRegister& vn,            \
2381            const VRegister& vm,            \
2382            int vm_index) {                 \
2383    VIXL_ASSERT(allow_macro_instructions_); \
2384    SingleEmissionCheckScope guard(this);   \
2385    ASM(vd, vn, vm, vm_index);              \
2386  }
2387  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2388#undef DEFINE_MACRO_ASM_FUNC
2389
2390#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
2391  V(rshrn, Rshrn)                      \
2392  V(rshrn2, Rshrn2)                    \
2393  V(shl, Shl)                          \
2394  V(shll, Shll)                        \
2395  V(shll2, Shll2)                      \
2396  V(shrn, Shrn)                        \
2397  V(shrn2, Shrn2)                      \
2398  V(sli, Sli)                          \
2399  V(sqrshrn, Sqrshrn)                  \
2400  V(sqrshrn2, Sqrshrn2)                \
2401  V(sqrshrun, Sqrshrun)                \
2402  V(sqrshrun2, Sqrshrun2)              \
2403  V(sqshl, Sqshl)                      \
2404  V(sqshlu, Sqshlu)                    \
2405  V(sqshrn, Sqshrn)                    \
2406  V(sqshrn2, Sqshrn2)                  \
2407  V(sqshrun, Sqshrun)                  \
2408  V(sqshrun2, Sqshrun2)                \
2409  V(sri, Sri)                          \
2410  V(srshr, Srshr)                      \
2411  V(srsra, Srsra)                      \
2412  V(sshll, Sshll)                      \
2413  V(sshll2, Sshll2)                    \
2414  V(sshr, Sshr)                        \
2415  V(ssra, Ssra)                        \
2416  V(uqrshrn, Uqrshrn)                  \
2417  V(uqrshrn2, Uqrshrn2)                \
2418  V(uqshl, Uqshl)                      \
2419  V(uqshrn, Uqshrn)                    \
2420  V(uqshrn2, Uqshrn2)                  \
2421  V(urshr, Urshr)                      \
2422  V(ursra, Ursra)                      \
2423  V(ushll, Ushll)                      \
2424  V(ushll2, Ushll2)                    \
2425  V(ushr, Ushr)                        \
2426  V(usra, Usra)
2427
2428#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
2429  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
2430    VIXL_ASSERT(allow_macro_instructions_);                        \
2431    SingleEmissionCheckScope guard(this);                          \
2432    ASM(vd, vn, shift);                                            \
2433  }
2434  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2435#undef DEFINE_MACRO_ASM_FUNC
2436
2437  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
2438    VIXL_ASSERT(allow_macro_instructions_);
2439    SingleEmissionCheckScope guard(this);
2440    bic(vd, imm8, left_shift);
2441  }
2442  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
2443    VIXL_ASSERT(allow_macro_instructions_);
2444    SingleEmissionCheckScope guard(this);
2445    cmeq(vd, vn, imm);
2446  }
2447  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
2448    VIXL_ASSERT(allow_macro_instructions_);
2449    SingleEmissionCheckScope guard(this);
2450    cmge(vd, vn, imm);
2451  }
2452  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
2453    VIXL_ASSERT(allow_macro_instructions_);
2454    SingleEmissionCheckScope guard(this);
2455    cmgt(vd, vn, imm);
2456  }
2457  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
2458    VIXL_ASSERT(allow_macro_instructions_);
2459    SingleEmissionCheckScope guard(this);
2460    cmle(vd, vn, imm);
2461  }
2462  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
2463    VIXL_ASSERT(allow_macro_instructions_);
2464    SingleEmissionCheckScope guard(this);
2465    cmlt(vd, vn, imm);
2466  }
2467  void Dup(const VRegister& vd, const VRegister& vn, int index) {
2468    VIXL_ASSERT(allow_macro_instructions_);
2469    SingleEmissionCheckScope guard(this);
2470    dup(vd, vn, index);
2471  }
2472  void Dup(const VRegister& vd, const Register& rn) {
2473    VIXL_ASSERT(allow_macro_instructions_);
2474    SingleEmissionCheckScope guard(this);
2475    dup(vd, rn);
2476  }
2477  void Ext(const VRegister& vd,
2478           const VRegister& vn,
2479           const VRegister& vm,
2480           int index) {
2481    VIXL_ASSERT(allow_macro_instructions_);
2482    SingleEmissionCheckScope guard(this);
2483    ext(vd, vn, vm, index);
2484  }
2485  void Ins(const VRegister& vd,
2486           int vd_index,
2487           const VRegister& vn,
2488           int vn_index) {
2489    VIXL_ASSERT(allow_macro_instructions_);
2490    SingleEmissionCheckScope guard(this);
2491    ins(vd, vd_index, vn, vn_index);
2492  }
2493  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
2494    VIXL_ASSERT(allow_macro_instructions_);
2495    SingleEmissionCheckScope guard(this);
2496    ins(vd, vd_index, rn);
2497  }
2498  void Ld1(const VRegister& vt, const MemOperand& src) {
2499    VIXL_ASSERT(allow_macro_instructions_);
2500    SingleEmissionCheckScope guard(this);
2501    ld1(vt, src);
2502  }
2503  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
2504    VIXL_ASSERT(allow_macro_instructions_);
2505    SingleEmissionCheckScope guard(this);
2506    ld1(vt, vt2, src);
2507  }
2508  void Ld1(const VRegister& vt,
2509           const VRegister& vt2,
2510           const VRegister& vt3,
2511           const MemOperand& src) {
2512    VIXL_ASSERT(allow_macro_instructions_);
2513    SingleEmissionCheckScope guard(this);
2514    ld1(vt, vt2, vt3, src);
2515  }
2516  void Ld1(const VRegister& vt,
2517           const VRegister& vt2,
2518           const VRegister& vt3,
2519           const VRegister& vt4,
2520           const MemOperand& src) {
2521    VIXL_ASSERT(allow_macro_instructions_);
2522    SingleEmissionCheckScope guard(this);
2523    ld1(vt, vt2, vt3, vt4, src);
2524  }
2525  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
2526    VIXL_ASSERT(allow_macro_instructions_);
2527    SingleEmissionCheckScope guard(this);
2528    ld1(vt, lane, src);
2529  }
2530  void Ld1r(const VRegister& vt, const MemOperand& src) {
2531    VIXL_ASSERT(allow_macro_instructions_);
2532    SingleEmissionCheckScope guard(this);
2533    ld1r(vt, src);
2534  }
2535  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
2536    VIXL_ASSERT(allow_macro_instructions_);
2537    SingleEmissionCheckScope guard(this);
2538    ld2(vt, vt2, src);
2539  }
2540  void Ld2(const VRegister& vt,
2541           const VRegister& vt2,
2542           int lane,
2543           const MemOperand& src) {
2544    VIXL_ASSERT(allow_macro_instructions_);
2545    SingleEmissionCheckScope guard(this);
2546    ld2(vt, vt2, lane, src);
2547  }
2548  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
2549    VIXL_ASSERT(allow_macro_instructions_);
2550    SingleEmissionCheckScope guard(this);
2551    ld2r(vt, vt2, src);
2552  }
2553  void Ld3(const VRegister& vt,
2554           const VRegister& vt2,
2555           const VRegister& vt3,
2556           const MemOperand& src) {
2557    VIXL_ASSERT(allow_macro_instructions_);
2558    SingleEmissionCheckScope guard(this);
2559    ld3(vt, vt2, vt3, src);
2560  }
2561  void Ld3(const VRegister& vt,
2562           const VRegister& vt2,
2563           const VRegister& vt3,
2564           int lane,
2565           const MemOperand& src) {
2566    VIXL_ASSERT(allow_macro_instructions_);
2567    SingleEmissionCheckScope guard(this);
2568    ld3(vt, vt2, vt3, lane, src);
2569  }
2570  void Ld3r(const VRegister& vt,
2571            const VRegister& vt2,
2572            const VRegister& vt3,
2573            const MemOperand& src) {
2574    VIXL_ASSERT(allow_macro_instructions_);
2575    SingleEmissionCheckScope guard(this);
2576    ld3r(vt, vt2, vt3, src);
2577  }
2578  void Ld4(const VRegister& vt,
2579           const VRegister& vt2,
2580           const VRegister& vt3,
2581           const VRegister& vt4,
2582           const MemOperand& src) {
2583    VIXL_ASSERT(allow_macro_instructions_);
2584    SingleEmissionCheckScope guard(this);
2585    ld4(vt, vt2, vt3, vt4, src);
2586  }
2587  void Ld4(const VRegister& vt,
2588           const VRegister& vt2,
2589           const VRegister& vt3,
2590           const VRegister& vt4,
2591           int lane,
2592           const MemOperand& src) {
2593    VIXL_ASSERT(allow_macro_instructions_);
2594    SingleEmissionCheckScope guard(this);
2595    ld4(vt, vt2, vt3, vt4, lane, src);
2596  }
2597  void Ld4r(const VRegister& vt,
2598            const VRegister& vt2,
2599            const VRegister& vt3,
2600            const VRegister& vt4,
2601            const MemOperand& src) {
2602    VIXL_ASSERT(allow_macro_instructions_);
2603    SingleEmissionCheckScope guard(this);
2604    ld4r(vt, vt2, vt3, vt4, src);
2605  }
2606  void Mov(const VRegister& vd,
2607           int vd_index,
2608           const VRegister& vn,
2609           int vn_index) {
2610    VIXL_ASSERT(allow_macro_instructions_);
2611    SingleEmissionCheckScope guard(this);
2612    mov(vd, vd_index, vn, vn_index);
2613  }
2614  void Mov(const VRegister& vd, const VRegister& vn, int index) {
2615    VIXL_ASSERT(allow_macro_instructions_);
2616    SingleEmissionCheckScope guard(this);
2617    mov(vd, vn, index);
2618  }
2619  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
2620    VIXL_ASSERT(allow_macro_instructions_);
2621    SingleEmissionCheckScope guard(this);
2622    mov(vd, vd_index, rn);
2623  }
2624  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
2625    VIXL_ASSERT(allow_macro_instructions_);
2626    SingleEmissionCheckScope guard(this);
2627    mov(rd, vn, vn_index);
2628  }
2629  void Movi(const VRegister& vd,
2630            uint64_t imm,
2631            Shift shift = LSL,
2632            int shift_amount = 0);
2633  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
2634  void Mvni(const VRegister& vd,
2635            const int imm8,
2636            Shift shift = LSL,
2637            const int shift_amount = 0) {
2638    VIXL_ASSERT(allow_macro_instructions_);
2639    SingleEmissionCheckScope guard(this);
2640    mvni(vd, imm8, shift, shift_amount);
2641  }
2642  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
2643    VIXL_ASSERT(allow_macro_instructions_);
2644    SingleEmissionCheckScope guard(this);
2645    orr(vd, imm8, left_shift);
2646  }
2647  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
2648    VIXL_ASSERT(allow_macro_instructions_);
2649    SingleEmissionCheckScope guard(this);
2650    scvtf(vd, vn, fbits);
2651  }
2652  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
2653    VIXL_ASSERT(allow_macro_instructions_);
2654    SingleEmissionCheckScope guard(this);
2655    ucvtf(vd, vn, fbits);
2656  }
2657  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
2658    VIXL_ASSERT(allow_macro_instructions_);
2659    SingleEmissionCheckScope guard(this);
2660    fcvtzs(vd, vn, fbits);
2661  }
2662  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
2663    VIXL_ASSERT(allow_macro_instructions_);
2664    SingleEmissionCheckScope guard(this);
2665    fcvtzu(vd, vn, fbits);
2666  }
2667  void St1(const VRegister& vt, const MemOperand& dst) {
2668    VIXL_ASSERT(allow_macro_instructions_);
2669    SingleEmissionCheckScope guard(this);
2670    st1(vt, dst);
2671  }
2672  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
2673    VIXL_ASSERT(allow_macro_instructions_);
2674    SingleEmissionCheckScope guard(this);
2675    st1(vt, vt2, dst);
2676  }
2677  void St1(const VRegister& vt,
2678           const VRegister& vt2,
2679           const VRegister& vt3,
2680           const MemOperand& dst) {
2681    VIXL_ASSERT(allow_macro_instructions_);
2682    SingleEmissionCheckScope guard(this);
2683    st1(vt, vt2, vt3, dst);
2684  }
2685  void St1(const VRegister& vt,
2686           const VRegister& vt2,
2687           const VRegister& vt3,
2688           const VRegister& vt4,
2689           const MemOperand& dst) {
2690    VIXL_ASSERT(allow_macro_instructions_);
2691    SingleEmissionCheckScope guard(this);
2692    st1(vt, vt2, vt3, vt4, dst);
2693  }
2694  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
2695    VIXL_ASSERT(allow_macro_instructions_);
2696    SingleEmissionCheckScope guard(this);
2697    st1(vt, lane, dst);
2698  }
2699  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
2700    VIXL_ASSERT(allow_macro_instructions_);
2701    SingleEmissionCheckScope guard(this);
2702    st2(vt, vt2, dst);
2703  }
2704  void St3(const VRegister& vt,
2705           const VRegister& vt2,
2706           const VRegister& vt3,
2707           const MemOperand& dst) {
2708    VIXL_ASSERT(allow_macro_instructions_);
2709    SingleEmissionCheckScope guard(this);
2710    st3(vt, vt2, vt3, dst);
2711  }
2712  void St4(const VRegister& vt,
2713           const VRegister& vt2,
2714           const VRegister& vt3,
2715           const VRegister& vt4,
2716           const MemOperand& dst) {
2717    VIXL_ASSERT(allow_macro_instructions_);
2718    SingleEmissionCheckScope guard(this);
2719    st4(vt, vt2, vt3, vt4, dst);
2720  }
2721  void St2(const VRegister& vt,
2722           const VRegister& vt2,
2723           int lane,
2724           const MemOperand& dst) {
2725    VIXL_ASSERT(allow_macro_instructions_);
2726    SingleEmissionCheckScope guard(this);
2727    st2(vt, vt2, lane, dst);
2728  }
2729  void St3(const VRegister& vt,
2730           const VRegister& vt2,
2731           const VRegister& vt3,
2732           int lane,
2733           const MemOperand& dst) {
2734    VIXL_ASSERT(allow_macro_instructions_);
2735    SingleEmissionCheckScope guard(this);
2736    st3(vt, vt2, vt3, lane, dst);
2737  }
2738  void St4(const VRegister& vt,
2739           const VRegister& vt2,
2740           const VRegister& vt3,
2741           const VRegister& vt4,
2742           int lane,
2743           const MemOperand& dst) {
2744    VIXL_ASSERT(allow_macro_instructions_);
2745    SingleEmissionCheckScope guard(this);
2746    st4(vt, vt2, vt3, vt4, lane, dst);
2747  }
2748  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
2749    VIXL_ASSERT(allow_macro_instructions_);
2750    SingleEmissionCheckScope guard(this);
2751    smov(rd, vn, vn_index);
2752  }
2753  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
2754    VIXL_ASSERT(allow_macro_instructions_);
2755    SingleEmissionCheckScope guard(this);
2756    umov(rd, vn, vn_index);
2757  }
2758  void Crc32b(const Register& rd, const Register& rn, const Register& rm) {
2759    VIXL_ASSERT(allow_macro_instructions_);
2760    SingleEmissionCheckScope guard(this);
2761    crc32b(rd, rn, rm);
2762  }
2763  void Crc32h(const Register& rd, const Register& rn, const Register& rm) {
2764    VIXL_ASSERT(allow_macro_instructions_);
2765    SingleEmissionCheckScope guard(this);
2766    crc32h(rd, rn, rm);
2767  }
2768  void Crc32w(const Register& rd, const Register& rn, const Register& rm) {
2769    VIXL_ASSERT(allow_macro_instructions_);
2770    SingleEmissionCheckScope guard(this);
2771    crc32w(rd, rn, rm);
2772  }
2773  void Crc32x(const Register& rd, const Register& rn, const Register& rm) {
2774    VIXL_ASSERT(allow_macro_instructions_);
2775    SingleEmissionCheckScope guard(this);
2776    crc32x(rd, rn, rm);
2777  }
2778  void Crc32cb(const Register& rd, const Register& rn, const Register& rm) {
2779    VIXL_ASSERT(allow_macro_instructions_);
2780    SingleEmissionCheckScope guard(this);
2781    crc32cb(rd, rn, rm);
2782  }
2783  void Crc32ch(const Register& rd, const Register& rn, const Register& rm) {
2784    VIXL_ASSERT(allow_macro_instructions_);
2785    SingleEmissionCheckScope guard(this);
2786    crc32ch(rd, rn, rm);
2787  }
2788  void Crc32cw(const Register& rd, const Register& rn, const Register& rm) {
2789    VIXL_ASSERT(allow_macro_instructions_);
2790    SingleEmissionCheckScope guard(this);
2791    crc32cw(rd, rn, rm);
2792  }
2793  void Crc32cx(const Register& rd, const Register& rn, const Register& rm) {
2794    VIXL_ASSERT(allow_macro_instructions_);
2795    SingleEmissionCheckScope guard(this);
2796    crc32cx(rd, rn, rm);
2797  }
2798
2799  template <typename T>
2800  Literal<T>* CreateLiteralDestroyedWithPool(T value) {
2801    return new Literal<T>(value,
2802                          &literal_pool_,
2803                          RawLiteral::kDeletedOnPoolDestruction);
2804  }
2805
2806  template <typename T>
2807  Literal<T>* CreateLiteralDestroyedWithPool(T high64, T low64) {
2808    return new Literal<T>(high64,
2809                          low64,
2810                          &literal_pool_,
2811                          RawLiteral::kDeletedOnPoolDestruction);
2812  }
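  // For example (an illustrative sketch; the literal is owned by the pool and
  // is only deleted when the pool is destroyed):
  //   Literal<uint64_t>* lit =
  //       masm.CreateLiteralDestroyedWithPool<uint64_t>(0x1234567890abcdef);
  //   __ Ldr(x0, lit);  // Uses the Ldr(CPURegister, RawLiteral*) overload.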
2813
2814  // Push the system stack pointer (sp) down ahead of the current stack
2815  // pointer (according to StackPointer()), so that the memory about to be
2816  // used stays above sp. This must be called _before_ accessing the memory.
2817  //
2818  // This is necessary when pushing or otherwise adding things to the stack, to
2819  // satisfy the AAPCS64 constraint that the memory below the system stack
2820  // pointer is not accessed.
2821  //
2822  // This method asserts that StackPointer() is not sp, since the call does
2823  // not make sense in that context.
2824  //
2825  // TODO: This method can only accept values of 'space' that can be encoded in
2826  // one instruction. Refer to the implementation for details.
2827  void BumpSystemStackPointer(const Operand& space);
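  // For example (an illustrative sketch, assuming the current stack pointer
  // has been redirected away from sp, e.g. with SetStackPointer(x28)):
  //   __ BumpSystemStackPointer(Operand(32));
  //   // It is now safe to access up to 32 bytes below StackPointer().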
2828
2829#ifdef VIXL_DEBUG
2830  void SetAllowMacroInstructions(bool value) {
2831    allow_macro_instructions_ = value;
2832  }
2833
2834  bool AllowMacroInstructions() const { return allow_macro_instructions_; }
2835#endif
2836
2837  void SetGenerateSimulatorCode(bool value) {
2838    generate_simulator_code_ = value;
2839  }
2840
2841  bool GenerateSimulatorCode() const { return generate_simulator_code_; }
2842
2843  void BlockLiteralPool() { literal_pool_.Block(); }
2844  void ReleaseLiteralPool() { literal_pool_.Release(); }
2845  bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
2846  void BlockVeneerPool() { veneer_pool_.Block(); }
2847  void ReleaseVeneerPool() { veneer_pool_.Release(); }
2848  bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }
2849
2850  void BlockPools() {
2851    BlockLiteralPool();
2852    BlockVeneerPool();
2853  }
2854
2855  void ReleasePools() {
2856    ReleaseLiteralPool();
2857    ReleaseVeneerPool();
2858  }
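  // For example, to keep a sequence free of pool emission (a sketch; Block and
  // Release calls must be balanced):
  //   masm.BlockPools();
  //   // ... emit instructions that must stay contiguous ...
  //   masm.ReleasePools();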
2859
2860  size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
2861  VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) {
2862    return GetLiteralPoolSize();
2863  }
2864
2865  size_t GetLiteralPoolMaxSize() const { return literal_pool_.GetMaxSize(); }
2866  VIXL_DEPRECATED("GetLiteralPoolMaxSize", size_t LiteralPoolMaxSize() const) {
2867    return GetLiteralPoolMaxSize();
2868  }
2869
2870  size_t GetVeneerPoolMaxSize() const { return veneer_pool_.GetMaxSize(); }
2871  VIXL_DEPRECATED("GetVeneerPoolMaxSize", size_t VeneerPoolMaxSize() const) {
2872    return GetVeneerPoolMaxSize();
2873  }
2874
2875  // The number of unresolved branches that may require a veneer.
2876  int GetNumberOfPotentialVeneers() const {
2877    return veneer_pool_.GetNumberOfPotentialVeneers();
2878  }
2879  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
2880                  int NumberOfPotentialVeneers() const) {
2881    return GetNumberOfPotentialVeneers();
2882  }
2883
2884  ptrdiff_t GetNextCheckPoint() const {
2885    ptrdiff_t next_checkpoint_for_pools =
2886        std::min(literal_pool_.GetCheckpoint(), veneer_pool_.GetCheckpoint());
2887    return std::min(next_checkpoint_for_pools, GetBufferEndOffset());
2888  }
2889  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
2890    return GetNextCheckPoint();
2891  }
2892
2893  void EmitLiteralPool(LiteralPool::EmitOption option) {
2894    if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option);
2895
2896    checkpoint_ = GetNextCheckPoint();
2897    recommended_checkpoint_ = literal_pool_.GetNextRecommendedCheckpoint();
2898  }
2899
2900  void CheckEmitFor(size_t amount);
2901  void EnsureEmitFor(size_t amount) {
2902    ptrdiff_t offset = amount;
2903    ptrdiff_t max_pools_size =
2904        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
2905    ptrdiff_t cursor = GetCursorOffset();
2906    if ((cursor >= recommended_checkpoint_) ||
2907        ((cursor + offset + max_pools_size) >= checkpoint_)) {
2908      CheckEmitFor(amount);
2909    }
2910  }
2911
2912  void CheckEmitPoolsFor(size_t amount);
2913  void EnsureEmitPoolsFor(size_t amount) {
2914    ptrdiff_t offset = amount;
2915    ptrdiff_t max_pools_size =
2916        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
2917    ptrdiff_t cursor = GetCursorOffset();
2918    if ((cursor >= recommended_checkpoint_) ||
2919        ((cursor + offset + max_pools_size) >= checkpoint_)) {
2920      CheckEmitPoolsFor(amount);
2921    }
2922  }
2923
2924  // Set the current stack pointer, but don't generate any code.
2925  void SetStackPointer(const Register& stack_pointer) {
2926    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(stack_pointer));
2927    sp_ = stack_pointer;
2928  }
2929
2930  // Return the current stack pointer, as set by SetStackPointer.
2931  const Register& StackPointer() const { return sp_; }
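  // For example (a sketch): after masm.SetStackPointer(x28), stack-related
  // macro instructions use x28 as the stack pointer and masm.StackPointer()
  // returns x28. Neither call generates any code.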
2932
2933  CPURegList* GetScratchRegisterList() { return &tmp_list_; }
2934  VIXL_DEPRECATED("GetScratchRegisterList", CPURegList* TmpList()) {
2935    return GetScratchRegisterList();
2936  }
2937
2938  CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; }
2939  VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) {
2940    return GetScratchFPRegisterList();
2941  }
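  // These lists are normally managed through UseScratchRegisterScope rather
  // than directly. For example (a sketch):
  //   UseScratchRegisterScope temps(&masm);
  //   Register scratch = temps.AcquireX();  // Drawn from the scratch list.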
2942
2943  // Like printf, but print at run-time from generated code.
2944  //
2945  // The caller must ensure that arguments for floating-point placeholders
2946  // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that
2947  // arguments for integer placeholders are Registers.
2948  //
2949  // At the moment it is only possible to print the value of sp if it is the
2950  // current stack pointer. Otherwise, the MacroAssembler will automatically
2951  // update sp on every push (using BumpSystemStackPointer), so determining its
2952  // value is difficult.
2953  //
2954  // Format placeholders that refer to more than one argument, or to a specific
2955  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
2956  //
2957  // This function automatically preserves caller-saved registers so that
2958  // calling code can use Printf at any point without having to worry about
2959  // corruption. The preservation mechanism generates a lot of code. If this is
2960  // a problem, preserve the important registers manually and then call
2961  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
2962  // implicitly preserved.
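  //
  // A minimal usage sketch (illustrative):
  //   __ Mov(x0, 42);
  //   __ Printf("x0: %" PRId64 "\n", x0);
  //   __ Fmov(d1, 3.14);
  //   __ Printf("d1: %g\n", d1);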
2963  void Printf(const char* format,
2964              CPURegister arg0 = NoCPUReg,
2965              CPURegister arg1 = NoCPUReg,
2966              CPURegister arg2 = NoCPUReg,
2967              CPURegister arg3 = NoCPUReg);
2968
2969  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
2970  //
2971  // The return code from the system printf call will be returned in x0.
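  //
  // A minimal usage sketch (illustrative; the caller is responsible for saving
  // any caller-saved registers, including lr, that it still needs):
  //   __ PrintfNoPreserve("x2: %" PRId64 "\n", x2);
  //   // The return value of the underlying printf call is now in x0.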
2972  void PrintfNoPreserve(const char* format,
2973                        const CPURegister& arg0 = NoCPUReg,
2974                        const CPURegister& arg1 = NoCPUReg,
2975                        const CPURegister& arg2 = NoCPUReg,
2976                        const CPURegister& arg3 = NoCPUReg);
2977
2978  // Trace control when running the debug simulator.
2979  //
2980  // For example:
2981  //
2982  // __ Trace(LOG_REGS, TRACE_ENABLE);
2983  // Will add register values to the trace if they are not already being traced.
2984  //
2985  // __ Trace(LOG_DISASM, TRACE_DISABLE);
2986  // Will stop logging disassembly. It has no effect if the disassembly wasn't
2987  // already being logged.
2988  void Trace(TraceParameters parameters, TraceCommand command);
2989
2990  // Log the requested data independently of what is being traced.
2991  //
2992  // For example:
2993  //
2994  // __ Log(LOG_FLAGS);
2995  // Will output the flags.
2996  void Log(TraceParameters parameters);
2997
2998  // Enable or disable instrumentation when an Instrument visitor is attached to
2999  // the simulator.
3000  void EnableInstrumentation();
3001  void DisableInstrumentation();
3002
3003  // Add a marker to the instrumentation data produced by an Instrument visitor.
3004  // The name is a two-character string that will be attached to the marker in
3005  // the output data.
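  //
  // For example (illustrative):
  //   __ AnnotateInstrumentation("f1");  // Mark the start of region "f1".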
3006  void AnnotateInstrumentation(const char* marker_name);
3007
3008  LiteralPool* GetLiteralPool() { return &literal_pool_; }
3009
3010// Support for simulated runtime calls.
3011
3012// Variadic templating is only available from C++11.
3013#if __cplusplus >= 201103L
3014  template <typename R, typename... P>
3015  void CallRuntime(R (*function)(P...));
3016#endif
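
  // A minimal usage sketch (illustrative; `AddFn` stands for any C++ function
  // visible at the call site, e.g. `int64_t AddFn(int64_t, int64_t)`; arguments
  // and the result follow the AAPCS64 calling convention):
  //   __ Mov(x0, 1);
  //   __ Mov(x1, 2);
  //   __ CallRuntime(AddFn);  // On return, x0 holds AddFn(1, 2).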
3017
3018 protected:
3019  // Helper used to query information about code generation and to generate
3020  // code for `csel`.
3021  // Here and for the related helpers below:
3022  // - Code is generated when `masm` is not `NULL`.
3023  // - On return and when set, `should_synthesise_left` and
3024  //   `should_synthesise_right` will indicate whether `left` and `right`
3025  //   should be synthesised in a temporary register.
3026  static void CselHelper(MacroAssembler* masm,
3027                         const Register& rd,
3028                         Operand left,
3029                         Operand right,
3030                         Condition cond,
3031                         bool* should_synthesise_left = NULL,
3032                         bool* should_synthesise_right = NULL);
3033
3034  // The helper returns `true` if it can handle the specified arguments.
3035  // Also see comments for `CselHelper()`.
3036  static bool CselSubHelperTwoImmediates(MacroAssembler* masm,
3037                                         const Register& rd,
3038                                         int64_t left,
3039                                         int64_t right,
3040                                         Condition cond,
3041                                         bool* should_synthesise_left,
3042                                         bool* should_synthesise_right);
3043
3044  // See comments for `CselHelper()`.
3045  static bool CselSubHelperTwoOrderedImmediates(MacroAssembler* masm,
3046                                                const Register& rd,
3047                                                int64_t left,
3048                                                int64_t right,
3049                                                Condition cond);
3050
3051  // See comments for `CselHelper()`.
3052  static void CselSubHelperRightSmallImmediate(MacroAssembler* masm,
3053                                               UseScratchRegisterScope* temps,
3054                                               const Register& rd,
3055                                               const Operand& left,
3056                                               const Operand& right,
3057                                               Condition cond,
3058                                               bool* should_synthesise_left);
3059
3060 private:
3061  // The actual Push and Pop implementations. These don't generate any code
3062  // other than that required for the push or pop. This allows
3063  // (Push|Pop)CPURegList to bundle together setup code for a large block of
3064  // registers.
3065  //
3066  // Note that size is per register, and is specified in bytes.
3067  void PushHelper(int count,
3068                  int size,
3069                  const CPURegister& src0,
3070                  const CPURegister& src1,
3071                  const CPURegister& src2,
3072                  const CPURegister& src3);
3073  void PopHelper(int count,
3074                 int size,
3075                 const CPURegister& dst0,
3076                 const CPURegister& dst1,
3077                 const CPURegister& dst2,
3078                 const CPURegister& dst3);
3079
3080  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
3081  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
3082  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
3083
3084  // Perform necessary maintenance operations before a push or pop.
3085  //
3086  // Note that size is per register, and is specified in bytes.
3087  void PrepareForPush(int count, int size);
3088  void PrepareForPop(int count, int size);
3089
3090  // The actual implementation of load and store operations for CPURegList.
3091  enum LoadStoreCPURegListAction { kLoad, kStore };
3092  void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
3093                                 CPURegList registers,
3094                                 const MemOperand& mem);
3095  // Returns a MemOperand suitable for loading or storing a CPURegList at `mem`.
3096  // This helper may allocate registers from `scratch_scope` and generate code
3097  // to compute an intermediate address. The resulting MemOperand is only valid
3098  // as long as `scratch_scope` remains valid.
3099  MemOperand BaseMemOperandForLoadStoreCPURegList(
3100      const CPURegList& registers,
3101      const MemOperand& mem,
3102      UseScratchRegisterScope* scratch_scope);
3103
3104  bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
3105    return !Instruction::IsValidImmPCOffset(branch_type,
3106                                            label->GetLocation() -
3107                                                GetCursorOffset());
3108  }
3109
3110#ifdef VIXL_DEBUG
3111  // Tell whether any of the macro instructions can be used. When false, the
3112  // MacroAssembler will assert if a method that can emit a variable number
3113  // of instructions is called.
3114  bool allow_macro_instructions_;
3115#endif
3116
3117  // Indicates whether we should generate simulator or native code.
3118  bool generate_simulator_code_;
3119
3120  // The register to use as a stack pointer for stack operations.
3121  Register sp_;
3122
3123  // Scratch registers available for use by the MacroAssembler.
3124  CPURegList tmp_list_;
3125  CPURegList fptmp_list_;
3126
3127  LiteralPool literal_pool_;
3128  VeneerPool veneer_pool_;
3129
3130  ptrdiff_t checkpoint_;
3131  ptrdiff_t recommended_checkpoint_;
3132
3133  friend class Pool;
3134  friend class LiteralPool;
3135};
3136
3137
3138inline size_t VeneerPool::GetOtherPoolsMaxSize() const {
3139  return masm_->GetLiteralPoolMaxSize();
3140}
3141
3142
3143inline size_t LiteralPool::GetOtherPoolsMaxSize() const {
3144  return masm_->GetVeneerPoolMaxSize();
3145}
3146
3147
3148inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) {
3149  masm_->recommended_checkpoint_ =
3150      std::min(masm_->recommended_checkpoint_, offset);
3151  recommended_checkpoint_ = offset;
3152}
3153
3154// Use this scope when you need a one-to-one mapping between methods and
3155// instructions. This scope prevents the MacroAssembler from being called and
3156// literal pools from being emitted. It also asserts that the number of
3157// instructions emitted matches the count you specified when creating the scope.
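//
// A minimal usage sketch (illustrative; only raw assembler instructions may be
// emitted inside the scope):
//   {
//     InstructionAccurateScope scope(&masm, 2);
//     __ add(x0, x1, x2);
//     __ ldr(x3, MemOperand(x29));
//   }  // Exactly two instructions must have been emitted by this point.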
3158class InstructionAccurateScope : public CodeBufferCheckScope {
3159 public:
3160  InstructionAccurateScope(MacroAssembler* masm,
3161                           int64_t count,
3162                           AssertPolicy policy = kExactSize)
3163      : CodeBufferCheckScope(masm, (count * kInstructionSize), kCheck, policy) {
3164    VIXL_ASSERT(policy != kNoAssert);
3165#ifdef VIXL_DEBUG
3166    old_allow_macro_instructions_ = masm->AllowMacroInstructions();
3167    masm->SetAllowMacroInstructions(false);
3168#endif
3169  }
3170
3171  ~InstructionAccurateScope() {
3172#ifdef VIXL_DEBUG
3173    MacroAssembler* masm = reinterpret_cast<MacroAssembler*>(assm_);
3174    masm->SetAllowMacroInstructions(old_allow_macro_instructions_);
3175#endif
3176  }
3177
3178 private:
3179#ifdef VIXL_DEBUG
3180  bool old_allow_macro_instructions_;
3181#endif
3182};
3183
3184
3185class BlockLiteralPoolScope {
3186 public:
3187  explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) {
3188    masm_->BlockLiteralPool();
3189  }
3190
3191  ~BlockLiteralPoolScope() { masm_->ReleaseLiteralPool(); }
3192
3193 private:
3194  MacroAssembler* masm_;
3195};
3196
3197
3198class BlockVeneerPoolScope {
3199 public:
3200  explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) {
3201    masm_->BlockVeneerPool();
3202  }
3203
3204  ~BlockVeneerPoolScope() { masm_->ReleaseVeneerPool(); }
3205
3206 private:
3207  MacroAssembler* masm_;
3208};
3209
3210
3211class BlockPoolsScope {
3212 public:
3213  explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) {
3214    masm_->BlockPools();
3215  }
3216
3217  ~BlockPoolsScope() { masm_->ReleasePools(); }
3218
3219 private:
3220  MacroAssembler* masm_;
3221};
3222
3223
3224// This scope utility allows scratch registers to be managed safely. The
3225// MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) are
3226// used as pools of scratch registers. These registers can be allocated on
3227// demand, and will be returned at the end of the scope.
3228//
3229// When the scope ends, the MacroAssembler's lists will be restored to their
3230// original state, even if the lists were modified by some other means.
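//
// A minimal usage sketch (illustrative):
//   {
//     UseScratchRegisterScope temps(&masm);
//     Register scratch = temps.AcquireX();
//     __ Mov(scratch, 0x123456789abcdef0);
//     __ Add(x0, x0, scratch);
//   }  // `scratch` is returned to the scratch register list here.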
3231class UseScratchRegisterScope {
3232 public:
3233  // This constructor implicitly calls `Open` to initialise the scope (`masm`
3234  // must not be `NULL`), so it is ready to use immediately after it has been
3235  // constructed.
3236  explicit UseScratchRegisterScope(MacroAssembler* masm);
3237  // This constructor does not implicitly initialise the scope. Instead, the
3238  // user is required to explicitly call the `Open` function before using the
3239  // scope.
3240  UseScratchRegisterScope();
3241  // This function performs the actual initialisation work.
3242  void Open(MacroAssembler* masm);
3243
3244  // The destructor always implicitly calls the `Close` function.
3245  ~UseScratchRegisterScope();
3246  // This function performs the clean-up work. It must succeed even if the
3247  // scope has not been opened, and it is safe to call multiple times.
3248  void Close();
3249
3250
3251  bool IsAvailable(const CPURegister& reg) const;
3252
3253
3254  // Take a register from the appropriate temps list. It will be returned
3255  // automatically when the scope ends.
3256  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
3257  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
3258  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
3259  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
3260
3261
3262  Register AcquireRegisterOfSize(int size_in_bits);
3263  Register AcquireSameSizeAs(const Register& reg) {
3264    return AcquireRegisterOfSize(reg.GetSizeInBits());
3265  }
3266  VRegister AcquireVRegisterOfSize(int size_in_bits);
3267  VRegister AcquireSameSizeAs(const VRegister& reg) {
3268    return AcquireVRegisterOfSize(reg.GetSizeInBits());
3269  }
3270  CPURegister AcquireCPURegisterOfSize(int size_in_bits) {
3271    return available_->IsEmpty()
3272               ? CPURegister(AcquireVRegisterOfSize(size_in_bits))
3273               : CPURegister(AcquireRegisterOfSize(size_in_bits));
3274  }
3275
3276
3277  // Explicitly release an acquired (or excluded) register, putting it back in
3278  // the appropriate temps list.
3279  void Release(const CPURegister& reg);
3280
3281
3282  // Make the specified registers available as scratch registers for the
3283  // duration of this scope.
3284  void Include(const CPURegList& list);
3285  void Include(const Register& reg1,
3286               const Register& reg2 = NoReg,
3287               const Register& reg3 = NoReg,
3288               const Register& reg4 = NoReg);
3289  void Include(const VRegister& reg1,
3290               const VRegister& reg2 = NoVReg,
3291               const VRegister& reg3 = NoVReg,
3292               const VRegister& reg4 = NoVReg);
3293
3294
3295  // Make sure that the specified registers are not available in this scope.
3296  // This can be used to prevent helper functions from using sensitive
3297  // registers, for example.
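  //
  // For example (illustrative):
  //   temps.Exclude(x16, x17);  // Helpers in this scope must not use x16 or x17.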
3298  void Exclude(const CPURegList& list);
3299  void Exclude(const Register& reg1,
3300               const Register& reg2 = NoReg,
3301               const Register& reg3 = NoReg,
3302               const Register& reg4 = NoReg);
3303  void Exclude(const VRegister& reg1,
3304               const VRegister& reg2 = NoVReg,
3305               const VRegister& reg3 = NoVReg,
3306               const VRegister& reg4 = NoVReg);
3307  void Exclude(const CPURegister& reg1,
3308               const CPURegister& reg2 = NoCPUReg,
3309               const CPURegister& reg3 = NoCPUReg,
3310               const CPURegister& reg4 = NoCPUReg);
3311
3312
3313  // Prevent any scratch registers from being used in this scope.
3314  void ExcludeAll();
3315
3316 private:
3317  static CPURegister AcquireNextAvailable(CPURegList* available);
3318
3319  static void ReleaseByCode(CPURegList* available, int code);
3320
3321  static void ReleaseByRegList(CPURegList* available, RegList regs);
3322
3323  static void IncludeByRegList(CPURegList* available, RegList exclude);
3324
3325  static void ExcludeByRegList(CPURegList* available, RegList exclude);
3326
3327  // Available scratch registers.
3328  CPURegList* available_;    // kRegister
3329  CPURegList* availablefp_;  // kVRegister
3330
3331  // The state of the available lists at the start of this scope.
3332  RegList old_available_;    // kRegister
3333  RegList old_availablefp_;  // kVRegister
3334#ifdef VIXL_DEBUG
3335  bool initialised_;
3336#endif
3337
3338  // Disallow copy constructor and operator=.
3339  VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
3340    VIXL_UNREACHABLE();
3341  }
3342  VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
3343    VIXL_UNREACHABLE();
3344  }
3345};
3346
3347// Variadic templating is only available from C++11.
3348#if __cplusplus >= 201103L
3349
3350// `R` stands for 'return type', and `P` for 'parameter types'.
3351template <typename R, typename... P>
3352void MacroAssembler::CallRuntime(R (*function)(P...)) {
3353  if (generate_simulator_code_) {
3354#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
3355    uint64_t runtime_call_wrapper_address = reinterpret_cast<uint64_t>(
3356        &(Simulator::RuntimeCallStructHelper<R, P...>::Wrapper));
3357    uint64_t function_address = reinterpret_cast<uint64_t>(function);
3358
3359    InstructionAccurateScope scope(this, 5);
3360    Label start;
3361    bind(&start);
3362    hlt(kRuntimeCallOpcode);
3363    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
3364                kRuntimeCallWrapperOffset);
3365    dc64(runtime_call_wrapper_address);
3366    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
3367                kRuntimeCallFunctionOffset);
3368    dc64(function_address);
3369    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
3370                kRuntimeCallFunctionOffset + kRuntimeCallAddressSize);
3371#else
3372    VIXL_UNREACHABLE();
3373#endif
3374  } else {
3375    UseScratchRegisterScope temps(this);
3376    Register temp = temps.AcquireX();
3377    Mov(temp, reinterpret_cast<uint64_t>(function));
3378    Blr(temp);
3379  }
3380}
3381
3382#endif
3383
3384}  // namespace aarch64
3385
3386// Required InvalSet template specialisations.
3387// TODO: These template specialisations should not live in this file.  Move
3388// VeneerPool out of the aarch64 namespace in order to share its implementation
3389// later.
3390template <>
3391inline ptrdiff_t InvalSet<aarch64::VeneerPool::BranchInfo,
3392                          aarch64::VeneerPool::kNPreallocatedInfos,
3393                          ptrdiff_t,
3394                          aarch64::VeneerPool::kInvalidOffset,
3395                          aarch64::VeneerPool::kReclaimFrom,
3396                          aarch64::VeneerPool::kReclaimFactor>::
3397    GetKey(const aarch64::VeneerPool::BranchInfo& branch_info) {
3398  return branch_info.max_reachable_pc_;
3399}
3400template <>
3401inline void InvalSet<aarch64::VeneerPool::BranchInfo,
3402                     aarch64::VeneerPool::kNPreallocatedInfos,
3403                     ptrdiff_t,
3404                     aarch64::VeneerPool::kInvalidOffset,
3405                     aarch64::VeneerPool::kReclaimFrom,
3406                     aarch64::VeneerPool::kReclaimFactor>::
3407    SetKey(aarch64::VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
3408  branch_info->max_reachable_pc_ = key;
3409}
3410
3411}  // namespace vixl
3412
3413#endif  // VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
3414