macro-assembler-aarch64.h revision 8d191abf32edf41421f68f35585e4fce8da4d50c
1// Copyright 2015, VIXL authors
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are met:
6//
7//   * Redistributions of source code must retain the above copyright notice,
8//     this list of conditions and the following disclaimer.
9//   * Redistributions in binary form must reproduce the above copyright notice,
10//     this list of conditions and the following disclaimer in the documentation
11//     and/or other materials provided with the distribution.
12//   * Neither the name of ARM Limited nor the names of its contributors may be
13//     used to endorse or promote products derived from this software without
14//     specific prior written permission.
15//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27#ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
28#define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
29
30#include <algorithm>
31#include <limits>
32
33#include "../code-generation-scopes-vixl.h"
34#include "../globals-vixl.h"
35#include "../macro-assembler-interface.h"
36
37#include "assembler-aarch64.h"
38#include "debugger-aarch64.h"
39#include "instrument-aarch64.h"
40// Required in order to generate debugging instructions for the simulator. This
41// is needed regardless of whether the simulator is included or not, since
42// generating simulator specific instructions is controlled at runtime.
43#include "simulator-constants-aarch64.h"
44
45
46#define LS_MACRO_LIST(V)                                     \
47  V(Ldrb, Register&, rt, LDRB_w)                             \
48  V(Strb, Register&, rt, STRB_w)                             \
49  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
50  V(Ldrh, Register&, rt, LDRH_w)                             \
51  V(Strh, Register&, rt, STRH_w)                             \
52  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
53  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
54  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
55  V(Ldrsw, Register&, rt, LDRSW_x)
56
57
58#define LSPAIR_MACRO_LIST(V)                             \
59  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
60  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
61  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
62
63namespace vixl {
64namespace aarch64 {
65
66// Forward declaration
67class MacroAssembler;
68class UseScratchRegisterScope;
69
70class Pool {
71 public:
72  explicit Pool(MacroAssembler* masm)
73      : checkpoint_(kNoCheckpointRequired), masm_(masm) {
74    Reset();
75  }
76
77  void Reset() {
78    checkpoint_ = kNoCheckpointRequired;
79    monitor_ = 0;
80  }
81
82  void Block() { monitor_++; }
83  void Release();
84  bool IsBlocked() const { return monitor_ != 0; }
85
86  static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX;
87
88  void SetNextCheckpoint(ptrdiff_t checkpoint);
89  ptrdiff_t GetCheckpoint() const { return checkpoint_; }
90  VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) {
91    return GetCheckpoint();
92  }
93
94  enum EmitOption { kBranchRequired, kNoBranchRequired };
95
96 protected:
97  // Next buffer offset at which a check is required for this pool.
98  ptrdiff_t checkpoint_;
99  // Indicates whether the emission of this pool is blocked.
100  int monitor_;
101  // The MacroAssembler using this pool.
102  MacroAssembler* masm_;
103};
104
105
106class LiteralPool : public Pool {
107 public:
108  explicit LiteralPool(MacroAssembler* masm);
109  ~LiteralPool();
110  void Reset();
111
112  void AddEntry(RawLiteral* literal);
113  bool IsEmpty() const { return entries_.empty(); }
114  size_t GetSize() const;
115  VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); }
116
117  size_t GetMaxSize() const;
118  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }
119
120  size_t GetOtherPoolsMaxSize() const;
121  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
122    return GetOtherPoolsMaxSize();
123  }
124
125  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
126  void Emit(EmitOption option = kNoBranchRequired);
127
128  void SetNextRecommendedCheckpoint(ptrdiff_t offset);
129  ptrdiff_t GetNextRecommendedCheckpoint();
130  VIXL_DEPRECATED("GetNextRecommendedCheckpoint",
131                  ptrdiff_t NextRecommendedCheckpoint()) {
132    return GetNextRecommendedCheckpoint();
133  }
134
135  void UpdateFirstUse(ptrdiff_t use_position);
136
137  void DeleteOnDestruction(RawLiteral* literal) {
138    deleted_on_destruction_.push_back(literal);
139  }
140
141  // Recommended not exact since the pool can be blocked for short periods.
142  static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes;
143
144 private:
145  std::vector<RawLiteral*> entries_;
146  size_t size_;
147  ptrdiff_t first_use_;
148  // The parent class `Pool` provides a `checkpoint_`, which is the buffer
149  // offset before which a check *must* occur. This recommended checkpoint
150  // indicates when we would like to start emitting the constant pool. The
151  // MacroAssembler can, but does not have to, check the buffer when the
152  // checkpoint is reached.
153  ptrdiff_t recommended_checkpoint_;
154
155  std::vector<RawLiteral*> deleted_on_destruction_;
156};
157
158
159inline size_t LiteralPool::GetSize() const {
160  // Account for the pool header.
161  return size_ + kInstructionSize;
162}
163
164
165inline size_t LiteralPool::GetMaxSize() const {
166  // Account for the potential branch over the pool.
167  return GetSize() + kInstructionSize;
168}
169
170
171inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() {
172  return first_use_ + kRecommendedLiteralPoolRange;
173}
174
175
176class VeneerPool : public Pool {
177 public:
178  explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {}
179
180  void Reset();
181
182  void Block() { monitor_++; }
183  void Release();
184  bool IsBlocked() const { return monitor_ != 0; }
185  bool IsEmpty() const { return unresolved_branches_.IsEmpty(); }
186
187  class BranchInfo {
188   public:
189    BranchInfo()
190        : max_reachable_pc_(0),
191          pc_offset_(0),
192          label_(NULL),
193          branch_type_(UnknownBranchType) {}
194    BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type)
195        : pc_offset_(offset), label_(label), branch_type_(branch_type) {
196      max_reachable_pc_ =
197          pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_);
198    }
199
200    static bool IsValidComparison(const BranchInfo& branch_1,
201                                  const BranchInfo& branch_2) {
202      // BranchInfo are always compared against against other objects with
203      // the same branch type.
204      if (branch_1.branch_type_ != branch_2.branch_type_) {
205        return false;
206      }
207      // Since we should never have two branch infos with the same offsets, it
208      // first looks like we should check that offsets are different. However
209      // the operators may also be used to *search* for a branch info in the
210      // set.
211      bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_);
212      return (!same_offsets ||
213              ((branch_1.label_ == branch_2.label_) &&
214               (branch_1.max_reachable_pc_ == branch_2.max_reachable_pc_)));
215    }
216
217    // We must provide comparison operators to work with InvalSet.
218    bool operator==(const BranchInfo& other) const {
219      VIXL_ASSERT(IsValidComparison(*this, other));
220      return pc_offset_ == other.pc_offset_;
221    }
222    bool operator<(const BranchInfo& other) const {
223      VIXL_ASSERT(IsValidComparison(*this, other));
224      return pc_offset_ < other.pc_offset_;
225    }
226    bool operator<=(const BranchInfo& other) const {
227      VIXL_ASSERT(IsValidComparison(*this, other));
228      return pc_offset_ <= other.pc_offset_;
229    }
230    bool operator>(const BranchInfo& other) const {
231      VIXL_ASSERT(IsValidComparison(*this, other));
232      return pc_offset_ > other.pc_offset_;
233    }
234
235    // Maximum position reachable by the branch using a positive branch offset.
236    ptrdiff_t max_reachable_pc_;
237    // Offset of the branch in the code generation buffer.
238    ptrdiff_t pc_offset_;
239    // The label branched to.
240    Label* label_;
241    ImmBranchType branch_type_;
242  };
243
244  bool BranchTypeUsesVeneers(ImmBranchType type) {
245    return (type != UnknownBranchType) && (type != UncondBranchType);
246  }
247
248  void RegisterUnresolvedBranch(ptrdiff_t branch_pos,
249                                Label* label,
250                                ImmBranchType branch_type);
251  void DeleteUnresolvedBranchInfoForLabel(Label* label);
252
253  bool ShouldEmitVeneer(int64_t max_reachable_pc, size_t amount);
254  bool ShouldEmitVeneers(size_t amount) {
255    return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount);
256  }
257
258  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
259  void Emit(EmitOption option, size_t margin);
260
261  // The code size generated for a veneer. Currently one branch instruction.
262  // This is for code size checking purposes, and can be extended in the future
263  // for example if we decide to add nops between the veneers.
264  static const int kVeneerCodeSize = 1 * kInstructionSize;
265  // The maximum size of code other than veneers that can be generated when
266  // emitting a veneer pool. Currently there can be an additional branch to jump
267  // over the pool.
268  static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize;
269
270  void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); }
271
272  int GetNumberOfPotentialVeneers() const {
273    return static_cast<int>(unresolved_branches_.GetSize());
274  }
275  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
276                  int NumberOfPotentialVeneers() const) {
277    return GetNumberOfPotentialVeneers();
278  }
279
280  size_t GetMaxSize() const {
281    return kPoolNonVeneerCodeSize +
282           unresolved_branches_.GetSize() * kVeneerCodeSize;
283  }
284  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }
285
286  size_t GetOtherPoolsMaxSize() const;
287  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
288    return GetOtherPoolsMaxSize();
289  }
290
291  static const int kNPreallocatedInfos = 4;
292  static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX;
293  static const size_t kReclaimFrom = 128;
294  static const size_t kReclaimFactor = 16;
295
296 private:
297  typedef InvalSet<BranchInfo,
298                   kNPreallocatedInfos,
299                   ptrdiff_t,
300                   kInvalidOffset,
301                   kReclaimFrom,
302                   kReclaimFactor> BranchInfoTypedSetBase;
303  typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase;
304
305  class BranchInfoTypedSet : public BranchInfoTypedSetBase {
306   public:
307    BranchInfoTypedSet() : BranchInfoTypedSetBase() {}
308
309    ptrdiff_t GetFirstLimit() {
310      if (empty()) {
311        return kInvalidOffset;
312      }
313      return GetMinElementKey();
314    }
315    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
316      return GetFirstLimit();
317    }
318  };
319
320  class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase {
321   public:
322    BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {}
323    explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set)
324        : BranchInfoTypedSetIterBase(typed_set) {}
325
326    // TODO: Remove these and use the STL-like interface instead.
327    using BranchInfoTypedSetIterBase::Advance;
328    using BranchInfoTypedSetIterBase::Current;
329  };
330
331  class BranchInfoSet {
332   public:
333    void insert(BranchInfo branch_info) {
334      ImmBranchType type = branch_info.branch_type_;
335      VIXL_ASSERT(IsValidBranchType(type));
336      typed_set_[BranchIndexFromType(type)].insert(branch_info);
337    }
338
339    void erase(BranchInfo branch_info) {
340      if (IsValidBranchType(branch_info.branch_type_)) {
341        int index =
342            BranchInfoSet::BranchIndexFromType(branch_info.branch_type_);
343        typed_set_[index].erase(branch_info);
344      }
345    }
346
347    size_t GetSize() const {
348      size_t res = 0;
349      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
350        res += typed_set_[i].size();
351      }
352      return res;
353    }
354    VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); }
355
356    bool IsEmpty() const {
357      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
358        if (!typed_set_[i].empty()) {
359          return false;
360        }
361      }
362      return true;
363    }
364    VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); }
365
366    ptrdiff_t GetFirstLimit() {
367      ptrdiff_t res = kInvalidOffset;
368      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
369        res = std::min(res, typed_set_[i].GetFirstLimit());
370      }
371      return res;
372    }
373    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
374      return GetFirstLimit();
375    }
376
377    void Reset() {
378      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
379        typed_set_[i].clear();
380      }
381    }
382
383    static ImmBranchType BranchTypeFromIndex(int index) {
384      switch (index) {
385        case 0:
386          return CondBranchType;
387        case 1:
388          return CompareBranchType;
389        case 2:
390          return TestBranchType;
391        default:
392          VIXL_UNREACHABLE();
393          return UnknownBranchType;
394      }
395    }
396    static int BranchIndexFromType(ImmBranchType branch_type) {
397      switch (branch_type) {
398        case CondBranchType:
399          return 0;
400        case CompareBranchType:
401          return 1;
402        case TestBranchType:
403          return 2;
404        default:
405          VIXL_UNREACHABLE();
406          return 0;
407      }
408    }
409
410    bool IsValidBranchType(ImmBranchType branch_type) {
411      return (branch_type != UnknownBranchType) &&
412             (branch_type != UncondBranchType);
413    }
414
415   private:
416    static const int kNumberOfTrackedBranchTypes = 3;
417    BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes];
418
419    friend class VeneerPool;
420    friend class BranchInfoSetIterator;
421  };
422
423  class BranchInfoSetIterator {
424   public:
425    explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) {
426      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
427        new (&sub_iterator_[i])
428            BranchInfoTypedSetIterator(&(set_->typed_set_[i]));
429      }
430    }
431
432    VeneerPool::BranchInfo* Current() {
433      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
434        if (!sub_iterator_[i].Done()) {
435          return sub_iterator_[i].Current();
436        }
437      }
438      VIXL_UNREACHABLE();
439      return NULL;
440    }
441
442    void Advance() {
443      VIXL_ASSERT(!Done());
444      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
445        if (!sub_iterator_[i].Done()) {
446          sub_iterator_[i].Advance();
447          return;
448        }
449      }
450      VIXL_UNREACHABLE();
451    }
452
453    bool Done() const {
454      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
455        if (!sub_iterator_[i].Done()) return false;
456      }
457      return true;
458    }
459
460    void AdvanceToNextType() {
461      VIXL_ASSERT(!Done());
462      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
463        if (!sub_iterator_[i].Done()) {
464          sub_iterator_[i].Finish();
465          return;
466        }
467      }
468      VIXL_UNREACHABLE();
469    }
470
471    void DeleteCurrentAndAdvance() {
472      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
473        if (!sub_iterator_[i].Done()) {
474          sub_iterator_[i].DeleteCurrentAndAdvance();
475          return;
476        }
477      }
478    }
479
480   private:
481    BranchInfoSet* set_;
482    BranchInfoTypedSetIterator
483        sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes];
484  };
485
486  ptrdiff_t GetNextCheckPoint() {
487    if (unresolved_branches_.IsEmpty()) {
488      return kNoCheckpointRequired;
489    } else {
490      return unresolved_branches_.GetFirstLimit();
491    }
492  }
493  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
494    return GetNextCheckPoint();
495  }
496
497  // Information about unresolved (forward) branches.
498  BranchInfoSet unresolved_branches_;
499};
500
501
502// Helper for common Emission checks.
503// The macro-instruction maps to a single instruction.
504class SingleEmissionCheckScope : public EmissionCheckScope {
505 public:
506  explicit SingleEmissionCheckScope(MacroAssemblerInterface* masm)
507      : EmissionCheckScope(masm, kInstructionSize) {}
508};
509
510
511// The macro instruction is a "typical" macro-instruction. Typical macro-
512// instruction only emit a few instructions, a few being defined as 8 here.
513class MacroEmissionCheckScope : public EmissionCheckScope {
514 public:
515  explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm)
516      : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}
517
518 private:
519  static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
520};
521
522
523enum BranchType {
524  // Copies of architectural conditions.
525  // The associated conditions can be used in place of those, the code will
526  // take care of reinterpreting them with the correct type.
527  integer_eq = eq,
528  integer_ne = ne,
529  integer_hs = hs,
530  integer_lo = lo,
531  integer_mi = mi,
532  integer_pl = pl,
533  integer_vs = vs,
534  integer_vc = vc,
535  integer_hi = hi,
536  integer_ls = ls,
537  integer_ge = ge,
538  integer_lt = lt,
539  integer_gt = gt,
540  integer_le = le,
541  integer_al = al,
542  integer_nv = nv,
543
544  // These two are *different* from the architectural codes al and nv.
545  // 'always' is used to generate unconditional branches.
546  // 'never' is used to not generate a branch (generally as the inverse
547  // branch type of 'always).
548  always,
549  never,
550  // cbz and cbnz
551  reg_zero,
552  reg_not_zero,
553  // tbz and tbnz
554  reg_bit_clear,
555  reg_bit_set,
556
557  // Aliases.
558  kBranchTypeFirstCondition = eq,
559  kBranchTypeLastCondition = nv,
560  kBranchTypeFirstUsingReg = reg_zero,
561  kBranchTypeFirstUsingBit = reg_bit_clear
562};
563
564
565enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
566
567
568class MacroAssembler : public Assembler, public MacroAssemblerInterface {
569 public:
570  explicit MacroAssembler(
571      PositionIndependentCodeOption pic = PositionIndependentCode);
572  MacroAssembler(size_t capacity,
573                 PositionIndependentCodeOption pic = PositionIndependentCode);
574  MacroAssembler(byte* buffer,
575                 size_t capacity,
576                 PositionIndependentCodeOption pic = PositionIndependentCode);
577  ~MacroAssembler();
578
579  AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE { return this; }
580
581  // Start generating code from the beginning of the buffer, discarding any code
582  // and data that has already been emitted into the buffer.
583  //
584  // In order to avoid any accidental transfer of state, Reset ASSERTs that the
585  // constant pool is not blocked.
586  void Reset();
587
588  // Finalize a code buffer of generated instructions. This function must be
589  // called before executing or copying code from the buffer.
590  void FinalizeCode();
591
592
593  // Constant generation helpers.
594  // These functions return the number of instructions required to move the
595  // immediate into the destination register. Also, if the masm pointer is
596  // non-null, it generates the code to do so.
597  // The two features are implemented using one function to avoid duplication of
598  // the logic.
599  // The function can be used to evaluate the cost of synthesizing an
600  // instruction using 'mov immediate' instructions. A user might prefer loading
601  // a constant using the literal pool instead of using multiple 'mov immediate'
602  // instructions.
603  static int MoveImmediateHelper(MacroAssembler* masm,
604                                 const Register& rd,
605                                 uint64_t imm);
606  static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
607                                          const Register& dst,
608                                          int64_t imm);
609
610
611  // Logical macros.
612  void And(const Register& rd, const Register& rn, const Operand& operand);
613  void Ands(const Register& rd, const Register& rn, const Operand& operand);
614  void Bic(const Register& rd, const Register& rn, const Operand& operand);
615  void Bics(const Register& rd, const Register& rn, const Operand& operand);
616  void Orr(const Register& rd, const Register& rn, const Operand& operand);
617  void Orn(const Register& rd, const Register& rn, const Operand& operand);
618  void Eor(const Register& rd, const Register& rn, const Operand& operand);
619  void Eon(const Register& rd, const Register& rn, const Operand& operand);
620  void Tst(const Register& rn, const Operand& operand);
621  void LogicalMacro(const Register& rd,
622                    const Register& rn,
623                    const Operand& operand,
624                    LogicalOp op);
625
626  // Add and sub macros.
627  void Add(const Register& rd,
628           const Register& rn,
629           const Operand& operand,
630           FlagsUpdate S = LeaveFlags);
631  void Adds(const Register& rd, const Register& rn, const Operand& operand);
632  void Sub(const Register& rd,
633           const Register& rn,
634           const Operand& operand,
635           FlagsUpdate S = LeaveFlags);
636  void Subs(const Register& rd, const Register& rn, const Operand& operand);
637  void Cmn(const Register& rn, const Operand& operand);
638  void Cmp(const Register& rn, const Operand& operand);
639  void Neg(const Register& rd, const Operand& operand);
640  void Negs(const Register& rd, const Operand& operand);
641
642  void AddSubMacro(const Register& rd,
643                   const Register& rn,
644                   const Operand& operand,
645                   FlagsUpdate S,
646                   AddSubOp op);
647
648  // Add/sub with carry macros.
649  void Adc(const Register& rd, const Register& rn, const Operand& operand);
650  void Adcs(const Register& rd, const Register& rn, const Operand& operand);
651  void Sbc(const Register& rd, const Register& rn, const Operand& operand);
652  void Sbcs(const Register& rd, const Register& rn, const Operand& operand);
653  void Ngc(const Register& rd, const Operand& operand);
654  void Ngcs(const Register& rd, const Operand& operand);
655  void AddSubWithCarryMacro(const Register& rd,
656                            const Register& rn,
657                            const Operand& operand,
658                            FlagsUpdate S,
659                            AddSubWithCarryOp op);
660
661  // Move macros.
662  void Mov(const Register& rd, uint64_t imm);
663  void Mov(const Register& rd,
664           const Operand& operand,
665           DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
666  void Mvn(const Register& rd, uint64_t imm) {
667    Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask));
668  }
669  void Mvn(const Register& rd, const Operand& operand);
670
671  // Try to move an immediate into the destination register in a single
672  // instruction. Returns true for success, and updates the contents of dst.
673  // Returns false, otherwise.
674  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
675
676  // Move an immediate into register dst, and return an Operand object for
677  // use with a subsequent instruction that accepts a shift. The value moved
678  // into dst is not necessarily equal to imm; it may have had a shifting
679  // operation applied to it that will be subsequently undone by the shift
680  // applied in the Operand.
681  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
682
683  void Move(const GenericOperand& dst, const GenericOperand& src);
684
685  // Synthesises the address represented by a MemOperand into a register.
686  void ComputeAddress(const Register& dst, const MemOperand& mem_op);
687
688  // Conditional macros.
689  void Ccmp(const Register& rn,
690            const Operand& operand,
691            StatusFlags nzcv,
692            Condition cond);
693  void Ccmn(const Register& rn,
694            const Operand& operand,
695            StatusFlags nzcv,
696            Condition cond);
697  void ConditionalCompareMacro(const Register& rn,
698                               const Operand& operand,
699                               StatusFlags nzcv,
700                               Condition cond,
701                               ConditionalCompareOp op);
702
703  // On return, the boolean values pointed to will indicate whether `left` and
704  // `right` should be synthesised in a temporary register.
705  static void GetCselSynthesisInformation(const Register& rd,
706                                          const Operand& left,
707                                          const Operand& right,
708                                          bool* should_synthesise_left,
709                                          bool* should_synthesise_right) {
710    // Note that the helper does not need to look at the condition.
711    CselHelper(NULL,
712               rd,
713               left,
714               right,
715               eq,
716               should_synthesise_left,
717               should_synthesise_right);
718  }
719
720  void Csel(const Register& rd,
721            const Operand& left,
722            const Operand& right,
723            Condition cond) {
724    CselHelper(this, rd, left, right, cond);
725  }
726
727// Load/store macros.
728#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
729  void FN(const REGTYPE REG, const MemOperand& addr);
730  LS_MACRO_LIST(DECLARE_FUNCTION)
731#undef DECLARE_FUNCTION
732
733  void LoadStoreMacro(const CPURegister& rt,
734                      const MemOperand& addr,
735                      LoadStoreOp op);
736
737#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
738  void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
739  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
740#undef DECLARE_FUNCTION
741
742  void LoadStorePairMacro(const CPURegister& rt,
743                          const CPURegister& rt2,
744                          const MemOperand& addr,
745                          LoadStorePairOp op);
746
747  void Prfm(PrefetchOperation op, const MemOperand& addr);
748
749  // Push or pop up to 4 registers of the same width to or from the stack,
750  // using the current stack pointer as set by SetStackPointer.
751  //
752  // If an argument register is 'NoReg', all further arguments are also assumed
753  // to be 'NoReg', and are thus not pushed or popped.
754  //
755  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
756  // to "Push(a); Push(b);".
757  //
758  // It is valid to push the same register more than once, and there is no
759  // restriction on the order in which registers are specified.
760  //
761  // It is not valid to pop into the same register more than once in one
762  // operation, not even into the zero register.
763  //
764  // If the current stack pointer (as set by SetStackPointer) is sp, then it
765  // must be aligned to 16 bytes on entry and the total size of the specified
766  // registers must also be a multiple of 16 bytes.
767  //
768  // Even if the current stack pointer is not the system stack pointer (sp),
769  // Push (and derived methods) will still modify the system stack pointer in
770  // order to comply with ABI rules about accessing memory below the system
771  // stack pointer.
772  //
773  // Other than the registers passed into Pop, the stack pointer and (possibly)
774  // the system stack pointer, these methods do not modify any other registers.
775  void Push(const CPURegister& src0,
776            const CPURegister& src1 = NoReg,
777            const CPURegister& src2 = NoReg,
778            const CPURegister& src3 = NoReg);
779  void Pop(const CPURegister& dst0,
780           const CPURegister& dst1 = NoReg,
781           const CPURegister& dst2 = NoReg,
782           const CPURegister& dst3 = NoReg);
783
784  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
785  // specifies the registers that are to be pushed or popped. Higher-numbered
786  // registers are associated with higher memory addresses (as in the A32 push
787  // and pop instructions).
788  //
789  // (Push|Pop)SizeRegList allow you to specify the register size as a
790  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
791  // supported.
792  //
793  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
794  void PushCPURegList(CPURegList registers);
795  void PopCPURegList(CPURegList registers);
796
797  void PushSizeRegList(
798      RegList registers,
799      unsigned reg_size,
800      CPURegister::RegisterType type = CPURegister::kRegister) {
801    PushCPURegList(CPURegList(type, reg_size, registers));
802  }
803  void PopSizeRegList(RegList registers,
804                      unsigned reg_size,
805                      CPURegister::RegisterType type = CPURegister::kRegister) {
806    PopCPURegList(CPURegList(type, reg_size, registers));
807  }
808  void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); }
809  void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); }
810  void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); }
811  void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); }
812  void PushDRegList(RegList regs) {
813    PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
814  }
815  void PopDRegList(RegList regs) {
816    PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
817  }
818  void PushSRegList(RegList regs) {
819    PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
820  }
821  void PopSRegList(RegList regs) {
822    PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
823  }
824
825  // Push the specified register 'count' times.
826  void PushMultipleTimes(int count, Register src);
827
828  // Poke 'src' onto the stack. The offset is in bytes.
829  //
830  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
831  // must be aligned to 16 bytes.
832  void Poke(const Register& src, const Operand& offset);
833
834  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
835  //
836  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
837  // must be aligned to 16 bytes.
838  void Peek(const Register& dst, const Operand& offset);
839
840  // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
841  // specifies the registers that are to be pushed or popped. Higher-numbered
842  // registers are associated with higher memory addresses.
843  //
844  // (Peek|Poke)SizeRegList allow you to specify the register size as a
845  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
846  // supported.
847  //
848  // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
  // Load ('Peek') or store ('Poke') the listed registers at the given byte
  // offset from the current stack pointer. Unlike Push/Pop, no stack space is
  // claimed or dropped.
  void PeekCPURegList(CPURegList registers, int64_t offset) {
    LoadCPURegList(registers, MemOperand(StackPointer(), offset));
  }
  void PokeCPURegList(CPURegList registers, int64_t offset) {
    StoreCPURegList(registers, MemOperand(StackPointer(), offset));
  }

  // As PeekCPURegList/PokeCPURegList, but building the list from a RegList
  // bit mask, a register size in bits and a register bank.
  void PeekSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PeekCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  void PokeSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PokeCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  // Convenience wrappers, mirroring Push/Pop(X|W|D|S)RegList above.
  void PeekXRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kXRegSize);
  }
  void PokeXRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kXRegSize);
  }
  void PeekWRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kWRegSize);
  }
  void PokeWRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kWRegSize);
  }
  void PeekDRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PokeDRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PeekSRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
  void PokeSRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
894
895
896  // Claim or drop stack space without actually accessing memory.
897  //
898  // If the current stack pointer (as set by SetStackPointer) is sp, then it
899  // must be aligned to 16 bytes and the size claimed or dropped must be a
900  // multiple of 16 bytes.
901  void Claim(const Operand& size);
902  void Drop(const Operand& size);
903
904  // Preserve the callee-saved registers (as defined by AAPCS64).
905  //
906  // Higher-numbered registers are pushed before lower-numbered registers, and
907  // thus get higher addresses.
908  // Floating-point registers are pushed before general-purpose registers, and
909  // thus get higher addresses.
910  //
911  // This method must not be called unless StackPointer() is sp, and it is
912  // aligned to 16 bytes.
913  void PushCalleeSavedRegisters();
914
915  // Restore the callee-saved registers (as defined by AAPCS64).
916  //
917  // Higher-numbered registers are popped after lower-numbered registers, and
918  // thus come from higher addresses.
919  // Floating-point registers are popped after general-purpose registers, and
920  // thus come from higher addresses.
921  //
922  // This method must not be called unless StackPointer() is sp, and it is
923  // aligned to 16 bytes.
924  void PopCalleeSavedRegisters();
925
926  void LoadCPURegList(CPURegList registers, const MemOperand& src);
927  void StoreCPURegList(CPURegList registers, const MemOperand& dst);
928
929  // Remaining instructions are simple pass-through calls to the assembler.
  // Each pass-through wrapper below follows the same pattern: check that
  // macro instructions are currently allowed, assert operand restrictions the
  // underlying encoding cannot express (e.g. the zero register), reserve
  // space for exactly one instruction via SingleEmissionCheckScope, then
  // forward to the assembler.
  void Adr(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adr(rd, label);
  }
  void Adrp(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adrp(rd, label);
  }
  void Asr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    asr(rd, rn, shift);
  }
  // Register-specified shift amounts use the variable-shift (asrv) encoding.
  void Asr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    asrv(rd, rn, rm);
  }
957
  // Branch type inversion relies on these relations.
  // InvertBranchType (below) flips the lowest bit of non-condition branch
  // types; these checks guarantee each such type pairs with its inverse.
  VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
                     (reg_bit_clear == (reg_bit_set ^ 1)) &&
                     (always == (never ^ 1)));
962
963  BranchType InvertBranchType(BranchType type) {
964    if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
965      return static_cast<BranchType>(
966          InvertCondition(static_cast<Condition>(type)));
967    } else {
968      return static_cast<BranchType>(type ^ 1);
969    }
970  }
971
  // Generic branch: branch to 'label' when the condition described by 'type'
  // holds; 'reg' (and 'bit', for bit-test types) supply the tested operand
  // where the branch type requires one.
  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  void B(Label* label);
  void B(Label* label, Condition cond);
  // Alternative argument order for readability at call sites.
  void B(Condition cond, Label* label) { B(label, cond); }
  // Bitfield move and its aliases. All operands must be encodable; none of
  // the registers may be the zero register.
  void Bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfm(rd, rn, immr, imms);
  }
  void Bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfi(rd, rn, lsb, width);
  }
  void Bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfxil(rd, rn, lsb, width);
  }
  void Bind(Label* label);
  // Bind a label to a specified offset from the start of the buffer.
  void BindToOffset(Label* label, ptrdiff_t offset);
  // Branch-and-link to 'label'.
  void Bl(Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bl(label);
  }
  // Indirect branch-and-link / branch through 'xn'.
  void Blr(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    blr(xn);
  }
  void Br(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    br(xn);
  }
  // Breakpoint with an immediate payload.
  void Brk(int code = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brk(code);
  }
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  // Conditional increment/invert/negate/set aliases and the underlying
  // conditional-select instructions. The three-source forms (Csinc, Csinv,
  // Csneg) assert that the condition is neither al nor nv, which these
  // encodings cannot accept.
  void Cinc(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinc(rd, rn, cond);
  }
  void Cinv(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinv(rd, rn, cond);
  }
  // Clear the local monitor's exclusive access record.
  void Clrex() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    clrex();
  }
  void Cls(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cls(rd, rn);
  }
  void Clz(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    clz(rd, rn);
  }
  void Cneg(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cneg(rd, rn, cond);
  }
  void Cset(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    cset(rd, cond);
  }
  void Csetm(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    csetm(rd, cond);
  }
  void Csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinc(rd, rn, rm, cond);
  }
  void Csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinv(rd, rn, rm, cond);
  }
  void Csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csneg(rd, rn, rm, cond);
  }
  // Data memory / data synchronization barriers, parameterized by the
  // shareability domain and access types they order.
  void Dmb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dmb(domain, type);
  }
  void Dsb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dsb(domain, type);
  }
  // Extract a register-sized field starting at bit 'lsb' of rn:rm.
  void Extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    extr(rd, rn, rm, lsb);
  }
  // FP arithmetic, conditional compare and narrowing/widening conversions.
  void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fadd(vd, vn, vm);
  }
  // Conditional FP compare; sets 'nzcv' directly when 'cond' fails. The trap
  // flag selects between the quiet (fccmp) and signalling (fccmpe) forms via
  // FPCCompareMacro.
  void Fccmp(const VRegister& vn,
             const VRegister& vm,
             StatusFlags nzcv,
             Condition cond,
             FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    FPCCompareMacro(vn, vm, nzcv, cond, trap);
  }
  void Fccmpe(const VRegister& vn,
              const VRegister& vm,
              StatusFlags nzcv,
              Condition cond) {
    Fccmp(vn, vm, nzcv, cond, EnableTrap);
  }
  void Fcmp(const VRegister& vn,
            const VRegister& vm,
            FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    FPCompareMacro(vn, vm, trap);
  }
  void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap);
  void Fcmpe(const VRegister& vn, double value);
  // Fcmpe is Fcmp with trapping enabled.
  void Fcmpe(const VRegister& vn, const VRegister& vm) {
    Fcmp(vn, vm, EnableTrap);
  }
  void Fcsel(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    fcsel(vd, vn, vm, cond);
  }
  // FP precision conversions. The *2 variants operate on the upper half of
  // the vector.
  void Fcvt(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvt(vd, vn);
  }
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl2(vd, vn);
  }
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn2(vd, vn);
  }
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn2(vd, vn);
  }
  // FP-to-integer conversions. The suffix encodes rounding mode and
  // signedness per the Arm ISA naming (a/m/n/p/z rounding, s/u signedness);
  // see the Arm Architecture Reference Manual for the exact semantics.
  void Fcvtas(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtas(rd, vn);
  }
  void Fcvtau(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtau(rd, vn);
  }
  void Fcvtms(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtms(rd, vn);
  }
  void Fcvtmu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtmu(rd, vn);
  }
  void Fcvtns(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtns(rd, vn);
  }
  void Fcvtnu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtnu(rd, vn);
  }
  void Fcvtps(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtps(rd, vn);
  }
  void Fcvtpu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtpu(rd, vn);
  }
  // The fixed-point forms take the number of fractional bits in 'fbits'.
  void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzs(rd, vn, fbits);
  }
  void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzu(rd, vn, fbits);
  }
  // FP divide and min/max; the *nm variants use the IEEE minNum/maxNum
  // NaN-propagation behavior of the underlying instructions.
  void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fdiv(vd, vn, vm);
  }
  void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmax(vd, vn, vm);
  }
  void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmaxnm(vd, vn, vm);
  }
  void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmin(vd, vn, vm);
  }
  void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fminnm(vd, vn, vm);
  }
  // FP register moves. The register-to-register form may elide the
  // instruction entirely (see the comment in the body), so callers must not
  // rely on it always emitting code.
  void Fmov(VRegister vd, VRegister vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    // Only emit an instruction if vd and vn are different, and they are both D
    // registers. fmov(s0, s0) is not a no-op because it clears the top word of
    // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
    // the top of q0, but VRegister does not currently support Q registers.
    if (!vd.Is(vn) || !vd.Is64Bits()) {
      fmov(vd, vn);
    }
  }
  void Fmov(VRegister vd, Register rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(vd, rn);
  }
  // Element forms: move between a general-purpose register and vector
  // element 'index'.
  void Fmov(const VRegister& vd, int index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmov(vd, index, rn);
  }
  void Fmov(const Register& rd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn, index);
  }
1332
  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of vd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  void Fmov(VRegister vd, double imm);
  void Fmov(VRegister vd, float imm);
  // Provide a template to allow other types to be converted automatically.
  // Note that this funnels everything through the double overload, so
  // signalling NaNs cannot be preserved via this path.
  template <typename T>
  void Fmov(VRegister vd, T imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    Fmov(vd, static_cast<double>(imm));
  }
  void Fmov(Register rd, VRegister vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn);
  }
  // FP multiply and the fused multiply-add/subtract family
  // (F{n}m{add,sub}: va +/- vn*vm, possibly negated).
  void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmul(vd, vn, vm);
  }
  void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmul(vd, vn, vm);
  }
  void Fmadd(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmadd(vd, vn, vm, va);
  }
  void Fmsub(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmsub(vd, vn, vm, va);
  }
  void Fnmadd(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmadd(vd, vn, vm, va);
  }
  void Fnmsub(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmsub(vd, vn, vm, va);
  }
  void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fsub(vd, vn, vm);
  }
  // System hint, halt and instruction synchronization barrier.
  void Hint(SystemHint code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hint(code);
  }
  void Hlt(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hlt(code);
  }
  void Isb() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    isb();
  }
  // Load-acquire and exclusive-load wrappers. The pair forms assert that the
  // two destination registers do not alias, which the instruction forbids.
  void Ldar(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldar(rt, src);
  }
  void Ldarb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarb(rt, src);
  }
  void Ldarh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarh(rt, src);
  }
  void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldaxp(rt, rt2, src);
  }
  void Ldaxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxr(rt, src);
  }
  void Ldaxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrb(rt, src);
  }
  void Ldaxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrh(rt, src);
  }
  // Load a non-temporal pair.
  void Ldnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldnp(rt, rt2, src);
  }
  // Provide both double and float interfaces for FP immediate loads, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  //
  // Each of these allocates a literal on the heap and hands it to the literal
  // pool; kDeletedOnPlacementByPool makes the pool delete it once placed, so
  // no cleanup is needed here.
  void Ldr(const VRegister& vt, double imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (vt.IsD()) {
      literal = new Literal<double>(imm,
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    } else {
      // Narrow the immediate to match a non-D destination.
      literal = new Literal<float>(static_cast<float>(imm),
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
  void Ldr(const VRegister& vt, float imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (vt.IsS()) {
      literal = new Literal<float>(imm,
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    } else {
      // Widen the immediate to match a non-S destination.
      literal = new Literal<double>(static_cast<double>(imm),
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
  // 128-bit literal load into a Q register, given as two 64-bit halves.
  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(vt.IsQ());
    SingleEmissionCheckScope guard(this);
    ldr(vt,
        new Literal<uint64_t>(high64,
                              low64,
                              &literal_pool_,
                              RawLiteral::kDeletedOnPlacementByPool));
  }
  // Integer literal load; the literal width follows the register width, and
  // a W destination requires the value to fit in 32 bits (signed or
  // unsigned).
  void Ldr(const Register& rt, uint64_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (rt.Is64Bits()) {
      literal = new Literal<uint64_t>(imm,
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    } else {
      VIXL_ASSERT(rt.Is32Bits());
      VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
      literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(rt, literal);
  }
  // Load a 32-bit literal and sign-extend it into rt.
  void Ldrsw(const Register& rt, uint32_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    ldrsw(rt,
          new Literal<uint32_t>(imm,
                                &literal_pool_,
                                RawLiteral::kDeletedOnPlacementByPool));
  }
  // Forms taking a caller-managed literal; ownership stays with the caller.
  void Ldr(const CPURegister& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldr(rt, literal);
  }
  void Ldrsw(const Register& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldrsw(rt, literal);
  }
  // Exclusive loads (no acquire semantics). The pair form forbids aliasing
  // destination registers.
  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldxp(rt, rt2, src);
  }
  void Ldxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxr(rt, src);
  }
  void Ldxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrb(rt, src);
  }
  void Ldxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrh(rt, src);
  }
  // Logical shifts; register-specified amounts use the variable-shift
  // (lslv/lsrv) encodings.
  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsl(rd, rn, shift);
  }
  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lslv(rd, rn, rm);
  }
  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsr(rd, rn, shift);
  }
  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lsrv(rd, rn, rm);
  }
  // Multiply-accumulate (rd = ra + rn*rm) and multiply-negate.
  void Madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    madd(rd, rn, rm, ra);
  }
  void Mneg(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mneg(rd, rn, rm);
  }
  // Register-to-register move. May emit nothing at all (see the comments in
  // the body), so callers must not depend on an instruction being emitted.
  void Mov(const Register& rd,
           const Register& rn,
           DiscardMoveMode discard_mode = kDontDiscardForSameWReg) {
    VIXL_ASSERT(allow_macro_instructions_);
    // Emit a register move only if the registers are distinct, or if they are
    // not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
    // registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(rn) ||
        (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
      SingleEmissionCheckScope guard(this);
      mov(rd, rn);
    }
  }
  // Move 'imm' into a 16-bit slice of rd, keeping the other bits; shift of -1
  // lets the assembler pick the slice.
  void Movk(const Register& rd, uint64_t imm, int shift = -1) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    movk(rd, imm, shift);
  }
  // System-register access and system/cache-maintenance instructions.
  void Mrs(const Register& rt, SystemRegister sysreg) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    mrs(rt, sysreg);
  }
  void Msr(SystemRegister sysreg, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    msr(sysreg, rt);
  }
  // Generic system instruction; 'rt' defaults to xzr for operand-less ops.
  void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    sys(op1, crn, crm, op2, rt);
  }
  void Dc(DataCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dc(op, rt);
  }
  void Ic(InstructionCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ic(op, rt);
  }
  // Multiply-subtract: rd = ra - (rn * rm).
  void Msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    msub(rd, rn, rm, ra);
  }
  // Multiply: rd = rn * rm.
  void Mul(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mul(rd, rn, rm);
  }
  // Emit a single no-operation instruction.
  void Nop() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    nop();
  }
  // Reverse the bit order of rn into rd.
  void Rbit(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rbit(rd, rn);
  }
  // Return to the address held in xn (defaults to the link register).
  void Ret(const Register& xn = lr) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    ret(xn);
  }
  // Reverse the byte order of the whole register.
  void Rev(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev(rd, rn);
  }
  // Reverse the byte order within each 16-bit halfword.
  void Rev16(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev16(rd, rn);
  }
  // Reverse the byte order within each 32-bit word (X registers).
  void Rev32(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev32(rd, rn);
  }
  // Rotate right by an immediate shift amount.
  void Ror(const Register& rd, const Register& rs, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rs.IsZero());
    SingleEmissionCheckScope guard(this);
    ror(rd, rs, shift);
  }
  // Rotate right by a variable amount held in rm; emits `rorv`.
  void Ror(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    rorv(rd, rn, rm);
  }
  // Signed bitfield insert in zeros: place `width` bits of rn at `lsb`,
  // sign-extended, zeroing the rest.
  void Sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfiz(rd, rn, lsb, width);
  }
  // Signed bitfield move, raw immr/imms form.
  void Sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfm(rd, rn, immr, imms);
  }
  // Signed bitfield extract: take `width` bits at `lsb`, sign-extend into rd.
  void Sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfx(rd, rn, lsb, width);
  }
  // Convert the signed integer in rn to floating point in vd; fbits gives the
  // number of fixed-point fraction bits (0 for a plain integer convert).
  void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    scvtf(vd, rn, fbits);
  }
  // Signed divide: rd = rn / rm.
  void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    sdiv(rd, rn, rm);
  }
  // Signed multiply-add long: rd = ra + (rn * rm), widening 32x32 -> 64.
  void Smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smaddl(rd, rn, rm, ra);
  }
  // Signed multiply-subtract long: rd = ra - (rn * rm).
  void Smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smsubl(rd, rn, rm, ra);
  }
  // Signed multiply long: widening 32x32 -> 64 multiply.
  void Smull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    smull(rd, rn, rm);
  }
  // Signed multiply high: the upper 64 bits of the 128-bit product xn * xm.
  void Smulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    smulh(xd, xn, xm);
  }
  // Store-release register.
  void Stlr(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stlr(rt, dst);
  }
  // Store-release byte.
  void Stlrb(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stlrb(rt, dst);
  }
  // Store-release halfword.
  void Stlrh(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stlrh(rt, dst);
  }
  // Store-release exclusive pair. The status register rs must not alias the
  // data registers or the base register; the architecture leaves such
  // encodings constrained-unpredictable, hence the asserts.
  void Stlxp(const Register& rs,
             const Register& rt,
             const Register& rt2,
             const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stlxp(rs, rt, rt2, dst);
  }
  // Store-release exclusive register; same rs aliasing restrictions.
  void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxr(rs, rt, dst);
  }
  // Store-release exclusive byte.
  void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrb(rs, rt, dst);
  }
  // Store-release exclusive halfword.
  void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrh(rs, rt, dst);
  }
  // Store pair with a non-temporal hint.
  void Stnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stnp(rt, rt2, dst);
  }
  // Store exclusive pair. As with the release forms, the status register rs
  // must not alias the data registers or the base register.
  void Stxp(const Register& rs,
            const Register& rt,
            const Register& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stxp(rs, rt, rt2, dst);
  }
  // Store exclusive register.
  void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxr(rs, rt, dst);
  }
  // Store exclusive byte.
  void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrb(rs, rt, dst);
  }
  // Store exclusive halfword.
  void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrh(rs, rt, dst);
  }
  // Supervisor call with the given immediate code.
  void Svc(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    svc(code);
  }
  // Sign-extend the low byte of rn into rd.
  void Sxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtb(rd, rn);
  }
  // Sign-extend the low halfword of rn into rd.
  void Sxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxth(rd, rn);
  }
  // Sign-extend the low word of rn into rd.
  void Sxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtw(rd, rn);
  }
  // Table lookup: vd gets bytes selected from one table register vn by the
  // indices in vm.
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vm);
  }
  // Table lookup over two consecutive table registers.
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vm);
  }
  // Table lookup over three consecutive table registers.
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vm);
  }
  // Table lookup over four consecutive table registers.
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  // Table lookup extension: like Tbl, but out-of-range indices leave the
  // corresponding destination byte unchanged instead of zeroing it.
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vm);
  }
  // Table lookup extension over two consecutive table registers.
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vm);
  }
  // Table lookup extension over three consecutive table registers.
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vm);
  }
  // Table lookup extension over four consecutive table registers.
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }
  // Test bit and branch if non-zero / zero. Defined out of line, presumably
  // because branch emission depends on label/range handling — see the .cc.
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  // Unsigned bitfield insert in zeros: place `width` bits of rn at `lsb`,
  // zeroing the rest.
  void Ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfiz(rd, rn, lsb, width);
  }
  // Unsigned bitfield move, raw immr/imms form.
  void Ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfm(rd, rn, immr, imms);
  }
  // Unsigned bitfield extract: take `width` bits at `lsb`, zero-extend to rd.
  void Ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfx(rd, rn, lsb, width);
  }
  // Convert the unsigned integer in rn to floating point in vd; fbits gives
  // the number of fixed-point fraction bits (0 for a plain integer convert).
  void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, rn, fbits);
  }
  // Unsigned divide: rd = rn / rm.
  void Udiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    udiv(rd, rn, rm);
  }
  // Unsigned multiply-add long: rd = ra + (rn * rm), widening 32x32 -> 64.
  void Umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umaddl(rd, rn, rm, ra);
  }
  // Unsigned multiply long: widening 32x32 -> 64 multiply.
  void Umull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    umull(rd, rn, rm);
  }
  // Unsigned multiply high: the upper 64 bits of the 128-bit product.
  void Umulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    umulh(xd, xn, xm);
  }
  // Unsigned multiply-subtract long: rd = ra - (rn * rm).
  void Umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umsubl(rd, rn, rm, ra);
  }
2080  void Unreachable() {
2081    VIXL_ASSERT(allow_macro_instructions_);
2082    SingleEmissionCheckScope guard(this);
2083    if (generate_simulator_code_) {
2084      hlt(kUnreachableOpcode);
2085    } else {
2086      // Branch to 0 to generate a segfault.
2087      // lr - kInstructionSize is the address of the offending instruction.
2088      blr(xzr);
2089    }
2090  }
  // Zero-extend the low byte of rn into rd.
  void Uxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtb(rd, rn);
  }
  // Zero-extend the low halfword of rn into rd.
  void Uxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxth(rd, rn);
  }
  // Zero-extend the low word of rn into rd.
  void Uxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtw(rd, rn);
  }
2112
// NEON 3 vector register instructions.
// Each V(asm, Masm) entry below is expanded by DEFINE_MACRO_ASM_FUNC into a
// macro-assembler wrapper of the form:
//   void Masm(vd, vn, vm) {
//     VIXL_ASSERT(allow_macro_instructions_);
//     SingleEmissionCheckScope guard(this);
//     asm(vd, vn, vm);
//   }
// (No comments appear between the list entries because they are macro
// continuation lines.)
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add)                    \
  V(addhn, Addhn)                \
  V(addhn2, Addhn2)              \
  V(addp, Addp)                  \
  V(and_, And)                   \
  V(bic, Bic)                    \
  V(bif, Bif)                    \
  V(bit, Bit)                    \
  V(bsl, Bsl)                    \
  V(cmeq, Cmeq)                  \
  V(cmge, Cmge)                  \
  V(cmgt, Cmgt)                  \
  V(cmhi, Cmhi)                  \
  V(cmhs, Cmhs)                  \
  V(cmtst, Cmtst)                \
  V(eor, Eor)                    \
  V(fabd, Fabd)                  \
  V(facge, Facge)                \
  V(facgt, Facgt)                \
  V(faddp, Faddp)                \
  V(fcmeq, Fcmeq)                \
  V(fcmge, Fcmge)                \
  V(fcmgt, Fcmgt)                \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxp, Fmaxp)                \
  V(fminnmp, Fminnmp)            \
  V(fminp, Fminp)                \
  V(fmla, Fmla)                  \
  V(fmls, Fmls)                  \
  V(fmulx, Fmulx)                \
  V(frecps, Frecps)              \
  V(frsqrts, Frsqrts)            \
  V(mla, Mla)                    \
  V(mls, Mls)                    \
  V(mul, Mul)                    \
  V(orn, Orn)                    \
  V(orr, Orr)                    \
  V(pmul, Pmul)                  \
  V(pmull, Pmull)                \
  V(pmull2, Pmull2)              \
  V(raddhn, Raddhn)              \
  V(raddhn2, Raddhn2)            \
  V(rsubhn, Rsubhn)              \
  V(rsubhn2, Rsubhn2)            \
  V(saba, Saba)                  \
  V(sabal, Sabal)                \
  V(sabal2, Sabal2)              \
  V(sabd, Sabd)                  \
  V(sabdl, Sabdl)                \
  V(sabdl2, Sabdl2)              \
  V(saddl, Saddl)                \
  V(saddl2, Saddl2)              \
  V(saddw, Saddw)                \
  V(saddw2, Saddw2)              \
  V(shadd, Shadd)                \
  V(shsub, Shsub)                \
  V(smax, Smax)                  \
  V(smaxp, Smaxp)                \
  V(smin, Smin)                  \
  V(sminp, Sminp)                \
  V(smlal, Smlal)                \
  V(smlal2, Smlal2)              \
  V(smlsl, Smlsl)                \
  V(smlsl2, Smlsl2)              \
  V(smull, Smull)                \
  V(smull2, Smull2)              \
  V(sqadd, Sqadd)                \
  V(sqdmlal, Sqdmlal)            \
  V(sqdmlal2, Sqdmlal2)          \
  V(sqdmlsl, Sqdmlsl)            \
  V(sqdmlsl2, Sqdmlsl2)          \
  V(sqdmulh, Sqdmulh)            \
  V(sqdmull, Sqdmull)            \
  V(sqdmull2, Sqdmull2)          \
  V(sqrdmulh, Sqrdmulh)          \
  V(sqrshl, Sqrshl)              \
  V(sqshl, Sqshl)                \
  V(sqsub, Sqsub)                \
  V(srhadd, Srhadd)              \
  V(srshl, Srshl)                \
  V(sshl, Sshl)                  \
  V(ssubl, Ssubl)                \
  V(ssubl2, Ssubl2)              \
  V(ssubw, Ssubw)                \
  V(ssubw2, Ssubw2)              \
  V(sub, Sub)                    \
  V(subhn, Subhn)                \
  V(subhn2, Subhn2)              \
  V(trn1, Trn1)                  \
  V(trn2, Trn2)                  \
  V(uaba, Uaba)                  \
  V(uabal, Uabal)                \
  V(uabal2, Uabal2)              \
  V(uabd, Uabd)                  \
  V(uabdl, Uabdl)                \
  V(uabdl2, Uabdl2)              \
  V(uaddl, Uaddl)                \
  V(uaddl2, Uaddl2)              \
  V(uaddw, Uaddw)                \
  V(uaddw2, Uaddw2)              \
  V(uhadd, Uhadd)                \
  V(uhsub, Uhsub)                \
  V(umax, Umax)                  \
  V(umaxp, Umaxp)                \
  V(umin, Umin)                  \
  V(uminp, Uminp)                \
  V(umlal, Umlal)                \
  V(umlal2, Umlal2)              \
  V(umlsl, Umlsl)                \
  V(umlsl2, Umlsl2)              \
  V(umull, Umull)                \
  V(umull2, Umull2)              \
  V(uqadd, Uqadd)                \
  V(uqrshl, Uqrshl)              \
  V(uqshl, Uqshl)                \
  V(uqsub, Uqsub)                \
  V(urhadd, Urhadd)              \
  V(urshl, Urshl)                \
  V(ushl, Ushl)                  \
  V(usubl, Usubl)                \
  V(usubl2, Usubl2)              \
  V(usubw, Usubw)                \
  V(usubw2, Usubw2)              \
  V(uzp1, Uzp1)                  \
  V(uzp2, Uzp2)                  \
  V(zip1, Zip1)                  \
  V(zip2, Zip2)

// Generator macro for the three-operand (vd, vn, vm) vector wrappers.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    VIXL_ASSERT(allow_macro_instructions_);                                  \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(vd, vn, vm);                                                         \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
2251
2252// NEON 2 vector register instructions.
2253#define NEON_2VREG_MACRO_LIST(V) \
2254  V(abs, Abs)                    \
2255  V(addp, Addp)                  \
2256  V(addv, Addv)                  \
2257  V(cls, Cls)                    \
2258  V(clz, Clz)                    \
2259  V(cnt, Cnt)                    \
2260  V(fabs, Fabs)                  \
2261  V(faddp, Faddp)                \
2262  V(fcvtas, Fcvtas)              \
2263  V(fcvtau, Fcvtau)              \
2264  V(fcvtms, Fcvtms)              \
2265  V(fcvtmu, Fcvtmu)              \
2266  V(fcvtns, Fcvtns)              \
2267  V(fcvtnu, Fcvtnu)              \
2268  V(fcvtps, Fcvtps)              \
2269  V(fcvtpu, Fcvtpu)              \
2270  V(fmaxnmp, Fmaxnmp)            \
2271  V(fmaxnmv, Fmaxnmv)            \
2272  V(fmaxp, Fmaxp)                \
2273  V(fmaxv, Fmaxv)                \
2274  V(fminnmp, Fminnmp)            \
2275  V(fminnmv, Fminnmv)            \
2276  V(fminp, Fminp)                \
2277  V(fminv, Fminv)                \
2278  V(fneg, Fneg)                  \
2279  V(frecpe, Frecpe)              \
2280  V(frecpx, Frecpx)              \
2281  V(frinta, Frinta)              \
2282  V(frinti, Frinti)              \
2283  V(frintm, Frintm)              \
2284  V(frintn, Frintn)              \
2285  V(frintp, Frintp)              \
2286  V(frintx, Frintx)              \
2287  V(frintz, Frintz)              \
2288  V(frsqrte, Frsqrte)            \
2289  V(fsqrt, Fsqrt)                \
2290  V(mov, Mov)                    \
2291  V(mvn, Mvn)                    \
2292  V(neg, Neg)                    \
2293  V(not_, Not)                   \
2294  V(rbit, Rbit)                  \
2295  V(rev16, Rev16)                \
2296  V(rev32, Rev32)                \
2297  V(rev64, Rev64)                \
2298  V(sadalp, Sadalp)              \
2299  V(saddlp, Saddlp)              \
2300  V(saddlv, Saddlv)              \
2301  V(smaxv, Smaxv)                \
2302  V(sminv, Sminv)                \
2303  V(sqabs, Sqabs)                \
2304  V(sqneg, Sqneg)                \
2305  V(sqxtn, Sqxtn)                \
2306  V(sqxtn2, Sqxtn2)              \
2307  V(sqxtun, Sqxtun)              \
2308  V(sqxtun2, Sqxtun2)            \
2309  V(suqadd, Suqadd)              \
2310  V(sxtl, Sxtl)                  \
2311  V(sxtl2, Sxtl2)                \
2312  V(uadalp, Uadalp)              \
2313  V(uaddlp, Uaddlp)              \
2314  V(uaddlv, Uaddlv)              \
2315  V(umaxv, Umaxv)                \
2316  V(uminv, Uminv)                \
2317  V(uqxtn, Uqxtn)                \
2318  V(uqxtn2, Uqxtn2)              \
2319  V(urecpe, Urecpe)              \
2320  V(ursqrte, Ursqrte)            \
2321  V(usqadd, Usqadd)              \
2322  V(uxtl, Uxtl)                  \
2323  V(uxtl2, Uxtl2)                \
2324  V(xtn, Xtn)                    \
2325  V(xtn2, Xtn2)
2326
2327#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
2328  void MASM(const VRegister& vd, const VRegister& vn) { \
2329    VIXL_ASSERT(allow_macro_instructions_);             \
2330    SingleEmissionCheckScope guard(this);               \
2331    ASM(vd, vn);                                        \
2332  }
2333  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2334#undef DEFINE_MACRO_ASM_FUNC
2335
// NEON 2 vector register with immediate instructions.
// Expanded into (vd, vn, double imm) wrappers — FP compares against an
// immediate — with the standard assert/guard/emit pattern.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq)                      \
  V(fcmge, Fcmge)                      \
  V(fcmgt, Fcmgt)                      \
  V(fcmle, Fcmle)                      \
  V(fcmlt, Fcmlt)

// Generator macro for the FP compare-with-immediate wrappers.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
    VIXL_ASSERT(allow_macro_instructions_);                         \
    SingleEmissionCheckScope guard(this);                           \
    ASM(vd, vn, imm);                                               \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
2352
// NEON by element instructions.
// Expanded into (vd, vn, vm, vm_index) wrappers where the second source
// operand is a single indexed element of vm.
#define NEON_BYELEMENT_MACRO_LIST(V) \
  V(fmul, Fmul)                      \
  V(fmla, Fmla)                      \
  V(fmls, Fmls)                      \
  V(fmulx, Fmulx)                    \
  V(mul, Mul)                        \
  V(mla, Mla)                        \
  V(mls, Mls)                        \
  V(sqdmulh, Sqdmulh)                \
  V(sqrdmulh, Sqrdmulh)              \
  V(sqdmull, Sqdmull)                \
  V(sqdmull2, Sqdmull2)              \
  V(sqdmlal, Sqdmlal)                \
  V(sqdmlal2, Sqdmlal2)              \
  V(sqdmlsl, Sqdmlsl)                \
  V(sqdmlsl2, Sqdmlsl2)              \
  V(smull, Smull)                    \
  V(smull2, Smull2)                  \
  V(smlal, Smlal)                    \
  V(smlal2, Smlal2)                  \
  V(smlsl, Smlsl)                    \
  V(smlsl2, Smlsl2)                  \
  V(umull, Umull)                    \
  V(umull2, Umull2)                  \
  V(umlal, Umlal)                    \
  V(umlal2, Umlal2)                  \
  V(umlsl, Umlsl)                    \
  V(umlsl2, Umlsl2)

// Generator macro for the by-element wrappers.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
  void MASM(const VRegister& vd,            \
            const VRegister& vn,            \
            const VRegister& vm,            \
            int vm_index) {                 \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    ASM(vd, vn, vm, vm_index);              \
  }
  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
2394
// NEON 2 vector register shift-by-immediate instructions.
// Expanded into (vd, vn, int shift) wrappers with the standard
// assert/guard/emit pattern.
#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn)                      \
  V(rshrn2, Rshrn2)                    \
  V(shl, Shl)                          \
  V(shll, Shll)                        \
  V(shll2, Shll2)                      \
  V(shrn, Shrn)                        \
  V(shrn2, Shrn2)                      \
  V(sli, Sli)                          \
  V(sqrshrn, Sqrshrn)                  \
  V(sqrshrn2, Sqrshrn2)                \
  V(sqrshrun, Sqrshrun)                \
  V(sqrshrun2, Sqrshrun2)              \
  V(sqshl, Sqshl)                      \
  V(sqshlu, Sqshlu)                    \
  V(sqshrn, Sqshrn)                    \
  V(sqshrn2, Sqshrn2)                  \
  V(sqshrun, Sqshrun)                  \
  V(sqshrun2, Sqshrun2)                \
  V(sri, Sri)                          \
  V(srshr, Srshr)                      \
  V(srsra, Srsra)                      \
  V(sshll, Sshll)                      \
  V(sshll2, Sshll2)                    \
  V(sshr, Sshr)                        \
  V(ssra, Ssra)                        \
  V(uqrshrn, Uqrshrn)                  \
  V(uqrshrn2, Uqrshrn2)                \
  V(uqshl, Uqshl)                      \
  V(uqshrn, Uqshrn)                    \
  V(uqshrn2, Uqshrn2)                  \
  V(urshr, Urshr)                      \
  V(ursra, Ursra)                      \
  V(ushll, Ushll)                      \
  V(ushll2, Ushll2)                    \
  V(ushr, Ushr)                        \
  V(usra, Usra)

// Generator macro for the shift-by-immediate wrappers.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                           \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    VIXL_ASSERT(allow_macro_instructions_);                        \
    SingleEmissionCheckScope guard(this);                          \
    ASM(vd, vn, shift);                                            \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
2441
  // Vector bit clear with immediate: clear in vd the bits set in
  // (imm8 << left_shift).
  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bic(vd, imm8, left_shift);
  }
  // Vector compare equal against an immediate.
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmeq(vd, vn, imm);
  }
  // Vector compare signed greater-than-or-equal against an immediate.
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmge(vd, vn, imm);
  }
  // Vector compare signed greater-than against an immediate.
  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmgt(vd, vn, imm);
  }
  // Vector compare signed less-than-or-equal against an immediate.
  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmle(vd, vn, imm);
  }
  // Vector compare signed less-than against an immediate.
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmlt(vd, vn, imm);
  }
  // Duplicate the vector element vn[index] into every lane of vd.
  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dup(vd, vn, index);
  }
  // Duplicate the general-purpose register rn into every lane of vd.
  void Dup(const VRegister& vd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dup(vd, rn);
  }
  // Wrapper for the vector extract (`ext`) instruction; emits exactly one
  // instruction.
  void Ext(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vm,
           int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ext(vd, vn, vm, index);
  }
  // Insert wrappers: element-to-element and register-to-element forms.
  // Each emits exactly one `ins` instruction.
  void Ins(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, rn);
  }
  // NEON structure load wrappers (`ld1` / `ld1r`): one to four registers,
  // a single lane, or a load-and-replicate form. Each emits exactly one
  // instruction.
  void Ld1(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, vt4, src);
  }
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1r(vt, src);
  }
  // Two-register structure load wrappers (`ld2` / `ld2r`); each emits
  // exactly one instruction.
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2r(vt, vt2, src);
  }
  // Three-register structure load wrappers (`ld3` / `ld3r`); each emits
  // exactly one instruction.
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3r(vt, vt2, vt3, src);
  }
  // Four-register structure load wrappers (`ld4` / `ld4r`); each emits
  // exactly one instruction.
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const VRegister& vt4,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4r(vt, vt2, vt3, vt4, src);
  }
  // Element/register move wrappers; each forwards its arguments unchanged
  // and emits exactly one `mov` instruction.
  void Mov(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(rd, vn, vn_index);
  }
2634  void Movi(const VRegister& vd,
2635            uint64_t imm,
2636            Shift shift = LSL,
2637            int shift_amount = 0);
2638  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
  // Wrapper for the vector move-inverted-immediate (`mvni`) instruction;
  // emits exactly one instruction.
  void Mvni(const VRegister& vd,
            const int imm8,
            Shift shift = LSL,
            const int shift_amount = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mvni(vd, imm8, shift, shift_amount);
  }
  // Wrapper for the vector `orr` (immediate) instruction; emits exactly one
  // instruction.
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    orr(vd, imm8, left_shift);
  }
  // Signed/unsigned integer to floating-point conversion wrappers, with an
  // optional fixed-point fraction-bit count. Each emits exactly one
  // instruction.
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    scvtf(vd, vn, fbits);
  }
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, vn, fbits);
  }
  // Floating-point to signed/unsigned integer conversion wrappers
  // (round-toward-zero), with an optional fixed-point fraction-bit count.
  // Each emits exactly one instruction.
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzs(vd, vn, fbits);
  }
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzu(vd, vn, fbits);
  }
  // NEON structure store wrappers (`st1`): one to four registers or a
  // single lane. Each emits exactly one instruction.
  void St1(const VRegister& vt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, lane, dst);
  }
  // Multi-register structure store wrappers (`st2` / `st3` / `st4`); each
  // emits exactly one instruction.
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, dst);
  }
  // Single-lane structure store wrappers (`st2` / `st3` / `st4` with a lane
  // index); each emits exactly one instruction.
  void St2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  // Signed/unsigned move of a vector element to a general-purpose register;
  // each emits exactly one instruction.
  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    smov(rd, vn, vn_index);
  }
  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    umov(rd, vn, vn_index);
  }
  // CRC-32 and CRC-32C checksum wrappers (byte / halfword / word /
  // doubleword variants); each emits exactly one instruction.
  void Crc32b(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32b(rd, rn, rm);
  }
  void Crc32h(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32h(rd, rn, rm);
  }
  void Crc32w(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32w(rd, rn, rm);
  }
  void Crc32x(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32x(rd, rn, rm);
  }
  void Crc32cb(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cb(rd, rn, rm);
  }
  void Crc32ch(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32ch(rd, rn, rm);
  }
  void Crc32cw(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cw(rd, rn, rm);
  }
  void Crc32cx(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cx(rd, rn, rm);
  }
2803
  // Create a literal registered with the literal pool. Ownership of the
  // allocation is transferred to the pool: the kDeletedOnPoolDestruction
  // policy means the literal is deleted when the pool is destroyed, so the
  // caller must not delete the returned pointer.
  template <typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    return new Literal<T>(value,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }

  // As above, but for a 128-bit literal built from two 64-bit halves.
  template <typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T high64, T low64) {
    return new Literal<T>(high64,
                          low64,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }
2818
2819  // Push the system stack pointer (sp) down to allow the same to be done to
2820  // the current stack pointer (according to StackPointer()). This must be
2821  // called _before_ accessing the memory.
2822  //
2823  // This is necessary when pushing or otherwise adding things to the stack, to
2824  // satisfy the AAPCS64 constraint that the memory below the system stack
2825  // pointer is not accessed.
2826  //
2827  // This method asserts that StackPointer() is not sp, since the call does
2828  // not make sense in that context.
2829  //
2830  // TODO: This method can only accept values of 'space' that can be encoded in
2831  // one instruction. Refer to the implementation for details.
2832  void BumpSystemStackPointer(const Operand& space);
2833
2834#ifdef VIXL_DEBUG
  // Debug-build (VIXL_DEBUG) control over whether macro instructions may be
  // emitted: the macro-assembler wrappers above assert on
  // allow_macro_instructions_ before emitting.
  void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
    allow_macro_instructions_ = value;
  }

  bool AllowMacroInstructions() const VIXL_OVERRIDE {
    return allow_macro_instructions_;
  }
2842#endif
2843
  // Select whether code is generated for the simulator or for native
  // execution (see generate_simulator_code_).
  void SetGenerateSimulatorCode(bool value) {
    generate_simulator_code_ = value;
  }

  bool GenerateSimulatorCode() const { return generate_simulator_code_; }
2849
  // Forward blocking, releasing and blocked-state queries to the literal
  // and veneer pools.
  void BlockLiteralPool() { literal_pool_.Block(); }
  void ReleaseLiteralPool() { literal_pool_.Release(); }
  bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
  void BlockVeneerPool() { veneer_pool_.Block(); }
  void ReleaseVeneerPool() { veneer_pool_.Release(); }
  bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }
2856
  // Block both pools at once (interface override; see BlockPoolsScope).
  void BlockPools() VIXL_OVERRIDE {
    BlockLiteralPool();
    BlockVeneerPool();
  }
2861
  // Release both pools at once (interface override; see BlockPoolsScope).
  void ReleasePools() VIXL_OVERRIDE {
    ReleaseLiteralPool();
    ReleaseVeneerPool();
  }
2866
  // Size of the literal pool, as reported by the pool itself.
  size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
  // Deprecated alias for GetLiteralPoolSize().
  VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) {
    return GetLiteralPoolSize();
  }
2871
  // Upper bound on the literal pool size, as reported by the pool itself.
  size_t GetLiteralPoolMaxSize() const { return literal_pool_.GetMaxSize(); }
  // Deprecated alias for GetLiteralPoolMaxSize().
  VIXL_DEPRECATED("GetLiteralPoolMaxSize", size_t LiteralPoolMaxSize() const) {
    return GetLiteralPoolMaxSize();
  }
2876
  // Upper bound on the veneer pool size, as reported by the pool itself.
  size_t GetVeneerPoolMaxSize() const { return veneer_pool_.GetMaxSize(); }
  // Deprecated alias for GetVeneerPoolMaxSize().
  VIXL_DEPRECATED("GetVeneerPoolMaxSize", size_t VeneerPoolMaxSize() const) {
    return GetVeneerPoolMaxSize();
  }
2881
  // The number of unresolved branches that may require a veneer.
  int GetNumberOfPotentialVeneers() const {
    return veneer_pool_.GetNumberOfPotentialVeneers();
  }
  // Deprecated alias for GetNumberOfPotentialVeneers().
  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
                  int NumberOfPotentialVeneers() const) {
    return GetNumberOfPotentialVeneers();
  }
2890
2891  ptrdiff_t GetNextCheckPoint() const {
2892    ptrdiff_t next_checkpoint_for_pools =
2893        std::min(literal_pool_.GetCheckpoint(), veneer_pool_.GetCheckpoint());
2894    return std::min(next_checkpoint_for_pools,
2895                    static_cast<ptrdiff_t>(GetBuffer().GetCapacity()));
2896  }
  // Deprecated alias for GetNextCheckPoint().
  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
    return GetNextCheckPoint();
  }
2900
  // Emit the literal pool (if non-empty) with the given option, then
  // refresh the cached emission checkpoints.
  void EmitLiteralPool(LiteralPool::EmitOption option) {
    if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option);

    checkpoint_ = GetNextCheckPoint();
    recommended_checkpoint_ = literal_pool_.GetNextRecommendedCheckpoint();
  }
2907
2908  void CheckEmitFor(size_t amount);
2909  void EnsureEmitFor(size_t amount) {
2910    ptrdiff_t offset = amount;
2911    ptrdiff_t max_pools_size =
2912        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
2913    ptrdiff_t cursor = GetCursorOffset();
2914    if ((cursor >= recommended_checkpoint_) ||
2915        ((cursor + offset + max_pools_size) >= checkpoint_)) {
2916      CheckEmitFor(amount);
2917    }
2918  }
2919
2920  void CheckEmitPoolsFor(size_t amount);
2921  void EnsureEmitPoolsFor(size_t amount) VIXL_OVERRIDE {
2922    ptrdiff_t offset = amount;
2923    ptrdiff_t max_pools_size =
2924        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
2925    ptrdiff_t cursor = GetCursorOffset();
2926    if ((cursor >= recommended_checkpoint_) ||
2927        ((cursor + offset + max_pools_size) >= checkpoint_)) {
2928      CheckEmitPoolsFor(amount);
2929    }
2930  }
2931
  // Set the current stack pointer, but don't generate any code.
  // Asserts that the register does not alias any scratch register.
  void SetStackPointer(const Register& stack_pointer) {
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  const Register& StackPointer() const { return sp_; }
2940
  // List of general-purpose registers available as scratch registers
  // (see UseScratchRegisterScope).
  CPURegList* GetScratchRegisterList() { return &tmp_list_; }
  // Deprecated alias for GetScratchRegisterList().
  VIXL_DEPRECATED("GetScratchRegisterList", CPURegList* TmpList()) {
    return GetScratchRegisterList();
  }
2945
  // List of FP/SIMD registers available as scratch registers
  // (see UseScratchRegisterScope).
  CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; }
  // Deprecated alias for GetScratchFPRegisterList().
  VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) {
    return GetScratchFPRegisterList();
  }
2950
2951  // Like printf, but print at run-time from generated code.
2952  //
2953  // The caller must ensure that arguments for floating-point placeholders
2954  // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that
2955  // arguments for integer placeholders are Registers.
2956  //
2957  // At the moment it is only possible to print the value of sp if it is the
2958  // current stack pointer. Otherwise, the MacroAssembler will automatically
2959  // update sp on every push (using BumpSystemStackPointer), so determining its
2960  // value is difficult.
2961  //
2962  // Format placeholders that refer to more than one argument, or to a specific
2963  // argument, are not supported. This includes formats like "%1$d" or "%.*d".
2964  //
2965  // This function automatically preserves caller-saved registers so that
2966  // calling code can use Printf at any point without having to worry about
2967  // corruption. The preservation mechanism generates a lot of code. If this is
2968  // a problem, preserve the important registers manually and then call
2969  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
2970  // implicitly preserved.
2971  void Printf(const char* format,
2972              CPURegister arg0 = NoCPUReg,
2973              CPURegister arg1 = NoCPUReg,
2974              CPURegister arg2 = NoCPUReg,
2975              CPURegister arg3 = NoCPUReg);
2976
2977  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
2978  //
2979  // The return code from the system printf call will be returned in x0.
2980  void PrintfNoPreserve(const char* format,
2981                        const CPURegister& arg0 = NoCPUReg,
2982                        const CPURegister& arg1 = NoCPUReg,
2983                        const CPURegister& arg2 = NoCPUReg,
2984                        const CPURegister& arg3 = NoCPUReg);
2985
2986  // Trace control when running the debug simulator.
2987  //
2988  // For example:
2989  //
2990  // __ Trace(LOG_REGS, TRACE_ENABLE);
2991  // Will add registers to the trace if it wasn't already the case.
2992  //
2993  // __ Trace(LOG_DISASM, TRACE_DISABLE);
2994  // Will stop logging disassembly. It has no effect if the disassembly wasn't
2995  // already being logged.
2996  void Trace(TraceParameters parameters, TraceCommand command);
2997
2998  // Log the requested data independently of what is being traced.
2999  //
3000  // For example:
3001  //
3002  // __ Log(LOG_FLAGS)
3003  // Will output the flags.
3004  void Log(TraceParameters parameters);
3005
3006  // Enable or disable instrumentation when an Instrument visitor is attached to
3007  // the simulator.
3008  void EnableInstrumentation();
3009  void DisableInstrumentation();
3010
3011  // Add a marker to the instrumentation data produced by an Instrument visitor.
3012  // The name is a two character string that will be attached to the marker in
3013  // the output data.
3014  void AnnotateInstrumentation(const char* marker_name);
3015
  // Direct access to this MacroAssembler's literal pool.
  LiteralPool* GetLiteralPool() { return &literal_pool_; }
3017
3018// Support for simulated runtime calls.
3019
3020// `CallRuntime` requires variadic templating, that is only available from
3021// C++11.
3022#if __cplusplus >= 201103L
3023#define VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3024#endif  // #if __cplusplus >= 201103L
3025
3026#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3027  template <typename R, typename... P>
3028  void CallRuntime(R (*function)(P...));
3029#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3030
3031 protected:
3032  // Helper used to query information about code generation and to generate
3033  // code for `csel`.
3034  // Here and for the related helpers below:
3035  // - Code is generated when `masm` is not `NULL`.
3036  // - On return and when set, `should_synthesise_left` and
3037  //   `should_synthesise_right` will indicate whether `left` and `right`
3038  //   should be synthesized in a temporary register.
3039  static void CselHelper(MacroAssembler* masm,
3040                         const Register& rd,
3041                         Operand left,
3042                         Operand right,
3043                         Condition cond,
3044                         bool* should_synthesise_left = NULL,
3045                         bool* should_synthesise_right = NULL);
3046
3047  // The helper returns `true` if it can handle the specified arguments.
3048  // Also see comments for `CselHelper()`.
3049  static bool CselSubHelperTwoImmediates(MacroAssembler* masm,
3050                                         const Register& rd,
3051                                         int64_t left,
3052                                         int64_t right,
3053                                         Condition cond,
3054                                         bool* should_synthesise_left,
3055                                         bool* should_synthesise_right);
3056
3057  // See comments for `CselHelper()`.
3058  static bool CselSubHelperTwoOrderedImmediates(MacroAssembler* masm,
3059                                                const Register& rd,
3060                                                int64_t left,
3061                                                int64_t right,
3062                                                Condition cond);
3063
3064  // See comments for `CselHelper()`.
3065  static void CselSubHelperRightSmallImmediate(MacroAssembler* masm,
3066                                               UseScratchRegisterScope* temps,
3067                                               const Register& rd,
3068                                               const Operand& left,
3069                                               const Operand& right,
3070                                               Condition cond,
3071                                               bool* should_synthesise_left);
3072
3073 private:
3074  // The actual Push and Pop implementations. These don't generate any code
3075  // other than that required for the push or pop. This allows
3076  // (Push|Pop)CPURegList to bundle together setup code for a large block of
3077  // registers.
3078  //
3079  // Note that size is per register, and is specified in bytes.
3080  void PushHelper(int count,
3081                  int size,
3082                  const CPURegister& src0,
3083                  const CPURegister& src1,
3084                  const CPURegister& src2,
3085                  const CPURegister& src3);
3086  void PopHelper(int count,
3087                 int size,
3088                 const CPURegister& dst0,
3089                 const CPURegister& dst1,
3090                 const CPURegister& dst2,
3091                 const CPURegister& dst3);
3092
3093  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
3094  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
3095  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
3096
3097  // Perform necessary maintenance operations before a push or pop.
3098  //
3099  // Note that size is per register, and is specified in bytes.
3100  void PrepareForPush(int count, int size);
3101  void PrepareForPop(int count, int size);
3102
3103  // The actual implementation of load and store operations for CPURegList.
3104  enum LoadStoreCPURegListAction { kLoad, kStore };
3105  void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
3106                                 CPURegList registers,
3107                                 const MemOperand& mem);
3108  // Returns a MemOperand suitable for loading or storing a CPURegList at `dst`.
3109  // This helper may allocate registers from `scratch_scope` and generate code
3110  // to compute an intermediate address. The resulting MemOperand is only valid
3111  // as long as `scratch_scope` remains valid.
3112  MemOperand BaseMemOperandForLoadStoreCPURegList(
3113      const CPURegList& registers,
3114      const MemOperand& mem,
3115      UseScratchRegisterScope* scratch_scope);
3116
3117  bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
3118    return !Instruction::IsValidImmPCOffset(branch_type,
3119                                            label->GetLocation() -
3120                                                GetCursorOffset());
3121  }
3122
3123  // Tell whether any of the macro instruction can be used. When false the
3124  // MacroAssembler will assert if a method which can emit a variable number
3125  // of instructions is called.
3126  bool allow_macro_instructions_;
3127
3128  // Indicates whether we should generate simulator or native code.
3129  bool generate_simulator_code_;
3130
3131  // The register to use as a stack pointer for stack operations.
3132  Register sp_;
3133
3134  // Scratch registers available for use by the MacroAssembler.
3135  CPURegList tmp_list_;
3136  CPURegList fptmp_list_;
3137
3138  LiteralPool literal_pool_;
3139  VeneerPool veneer_pool_;
3140
3141  ptrdiff_t checkpoint_;
3142  ptrdiff_t recommended_checkpoint_;
3143
3144  friend class Pool;
3145  friend class LiteralPool;
3146};
3147
3148
// From the veneer pool's point of view, the "other" pool is the literal
// pool, so its maximum size is the relevant bound.
inline size_t VeneerPool::GetOtherPoolsMaxSize() const {
  return masm_->GetLiteralPoolMaxSize();
}
3152
3153
// From the literal pool's point of view, the "other" pool is the veneer
// pool, so its maximum size is the relevant bound.
inline size_t LiteralPool::GetOtherPoolsMaxSize() const {
  return masm_->GetVeneerPoolMaxSize();
}
3157
3158
3159inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) {
3160  masm_->recommended_checkpoint_ =
3161      std::min(masm_->recommended_checkpoint_, offset);
3162  recommended_checkpoint_ = offset;
3163}
3164
// Deprecated: use ExactAssemblyScope directly. This shim keeps the old
// InstructionAccurateScope name, converting an instruction count into the
// byte size that ExactAssemblyScope expects.
class InstructionAccurateScope : public ExactAssemblyScope {
 public:
  VIXL_DEPRECATED("ExactAssemblyScope",
                  InstructionAccurateScope(MacroAssembler* masm,
                                           int64_t count,
                                           SizePolicy size_policy = kExactSize))
      : ExactAssemblyScope(masm, count * kInstructionSize, size_policy) {}
};
3173
// RAII helper: blocks literal pool emission on construction and releases
// it again on destruction.
class BlockLiteralPoolScope {
 public:
  explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockLiteralPool();
  }

  ~BlockLiteralPoolScope() { masm_->ReleaseLiteralPool(); }

 private:
  MacroAssembler* masm_;
};
3185
3186
// RAII helper: blocks veneer pool emission on construction and releases
// it again on destruction.
class BlockVeneerPoolScope {
 public:
  explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockVeneerPool();
  }

  ~BlockVeneerPoolScope() { masm_->ReleaseVeneerPool(); }

 private:
  MacroAssembler* masm_;
};
3198
3199
// RAII helper: blocks both the literal and veneer pools on construction
// and releases them again on destruction.
class BlockPoolsScope {
 public:
  explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockPools();
  }

  ~BlockPoolsScope() { masm_->ReleasePools(); }

 private:
  MacroAssembler* masm_;
};
3211
3212
3213// This scope utility allows scratch registers to be managed safely. The
3214// MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) is
3215// used as a pool of scratch registers. These registers can be allocated on
3216// demand, and will be returned at the end of the scope.
3217//
3218// When the scope ends, the MacroAssembler's lists will be restored to their
3219// original state, even if the lists were modified by some other means.
3220class UseScratchRegisterScope {
3221 public:
3222  // This constructor implicitly calls `Open` to initialise the scope (`masm`
3223  // must not be `NULL`), so it is ready to use immediately after it has been
3224  // constructed.
3225  explicit UseScratchRegisterScope(MacroAssembler* masm);
3226  // This constructor does not implicitly initialise the scope. Instead, the
3227  // user is required to explicitly call the `Open` function before using the
3228  // scope.
3229  UseScratchRegisterScope();
3230  // This function performs the actual initialisation work.
3231  void Open(MacroAssembler* masm);
3232
3233  // The destructor always implicitly calls the `Close` function.
3234  ~UseScratchRegisterScope();
3235  // This function performs the cleaning-up work. It must succeed even if the
3236  // scope has not been opened. It is safe to call multiple times.
3237  void Close();
3238
3239
3240  bool IsAvailable(const CPURegister& reg) const;
3241
3242
  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  // W/X acquire a general-purpose register (32/64-bit view); S/D acquire
  // an FP/SIMD register (32/64-bit view).
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
3249
3250
  Register AcquireRegisterOfSize(int size_in_bits);
  // Acquire a scratch register with the same width as `reg`
  // (general-purpose overload).
  Register AcquireSameSizeAs(const Register& reg) {
    return AcquireRegisterOfSize(reg.GetSizeInBits());
  }
  VRegister AcquireVRegisterOfSize(int size_in_bits);
  // Acquire a scratch register with the same width as `reg`
  // (FP/SIMD overload).
  VRegister AcquireSameSizeAs(const VRegister& reg) {
    return AcquireVRegisterOfSize(reg.GetSizeInBits());
  }
3259  CPURegister AcquireCPURegisterOfSize(int size_in_bits) {
3260    return available_->IsEmpty()
3261               ? CPURegister(AcquireVRegisterOfSize(size_in_bits))
3262               : CPURegister(AcquireRegisterOfSize(size_in_bits));
3263  }
3264
3265
  // Explicitly release an acquired (or excluded) register, putting it back in
  // the appropriate temps list.
  void Release(const CPURegister& reg);


  // Make the specified registers available as scratch registers for the
  // duration of this scope. Unused trailing arguments default to NoReg /
  // NoVReg and need not be supplied.
  void Include(const CPURegList& list);
  void Include(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Include(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);


  // Make sure that the specified registers are not available in this scope.
  // This can be used to prevent helper functions from using sensitive
  // registers, for example. Unused trailing arguments default to the
  // relevant 'no register' sentinel.
  void Exclude(const CPURegList& list);
  void Exclude(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Exclude(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);
  void Exclude(const CPURegister& reg1,
               const CPURegister& reg2 = NoCPUReg,
               const CPURegister& reg3 = NoCPUReg,
               const CPURegister& reg4 = NoCPUReg);


  // Prevent any scratch registers from being used in this scope.
  void ExcludeAll();
3304
3305 private:
3306  static CPURegister AcquireNextAvailable(CPURegList* available);
3307
3308  static void ReleaseByCode(CPURegList* available, int code);
3309
3310  static void ReleaseByRegList(CPURegList* available, RegList regs);
3311
3312  static void IncludeByRegList(CPURegList* available, RegList exclude);
3313
3314  static void ExcludeByRegList(CPURegList* available, RegList exclude);
3315
3316  // Available scratch registers.
3317  CPURegList* available_;    // kRegister
3318  CPURegList* availablefp_;  // kVRegister
3319
3320  // The state of the available lists at the start of this scope.
3321  RegList old_available_;    // kRegister
3322  RegList old_availablefp_;  // kVRegister
3323  bool initialised_;
3324
3325  // Disallow copy constructor and operator=.
3326  VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
3327    VIXL_UNREACHABLE();
3328  }
3329  VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
3330    VIXL_UNREACHABLE();
3331  }
3332};
3333
3334// Variadic templating is only available from C++11.
3335#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3336
// `R` stands for 'return type', and `P` for 'parameter types'.
template <typename R, typename... P>
void MacroAssembler::CallRuntime(R (*function)(P...)) {
  if (generate_simulator_code_) {
#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
    // For simulated code, a runtime call is encoded as a `hlt` marker
    // followed by two inline pointer-sized literals: the address of a
    // templated wrapper (which knows how to unpack `P...` and invoke the
    // callee) and the address of the target function itself.
    uintptr_t runtime_call_wrapper_address = reinterpret_cast<uintptr_t>(
        &(Simulator::RuntimeCallStructHelper<R, P...>::Wrapper));
    uintptr_t function_address = reinterpret_cast<uintptr_t>(function);

    // Reserve exactly one instruction plus the two address literals, so the
    // offset asserts below can hold.
    EmissionCheckScope guard(this,
                             kInstructionSize + 2 * kRuntimeCallAddressSize,
                             CodeBufferCheckScope::kExactSize);
    Label start;
    bind(&start);
    {
      ExactAssemblyScope scope(this, kInstructionSize);
      // Marker instruction the simulator recognises as a runtime call.
      hlt(kRuntimeCallOpcode);
    }
    // The literals must land at the fixed offsets the simulator expects.
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallWrapperOffset);
    dc(runtime_call_wrapper_address);
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallFunctionOffset);
    dc(function_address);
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallFunctionOffset + kRuntimeCallAddressSize);
#else
    // This build has no simulated runtime-call support, yet was asked to
    // generate simulator code.
    VIXL_UNREACHABLE();
#endif  // #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
  } else {
    // Native code: materialise the callee's address in a scratch register
    // and branch-with-link to it.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Mov(temp, reinterpret_cast<uint64_t>(function));
    Blr(temp);
  }
}
3373
3374#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3375
3376}  // namespace aarch64
3377
3378// Required InvalSet template specialisations.
3379// TODO: These template specialisations should not live in this file.  Move
3380// VeneerPool out of the aarch64 namespace in order to share its implementation
3381// later.
template <>
inline ptrdiff_t InvalSet<aarch64::VeneerPool::BranchInfo,
                          aarch64::VeneerPool::kNPreallocatedInfos,
                          ptrdiff_t,
                          aarch64::VeneerPool::kInvalidOffset,
                          aarch64::VeneerPool::kReclaimFrom,
                          aarch64::VeneerPool::kReclaimFactor>::
    GetKey(const aarch64::VeneerPool::BranchInfo& branch_info) {
  // Branch-info entries are keyed by their `max_reachable_pc_` member.
  return branch_info.max_reachable_pc_;
}
template <>
inline void InvalSet<aarch64::VeneerPool::BranchInfo,
                     aarch64::VeneerPool::kNPreallocatedInfos,
                     ptrdiff_t,
                     aarch64::VeneerPool::kInvalidOffset,
                     aarch64::VeneerPool::kReclaimFrom,
                     aarch64::VeneerPool::kReclaimFactor>::
    SetKey(aarch64::VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
  // Mirror of `GetKey` above: store the key into `max_reachable_pc_`.
  branch_info->max_reachable_pc_ = key;
}
3402
3403}  // namespace vixl
3404
3405#endif  // VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
3406