1// Copyright 2013, ARM Limited
2// All rights reserved.
3//
4// Redistribution and use in source and binary forms, with or without
5// modification, are permitted provided that the following conditions are met:
6//
7//   * Redistributions of source code must retain the above copyright notice,
8//     this list of conditions and the following disclaimer.
9//   * Redistributions in binary form must reproduce the above copyright notice,
10//     this list of conditions and the following disclaimer in the documentation
11//     and/or other materials provided with the distribution.
12//   * Neither the name of ARM Limited nor the names of its contributors may be
13//     used to endorse or promote products derived from this software without
14//     specific prior written permission.
15//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
28#define VIXL_A64_INSTRUCTIONS_A64_H_
29
30#include "globals-vixl.h"
31#include "utils-vixl.h"
32#include "a64/constants-a64.h"
33
34namespace vixl {
// ISA constants. --------------------------------------------------------------

// Every A64 instruction is a single 32-bit word.
typedef uint32_t Instr;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
// Literal pool entries are word-sized and word-aligned.
const unsigned kLiteralEntrySize = 4;
const unsigned kLiteralEntrySizeLog2 = 2;
// Maximum PC-relative range of a load-literal instruction.
const unsigned kMaxLoadLiteralRange = 1 * MBytes;

// Register widths: W and X are the 32-bit and 64-bit general-purpose views;
// S and D are the 32-bit and 64-bit floating-point views. Each is given in
// bits, bytes, and the base-2 log of both.
const unsigned kWRegSize = 32;
const unsigned kWRegSizeLog2 = 5;
const unsigned kWRegSizeInBytes = kWRegSize / 8;
const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
const unsigned kXRegSize = 64;
const unsigned kXRegSizeLog2 = 6;
const unsigned kXRegSizeInBytes = kXRegSize / 8;
const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
const unsigned kSRegSize = 32;
const unsigned kSRegSizeLog2 = 5;
const unsigned kSRegSizeInBytes = kSRegSize / 8;
const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
const unsigned kDRegSize = 64;
const unsigned kDRegSizeLog2 = 6;
const unsigned kDRegSizeInBytes = kDRegSize / 8;
const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
// Value masks and sign-bit masks for each register width.
const uint64_t kWRegMask = UINT64_C(0xffffffff);
const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSRegMask = UINT64_C(0xffffffff);
const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
const uint64_t kSSignMask = UINT64_C(0x80000000);
const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
const uint64_t kWSignMask = UINT64_C(0x80000000);
const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
const uint64_t kByteMask = UINT64_C(0xff);
const uint64_t kHalfWordMask = UINT64_C(0xffff);
const uint64_t kWordMask = UINT64_C(0xffffffff);
// Extremes of the W- and X-sized integer ranges.
const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
const int64_t kXMinInt = INT64_C(0x8000000000000000);
const int32_t kWMaxInt = INT32_C(0x7fffffff);
const int32_t kWMinInt = INT32_C(0x80000000);
// Special register codes. Code 31 encodes the zero register; the stack
// pointer is given a distinct out-of-range code (63) for internal use.
const unsigned kLinkRegCode = 30;
const unsigned kZeroRegCode = 31;
const unsigned kSPRegInternalCode = 63;
const unsigned kRegCodeMask = 0x1f;

// AArch64 floating-point specifics. These match IEEE-754.
const unsigned kDoubleMantissaBits = 52;
const unsigned kDoubleExponentBits = 11;
const unsigned kFloatMantissaBits = 23;
const unsigned kFloatExponentBits = 8;

// Raw IEEE-754 bit patterns for the single- and double-precision infinities.
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));

// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
static const double kFP64SignallingNaN =
    rawbits_to_double(UINT64_C(0x7ff000007f800001));
static const float kFP32SignallingNaN = rawbits_to_float(0x7f800001);

// A similar value, but as a quiet NaN.
static const double kFP64QuietNaN =
    rawbits_to_double(UINT64_C(0x7ff800007fc00001));
static const float kFP32QuietNaN = rawbits_to_float(0x7fc00001);

// The default NaN values (for FPCR.DN=1).
static const double kFP64DefaultNaN =
    rawbits_to_double(UINT64_C(0x7ff8000000000000));
static const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
110
111
// Element size for load/store instructions, matching the encoded size field:
// an access transfers (1 << value) bytes per element.
enum LSDataSize {
  LSByte        = 0,
  LSHalfword    = 1,
  LSWord        = 2,
  LSDoubleWord  = 3
};

// Map a load/store pair opcode to the size of each transferred element.
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
120
// Classification of branch instructions that take a PC-relative immediate
// offset. See Instruction::BranchType().
enum ImmBranchType {
  UnknownBranchType = 0,
  CondBranchType    = 1,
  UncondBranchType  = 2,
  CompareBranchType = 3,
  TestBranchType    = 4
};
128
// Addressing modes for loads and stores.
enum AddrMode {
  Offset,     // Base register plus offset; the base register is not updated.
  PreIndex,   // Base register is updated before the access.
  PostIndex   // Base register is updated after the access.
};
134
// Floating-point rounding modes.
enum FPRounding {
  // The first four values are encodable directly by FPCR<RMode>.
  FPTieEven = 0x0,            // Round to nearest, ties to even.
  FPPositiveInfinity = 0x1,   // Round towards plus infinity.
  FPNegativeInfinity = 0x2,   // Round towards minus infinity.
  FPZero = 0x3,               // Round towards zero.

  // The final rounding mode is only available when explicitly specified by the
  // instruction (such as with fcvta). It cannot be set in FPCR.
  FPTieAway                   // Round to nearest, ties away from zero.
};
146
// How register code 31 is interpreted: depending on the instruction it names
// either the stack pointer or the zero register.
enum Reg31Mode {
  Reg31IsStackPointer,
  Reg31IsZeroRegister
};
151
152// Instructions. ---------------------------------------------------------------
153
154class Instruction {
155 public:
156  inline Instr InstructionBits() const {
157    return *(reinterpret_cast<const Instr*>(this));
158  }
159
160  inline void SetInstructionBits(Instr new_instr) {
161    *(reinterpret_cast<Instr*>(this)) = new_instr;
162  }
163
164  inline int Bit(int pos) const {
165    return (InstructionBits() >> pos) & 1;
166  }
167
168  inline uint32_t Bits(int msb, int lsb) const {
169    return unsigned_bitextract_32(msb, lsb, InstructionBits());
170  }
171
172  inline int32_t SignedBits(int msb, int lsb) const {
173    int32_t bits = *(reinterpret_cast<const int32_t*>(this));
174    return signed_bitextract_32(msb, lsb, bits);
175  }
176
177  inline Instr Mask(uint32_t mask) const {
178    return InstructionBits() & mask;
179  }
180
181  #define DEFINE_GETTER(Name, HighBit, LowBit, Func)             \
182  inline int64_t Name() const { return Func(HighBit, LowBit); }
183  INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
184  #undef DEFINE_GETTER
185
186  // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
187  // formed from ImmPCRelLo and ImmPCRelHi.
188  int ImmPCRel() const {
189    int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
190    int const width = ImmPCRelLo_width + ImmPCRelHi_width;
191    return signed_bitextract_32(width-1, 0, offset);
192  }
193
194  uint64_t ImmLogical();
195  float ImmFP32();
196  double ImmFP64();
197
198  inline LSDataSize SizeLSPair() const {
199    return CalcLSPairDataSize(
200             static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
201  }
202
203  // Helpers.
204  inline bool IsCondBranchImm() const {
205    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
206  }
207
208  inline bool IsUncondBranchImm() const {
209    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
210  }
211
212  inline bool IsCompareBranch() const {
213    return Mask(CompareBranchFMask) == CompareBranchFixed;
214  }
215
216  inline bool IsTestBranch() const {
217    return Mask(TestBranchFMask) == TestBranchFixed;
218  }
219
220  inline bool IsPCRelAddressing() const {
221    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
222  }
223
224  inline bool IsLogicalImmediate() const {
225    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
226  }
227
228  inline bool IsAddSubImmediate() const {
229    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
230  }
231
232  inline bool IsAddSubExtended() const {
233    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
234  }
235
236  inline bool IsLoadOrStore() const {
237    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
238  }
239
240  inline bool IsMovn() const {
241    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
242           (Mask(MoveWideImmediateMask) == MOVN_w);
243  }
244
245  // Indicate whether Rd can be the stack pointer or the zero register. This
246  // does not check that the instruction actually has an Rd field.
247  inline Reg31Mode RdMode() const {
248    // The following instructions use sp or wsp as Rd:
249    //  Add/sub (immediate) when not setting the flags.
250    //  Add/sub (extended) when not setting the flags.
251    //  Logical (immediate) when not setting the flags.
252    // Otherwise, r31 is the zero register.
253    if (IsAddSubImmediate() || IsAddSubExtended()) {
254      if (Mask(AddSubSetFlagsBit)) {
255        return Reg31IsZeroRegister;
256      } else {
257        return Reg31IsStackPointer;
258      }
259    }
260    if (IsLogicalImmediate()) {
261      // Of the logical (immediate) instructions, only ANDS (and its aliases)
262      // can set the flags. The others can all write into sp.
263      // Note that some logical operations are not available to
264      // immediate-operand instructions, so we have to combine two masks here.
265      if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
266        return Reg31IsZeroRegister;
267      } else {
268        return Reg31IsStackPointer;
269      }
270    }
271    return Reg31IsZeroRegister;
272  }
273
274  // Indicate whether Rn can be the stack pointer or the zero register. This
275  // does not check that the instruction actually has an Rn field.
276  inline Reg31Mode RnMode() const {
277    // The following instructions use sp or wsp as Rn:
278    //  All loads and stores.
279    //  Add/sub (immediate).
280    //  Add/sub (extended).
281    // Otherwise, r31 is the zero register.
282    if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
283      return Reg31IsStackPointer;
284    }
285    return Reg31IsZeroRegister;
286  }
287
288  inline ImmBranchType BranchType() const {
289    if (IsCondBranchImm()) {
290      return CondBranchType;
291    } else if (IsUncondBranchImm()) {
292      return UncondBranchType;
293    } else if (IsCompareBranch()) {
294      return CompareBranchType;
295    } else if (IsTestBranch()) {
296      return TestBranchType;
297    } else {
298      return UnknownBranchType;
299    }
300  }
301
302  // Find the target of this instruction. 'this' may be a branch or a
303  // PC-relative addressing instruction.
304  Instruction* ImmPCOffsetTarget();
305
306  // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
307  // a PC-relative addressing instruction.
308  void SetImmPCOffsetTarget(Instruction* target);
309  // Patch a literal load instruction to load from 'source'.
310  void SetImmLLiteral(Instruction* source);
311
312  inline uint8_t* LiteralAddress() {
313    int offset = ImmLLiteral() << kLiteralEntrySizeLog2;
314    return reinterpret_cast<uint8_t*>(this) + offset;
315  }
316
317  inline uint32_t Literal32() {
318    uint32_t literal;
319    memcpy(&literal, LiteralAddress(), sizeof(literal));
320
321    return literal;
322  }
323
324  inline uint64_t Literal64() {
325    uint64_t literal;
326    memcpy(&literal, LiteralAddress(), sizeof(literal));
327
328    return literal;
329  }
330
331  inline float LiteralFP32() {
332    return rawbits_to_float(Literal32());
333  }
334
335  inline double LiteralFP64() {
336    return rawbits_to_double(Literal64());
337  }
338
339  inline Instruction* NextInstruction() {
340    return this + kInstructionSize;
341  }
342
343  inline Instruction* InstructionAtOffset(int64_t offset) {
344    VIXL_ASSERT(IsWordAligned(this + offset));
345    return this + offset;
346  }
347
348  template<typename T> static inline Instruction* Cast(T src) {
349    return reinterpret_cast<Instruction*>(src);
350  }
351
352 private:
353  inline int ImmBranch() const;
354
355  void SetPCRelImmTarget(Instruction* target);
356  void SetBranchImmTarget(Instruction* target);
357};
358}  // namespace vixl
359
360#endif  // VIXL_A64_INSTRUCTIONS_A64_H_
361