// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "instructions-aarch64.h"
#include "assembler-aarch64.h"

namespace vixl {
namespace aarch64 {


// Floating-point infinity values.
const float16 kFP16PositiveInfinity = 0x7c00;
const float16 kFP16NegativeInfinity = 0xfc00;
const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000);
const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000);
const double kFP64PositiveInfinity =
    RawbitsToDouble(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    RawbitsToDouble(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000);
const float16 kFP16DefaultNaN = 0x7e00;
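// Each of these default NaNs is quiet: the sign bit is clear, the exponent
// field is all ones, and only the most significant fraction bit is set.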


static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}
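// Worked example (illustrative): with value = 0b0110, width = 4 and
// reg_size = kWRegSize, the masked pattern is doubled until it fills the
// register: 0x6 -> 0x66 -> 0x6666 -> 0x66666666.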


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q:
        return true;
      default:
        return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q:
        return true;
      default:
        return false;
    }
  }
}
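// Minimal usage sketch (assuming `instr` points at a decoded load/store
// instruction):
//   if (instr->IsLoad()) { ... } else if (instr->IsStore()) { ... }
// Encodings outside the listed cases (such as prefetch hints) report
// neither load nor store.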


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case, specifically where the constraints on imm_s are
// not met.
uint64_t Instruction::GetImmLogical() const {
  unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize;
  int32_t n = GetBitN();
  int32_t imm_s = GetImmSetBits();
  int32_t imm_r = GetImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on the destination register width.

  if (n == 1) {
    if (imm_s == 0x3f) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1f) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}
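// Worked example (illustrative): n=0, imm_s=0b111100, imm_r=0 selects the
// 2-bit element 0b01 (S=0, so one bit is set), which repeats across the
// register as 0x55555555 for a W destination or 0x5555555555555555 for an
// X destination.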


uint32_t Instruction::GetImmNEONabcdefgh() const {
  return GetImmNEONabc() << 5 | GetImmNEONdefgh();
}


float Instruction::Imm8ToFP32(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return RawbitsToFloat(result);
}
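// Worked example (illustrative): imm8 = 0x70 gives bit7 = 0, bit6 = 1 and
// bit5_to_0 = 0b110000, so result = (31 << 25) | (0x30 << 19) = 0x3f800000,
// which is 1.0f.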


float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); }


double Instruction::Imm8ToFP64(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return RawbitsToDouble(result);
}
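// Worked example (illustrative): imm8 = 0x70 gives
// result = (255 << 54) | (0x30 << 48) = 0x3ff0000000000000, which is 1.0.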


double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); }


float Instruction::GetImmNEONFP32() const {
  return Imm8ToFP32(GetImmNEONabcdefgh());
}


double Instruction::GetImmNEONFP64() const {
  return Imm8ToFP64(GetImmNEONabcdefgh());
}


unsigned CalcLSDataSize(LoadStoreOp op) {
  VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeInBytesLog2;
    }
  }
  return size;
}
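// For example, LDR_q encodes size = 0b00 with opc >= 2, so the vector case
// above corrects the result to kQRegSizeInBytesLog2 (16-byte accesses).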


unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
  switch (op) {
    case STP_q:
    case LDP_q:
      return kQRegSizeInBytesLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d:
      return kXRegSizeInBytesLog2;
    default:
      return kWRegSizeInBytesLog2;
  }
}


int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) {
  switch (branch_type) {
    case UncondBranchType:
      return ImmUncondBranch_width;
    case CondBranchType:
      return ImmCondBranch_width;
    case CompareBranchType:
      return ImmCmpBranch_width;
    case TestBranchType:
      return ImmTestBranch_width;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) {
  int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1);
  return encoded_max * kInstructionSize;
}
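// Worked example (illustrative): conditional branches encode a 19-bit
// offset, so encoded_max is 1 << 18 instructions and the forward range is
// (1 << 18) * kInstructionSize = 1 MiB.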


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int64_t offset) {
  return IsIntN(GetImmBranchRangeBitwidth(branch_type), offset);
}


const Instruction* Instruction::GetImmPCOffsetTarget() const {
  const Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = GetImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(GetBranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = GetImmBranch() * static_cast<int>(kInstructionSize);
  }
  return base + offset;
}
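// Worked example (illustrative): for an ADRP at address 0x1234 with an
// encoded immediate of 3, the target is AlignDown(0x1234, 4096) + 3 * 4096
// = 0x4000, assuming the usual 4 KiB page size.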


int Instruction::GetImmBranch() const {
  switch (GetBranchType()) {
    case CondBranchType:
      return GetImmCondBranch();
    case UncondBranchType:
      return GetImmUncondBranch();
    case CompareBranchType:
      return GetImmCmpBranch();
    case TestBranchType:
      return GetImmTestBranch();
    default:
      VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(const Instruction* target) {
  ptrdiff_t imm21;
  if (Mask(PCRelAddressingMask) == ADR) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
  switch (GetBranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default:
      VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}
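// Minimal usage sketch (assuming `instr` is a branch and `target` is a
// word-aligned instruction within range):
//   instr->SetBranchImmTarget(target);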


void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
              vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    case kFormat8H:
      return kFormat8B;
    case kFormat4S:
      return kFormat4H;
    case kFormat2D:
      return kFormat2S;
    case kFormatH:
      return kFormatB;
    case kFormatS:
      return kFormatH;
    case kFormatD:
      return kFormatS;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
              vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    case kFormat8B:
      return kFormat8H;
    case kFormat4H:
      return kFormat4S;
    case kFormat2S:
      return kFormat2D;
    case kFormatB:
      return kFormatH;
    case kFormatH:
      return kFormatS;
    case kFormatS:
      return kFormatD;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat VectorFormatFillQ(VectorFormat vform) {
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return kFormat16B;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return kFormat8H;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return kFormat4S;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return kFormat2D;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
  switch (vform) {
    case kFormat4H:
      return kFormat8B;
    case kFormat8H:
      return kFormat16B;
    case kFormat2S:
      return kFormat4H;
    case kFormat4S:
      return kFormat8H;
    case kFormat1D:
      return kFormat2S;
    case kFormat2D:
      return kFormat4S;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}
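// VectorFormatHalfWidthDoubleLanes halves the lane size while doubling the
// lane count, so the total width is preserved: for example, kFormat4S
// (4 x 32 bits) maps to kFormat8H (8 x 16 bits).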


VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat8B:
      return kFormat16B;
    case kFormat4H:
      return kFormat8H;
    case kFormat2S:
      return kFormat4S;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat16B:
      return kFormat8B;
    case kFormat8H:
      return kFormat4H;
    case kFormat4S:
      return kFormat2S;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromLaneSize(int lane_size) {
  switch (lane_size) {
    case 8:
      return kFormatB;
    case 16:
      return kFormatH;
    case 32:
      return kFormatS;
    case 64:
      return kFormatD;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
  return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
}
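// For example, ScalarFormatFromFormat(kFormat4S) yields kFormatS: the lane
// size is 32 bits, so the scalar equivalent is an S register.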


unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
      return kBRegSize;
    case kFormatH:
      return kHRegSize;
    case kFormatS:
      return kSRegSize;
    case kFormatD:
      return kDRegSize;
    case kFormat8B:
    case kFormat4H:
    case kFormat2S:
    case kFormat1D:
      return kDRegSize;
    default:
      return kQRegSize;
  }
}


unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
  return RegisterSizeInBitsFromFormat(vform) / 8;
}


unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 8;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return 16;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 32;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 64;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int LaneSizeInBytesFromFormat(VectorFormat vform) {
  return LaneSizeInBitsFromFormat(vform) / 8;
}


int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 0;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return 1;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 2;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 3;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int LaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormat16B:
      return 16;
    case kFormat8B:
    case kFormat8H:
      return 8;
    case kFormat4H:
    case kFormat4S:
      return 4;
    case kFormat2S:
    case kFormat2D:
      return 2;
    case kFormat1D:
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD:
      return 1;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int MaxLaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 16;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return 8;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 4;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 2;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD:
      return false;
    default:
      return true;
  }
}


int64_t MaxIntFromFormat(VectorFormat vform) {
  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}


int64_t MinIntFromFormat(VectorFormat vform) {
  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}


uint64_t MaxUintFromFormat(VectorFormat vform) {
  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
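// Worked examples for the three helpers above (illustrative): for kFormat8B
// the lane size is 8 bits, so MaxIntFromFormat returns 127, MinIntFromFormat
// returns -128 (via an arithmetic right shift of INT64_MIN) and
// MaxUintFromFormat returns 255.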
}  // namespace aarch64
}  // namespace vixl