// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_FP_STATICS

#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/instructions-arm64.h"

namespace v8 {
namespace internal {

bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_s:
      case LDR_d: return true;
      default: return false;
    }
  }
}

bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_s:
      case STR_d: return true;
      default: return false;
    }
  }
}

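// Rotates |value| right by |rotate| bits within a field of |width| bits; for
// example, RotateRight(0b0011, 1, 4) yields 0b1001.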
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  DCHECK(width <= 64);
  rotate &= 63;
  return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
         (value >> rotate);
}

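// Replicates the low |width| bits of |value| across a whole W or X register;
// for example, RepeatBitsAcrossReg(32, 0x9F, 8) yields 0x9F9F9F9F.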
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
         (width == 32));
  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  uint64_t result = value & ((1UL << width) - 1UL);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}

// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case: specifically, when the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() {
  unsigned reg_size = SixtyFourBits() ? kXRegSizeInBits : kWRegSizeInBits;
  int32_t n = BitN();
  int32_t imm_s = ImmSetBits();
  int32_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32- or 64-bit value, depending on destination register width.
  //

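  // Worked example: N = 0, imms = 0b111100, immr = 0b000000 selects a 2-bit
  // element with only the low bit set (0b01); with no rotation, repeating it
  // across the register gives 0x55555555 (or 0x5555555555555555 for an X
  // register).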
  if (n == 1) {
    if (imm_s == 0x3F) {
      return 0;
    }
    uint64_t bits = (1UL << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1F) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (1UL << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  UNREACHABLE();
  return 0;
}


float Instruction::ImmFP32() {
  //  ImmFP: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
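  // For example, the encoding 0x70 (a = 0, b = 1, cdefgh = 0b110000) expands
  // to 0x3F800000, i.e. 1.0f.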
  uint32_t bits = ImmFP();
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


double Instruction::ImmFP64() {
  //  ImmFP: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
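  // For example, the encoding 0x70 expands to 0x3FF0000000000000, i.e. 1.0.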
  uint32_t bits = ImmFP();
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
  switch (op) {
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return LSDoubleWord;
    default: return LSWord;
  }
}


int64_t Instruction::ImmPCOffset() {
  int64_t offset;
  if (IsPCRelAddressing()) {
    // PC-relative addressing. Only ADR is supported.
    offset = ImmPCRel();
  } else if (BranchType() != UnknownBranchType) {
    // All PC-relative branches.
    // Relative branch offsets are instruction-size-aligned.
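    // For example, an encoded branch immediate of 1 denotes an offset of
    // 4 bytes (one instruction).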
    offset = ImmBranch() << kInstructionSizeLog2;
  } else if (IsUnresolvedInternalReference()) {
    // Internal references are always word-aligned.
    offset = ImmUnresolvedInternalReference() << kInstructionSizeLog2;
  } else {
    // Load literal (offset from PC).
    DCHECK(IsLdrLiteral());
    // The offset is always shifted by 2 bits, even for loads to 64-bit
    // registers.
    offset = ImmLLiteral() << kInstructionSizeLog2;
  }
  return offset;
}


Instruction* Instruction::ImmPCOffsetTarget() {
  return InstructionAtOffset(ImmPCOffset());
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     ptrdiff_t offset) {
  return is_intn(offset, ImmBranchRangeBitwidth(branch_type));
}

bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
  return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}


void Instruction::SetImmPCOffsetTarget(Isolate* isolate, Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(isolate, target);
  } else if (BranchType() != UnknownBranchType) {
    SetBranchImmTarget(target);
  } else if (IsUnresolvedInternalReference()) {
    SetUnresolvedInternalReferenceImmTarget(isolate, target);
  } else {
    // Load literal (offset from PC).
    SetImmLLiteral(target);
  }
}


void Instruction::SetPCRelImmTarget(Isolate* isolate, Instruction* target) {
  // ADRP is not supported, so 'this' must point to an ADR instruction.
  DCHECK(IsAdr());

  ptrdiff_t target_offset = DistanceTo(target);
  Instr imm;
  if (Instruction::IsValidPCRelOffset(target_offset)) {
    imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
    SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
  } else {
    PatchingAssembler patcher(isolate, this,
                              PatchingAssembler::kAdrFarPatchableNInstrs);
    patcher.PatchAdrFar(target_offset);
  }
}

void Instruction::SetBranchImmTarget(Instruction* target) {
  DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
  DCHECK(IsValidImmPCOffset(BranchType(),
                            DistanceTo(target) >> kInstructionSizeLog2));
  int offset = static_cast<int>(DistanceTo(target) >> kInstructionSizeLog2);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


void Instruction::SetUnresolvedInternalReferenceImmTarget(Isolate* isolate,
                                                          Instruction* target) {
  DCHECK(IsUnresolvedInternalReference());
  DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
  DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
  int32_t target_offset =
      static_cast<int32_t>(DistanceTo(target) >> kInstructionSizeLog2);
  uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
  uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);

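  // The 32-bit offset is stored in the immediate fields of two brk
  // instructions; for example, a target_offset of 0x00012345 is encoded as
  // brk #0x1 followed by brk #0x2345.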
  PatchingAssembler patcher(isolate, this, 2);
  patcher.brk(high16);
  patcher.brk(low16);
}


void Instruction::SetImmLLiteral(Instruction* source) {
  DCHECK(IsLdrLiteral());
  DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
  DCHECK(Assembler::IsImmLLiteral(DistanceTo(source)));
  Instr imm = Assembler::ImmLLiteral(
      static_cast<int>(DistanceTo(source) >> kLoadLiteralScaleLog2));
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
bool InstructionSequence::IsInlineData() const {
  // Inline data is encoded as a single movz instruction which writes to xzr
  // (x31).
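  // For example, "movz xzr, #0x1234" would be read back as the inline
  // payload 0x1234.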
  return IsMovz() && SixtyFourBits() && (Rd() == kZeroRegCode);
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
}


// TODO(jbramley): We can't put this inline in the class because things like
// xzr and Register are not defined in that header. Consider adding
// instructions-arm64-inl.h to work around this.
uint64_t InstructionSequence::InlineData() const {
  DCHECK(IsInlineData());
  uint64_t payload = ImmMoveWide();
  // TODO(all): If we extend ::InlineData() to support bigger data, we need
  // to update this method too.
  return payload;
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64