//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These form the public interface of the MCTargetAsmParser.
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over their interface: they
  // return an OperandMatchResultTy because it's less ambiguous than true/false
  // or -1/0/1, even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool validateInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;

};

} // end anonymous namespace

namespace {

/// Instances of this class represent a parsed AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
                      // matcher needs
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct ImmWithLSLOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
    bool ImplicitAmount;
  };

  struct CondCodeOp {
    A64CC::CondCodes Code;
  };

  struct FPImmOp {
    double Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct RegOp {
    unsigned RegNum;
  };

  struct ShiftExtendOp {
    A64SE::ShiftExtSpecifiers ShiftType;
    unsigned Amount;
    bool ImplicitAmount;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  union {
    struct ImmWithLSLOp ImmWithLSL;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct ImmOp Imm;
    struct RegOp Reg;
    struct ShiftExtendOp ShiftExtend;
    struct SysRegOp SysReg;
    struct TokOp Tok;
  };

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

public:
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
  }

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;
  void dump() const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

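  // Classify an immediate expression: returns true (setting Variant) for
  // anything other than a plain constant. A relocated expression such as
  // ":lo12:sym" reports its modifier; a bare symbol reports VK_AARCH64_None.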
  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }

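  // ADD/SUB immediate with no shift: either a plain value in [0, 0xfff]
  // (e.g. "add x0, x1, #123") or a :lo12:-style relocation
  // (e.g. "add x0, x1, #:lo12:var").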
  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

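  // The extend of a register-offset load/store, e.g. the "sxtw #2" in
  // "ldr w0, [x1, w2, sxtw #2]"; the amount must be 0 or log2 of the access
  // size.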
  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
        || Variant == AArch64MCExpr::VK_AARCH64_GOT
        || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
        || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }

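  // A PC-relative label: a symbol, or a constant that fits in field_width bits
  // once divided by scale. E.g. isLabel<19, 4> gives the +/-1MiB range of a
  // conditional branch.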
  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr; operands carrying one are rejected here.
    return false;
  }

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
      && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }

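  // Scaled 12-bit unsigned offset of the unsigned-immediate load/store forms;
  // with MemSize == 8 this accepts multiples of 8 from 0 to 32760, e.g.
  // "ldr x0, [x1, #32760]".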
  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned.
    return Val >= 0 && Val <= 0xfff * MemSize;
  }

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }

  bool isMOVN32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

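  // Shared checks for the MOVZ/MOVN/MOVK immediates: either a 16-bit value
  // with a shift that is a multiple of 16 (e.g. "movz x0, #0xffff, lsl #32"),
  // or one of the permitted relocation modifiers with no explicit shift
  // (e.g. "movz x0, #:abs_g1:sym").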
  bool isMoveWideImm(unsigned RegWidth,
                     AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }

  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // Check against the registers known to be readable via MRS.
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
  }

  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;
  }

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
  }

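  // Scaled 7-bit signed offset of the load/store pair instructions; with
  // MemSize == 8 this accepts multiples of 8 from -512 to 504, e.g.
  // "ldp x0, x1, [sp, #-16]".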
  template<int MemSize> bool isSImm7Scaled() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;
  }

  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
      && CE->getValue() < (1LL << (BitWidth - 1));
  }

  template<int bitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
  }

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
  }

  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;
  }

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    return Op;
  }

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
  }

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

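  // The SBFX/UBFX aliases take an LSB and a width, but the instruction itself
  // wants the MSB; e.g. "sbfx x0, x1, #4, #8" encodes 4 + 8 - 1 = 11 here.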
  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }

  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }

  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5-bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // For add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // The first bit of Option is set in the instruction classes; the high two
    // bits are as follows:
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;
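
    // E.g. "ldr x0, [x1, w2, sxtw #3]" (an 8-byte access) gives OptionHi == 3
    // and S == 1, i.e. 0b111 below.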
    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }
};

} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {
  // See if the operand has a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }

    // ... or it might be a symbolish thing
  }
    // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is the current location
  case AsmToken::Dollar:  // $ is the PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly:  // Weird SIMD lists
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}

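// Parse a bare immediate expression, which may carry a relocation modifier
// prefix, e.g. ":lo12:var"; the modifier wraps the parsed sub-expression in an
// AArch64MCExpr.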
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().parseExpression(ExprVal)
    ? MatchOperand_ParseFail : MatchOperand_Success;
}

// A lane attached to a NEON register. "[N]", which should yield three tokens:
// '[', N, ']'. A hash is not allowed to precede the immediate here.
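// E.g. the "[2]" of "umov w0, v1.s[2]": the ".s" layout will already have set
// NumLanes to 4, which bounds the index accepted here.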
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}

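// Parse the ":reloc_specifier:" prefix of an immediate, e.g. the ":got_lo12:"
// in "ldr x0, [x0, #:got_lo12:var]", leaving the lexer positioned just after
// the trailing ':'.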
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got",              AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12",         AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12",             AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0",           AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc",        AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1",           AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc",        AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2",           AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc",        AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3",           AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s",         AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s",         AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s",         AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2",        AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1",        AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0",        AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12",      AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12",      AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc",   AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1",      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc",   AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel",         AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12",    AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2",         AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1",         AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0",         AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12",       AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12",       AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc",    AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc",          AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12",     AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().lower() == "lsl") {
    Parser.Lex();

    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex();

      if (Parser.getTok().isNot(AsmToken::Integer)) {
        Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
        return MatchOperand_ParseFail;
      }
    }
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  std::string LowerTok = Parser.getTok().getIdentifier().lower();
  StringRef Tok(LowerTok);
  if (Tok[0] != 'c') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}

// Automatically generated
static unsigned MatchRegisterName(StringRef Name);

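// E.g. "v1.4s" is split into the register name "v1" and layout ".4s" (with
// LayoutLoc just past the '.'); a plain "x3" or an alias like "lr" leaves
// Layout empty.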
bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == StringRef::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d', '.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
      .Default("");

    if (Layout.size() == 0) {
      // Malformed register
      return false;
    }
  }

  return true;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}

bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex();
  EndLoc = Parser.getTok().getLoc();

  return false;
}

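// Parse an operand given either by name or as a raw immediate, e.g. the "sy"
// in "dsb sy" or (assuming the mapper accepts the value) an equivalent
// "dsb #0xf"; Mapper supplies both the name lookup and the immediate check.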
1539AArch64AsmParser::OperandMatchResultTy
1540AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
1541                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1542  // Since these operands occur in very limited circumstances, without
1543  // alternatives, we actually signal an error if there is no match. If relaxing
1544  // this, beware of unintended consequences: an immediate will be accepted
1545  // during matching, no matter how it gets into the AArch64Operand.
1546  const AsmToken &Tok = Parser.getTok();
1547  SMLoc S = Tok.getLoc();
1548
1549  if (Tok.is(AsmToken::Identifier)) {
1550    bool ValidName;
1551    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
1552
1553    if (!ValidName) {
1554      Error(S, "operand specifier not recognised");
1555      return MatchOperand_ParseFail;
1556    }
1557
1558    Parser.Lex(); // We're done with the identifier. Eat it
1559
1560    SMLoc E = Parser.getTok().getLoc();
1561    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
1562    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
1563    return MatchOperand_Success;
1564  } else if (Tok.is(AsmToken::Hash)) {
1565    Parser.Lex();
1566
1567    const MCExpr *ImmVal;
1568    if (ParseImmediate(ImmVal) != MatchOperand_Success)
1569      return MatchOperand_ParseFail;
1570
1571    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
1572    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
1573      Error(S, "Invalid immediate for instruction");
1574      return MatchOperand_ParseFail;
1575    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}
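// e.g. this parses the "civac" of "dc civac, x0", or an equivalent raw "#imm"
// form when the mapper deems the value valid.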

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}
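// e.g. for "mrs x0, nzcv" the "nzcv" token is kept as a string here and only
// resolved to an encoding once the MSR/MRS form is known.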

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  SMLoc RegEndLoc, LayoutLoc;
  StringRef Layout;
  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
      || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
      || !Layout.empty()) {
    // The layout must be empty: we don't want to let "x3.4s" or similar
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat register

  if (Parser.getTok().is(AsmToken::RBrac)) {
    // We're done
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat ','

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'
  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '0'

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}
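// This matches the address of exclusive loads/stores, e.g. "ldxr w0, [x1]" or
// the equivalent explicit form "ldxr w0, [x1, #0]".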

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
    StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
      .Case("lsl", A64SE::LSL)
      .Case("lsr", A64SE::LSR)
      .Case("asr", A64SE::ASR)
      .Case("ror", A64SE::ROR)
      .Case("uxtb", A64SE::UXTB)
      .Case("uxth", A64SE::UXTH)
      .Case("uxtw", A64SE::UXTW)
      .Case("uxtx", A64SE::UXTX)
      .Case("sxtb", A64SE::SXTB)
      .Case("sxth", A64SE::SXTH)
      .Case("sxtw", A64SE::SXTW)
      .Case("sxtx", A64SE::SXTX)
      .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Eat the shift specifier. Note the end location so that an operand created
  // before any shift amount is parsed still has a meaningful range.
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR &&
      Spec != A64SE::ASR && Spec != A64SE::ROR) {
    // The shift amount can be omitted for the extending versions, but not real
    // shifts:
    //     add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //     add x0, x0, x0, uxtb #0

    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat # at beginning of immediate
  if (!Parser.getTok().is(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(),
          "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));

  return MatchOperand_Success;
}
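// e.g. this turns the "lsl #3" of "add x0, x1, x2, lsl #3", or the bare
// "uxtb" of "add x0, x0, x0, uxtb", into a ShiftExtend operand.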

// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();

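    // The insert aliases encode ImmR as (RegWidth - lsb) % RegWidth and ImmS
    // as width - 1, so an insert with lsb + width > RegWidth shows up as
    // ImmS >= ImmR (ImmR == 0 means lsb == 0, where any legal width fits).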
    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

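    // The extract aliases encode ImmR as lsb and ImmS as lsb + width - 1, so
    // e.g. "ubfx x0, x1, #60, #8" is rejected here because 67 >= 64.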
    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
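  // IC and TLBI operations come in register-taking and register-less forms
  // (e.g. "ic ivau, x0" vs. "ic ialluis"); the cases below diagnose a
  // register supplied to, or omitted from, the wrong form.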
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}

// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  size_t CondCodePos = Name.find('.');

  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code.
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
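    // All AArch64 condition codes are two characters long, so the operand
    // ends at NameLoc + CondCodePos + 3 (past the dot and both letters).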
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both
      // are due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// ParseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// ParseDirectiveTLSDescCall
//   ::= .tlsdesccall symbol
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}
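// e.g. ".tlsdesccall var" emits the TLSDESCCALL pseudo (no bytes of its own);
// its symbol operand lets the TLS-descriptor relocation be attached to the
// call sequence that follows.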

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                 MCStreamer &Out, unsigned &ErrorInfo,
                                 bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                              MatchingInlineAsm);

  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");

  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");

  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
        "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 508]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1016]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
             "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
    break;
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"