//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//

16#include "MCTargetDesc/AArch64MCTargetDesc.h"
17#include "MCTargetDesc/AArch64MCExpr.h"
18#include "Utils/AArch64BaseInfo.h"
19#include "llvm/ADT/APFloat.h"
20#include "llvm/ADT/APInt.h"
21#include "llvm/ADT/StringSwitch.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/MC/MCContext.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCSubtargetInfo.h"
26#include "llvm/MC/MCTargetAsmParser.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCRegisterInfo.h"
29#include "llvm/MC/MCStreamer.h"
30#include "llvm/MC/MCParser/MCAsmLexer.h"
31#include "llvm/MC/MCParser/MCAsmParser.h"
32#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
33#include "llvm/Support/ErrorHandling.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Support/TargetRegistry.h"
36
using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // This is the public interface of the MCTargetAsmParser.
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over their interface: they
  // return an OperandMatchResultTy because it's less ambiguous than true/false
  // or -1/0/1, even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool validateInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
};

} // end anonymous namespace

namespace {

/// Instances of this class represent a parsed AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
                      // matcher cares about
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct ImmWithLSLOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
    bool ImplicitAmount;
  };

  struct CondCodeOp {
    A64CC::CondCodes Code;
  };

  struct FPImmOp {
    double Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct RegOp {
    unsigned RegNum;
  };

  struct ShiftExtendOp {
    A64SE::ShiftExtSpecifiers ShiftType;
    unsigned Amount;
    bool ImplicitAmount;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  union {
    struct ImmWithLSLOp ImmWithLSL;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct ImmOp Imm;
    struct RegOp Reg;
    struct ShiftExtendOp ShiftExtend;
    struct SysRegOp SysReg;
    struct TokOp Tok;
  };

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

public:
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
  }

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;
  void dump() const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }

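  // An unshifted ADD/SUB immediate: either a value that fits in 12 bits
  // (e.g. "add x0, x1, #123") or a low-12-bits relocation such as
  // "add x0, x1, #:lo12:var".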
  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

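  // The extend of a register-offset load/store, e.g. the "sxtw #3" in
  // "ldr x0, [x1, w2, sxtw #3]": RmSize selects the W-register (UXTW/SXTW) or
  // X-register (LSL/SXTX) forms, and the amount must be 0 or log2(MemSize).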
  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
        || Variant == AArch64MCExpr::VK_AARCH64_GOT
        || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
        || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }

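  // A PC-relative label: either a symbol, fixed up later, or a constant that
  // fits a signed field_width-bit field once divided by scale. For example,
  // field_width=19, scale=4 gives the +/-1MiB range of LDR (literal).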
  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (isa<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr; users needing that behaviour will have to go through an
    // operand kind that accepts them.
    return false;
  }

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
      && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }

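  // The unsigned 12-bit offset of a load/store, scaled by the access size:
  // with MemSize == 8 this accepts multiples of 8 from 0 to 32760 (0xfff * 8),
  // as in "ldr x0, [x1, #16]".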
  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned
    return Val >= 0 && Val <= 0xfff * MemSize;
  }

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }

  bool isMOVN32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

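  // Common MOVZ/MOVN/MOVK logic: either a plain 16-bit value whose "lsl #N"
  // is a multiple of 16 below the register width (e.g. "movz x0, #0x1234,
  // lsl #16"), or one of the permitted relocation modifiers with no explicit
  // shift (e.g. "movz x0, #:abs_g1:sym").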
  bool isMoveWideImm(unsigned RegWidth,
                     const AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }

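  // "mov" as an alias for a move-wide instruction; isValidImm is expected to
  // be A64Imms::isMOVZImm or A64Imms::isMOVNImm, deciding whether a single
  // MOVZ/MOVN can materialise the constant.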
  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // Check against the registers readable via MRS (including read-only ones).
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
  }


  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;
  }

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
  }

  bool isNeonMovImmShiftLSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amounts are 0, 8, 16 and 24.
    return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
  }

  bool isNeonMovImmShiftLSLH() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amounts are 0 and 8.
    return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
  }

  bool isNeonMovImmShiftMSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::MSL)
      return false;

    // Valid shift amounts are 8 and 16.
    return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
  }

  template <int MemSize> bool isSImm7Scaled() const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;
  }

  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
      && CE->getValue() < (1LL << (BitWidth - 1));
  }

  template<int BitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << BitWidth);
  }

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
  }

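  // A 64-bit immediate for NEON MOVI/MVNI in which every byte is either 0x00
  // or 0xff (e.g. 0xff00ff00ff00ff00); only one bit per byte is encoded.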
  bool isNeonUImm64Mask() const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    uint64_t Value = CE->getValue();

    // i64 value with each byte being either 0x00 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
        return false;
    return true;
  }

  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;
  }

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    return Op;
  }

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

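  // BFI is an alias of BFM: the "lsb" operand is encoded as
  // (RegWidth - lsb) % RegWidth, so "bfi w0, w1, #3, #4" encodes
  // (32 - 3) % 32 = 29.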
  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
  }

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }

  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

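  // Emit the immediate and shift of MOVZ/MOVN/MOVK. A constant takes its
  // explicit "lsl #N" (encoded as N/16); a relocated expression derives the
  // halfword from the modifier, e.g. :abs_g2: selects halfword 2.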
  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }

  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5-bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For vector immediate shifted imm operands.
  void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSLH shift amount 0, 8 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode MSL shift amount 8, 16 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8 - 1;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // First bit of Option is set in instruction classes, the high two bits are
    // as follows:
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // A bit from each byte in the constant forms the encoded immediate.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
};

} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {
  // See if the operand has a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }

    // ... or it might be a symbolish thing
  }
    // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is the current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly:  // Weird SIMD lists
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().parseExpression(ExprVal)
    ? MatchOperand_ParseFail : MatchOperand_Success;
}

// A lane attached to a NEON register. "[N]", which should yield three tokens:
// '[', N, ']'. A hash is not allowed to precede the immediate here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}

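// Parse the ":specifier:" prefix that requests an explicit relocation, e.g.
// ":got_lo12:" in "ldr x0, [x0, #:got_lo12:var]". Both colons and the
// identifier are consumed on success.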
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got",              AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12",         AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12",             AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0",           AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc",        AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1",           AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc",        AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2",           AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc",        AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3",           AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s",         AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s",         AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s",         AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2",        AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1",        AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0",        AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12",      AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12",      AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc",   AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1",      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc",   AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel",         AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12",    AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2",         AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1",         AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0",         AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12",       AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12",       AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc",    AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc",          AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12",     AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();
  return MatchOperand_Success;
}

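// An immediate with an optional left shift, e.g. "#0x12, lsl #16" for the
// move-wide instructions or just "#4095" (which is given an implicit shift
// of 0).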
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().isNot(AsmToken::Identifier)
      || Parser.getTok().getIdentifier().lower() != "lsl") {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat 'lsl'

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}

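// A control-register-style operand "cN" with 0 <= N <= 15, as used by the SYS
// family: e.g. the "c7, c5" in "sys #0, c7, c5, #0, x0".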
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  std::string LowerTok = Parser.getTok().getIdentifier().lower();
  StringRef Tok(LowerTok);
  if (Tok[0] != 'c') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}

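// A floating-point immediate such as "#1.25" or "#-0.5" for FMOV; whether it
// fits the 8-bit encoding is checked separately by isFMOVImm().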
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}

// Automatically generated by TableGen.
static unsigned MatchRegisterName(StringRef Name);

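// Recognises scalar names ("x0", "w13", "d7"), vector names with a layout
// suffix ("v0.8b", "v31.2d") and the ABI aliases ip0, ip1, fp and lr.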
bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == StringRef::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d', '.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    std::string LayoutText = LowerReg.substr(DotPos, StringRef::npos);
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".d", ".d").Case(".1d", ".1d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".2s", ".2s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".4h", ".4h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".8b", ".8b").Case(".16b", ".16b")
      .Default("");

    if (Layout.empty()) {
      // Malformed register
      return false;
    }
  }

  return true;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (!Layout.empty()) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}
1618
1619bool
1620AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1621                                SMLoc &EndLoc) {
1622  // This callback is used for things like DWARF frame directives in
1623  // assembly. They don't care about things like NEON layouts or lanes, they
1624  // just want to be able to produce the DWARF register number.
1625  StringRef LayoutSpec;
1626  SMLoc RegEndLoc, LayoutLoc;
1627  StartLoc = Parser.getTok().getLoc();
1628
1629  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
1630    return true;
1631
1632  Parser.Lex();
1633  EndLoc = Parser.getTok().getLoc();
1634
1635  return false;
1636}
1637
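// Parse an operand that is either a symbolic name drawn from Mapper's table
// or (equivalently) its raw encoding as a #-immediate; for example "dsb sy"
// and "dsb #15" should produce the same immediate operand.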
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If
  // relaxing this, beware of unintended consequences: an immediate will be
  // accepted during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex();

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}

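// Parse the system register operand of an MSR/MRS instruction. The name is
// deliberately kept as a string here (see the comment below); it is resolved
// to an encoding during matching.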
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (!Tok.is(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}

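// Parse the address operand of a load/store exclusive instruction, which only
// accepts a base register, optionally followed by ", #0". The opening '[' has
// presumably been consumed already during generic operand parsing; the
// closing ']' is left for the caller.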
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  SMLoc RegEndLoc, LayoutLoc;
  StringRef Layout;
  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
      || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
      || !Layout.empty()) {
    // The layout check stops something like "x3.4s" or similar getting
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat register

  if (Parser.getTok().is(AsmToken::RBrac)) {
    // We're done
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat ','

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'

  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '0'

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}

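// Parse a shift/extend modifier such as "lsl #3" or "uxtb" into a single
// ShiftExtend operand; the immediate may be omitted for the extending forms,
// as explained below.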
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
      StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
        .Case("lsl", A64SE::LSL)
        .Case("msl", A64SE::MSL)
        .Case("lsr", A64SE::LSR)
        .Case("asr", A64SE::ASR)
        .Case("ror", A64SE::ROR)
        .Case("uxtb", A64SE::UXTB)
        .Case("uxth", A64SE::UXTH)
        .Case("uxtw", A64SE::UXTW)
        .Case("uxtx", A64SE::UXTX)
        .Case("sxtb", A64SE::SXTB)
        .Case("sxth", A64SE::SXTH)
        .Case("sxtw", A64SE::SXTW)
        .Case("sxtx", A64SE::SXTX)
        .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Eat the shift
  SMLoc S, E;
  S = Parser.getTok().getLoc();
  Parser.Lex();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
      Spec != A64SE::ROR && Spec != A64SE::MSL) {
    // The shift amount can be omitted for the extending versions, but not real
    // shifts:
    //     add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //     add x0, x0, x0, uxtb #0

    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat # at beginning of immediate
  if (!Parser.getTok().is(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(),
          "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));

  return MatchOperand_Success;
}

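// Perform the semantic checks that the generated matcher cannot express,
// such as the inter-operand constraints on the bitfield immediates below.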
// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();

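    // For the insert aliases, ImmR encodes (RegWidth - lsb) % RegWidth and
    // ImmS encodes width - 1, so (given ImmR != 0) ImmS >= ImmR means that
    // lsb + width would run off the end of the register.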
    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

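    // For the extract aliases, ImmR encodes the lsb and ImmS encodes
    // lsb + width - 1; the extract must fit in the register and have a
    // positive width.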
    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}

// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list
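// A mnemonic such as "b.eq" is first split at the '.' into the base mnemonic
// token, a "." token and a condition-code operand; the remaining operands are
// then parsed as usual.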
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  size_t CondCodePos = Name.find('.');

  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
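    // Condition codes are always two characters ("eq", "ne", ...), so the
    // operand ends three characters after the '.'.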
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases
      // to consider (i.e. notional operands not separated by commas). Both
      // are due to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are
      // sane in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

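// Handle the target-specific directives: .hword/.word/.xword emit 2/4/8-byte
// values and .tlsdesccall marks a TLS descriptor call. Returning true for
// anything else hands the directive back to the generic parser.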
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// ParseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// ParseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}

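// Match the parsed operands against the instruction tables, run the extra
// validateInstruction checks, and emit the result; on failure, map the match
// result onto a (hopefully helpful) diagnostic.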
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                 MCStreamer &Out, unsigned &ErrorInfo,
                                 bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);

  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");

  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");

  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
        "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 508]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1016]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
             "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
    break;
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"