//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These functions are the public interface of the MCTargetAsmParser.
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over their interfaces: they
  // return an OperandMatchResultTy because it's less ambiguous than true/false
  // or -1/0/1, even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImm0AndImm0Operand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
                      SMLoc &LayoutLoc);

  OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);

  bool validateInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
                   SMLoc &LayoutLoc) const;
};

} // end anonymous namespace

namespace {

/// Instances of this class represent a parsed AArch64 machine instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_VectorList,     // A sequential list of 1 to 4 registers.
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
                      // matcher cares about
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct ImmWithLSLOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
    bool ImplicitAmount;
  };

  struct CondCodeOp {
    A64CC::CondCodes Code;
  };

  struct FPImmOp {
    double Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct RegOp {
    unsigned RegNum;
  };

  struct ShiftExtendOp {
    A64SE::ShiftExtSpecifiers ShiftType;
    unsigned Amount;
    bool ImplicitAmount;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    A64Layout::VectorLayout Layout;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  union {
    struct ImmWithLSLOp ImmWithLSL;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct ImmOp Imm;
    struct RegOp Reg;
    struct ShiftExtendOp ShiftExtend;
    struct VectorListOp VectorList;
    struct SysRegOp SysReg;
    struct TokOp Tok;
  };

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

public:
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
  }

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;
  void dump() const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

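  // Helper: decide whether an immediate expression is "non-constant" for
  // operand-classification purposes. A wrapped AArch64MCExpr reports its
  // relocation modifier in Variant; any other non-MCConstantExpr (e.g. a bare
  // symbol reference) reports VK_AARCH64_None. Plain integer constants return
  // false, so callers can range-check them directly.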
  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }

  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

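  // Matches the extend operand of a load/store (register offset), e.g. the
  // "sxtw #3" in "ldr x0, [x1, w2, sxtw #3]". MemSize is the access size in
  // bytes and RmSize the width of the offset register, so the only valid
  // explicit shift amount is log2(MemSize); 0 covers the unshifted form.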
  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
        || Variant == AArch64MCExpr::VK_AARCH64_GOT
        || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
        || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }

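  // A PC-relative label is valid if it's a bare symbol (relocated later) or a
  // constant that fits in a signed field_width-bit field after dividing by
  // scale. E.g. isLabel<19, 4> (load-literal, conditional branches) accepts
  // word-aligned offsets in -4 * 2^18 .. 4 * (2^18 - 1), roughly +/-1MiB.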
  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr. Users needing that behaviour
    return false;
  }

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
      && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }

  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned.
    return Val >= 0 && Val <= 0xfff * MemSize;
  }

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }

  bool isMOVN32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

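  // Shared check for MOVZ/MOVN/MOVK immediates: either a 16-bit constant with
  // an LSL shift that is a multiple of 16 and below the register width (0/16
  // for 32-bit, 0/16/32/48 for 64-bit), or a relocation modifier from the
  // caller's permitted list, e.g. "movz x0, #:abs_g1:sym". An explicit shift
  // on a relocated form ("#:abs_g0:sym, lsl #16") is rejected.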
  bool isMoveWideImm(unsigned RegWidth,
                     const AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }

  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // Check against the known MRS (readable) system registers.
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

655
656  bool isPRFM() const {
657    if (!isImm()) return false;
658
659    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
660
661    if (!CE)
662      return false;
663
664    return CE->getValue() >= 0 && CE->getValue() <= 31;
665  }
666
667  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
668    if (!isShiftOrExtend()) return false;
669
670    if (ShiftExtend.ShiftType != SHKind)
671      return false;
672
673    return ShiftExtend.Amount <= 4;
674  }
675
676  bool isRegExtendLSL() const {
677    if (!isShiftOrExtend()) return false;
678
679    if (ShiftExtend.ShiftType != A64SE::LSL)
680      return false;
681
682    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
683  }
684
685  // if 0 < value <= w, return true
686  bool isShrFixedWidth(int w) const {
687    if (!isImm())
688      return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE)
691      return false;
692    int64_t Value = CE->getValue();
693    return Value > 0 && Value <= w;
694  }
695
696  bool isShrImm8() const { return isShrFixedWidth(8); }
697
698  bool isShrImm16() const { return isShrFixedWidth(16); }
699
700  bool isShrImm32() const { return isShrFixedWidth(32); }
701
702  bool isShrImm64() const { return isShrFixedWidth(64); }
703
704  // if 0 <= value < w, return true
705  bool isShlFixedWidth(int w) const {
706    if (!isImm())
707      return false;
708    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
709    if (!CE)
710      return false;
711    int64_t Value = CE->getValue();
712    return Value >= 0 && Value < w;
713  }
714
715  bool isShlImm8() const { return isShlFixedWidth(8); }
716
717  bool isShlImm16() const { return isShlFixedWidth(16); }
718
719  bool isShlImm32() const { return isShlFixedWidth(32); }
720
721  bool isShlImm64() const { return isShlFixedWidth(64); }
722
723  bool isNeonMovImmShiftLSL() const {
724    if (!isShiftOrExtend())
725      return false;
726
727    if (ShiftExtend.ShiftType != A64SE::LSL)
728      return false;
729
730    // Valid shift amount is 0, 8, 16 and 24.
731    return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
732  }
733
734  bool isNeonMovImmShiftLSLH() const {
735    if (!isShiftOrExtend())
736      return false;
737
738    if (ShiftExtend.ShiftType != A64SE::LSL)
739      return false;
740
741    // Valid shift amount is 0 and 8.
742    return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
743  }
744
745  bool isNeonMovImmShiftMSL() const {
746    if (!isShiftOrExtend())
747      return false;
748
749    if (ShiftExtend.ShiftType != A64SE::MSL)
750      return false;
751
752    // Valid shift amount is 8 and 16.
753    return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
754  }
755
756  template <A64Layout::VectorLayout Layout, unsigned Count>
757  bool isVectorList() const {
758    return Kind == k_VectorList && VectorList.Layout == Layout &&
759           VectorList.Count == Count;
760  }
761
762  template <int MemSize> bool isSImm7Scaled() const {
763    if (!isImm())
764      return false;
765
766    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
767    if (!CE) return false;
768
769    int64_t Val = CE->getValue();
770    if (Val % MemSize != 0) return false;
771
772    Val /= MemSize;
773
774    return Val >= -64 && Val < 64;
775  }
776
777  template<int BitWidth>
778  bool isSImm() const {
779    if (!isImm()) return false;
780
781    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
782    if (!CE) return false;
783
784    return CE->getValue() >= -(1LL << (BitWidth - 1))
785      && CE->getValue() < (1LL << (BitWidth - 1));
786  }
787
788  template<int bitWidth>
789  bool isUImm() const {
790    if (!isImm()) return false;
791
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794
795    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
796  }
797
798  bool isUImm() const {
799    if (!isImm()) return false;
800
801    return isa<MCConstantExpr>(getImm());
802  }
803
804  bool isNeonUImm64Mask() const {
805    if (!isImm())
806      return false;
807
808    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
809    if (!CE)
810      return false;
811
812    uint64_t Value = CE->getValue();
813
814    // i64 value with each byte being either 0x00 or 0xff.
815    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
816      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
817        return false;
818    return true;
819  }
820
821  // if value == N, return true
822  template<int N>
823  bool isExactImm() const {
824    if (!isImm()) return false;
825
826    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
827    if (!CE) return false;
828
829    return CE->getValue() == N;
830  }
831
832  bool isFPZeroIZero() const {
833    return isFPZero();
834  }
835
836  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
837                                          unsigned ShiftAmount,
838                                          bool ImplicitAmount,
839										  SMLoc S,SMLoc E) {
840    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
841    Op->ImmWithLSL.Val = Val;
842    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
843    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
844    return Op;
845  }
846
847  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
848                                        SMLoc S, SMLoc E) {
849    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
850    Op->CondCode.Code = Code;
851    return Op;
852  }
853
854  static AArch64Operand *CreateFPImm(double Val,
855                                     SMLoc S, SMLoc E) {
856    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
857    Op->FPImm.Val = Val;
858    return Op;
859  }
860
861  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
862    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
863    Op->Imm.Val = Val;
864    return Op;
865  }
866
867  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
868    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
869    Op->Reg.RegNum = RegNum;
870    return Op;
871  }
872
873  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
874    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
875    Op->Reg.RegNum = RegNum;
876    return Op;
877  }
878
879  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
880                                           unsigned Amount,
881                                           bool ImplicitAmount,
882                                           SMLoc S, SMLoc E) {
883    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
884    Op->ShiftExtend.ShiftType = ShiftTyp;
885    Op->ShiftExtend.Amount = Amount;
886    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
887    return Op;
888  }
889
890  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
891    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
892    Op->Tok.Data = Str.data();
893    Op->Tok.Length = Str.size();
894    return Op;
895  }
896
897  static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
898                                          A64Layout::VectorLayout Layout,
899                                          SMLoc S, SMLoc E) {
900    AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
901    Op->VectorList.RegNum = RegNum;
902    Op->VectorList.Count = Count;
903    Op->VectorList.Layout = Layout;
904    Op->StartLoc = S;
905    Op->EndLoc = E;
906    return Op;
907  }
908
909  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
910    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
911    Op->Tok.Data = Str.data();
912    Op->Tok.Length = Str.size();
913    return Op;
914  }
915
916
917  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
918    // Add as immediates when possible.
919    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
920      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
921    else
922      Inst.addOperand(MCOperand::CreateExpr(Expr));
923  }
924
925  template<unsigned RegWidth>
926  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
927    assert(N == 1 && "Invalid number of operands!");
928    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
929    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
930    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
931  }
932
933  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
934    assert(N == 1 && "Invalid number of operands!");
935    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
936    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
937  }
938
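  // BFX-style width operands are encoded as LSB + Width - 1, where the LSB
  // was added to the MCInst just before this operand. E.g. for
  // "ubfx x0, x1, #8, #4" the width operand encodes 8 + 4 - 1 = 11.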
  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

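  // Fixed-point conversions (e.g. "fcvtzs x0, d0, #fbits") store the scale
  // field as 64 - fbits, which is what gets emitted here.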
  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addFPZeroIZeroOperands(MCInst &Inst, unsigned N) const {
    addFPZeroOperands(Inst, N);
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

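  // Load/store pair offsets (LDP/STP) are signed 7-bit multiples of the
  // access size; the scaled value is truncated to 7 bits for encoding. E.g.
  // with MemSize == 8, "ldp x0, x1, [sp, #-16]" scales to -2 and encodes as
  // 0x7e.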
  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

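  // Label operands are emitted as expressions when not yet resolvable;
  // resolved constants are divided by the scale and masked down to
  // field_width bits, matching the isLabel<> check above.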
  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }

  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

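  // Move-wide operands add two MCInst operands: the 16-bit immediate (or
  // relocated expression) and the hw shift field, i.e. ShiftAmount / 16 for
  // constants. For relocated forms the shift is implied by the modifier's
  // group: G0 -> 0, G1 -> 1, G2 -> 2, G3 -> 3, so "movz x0, #:abs_g1:sym"
  // emits shift 1.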
  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }

  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5 bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For vector immediate shifted-imm operands.
  void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSLH shift amount 0, 8 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode MSL shift amount 8, 16 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8 - 1;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

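  // E.g. "ldr x0, [x1, w2, sxtw #3]" with MemSize == 8 gives OptionHi == 3
  // and S == 1 below, so the operand encodes (3 << 1) | 1 == 7.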
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // First bit of Option is set in instruction classes, the high two bits are
    // as follows:
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }

  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

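  // MOVI Vd.2d-style masks take one bit per byte of the 64-bit constant (each
  // byte must be 0x00 or 0xff, as isNeonUImm64Mask checks). E.g.
  // 0x00ff00ff00ff00ff encodes as 0b01010101 == 0x55.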
  void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // A bit from each byte in the constant forms the encoded immediate
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }
};

} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {

  // See if the operand has a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }
    // ... or it might be a symbolish thing
  }
    // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is Current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly: // SIMD vector list is not parsed here
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().parseExpression(ExprVal)
    ? MatchOperand_ParseFail : MatchOperand_Success;
}

// A lane attached to a NEON register. "[N]", which should yield three tokens:
// '[', N, ']'. A hash is not allowed to precede the immediate here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}

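// Relocation specifiers are written as a ':'-delimited prefix on the
// immediate, e.g. "adrp x0, :got:var" or "add x0, x0, #:lo12:var" (the usual
// ADRP + ADD pair for materializing an address).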
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got",              AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12",         AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12",             AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0",           AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc",        AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1",           AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc",        AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2",           AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc",        AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3",           AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s",         AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s",         AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s",         AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2",        AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1",        AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0",        AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12",      AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12",      AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc",   AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1",      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc",   AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel",         AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12",    AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2",         AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1",         AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0",         AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12",       AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12",       AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc",    AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc",          AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12",     AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();
  return MatchOperand_Success;
}

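// Accepts "#imm" or "#imm, lsl #N" (and bare integer forms), e.g. the
// "#1, lsl #12" in "add x0, x1, #1, lsl #12". A missing shift is recorded as
// an implicit LSL #0.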
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // The operand must begin with '#' or be a bare integer; otherwise it is
    // not an immediate-with-LSL at all.
    return MatchOperand_NoMatch;
1492
1493  const MCExpr *Imm;
1494  if (ParseImmediate(Imm) != MatchOperand_Success)
1495    return MatchOperand_ParseFail;
1496  else if (Parser.getTok().isNot(AsmToken::Comma)) {
1497    SMLoc E = Parser.getTok().getLoc();
1498    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
1499    return MatchOperand_Success;
1500  }
1501
1502  // Eat ','
1503  Parser.Lex();
1504
1505  // The optional operand must be "lsl #N" where N is non-negative.
1506  if (Parser.getTok().is(AsmToken::Identifier)
1507      && Parser.getTok().getIdentifier().equals_lower("lsl")) {
1508    Parser.Lex();
1509
1510    if (Parser.getTok().is(AsmToken::Hash)) {
1511      Parser.Lex();
1512
1513      if (Parser.getTok().isNot(AsmToken::Integer)) {
1514        Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
1515        return MatchOperand_ParseFail;
1516      }
1517    }
1518  }
1519
1520  int64_t ShiftAmount = Parser.getTok().getIntVal();
1521
1522  if (ShiftAmount < 0) {
1523    Error(Parser.getTok().getLoc(), "positive shift amount required");
1524    return MatchOperand_ParseFail;
1525  }
1526  Parser.Lex(); // Eat the number
1527
1528  SMLoc E = Parser.getTok().getLoc();
1529  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
1530                                                      false, S, E));
1531  return MatchOperand_Success;
1532}
1533
1534
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCondCodeOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef Tok = Parser.getTok().getIdentifier();
  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);

  if (CondCode == A64CC::Invalid)
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat condition code
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseCRxOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  uint32_t CRNum;
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;
  }

  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());

  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  SMLoc S = Parser.getTok().getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    if (!Hash)
      return MatchOperand_NoMatch;
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}

1623
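// Parses the zero immediate of comparisons such as "fcmp s0, #0.0"
// (illustrative example); a plain "#0" is accepted as well.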
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImm0AndImm0Operand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  if (Parser.getTok().is(AsmToken::Real)) {
    if (Parser.getTok().getString() != "0.0") {
      Error(S, "only #0.0 is acceptable as immediate");
      return MatchOperand_ParseFail;
    }
  } else if (Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().getIntVal() != 0) {
      Error(S, "only #0.0 is acceptable as immediate");
      return MatchOperand_ParseFail;
    }
  } else {
    if (!Hash)
      return MatchOperand_NoMatch;
    Error(S, "only #0.0 is acceptable as immediate");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat the '0.0' or '0' token
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(0.0, S, E));
  return MatchOperand_Success;
}

// Automatically generated
static unsigned MatchRegisterName(StringRef Name);

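// Recognises plain register names ("x3", "w0", "s2"), the aliases handled
// below ("fp", "lr", "ip0", "ip1") and vector registers carrying a layout
// suffix ("v0.8b", "v7.4s", "v2.2d").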
bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  bool IsVec128 = false;
  SMLoc S = Tok.getLoc();
  // When there is no layout suffix the register name runs to the end of the
  // token; adding npos to the pointer would be nonsense.
  size_t NameLen = (DotPos == std::string::npos) ? LowerReg.size() : DotPos;
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + NameLen);

  if (DotPos == std::string::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d', '.b' etc. for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    StringRef LayoutText = StringRef(LowerReg).substr(DotPos);

    // See if it's a 128-bit layout first.
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".q", ".q").Case(".1q", ".1q")
      .Case(".d", ".d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".16b", ".16b")
      .Default("");

    if (!Layout.empty())
      IsVec128 = true;
    else {
      Layout = StringSwitch<const char *>(LayoutText)
                   .Case(".1d", ".1d")
                   .Case(".2s", ".2s")
                   .Case(".4h", ".4h")
                   .Case(".8b", ".8b")
                   .Default("");
    }

    if (Layout.empty()) {
      // If we've still not pinned it down the register is malformed.
      return false;
    }
  }

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
      .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
      .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
      .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
      .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
      .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
      .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
      .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
      .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
      .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
      .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
      .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
      .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
      .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
      .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
      .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
      .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
      .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
      .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
      .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
      .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
      .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
      .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
      .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
      .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
      .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
      .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
      .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
      .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
      .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
      .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
      .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  return true;
}

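// Parses a register operand, also emitting a layout token and reporting the
// lane count for vector forms: e.g. "v0.4s" produces a register operand plus
// a ".4s" token with NumLanes == 4.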
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (!Layout.empty()) {
    unsigned long long TmpLanes = 0;
    // Note: this relies on getAsUnsignedInteger having accumulated the
    // leading digits of e.g. "2s" into TmpLanes before it rejects the
    // trailing element letter; TmpLanes stays 0 for layouts like ".b".
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      case 'q': NumLanes = 1; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}

bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex();
  EndLoc = Parser.getTok().getLoc();

  return false;
}

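// Parses an operand written either as a symbolic name or as a raw immediate,
// e.g. (illustrative example) the barrier option of "dsb sy", which can
// equally be written "dsb #15".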
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If relaxing
  // this, beware of unintended consequences: an immediate will be accepted
  // during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex();

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}

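// Parses the system-register name of an MRS/MSR instruction, e.g.
// (illustrative example) the "midr_el1" in "mrs x0, midr_el1".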
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}

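// Parses the restricted "[xN]" or "[xN, #0]" address used by the load/store
// exclusive instructions, e.g. (illustrative examples) "ldxr w0, [x1]" or
// "stxr w2, w3, [x4, #0]".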
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  SMLoc RegEndLoc, LayoutLoc;
  StringRef Layout;
  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
      || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
      || !Layout.empty()) {
    // Check the layout because we don't want to let "x3.4s" or similar
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat register

  if (Parser.getTok().is(AsmToken::RBrac)) {
    // We're done
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat ','

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'

  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '0'

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}

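// Parses a shift or extend specifier with its optional amount, e.g.
// (illustrative examples) the "lsl #12" in "add x0, x1, #1, lsl #12" or the
// "uxtb" in "add x0, x2, w3, uxtb".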
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Guard the getIdentifier() call below: it asserts on non-identifier tokens.
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
      StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
        .Case("lsl", A64SE::LSL)
        .Case("msl", A64SE::MSL)
        .Case("lsr", A64SE::LSR)
        .Case("asr", A64SE::ASR)
        .Case("ror", A64SE::ROR)
        .Case("uxtb", A64SE::UXTB)
        .Case("uxth", A64SE::UXTH)
        .Case("uxtw", A64SE::UXTW)
        .Case("uxtx", A64SE::UXTX)
        .Case("sxtb", A64SE::SXTB)
        .Case("sxth", A64SE::SXTH)
        .Case("sxtw", A64SE::SXTW)
        .Case("sxtx", A64SE::SXTX)
        .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Eat the shift specifier
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
      Spec != A64SE::ROR && Spec != A64SE::MSL) {
    // The shift amount can be omitted for the extending versions, but not real
    // shifts:
    //     add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //     add x0, x0, x0, uxtb #0

    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      SMLoc E = Parser.getTok().getLoc();
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat # at beginning of immediate
  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(),
          "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we do actually have a number
  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));

  return MatchOperand_Success;
}

/// Try to parse a vector register token. If it is a vector register, the
/// token is eaten and true is returned; otherwise false is returned.
bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
                                      StringRef &Layout, SMLoc &LayoutLoc) {
  bool IsVector = true;

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    IsVector = false;
  else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
                .contains(RegNum) &&
           !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
                .contains(RegNum))
    IsVector = false;
  else if (Layout.empty())
    IsVector = false;

  if (!IsVector)
    Error(Parser.getTok().getLoc(), "expected vector type register");

  Parser.Lex(); // Eat this token.
  return IsVector;
}

// A vector list contains 1-4 consecutive registers.
// There are two kinds of vector list syntax when more than one vector is
// present:
//   (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
//   (2) {Vn.layout - Vm.layout}
// If the layout is like .b/.h/.s/.d, also parse the lane index.
AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
    SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
  if (Parser.getTok().isNot(AsmToken::LCurly)) {
    Error(Parser.getTok().getLoc(), "'{' expected");
    return MatchOperand_ParseFail;
  }
  SMLoc SLoc = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.

  unsigned Reg, Count = 1;
  StringRef LayoutStr;
  SMLoc RegEndLoc, LayoutLoc;
  if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
    return MatchOperand_ParseFail;

  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    unsigned Reg2;
    StringRef LayoutStr2;
    SMLoc RegEndLoc2, LayoutLoc2;
    SMLoc RegLoc2 = Parser.getTok().getLoc();

    if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
      return MatchOperand_ParseFail;
    unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);

    if (LayoutStr != LayoutStr2) {
      Error(LayoutLoc2, "expected the same vector layout");
      return MatchOperand_ParseFail;
    }
    if (Space == 0 || Space > 3) {
      Error(RegLoc2, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  } else {
    unsigned LastReg = Reg;
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.
      unsigned Reg2;
      StringRef LayoutStr2;
      SMLoc RegEndLoc2, LayoutLoc2;
      SMLoc RegLoc2 = Parser.getTok().getLoc();

      if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
        return MatchOperand_ParseFail;
      unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
                                        : (Reg2 + 32 - LastReg);
      Count++;

      // The gap between two consecutive vectors must be exactly one register,
      // the layouts must match, and the total count must not be greater
      // than 4.
      if (Space != 1) {
        Error(RegLoc2, "invalid space between two vectors");
        return MatchOperand_ParseFail;
      }
      if (LayoutStr != LayoutStr2) {
        Error(LayoutLoc2, "expected the same vector layout");
        return MatchOperand_ParseFail;
      }
      if (Count > 4) {
        Error(RegLoc2, "invalid number of vectors");
        return MatchOperand_ParseFail;
      }

      LastReg = Reg2;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  SMLoc ELoc = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '}' token.

  A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
  if (Count > 1) { // If count > 1, create vector list using super register.
    bool IsVec64 = (Layout < A64Layout::VL_16B);
    static const unsigned SupRegIDs[3][2] = {
      { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
      { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
      { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
    };
    unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
    unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
    const MCRegisterInfo *MRI = getContext().getRegisterInfo();
    Reg = MRI->getMatchingSuperReg(Reg, Sub0,
                                   &AArch64MCRegisterClasses[SupRegID]);
  }
  Operands.push_back(
      AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));

  if (Parser.getTok().is(AsmToken::LBrac)) {
    uint32_t NumLanes = 0;
    switch (Layout) {
    case A64Layout::VL_B: NumLanes = 16; break;
    case A64Layout::VL_H: NumLanes = 8; break;
    case A64Layout::VL_S: NumLanes = 4; break;
    case A64Layout::VL_D: NumLanes = 2; break;
    default:
      SMLoc Loc = getLexer().getLoc();
      Error(Loc, "expected comma before next operand");
      return MatchOperand_ParseFail;
    }
    return ParseNEONLane(Operands, NumLanes);
  } else {
    return MatchOperand_Success;
  }
}

// FIXME: We would really like to be able to tablegen'erate this.
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps + 1).getImm();

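    // With the standard BFM alias encoding (ImmR == -lsb modulo the register
    // width, ImmS == width - 1), an in-range insert has ImmS < ImmR whenever
    // lsb != 0. For example, "bfi x0, x1, #48, #32" gives ImmR == 16 and
    // ImmS == 31, so it is rejected below: 48 + 32 runs past bit 63.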
    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps + 1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

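    // For extracts, ImmR is the lsb and ImmS is lsb + width - 1. For example,
    // "ubfx w0, w1, #24, #16" gives ImmR == 24 and ImmS == 39, which is
    // rejected below because the field runs past bit 31 of the W register.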
    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}

// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list.
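// For example, "b.eq lbl" (or the alias "beq lbl") becomes the token "b", a
// "." token and a condition-code operand, followed by the label operand.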
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef PatchedName = StringSwitch<StringRef>(Name.lower())
    .Case("beq", "b.eq")
    .Case("bne", "b.ne")
    .Case("bhs", "b.hs")
    .Case("bcs", "b.cs")
    .Case("blo", "b.lo")
    .Case("bcc", "b.cc")
    .Case("bmi", "b.mi")
    .Case("bpl", "b.pl")
    .Case("bvs", "b.vs")
    .Case("bvc", "b.vc")
    .Case("bhi", "b.hi")
    .Case("bls", "b.ls")
    .Case("bge", "b.ge")
    .Case("blt", "b.lt")
    .Case("bgt", "b.gt")
    .Case("ble", "b.le")
    .Case("bal", "b.al")
    .Case("bnv", "b.nv")
    .Default(Name);

  size_t CondCodePos = PatchedName.find('.');

  StringRef Mnemonic = PatchedName.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = PatchedName.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
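///  e.g. (illustrative example) ".word 0x1234, label + 4" emits two 4-byte
///  values.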
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return false;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma)) {
        Error(L, "unexpected token in directive");
        return false;
      }
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
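//   Marks the following call for TLS-descriptor relocation, e.g. (an
//   illustrative fragment of the TLS descriptor calling sequence):
//       .tlsdesccall var
//       blr x1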
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name)) {
    Error(L, "expected symbol after directive");
    return false;
  }

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst, STI);
  return false;
}

bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                 MCStreamer &Out, unsigned &ErrorInfo,
                                 bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                              MatchingInlineAsm);

  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");

  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");

  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx', 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
        "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0 or invalid register type");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 504]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1008]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
             "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  case Match_ShrImm8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 8]");
  case Match_ShrImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 16]");
  case Match_ShrImm32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_ShrImm64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_ShlImm8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_ShlImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_ShlImm32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_ShlImm64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
    break;
  }
}

void AArch64Operand::dump() const {
  print(errs());
}

/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"