1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "Utils/AArch64BaseInfo.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCContext.h"
17#include "llvm/MC/MCExpr.h"
18#include "llvm/MC/MCInst.h"
19#include "llvm/MC/MCRegisterInfo.h"
20#include "llvm/MC/MCStreamer.h"
21#include "llvm/MC/MCSubtargetInfo.h"
22#include "llvm/MC/MCSymbol.h"
23#include "llvm/MC/MCTargetAsmParser.h"
24#include "llvm/Support/SourceMgr.h"
25#include "llvm/Support/TargetRegistry.h"
26#include "llvm/Support/ErrorHandling.h"
27#include "llvm/Support/raw_ostream.h"
28#include "llvm/ADT/SmallString.h"
29#include "llvm/ADT/SmallVector.h"
30#include "llvm/ADT/STLExtras.h"
31#include "llvm/ADT/StringSwitch.h"
32#include "llvm/ADT/Twine.h"
33#include <cstdio>
34using namespace llvm;
35
36namespace {
37
38class AArch64Operand;
39
class AArch64AsmParser : public MCTargetAsmParser {
private:
  StringRef Mnemonic; ///< Instruction mnemonic.
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Map of register aliases created via the .req directive. The mapped pair
  // is presumably (is-vector, register number) — it parallels the
  // (Name, isVector) signature of matchRegisterNameAlias below; confirm
  // against parseDirectiveReq.
  StringMap<std::pair<bool, unsigned> > RegisterReqs;

  // Returns the AArch64-specific target streamer. The constructor below
  // installs one on the MCStreamer if none is present, so the cast is safe.
  AArch64TargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AArch64TargetStreamer &>(TS);
  }

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  /// Location of the token currently under the lexer cursor.
  SMLoc getLoc() const { return Parser.getTok().getLoc(); }

  // Operand/instruction parsing helpers. The bool-returning ones follow the
  // usual MCAsmParser convention of returning true on error.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  // Diagnostic helpers forwarding to the generic assembly parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  // Handlers for target-specific assembler directives
  // (.word, .tlsdesccall, .loh, .ltorg, .req, .unreq).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);

  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               unsigned &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the auto-generated matcher tables.
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

public:
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                 const MCInstrInfo &MII,
                 const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);
    // Ensure a target streamer exists so getTargetStreamer() never derefs
    // null (the streamer takes ownership of the object created here).
    if (Parser.getStreamer().getTargetStreamer() == nullptr)
      new AArch64TargetStreamer(Parser.getStreamer());

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;

  /// Decompose a symbolic expression into its AArch64/Darwin reference kind
  /// and constant addend. Returns false if the expression is not understood.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
140} // end anonymous namespace
141
142namespace {
143
/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  /// Discriminator for the payload union below; exactly one union member is
  /// active at a time.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier
  } Kind;

  SMLoc StartLoc, EndLoc; // Source range spanned by this operand.

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;       // First register of the list.
    unsigned Count;        // Number of consecutive registers.
    unsigned NumElements;  // Lanes per register (0 if implicit).
    unsigned ElementKind;  // Element type letter ('b','h','s','d'); 0 if none.
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint64_t FeatureBits; // We need to pass through information about which
                          // core we are compiling for so that the SysReg
                          // Mappers can appropriately conditionalize.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
  };

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  // NOTE(review): ExtendOp is not a member of the union below and appears
  // unreferenced in this file — possibly dead; confirm before removing.
  struct ExtendOp {
    unsigned Val;
  };

  // Payload storage. Which member is valid is determined by Kind; the copy
  // constructor copies only the active member.
  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;
254
public:
  AArch64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}

  // Copy constructor: because the payload lives in a union, only the member
  // selected by o.Kind may legally be read, so copy exactly that member.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
305
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Typed accessors for the union payload. Each asserts that this operand is
  // of the matching kind before reading union state.
  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  uint64_t getSysRegFeatureBits() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return SysReg.FeatureBits;
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }

  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  }
405
  bool isImm() const override { return Kind == k_Immediate; }
  bool isMem() const override { return false; }
  // Signed 9-bit immediate: [-256, 255].
  bool isSImm9() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val < 256);
  }
  // Signed 7-bit immediate scaled by 4: multiples of 4 in [-256, 252].
  bool isSImm7s4() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
  }
  // Signed 7-bit immediate scaled by 8: multiples of 8 in [-512, 504].
  bool isSImm7s8() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
  }
  // Signed 7-bit immediate scaled by 16: multiples of 16 in [-1024, 1008].
  bool isSImm7s16() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
  }

  // True if Expr is a relocatable low-12-bit page-offset style reference whose
  // addend is non-negative and a multiple of Scale.
  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

  // Unsigned 12-bit offset scaled by Scale: Val/Scale in [0, 0xfff].
  // Non-constant expressions are handed to isSymbolicUImm12Offset.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm(), Scale);

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
489
490  bool isImm0_7() const {
491    if (!isImm())
492      return false;
493    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
494    if (!MCE)
495      return false;
496    int64_t Val = MCE->getValue();
497    return (Val >= 0 && Val < 8);
498  }
499  bool isImm1_8() const {
500    if (!isImm())
501      return false;
502    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
503    if (!MCE)
504      return false;
505    int64_t Val = MCE->getValue();
506    return (Val > 0 && Val < 9);
507  }
508  bool isImm0_15() const {
509    if (!isImm())
510      return false;
511    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
512    if (!MCE)
513      return false;
514    int64_t Val = MCE->getValue();
515    return (Val >= 0 && Val < 16);
516  }
517  bool isImm1_16() const {
518    if (!isImm())
519      return false;
520    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
521    if (!MCE)
522      return false;
523    int64_t Val = MCE->getValue();
524    return (Val > 0 && Val < 17);
525  }
526  bool isImm0_31() const {
527    if (!isImm())
528      return false;
529    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
530    if (!MCE)
531      return false;
532    int64_t Val = MCE->getValue();
533    return (Val >= 0 && Val < 32);
534  }
535  bool isImm1_31() const {
536    if (!isImm())
537      return false;
538    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
539    if (!MCE)
540      return false;
541    int64_t Val = MCE->getValue();
542    return (Val >= 1 && Val < 32);
543  }
544  bool isImm1_32() const {
545    if (!isImm())
546      return false;
547    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
548    if (!MCE)
549      return false;
550    int64_t Val = MCE->getValue();
551    return (Val >= 1 && Val < 33);
552  }
553  bool isImm0_63() const {
554    if (!isImm())
555      return false;
556    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
557    if (!MCE)
558      return false;
559    int64_t Val = MCE->getValue();
560    return (Val >= 0 && Val < 64);
561  }
562  bool isImm1_63() const {
563    if (!isImm())
564      return false;
565    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
566    if (!MCE)
567      return false;
568    int64_t Val = MCE->getValue();
569    return (Val >= 1 && Val < 64);
570  }
571  bool isImm1_64() const {
572    if (!isImm())
573      return false;
574    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
575    if (!MCE)
576      return false;
577    int64_t Val = MCE->getValue();
578    return (Val >= 1 && Val < 65);
579  }
580  bool isImm0_127() const {
581    if (!isImm())
582      return false;
583    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
584    if (!MCE)
585      return false;
586    int64_t Val = MCE->getValue();
587    return (Val >= 0 && Val < 128);
588  }
589  bool isImm0_255() const {
590    if (!isImm())
591      return false;
592    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
593    if (!MCE)
594      return false;
595    int64_t Val = MCE->getValue();
596    return (Val >= 0 && Val < 256);
597  }
598  bool isImm0_65535() const {
599    if (!isImm())
600      return false;
601    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
602    if (!MCE)
603      return false;
604    int64_t Val = MCE->getValue();
605    return (Val >= 0 && Val < 65536);
606  }
607  bool isImm32_63() const {
608    if (!isImm())
609      return false;
610    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
611    if (!MCE)
612      return false;
613    int64_t Val = MCE->getValue();
614    return (Val >= 32 && Val < 64);
615  }
  // 32-bit logical immediate (bitmask immediate). The value may arrive
  // sign-extended to 64 bits, so accept upper halves of all-zeros or all-ones
  // before truncating. NOTE(review): `Val >> 32` on a negative value relies on
  // arithmetic right shift, which is implementation-defined pre-C++20.
  bool isLogicalImm32() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = MCE->getValue();
    if (Val >> 32 != 0 && Val >> 32 != ~0LL)
      return false;
    Val &= 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  // 64-bit logical immediate (bitmask immediate).
  bool isLogicalImm64() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
  }
  // Inverted forms: the bitwise NOT of the value must be a valid logical
  // immediate (used for alias forms like BIC with immediate).
  bool isLogicalImm32Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    return AArch64_AM::isLogicalImmediate(Val, 32);
  }
  bool isLogicalImm64Not() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
  }
  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
  // ADD/SUB immediate: either a plain 12-bit unsigned value (optionally
  // 'lsl #12'-shifted) or one of the page-offset/TLS relocation modifiers.
  bool isAddSubImm() const {
    if (!isShiftedImm() && !isImm())
      return false;

    const MCExpr *Expr;

    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
    if (isShiftedImm()) {
      unsigned Shift = ShiftedImm.ShiftAmount;
      Expr = ShiftedImm.Val;
      if (Shift != 0 && Shift != 12)
        return false;
    } else {
      Expr = getImm();
    }

    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
                                          DarwinRefKind, Addend)) {
      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
          || ELFRefKind == AArch64MCExpr::VK_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    // NOTE(review): cast<> asserts if Expr is a non-constant expression that
    // classifySymbolRef did not understand — confirm callers cannot reach
    // this with such an expression.
    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }
  bool isCondCode() const { return Kind == k_CondCode; }
  // FMOV-style 64-bit vector modified-immediate (AdvSIMD type 10).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }
  // Branch targets: non-constant expressions are accepted unconditionally and
  // left to the fixup/relocation machinery; constants must be 4-byte aligned
  // and within the encodable PC-relative range.
  bool isBranchTarget26() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
  }
  bool isPCRelLabel19() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
  }
  bool isBranchTarget14() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
  }
734
735  bool
736  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
737    if (!isImm())
738      return false;
739
740    AArch64MCExpr::VariantKind ELFRefKind;
741    MCSymbolRefExpr::VariantKind DarwinRefKind;
742    int64_t Addend;
743    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
744                                             DarwinRefKind, Addend)) {
745      return false;
746    }
747    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
748      return false;
749
750    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
751      if (ELFRefKind == AllowedModifiers[i])
752        return Addend == 0;
753    }
754
755    return false;
756  }
757
  // The following predicates enumerate, per MOVZ/MOVK 16-bit chunk (G0..G3),
  // the relocation modifiers that are legal for that instruction form; each
  // simply delegates to isMovWSymbol with the allowed set.
  bool isMovZSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
        AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1,      AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
        AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2_NC};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
      AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
      AArch64MCExpr::VK_DTPREL_G1_NC
    };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
      AArch64MCExpr::VK_ABS_G0_NC,   AArch64MCExpr::VK_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
    };
    return isMovWSymbol(Variants);
  }
812
  /// True if this constant can be materialized by "MOVZ, lsl #Shift" for a
  /// RegWidth-bit destination, i.e. all set bits fit in one 16-bit chunk at
  /// the given shift.
  template<int RegWidth, int Shift>
  bool isMOVZMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
    if (Value == 0 && Shift != 0)
      return false;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }

  /// True if this constant can be materialized by "MOVN, lsl #Shift" (and is
  /// NOT already expressible as a MOVZ, which takes precedence).
  template<int RegWidth, int Shift>
  bool isMOVNMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    // MOVZ takes precedence over MOVN.
    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
        return false;

    Value = ~Value;
    if (RegWidth == 32)
      Value &= 0xffffffffULL;

    return (Value & ~(0xffffULL << Shift)) == 0;
  }
850
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }
  // System-register predicates: the name must be recognized by the relevant
  // mapper for the current core's feature bits.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  // Vector register constrained to the low half of the FPR128 file (V0-V15),
  // as required by some indexed-element instructions.
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
  // A 64-bit GPR written where a 32-bit register is expected (handled by the
  // matcher via validateTargetOperandClass).
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  // 64-bit GPR or SP (the GPR64sp register class).
  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
  }
896
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    // ElementKind == 0 means no ".16b"-style suffix was written.
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }

  /// Vector list whose written type (e.g. ".4s") matches NumRegs registers of
  /// NumElements lanes with element-type letter ElementKind.
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }
914
915  bool isVectorIndex1() const {
916    return Kind == k_VectorIndex && VectorIndex.Val == 1;
917  }
918  bool isVectorIndexB() const {
919    return Kind == k_VectorIndex && VectorIndex.Val < 16;
920  }
921  bool isVectorIndexH() const {
922    return Kind == k_VectorIndex && VectorIndex.Val < 8;
923  }
924  bool isVectorIndexS() const {
925    return Kind == k_VectorIndex && VectorIndex.Val < 4;
926  }
927  bool isVectorIndexD() const {
928    return Kind == k_VectorIndex && VectorIndex.Val < 2;
929  }
930  bool isToken() const override { return Kind == k_Token; }
931  bool isTokenEqual(StringRef Str) const {
932    return Kind == k_Token && getToken() == Str;
933  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }
  // A register shift operand: LSL/LSR/ASR/ROR/MSL (MSL is vector-only but is
  // filtered later by the more specific predicates below).
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }
  // A register extend operand: any UXT*/SXT* (LSL is accepted as an alias for
  // UXTW/UXTX) with an amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
958
  // Extend usable with a 32-bit source register; UXTX/SXTX are excluded
  // since they imply a 64-bit source (covered by isExtendLSL64 below).
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
  // Extend usable with a 64-bit source register: UXTX, SXTX, or plain LSL,
  // again limited to a shift amount of 4.
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
974
  // Extend valid for a 64-bit register offset in a Width-bit load/store:
  // LSL or SXTX, shifted by log2 of the access size in bytes or by 0.
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  // Same check for a 32-bit offset register: only UXTW/SXTW qualify.
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
992
  // Shifter valid on arithmetic (add/sub) operands; the amount must be
  // strictly below the register width given by the template parameter.
  template <unsigned width>
  bool isArithmeticShifter() const {
    if (!isShifter())
      return false;

    // An arithmetic shifter is LSL, LSR, or ASR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
  }

  // Shifter valid on logical operands; additionally allows ROR.
  template <unsigned width>
  bool isLogicalShifter() const {
    if (!isShifter())
      return false;

    // A logical shifter is LSL, LSR, ASR or ROR.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
           getShiftExtendAmount() < width;
  }
1015
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (The comments of the 32- and 64-bit variants were previously swapped;
    // the code below is what matters.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }

  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1039
  bool isLogicalVecShifter() const {
    if (!isShifter())
      return false;

    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
  }

  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector half-word shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }

  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL (shift-ones) shift by 8 or 16.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1069
  // Fallback unscaled operands are for aliases of LDR/STR that fall back
  // to LDUR/STUR when the offset is not legal for the former but is for
  // the latter. As such, in addition to checking for being a legal unscaled
  // address, also check that it is not a legal scaled address. This avoids
  // ambiguity in the matcher. Width is the access size in bits.
  template<int Width>
  bool isSImm9OffsetFB() const {
    return isSImm9() && !isUImm12Offset<Width / 8>();
  }
1079
1080  bool isAdrpLabel() const {
1081    // Validation was handled during parsing, so we just sanity check that
1082    // something didn't go haywire.
1083    if (!isImm())
1084        return false;
1085
1086    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1087      int64_t Val = CE->getValue();
1088      int64_t Min = - (4096 * (1LL << (21 - 1)));
1089      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1090      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1091    }
1092
1093    return true;
1094  }
1095
1096  bool isAdrLabel() const {
1097    // Validation was handled during parsing, so we just sanity check that
1098    // something didn't go haywire.
1099    if (!isImm())
1100        return false;
1101
1102    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1103      int64_t Val = CE->getValue();
1104      int64_t Min = - (1LL << (21 - 1));
1105      int64_t Max = ((1LL << (21 - 1)) - 1);
1106      return Val >= Min && Val <= Max;
1107    }
1108
1109    return true;
1110  }
1111
  // Append Expr to Inst, folding to a plain immediate when it is a
  // constant. A null Expr is treated as immediate 0.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
1121
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Emit the W register with the same encoding as the parsed X register
  // (e.g. x3 -> w3), for forms written with 64-bit syntax but encoded on
  // the 32-bit register class.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));
  }
1138
  // Vector registers are parsed as Q registers; translate to the D bank by
  // enum arithmetic (relies on D0..D31 / Q0..Q31 being numbered
  // contiguously in the generated register enum, as elsewhere in this file).
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
  }

  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1157
1158  template <unsigned NumRegs>
1159  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1160    assert(N == 1 && "Invalid number of operands!");
1161    static unsigned FirstRegs[] = { AArch64::D0,       AArch64::D0_D1,
1162                                    AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1163    unsigned FirstReg = FirstRegs[NumRegs - 1];
1164
1165    Inst.addOperand(
1166        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1167  }
1168
1169  template <unsigned NumRegs>
1170  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1171    assert(N == 1 && "Invalid number of operands!");
1172    static unsigned FirstRegs[] = { AArch64::Q0,       AArch64::Q0_Q1,
1173                                    AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1174    unsigned FirstReg = FirstRegs[NumRegs - 1];
1175
1176    Inst.addOperand(
1177        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1178  }
1179
  // Vector lane indices are emitted as plain immediates; the range was
  // checked by the corresponding isVectorIndex* predicates.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1204
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // If this is a pageoff symrefexpr with an addend, adjust the addend
    // to be only the page-offset portion. Otherwise, just add the expr
    // as-is.
    // NOTE(review): no such adjustment is visible here; presumably it is
    // handled during parsing or fixup application — confirm.
    addExpr(Inst, getImm());
  }

  // ADD/SUB immediate with an optional "lsl #12": emits the value and the
  // shift as two operands (shift 0 when none was written).
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }
1223
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  // ADRP targets encode a page delta, so constants drop their low 12 bits;
  // unresolved label expressions are emitted unchanged for fixups.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }

  // ADR targets are byte-granular, so the plain immediate path suffices.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1241
  // Scaled unsigned 12-bit offset: constants are divided by the access
  // size (Scale, in bytes); symbolic expressions pass through so a
  // relocation can be emitted.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
  }
1253
  // Signed (optionally scaled) immediates for unscaled/paired load-store
  // offsets. Parsing guarantees a constant here, so a non-constant
  // expression is a matcher bug (hence the asserts).
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }

  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }

  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }
1281
  // Range-restricted immediates (ImmN_M): the matching predicates are
  // expected to have validated the range already, so these simply emit
  // the constant value unchanged.
  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }

  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1379
  // Logical (bitmask) immediates are stored in their encoded N:immr:imms
  // form. The "Not" variants encode the bitwise complement of the written
  // value, for inverted aliases (e.g. BIC with immediate).
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid logical immediate operand!");
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid logical immediate operand!");
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    // Complement, truncated to the low 32 bits before encoding.
    int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }

  void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
    uint64_t encoding =
        AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1412
  // AdvSIMD modified-immediate type 10 (64-bit per-byte mask form);
  // the value is stored pre-encoded.
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid immediate operand!");
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1420
1421  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1422    // Branch operands don't encode the low bits, so shift them off
1423    // here. If it's a label, however, just put it on directly as there's
1424    // not enough information now to do anything.
1425    assert(N == 1 && "Invalid number of operands!");
1426    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1427    if (!MCE) {
1428      addExpr(Inst, getImm());
1429      return;
1430    }
1431    assert(MCE && "Invalid constant immediate operand!");
1432    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1433  }
1434
1435  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1436    // Branch operands don't encode the low bits, so shift them off
1437    // here. If it's a label, however, just put it on directly as there's
1438    // not enough information now to do anything.
1439    assert(N == 1 && "Invalid number of operands!");
1440    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1441    if (!MCE) {
1442      addExpr(Inst, getImm());
1443      return;
1444    }
1445    assert(MCE && "Invalid constant immediate operand!");
1446    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1447  }
1448
1449  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1450    // Branch operands don't encode the low bits, so shift them off
1451    // here. If it's a label, however, just put it on directly as there's
1452    // not enough information now to do anything.
1453    assert(N == 1 && "Invalid number of operands!");
1454    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1455    if (!MCE) {
1456      addExpr(Inst, getImm());
1457      return;
1458    }
1459    assert(MCE && "Invalid constant immediate operand!");
1460    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1461  }
1462
  // The FP immediate is stored in its 8-bit encoded form already.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getBarrier()));
  }
1472
  // Translate the stored system-register name to its encoding. Valid is
  // intentionally unchecked here — presumably the name was validated when
  // the operand was parsed (confirm against the parser).
  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  // MSR (immediate) PSTATE field name -> encoding, same unchecked-Valid
  // convention as above.
  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    uint32_t Bits =
        AArch64PState::PStateMapper().fromString(getSysReg(), Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }
1502
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getSysCR()));
  }

  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
  }

  // Pack shift kind and amount into the single shifter-operand immediate.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1519
  // Arithmetic extend for a 32-bit offset register; a bare LSL written by
  // the user is the alias for UXTW in this context.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Same for a 64-bit offset register, where bare LSL aliases UXTX.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Memory-operand extend: emits two flags — sign-extension, and whether
  // the offset register is shifted (non-zero amount).
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
  }
1543
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    // Second flag: whether the user wrote the #0 explicitly.
    Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
  }
1555
  // MOV-immediate alias of MOVZ: extract the 16-bit chunk that sits at the
  // given shift position.
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
  }

  // MOV-immediate alias of MOVN: same extraction on the complemented value.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
  }
1573
1574  void print(raw_ostream &OS) const override;
1575
  // Factory for a bare token operand (mnemonic pieces, punctuation).
  // Tokens point into the source buffer, so Str must outlive the operand.
  static std::unique_ptr<AArch64Operand>
  CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->Tok.IsSuffix = IsSuffix;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory for a register operand; isVector distinguishes NEON registers.
  static std::unique_ptr<AArch64Operand>
  CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
    Op->Reg.RegNum = RegNum;
    Op->Reg.isVector = isVector;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a vector register list ({v0.8b, v1.8b, ...}).
  static std::unique_ptr<AArch64Operand>
  CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
                   char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.NumElements = NumElements;
    Op->VectorList.ElementKind = ElementKind;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1609
  // Factory for a vector lane index ([n]).
  static std::unique_ptr<AArch64Operand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a plain immediate expression.
  static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
                                                   SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1627
1628  static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1629                                                          unsigned ShiftAmount,
1630                                                          SMLoc S, SMLoc E,
1631                                                          MCContext &Ctx) {
1632    auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1633    Op->ShiftedImm .Val = Val;
1634    Op->ShiftedImm.ShiftAmount = ShiftAmount;
1635    Op->StartLoc = S;
1636    Op->EndLoc = E;
1637    return Op;
1638  }
1639
  // Factory for a condition-code operand (eq, ne, ...).
  static std::unique_ptr<AArch64Operand>
  CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
    Op->CondCode.Code = Code;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for an FP immediate; Val is the already-encoded 8-bit form.
  static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
                                                     MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory for a barrier option (DSB/DMB/ISB argument).
  static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
                                                       MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
    Op->Barrier.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1666
  // Factory for a system-register operand; the name is kept as a string
  // (pointing into the source buffer) and mapped to bits at emission time.
  static std::unique_ptr<AArch64Operand>
  CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
    Op->SysReg.Data = Str.data();
    Op->SysReg.Length = Str.size();
    Op->SysReg.FeatureBits = FeatureBits;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Factory for a SYS/SYSL control register field (c0-c15).
  static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
                                                     SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
    Op->SysCRImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a PRFM prefetch operation operand.
  static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
                                                        MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
    Op->Prefetch.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
1695
  // Factory for a shift/extend modifier; HasExplicitAmount records whether
  // the user wrote an amount (needed to disambiguate some 8-bit mem forms).
  static std::unique_ptr<AArch64Operand>
  CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
                    bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
    Op->ShiftExtend.Type = ShOp;
    Op->ShiftExtend.Amount = Val;
    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
1707};
1708
1709} // end anonymous namespace.
1710
/// Debug dump of an operand; one branch per operand kind. The exact output
/// text is relied on by debug tooling, so keep the formats stable.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    bool Valid;
    StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
    if (Valid)
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    getShiftedImmVal()->print(OS);
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    bool Valid;
    StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
    if (Valid)
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    // Flag amounts that were implied rather than written by the user.
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
  }
}
1781
1782/// @name Auto-generated Match Functions
1783/// {
1784
1785static unsigned MatchRegisterName(StringRef Name);
1786
1787/// }
1788
1789static unsigned matchVectorRegName(StringRef Name) {
1790  return StringSwitch<unsigned>(Name)
1791      .Case("v0", AArch64::Q0)
1792      .Case("v1", AArch64::Q1)
1793      .Case("v2", AArch64::Q2)
1794      .Case("v3", AArch64::Q3)
1795      .Case("v4", AArch64::Q4)
1796      .Case("v5", AArch64::Q5)
1797      .Case("v6", AArch64::Q6)
1798      .Case("v7", AArch64::Q7)
1799      .Case("v8", AArch64::Q8)
1800      .Case("v9", AArch64::Q9)
1801      .Case("v10", AArch64::Q10)
1802      .Case("v11", AArch64::Q11)
1803      .Case("v12", AArch64::Q12)
1804      .Case("v13", AArch64::Q13)
1805      .Case("v14", AArch64::Q14)
1806      .Case("v15", AArch64::Q15)
1807      .Case("v16", AArch64::Q16)
1808      .Case("v17", AArch64::Q17)
1809      .Case("v18", AArch64::Q18)
1810      .Case("v19", AArch64::Q19)
1811      .Case("v20", AArch64::Q20)
1812      .Case("v21", AArch64::Q21)
1813      .Case("v22", AArch64::Q22)
1814      .Case("v23", AArch64::Q23)
1815      .Case("v24", AArch64::Q24)
1816      .Case("v25", AArch64::Q25)
1817      .Case("v26", AArch64::Q26)
1818      .Case("v27", AArch64::Q27)
1819      .Case("v28", AArch64::Q28)
1820      .Case("v29", AArch64::Q29)
1821      .Case("v30", AArch64::Q30)
1822      .Case("v31", AArch64::Q31)
1823      .Default(0);
1824}
1825
1826static bool isValidVectorKind(StringRef Name) {
1827  return StringSwitch<bool>(Name.lower())
1828      .Case(".8b", true)
1829      .Case(".16b", true)
1830      .Case(".4h", true)
1831      .Case(".8h", true)
1832      .Case(".2s", true)
1833      .Case(".4s", true)
1834      .Case(".1d", true)
1835      .Case(".2d", true)
1836      .Case(".1q", true)
1837      // Accept the width neutral ones, too, for verbose syntax. If those
1838      // aren't used in the right places, the token operand won't match so
1839      // all will work out.
1840      .Case(".b", true)
1841      .Case(".h", true)
1842      .Case(".s", true)
1843      .Case(".d", true)
1844      .Default(false);
1845}
1846
1847static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1848                                 char &ElementKind) {
1849  assert(isValidVectorKind(Name));
1850
1851  ElementKind = Name.lower()[Name.size() - 1];
1852  NumElements = 0;
1853
1854  if (Name.size() == 2)
1855    return;
1856
1857  // Parse the lane count
1858  Name = Name.drop_front();
1859  while (isdigit(Name.front())) {
1860    NumElements = 10 * NumElements + (Name.front() - '0');
1861    Name = Name.drop_front();
1862  }
1863}
1864
/// ParseRegister - MCTargetAsmParser hook to parse a single register. On
/// success RegNo holds the register and StartLoc/EndLoc delimit its token;
/// returns true on failure (RegNo is then (unsigned)-1).
bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                     SMLoc &EndLoc) {
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  // tryParseRegister() consumed the token on success; back up one character
  // so EndLoc points at the last character of the register name.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return (RegNo == (unsigned)-1);
}
1872
1873// Matches a register name or register alias previously defined by '.req'
1874unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1875                                                  bool isVector) {
1876  unsigned RegNum = isVector ? matchVectorRegName(Name)
1877                             : MatchRegisterName(Name);
1878
1879  if (RegNum == 0) {
1880    // Check for aliases registered via .req. Canonicalize to lower case.
1881    // That's more consistent since register names are case insensitive, and
1882    // it's how the original entry was passed in from MC/MCParser/AsmParser.
1883    auto Entry = RegisterReqs.find(Name.lower());
1884    if (Entry == RegisterReqs.end())
1885      return 0;
1886    // set RegNum if the match is the right kind of register
1887    if (isVector == Entry->getValue().first)
1888      RegNum = Entry->getValue().second;
1889  }
1890  return RegNum;
1891}
1892
1893/// tryParseRegister - Try to parse a register name. The token must be an
1894/// Identifier when called, and if it is a register name the token is eaten and
1895/// the register is added to the operand list.
1896int AArch64AsmParser::tryParseRegister() {
1897  const AsmToken &Tok = Parser.getTok();
1898  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1899
1900  std::string lowerCase = Tok.getString().lower();
1901  unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1902  // Also handle a few aliases of registers.
1903  if (RegNum == 0)
1904    RegNum = StringSwitch<unsigned>(lowerCase)
1905                 .Case("fp",  AArch64::FP)
1906                 .Case("lr",  AArch64::LR)
1907                 .Case("x31", AArch64::XZR)
1908                 .Case("w31", AArch64::WZR)
1909                 .Default(0);
1910
1911  if (RegNum == 0)
1912    return -1;
1913
1914  Parser.Lex(); // Eat identifier token.
1915  return RegNum;
1916}
1917
1918/// tryMatchVectorRegister - Try to parse a vector register name with optional
1919/// kind specifier. If it is a register specifier, eat the token and return it.
1920int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1921  if (Parser.getTok().isNot(AsmToken::Identifier)) {
1922    TokError("vector register expected");
1923    return -1;
1924  }
1925
1926  StringRef Name = Parser.getTok().getString();
1927  // If there is a kind specifier, it's separated from the register name by
1928  // a '.'.
1929  size_t Start = 0, Next = Name.find('.');
1930  StringRef Head = Name.slice(Start, Next);
1931  unsigned RegNum = matchRegisterNameAlias(Head, true);
1932
1933  if (RegNum) {
1934    if (Next != StringRef::npos) {
1935      Kind = Name.slice(Next, StringRef::npos);
1936      if (!isValidVectorKind(Kind)) {
1937        TokError("invalid vector kind qualifier");
1938        return -1;
1939      }
1940    }
1941    Parser.Lex(); // Eat the register token.
1942    return RegNum;
1943  }
1944
1945  if (expected)
1946    TokError("vector register expected");
1947  return -1;
1948}
1949
1950/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1951AArch64AsmParser::OperandMatchResultTy
1952AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1953  SMLoc S = getLoc();
1954
1955  if (Parser.getTok().isNot(AsmToken::Identifier)) {
1956    Error(S, "Expected cN operand where 0 <= N <= 15");
1957    return MatchOperand_ParseFail;
1958  }
1959
1960  StringRef Tok = Parser.getTok().getIdentifier();
1961  if (Tok[0] != 'c' && Tok[0] != 'C') {
1962    Error(S, "Expected cN operand where 0 <= N <= 15");
1963    return MatchOperand_ParseFail;
1964  }
1965
1966  uint32_t CRNum;
1967  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1968  if (BadNum || CRNum > 15) {
1969    Error(S, "Expected cN operand where 0 <= N <= 15");
1970    return MatchOperand_ParseFail;
1971  }
1972
1973  Parser.Lex(); // Eat identifier token.
1974  Operands.push_back(
1975      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1976  return MatchOperand_Success;
1977}
1978
/// tryParsePrefetch - Try to parse a prefetch operand: either a named hint
/// or a 5-bit immediate, optionally preceded by '#'.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  SMLoc S = getLoc();
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    if (Hash)
      Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // Only a constant is meaningful for the prfop field.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    }
    // The prfop field is 5 bits wide. A negative constant wraps to a large
    // unsigned value here and is rejected by the range check below.
    unsigned prfop = MCE->getValue();
    if (prfop > 31) {
      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  // Map a symbolic hint name to its encoding.
  bool Valid;
  unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
  return MatchOperand_Success;
}
2024
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. The label may carry a relocation modifier; only page-style
/// modifiers (or none at all) are accepted.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr;

  // An optional '#' may precede the expression.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references must not carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
2074
2075/// tryParseAdrLabel - Parse and validate a source label for the ADR
2076/// instruction.
2077AArch64AsmParser::OperandMatchResultTy
2078AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2079  SMLoc S = getLoc();
2080  const MCExpr *Expr;
2081
2082  if (Parser.getTok().is(AsmToken::Hash)) {
2083    Parser.Lex(); // Eat hash token.
2084  }
2085
2086  if (getParser().parseExpression(Expr))
2087    return MatchOperand_ParseFail;
2088
2089  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2090  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2091
2092  return MatchOperand_Success;
2093}
2094
/// tryParseFPImm - A floating point immediate expression operand.
/// Accepts an optional '#', an optional '-', then either a real literal
/// (encoded to the 8-bit FMOV form) or an integer literal: a hex literal is
/// taken as an already-encoded 8-bit value, a decimal one is re-encoded as
/// if it were a real.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm yields -1 when the value has no 8-bit FMOV encoding.
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    if (!isNegative && Tok.getString().startswith("0x")) {
      // Hex literal: the value is the pre-encoded 8-bit immediate itself.
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      // Decimal literal: treat like a real (e.g. "#1" behaves as "#1.0").
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      // NOTE(review): unlike the Real branch above, an unencodable value
      // (-1 result) is not rejected here — presumably caught later during
      // matching; confirm.
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  // Without a leading '#' this simply wasn't an FP immediate; let other
  // operand parsers have a go.
  if (!Hash)
    return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
2156
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand.
/// Accepts "#imm" or a bare integer, optionally followed by ", lsl #N".
/// A plain constant wider than 12 bits whose low 12 bits are clear is
/// folded into the equivalent "imm>>12, lsl #12" form.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  const MCExpr *Imm;
  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift follows: fold an oversized constant into "lsl #12"
    // when possible so it still fits the 12-bit immediate field.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
    if (MCE) {
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::Create(Val >> 12, getContext());
        ShiftAmount = 12;
      }
    }
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
                                                        getContext()));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  // Eat 'lsl'
  Parser.Lex();

  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex();
  }

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  // NOTE(review): only non-negativity is checked here; presumably amounts
  // other than 0/12 are rejected later by the instruction matcher — confirm.
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                      S, E, getContext()));
  return MatchOperand_Success;
}
2222
2223/// parseCondCodeString - Parse a Condition Code string.
2224AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2225  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2226                    .Case("eq", AArch64CC::EQ)
2227                    .Case("ne", AArch64CC::NE)
2228                    .Case("cs", AArch64CC::HS)
2229                    .Case("hs", AArch64CC::HS)
2230                    .Case("cc", AArch64CC::LO)
2231                    .Case("lo", AArch64CC::LO)
2232                    .Case("mi", AArch64CC::MI)
2233                    .Case("pl", AArch64CC::PL)
2234                    .Case("vs", AArch64CC::VS)
2235                    .Case("vc", AArch64CC::VC)
2236                    .Case("hi", AArch64CC::HI)
2237                    .Case("ls", AArch64CC::LS)
2238                    .Case("ge", AArch64CC::GE)
2239                    .Case("lt", AArch64CC::LT)
2240                    .Case("gt", AArch64CC::GT)
2241                    .Case("le", AArch64CC::LE)
2242                    .Case("al", AArch64CC::AL)
2243                    .Case("nv", AArch64CC::NV)
2244                    .Default(AArch64CC::Invalid);
2245  return CC;
2246}
2247
2248/// parseCondCode - Parse a Condition Code operand.
2249bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2250                                     bool invertCondCode) {
2251  SMLoc S = getLoc();
2252  const AsmToken &Tok = Parser.getTok();
2253  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2254
2255  StringRef Cond = Tok.getString();
2256  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2257  if (CC == AArch64CC::Invalid)
2258    return TokError("invalid condition code");
2259  Parser.Lex(); // Eat identifier token.
2260
2261  if (invertCondCode) {
2262    if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2263      return TokError("condition codes AL and NV are invalid for this instruction");
2264    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2265  }
2266
2267  Operands.push_back(
2268      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2269  return false;
2270}
2271
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// argument ("lsl #2", "sxtw", ...). Parse it if present. Shift-type
/// operators require an amount; extend-type operators may omit it (#0 is
/// implicit).
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  AArch64_AM::ShiftExtendType ShOp =
      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", AArch64_AM::LSL)
          .Case("lsr", AArch64_AM::LSR)
          .Case("asr", AArch64_AM::ASR)
          .Case("ror", AArch64_AM::ROR)
          .Case("msl", AArch64_AM::MSL)
          .Case("uxtb", AArch64_AM::UXTB)
          .Case("uxth", AArch64_AM::UXTH)
          .Case("uxtw", AArch64_AM::UXTW)
          .Case("uxtx", AArch64_AM::UXTX)
          .Case("sxtb", AArch64_AM::SXTB)
          .Case("sxth", AArch64_AM::SXTH)
          .Case("sxtw", AArch64_AM::SXTW)
          .Case("sxtx", AArch64_AM::SXTX)
          .Default(AArch64_AM::InvalidShiftExtend);

  if (ShOp == AArch64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();
  Parser.Lex();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
        ShOp == AArch64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;
    }

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(
        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;
  }

  if (Hash)
    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The amount must fold to a constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
  if (!MCE) {
    TokError("expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateShiftExtend(
      ShOp, MCE->getValue(), true, S, E, getContext()));
  return MatchOperand_Success;
}
2343
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// On entry the mnemonic (ic/dc/at/tlbi) has been consumed and the current
/// token names the specific operation; this emits a "sys" token plus the
/// #op1, Cn, Cm, #op2 operands, then handles the optional Xt register.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

// Emit the four SYS operands (#op1, Cn, Cm, #op2) for one alias.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
  do {                                                                         \
    Expr = MCConstantExpr::Create(op1, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
    Expr = MCConstantExpr::Create(op2, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      return TokError("invalid operand for IC instruction");
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else {
      return TokError("invalid operand for DC instruction");
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else {
      return TokError("invalid operand for AT instruction");
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      return TokError("invalid operand for TLBI instruction");
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  // Operations whose name contains "all" act on everything and therefore
  // take no Xt register; all others require one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");
  }

  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  }
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
2590
/// tryParseBarrierOperand - Parse a DMB/DSB/ISB barrier operand: either a
/// 4-bit immediate (optionally '#'-prefixed) or a named barrier option.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Can be either a #imm style literal or an option name
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    // Immediate operand.
    if (Hash)
      Parser.Lex(); // Eat the '#'
    const MCExpr *ImmVal;
    SMLoc ExprLoc = getLoc();
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      Error(ExprLoc, "immediate value expected for barrier operand");
      return MatchOperand_ParseFail;
    }
    // The barrier option field is 4 bits wide.
    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
      Error(ExprLoc, "barrier operand out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(
        AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
    return MatchOperand_Success;
  }

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("invalid operand for instruction");
    return MatchOperand_ParseFail;
  }

  // Map a symbolic option name (e.g. "ish") to its encoding.
  bool Valid;
  unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
  if (!Valid) {
    TokError("invalid barrier option name");
    return MatchOperand_ParseFail;
  }

  // The only valid named option for ISB is 'sy'
  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
    TokError("'sy' or #imm operand expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(
      AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
  Parser.Lex(); // Consume the option

  return MatchOperand_Success;
}
2643
2644AArch64AsmParser::OperandMatchResultTy
2645AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2646  const AsmToken &Tok = Parser.getTok();
2647
2648  if (Tok.isNot(AsmToken::Identifier))
2649    return MatchOperand_NoMatch;
2650
2651  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2652                     STI.getFeatureBits(), getContext()));
2653  Parser.Lex(); // Eat identifier
2654
2655  return MatchOperand_Success;
2656}
2657
/// tryParseVectorRegister - Parse a vector register operand.
///
/// Returns true if no vector register was matched (so the caller may try
/// another operand form), false otherwise. Note that once a register has
/// been matched, problems while parsing a trailing "[index]" emit a
/// diagnostic via Error/TokError but still return false, so the caller does
/// not attempt to re-parse the tokens already consumed here.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false; // Diagnostic already emitted by the expression parser.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      // Lane indices must be compile-time constants.
      TokError("immediate value expected for vector index");
      return false; // Error reported; claim the operand as handled.
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false; // Error reported; claim the operand as handled.
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
2705
/// parseRegister - Parse a non-vector register operand.
///
/// Vector registers are tried first (tryParseVectorRegister also consumes
/// any trailing lane index); scalar registers are tried second. Returns
/// true if no register could be parsed, false otherwise.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  SMLoc S = getLoc();
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))
    return false;

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    Parser.Lex();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
      if (Val == 1) {
        Parser.Lex();
        if (getLexer().getKind() == AsmToken::RBrac) {
          SMLoc RBracS = getLoc();
          Parser.Lex();
          // Emit "[", "1", "]" as three separate literal tokens so the
          // matcher can treat them as part of the mnemonic's syntax.
          Operands.push_back(
              AArch64Operand::CreateToken("[", false, LBracS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("1", false, IntS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("]", false, RBracS, getContext()));
          return false;
        }
      }
    }
  }
  // NOTE(review): if a '[' was seen but the suffix was not exactly "[1]",
  // the tokens consumed above are not restored before returning success —
  // confirm that subsequent matching produces a sensible diagnostic.

  return false;
}
2748
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":specifier:" ELF relocation modifier (e.g. ":lo12:sym").
///
/// On success, \p ImmVal holds the parsed expression; when a modifier was
/// present it is wrapped in an AArch64MCExpr carrying the relocation kind.
/// Returns true on failure after emitting a diagnostic.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  AArch64MCExpr::VariantKind RefKind;

  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat ':'
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier)) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    // Relocation specifiers are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    Parser.Lex(); // Eat identifier

    // The specifier must be followed by a second ':' before the expression.
    if (Parser.getTok().isNot(AsmToken::Colon)) {
      Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
      return true;
    }
    Parser.Lex(); // Eat ':'
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation kind survives to encoding.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());

  return false;
}
2825
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts either a dash range "{ v0.8b - v3.8b }" or a comma-separated
/// list "{ v0.8b, v1.8b, ... }" of at most four registers, all carrying the
/// same type suffix, optionally followed by a lane index "[n]". In the
/// comma form, register numbers must be consecutive, wrapping from 31 back
/// to 0. Returns true on failure after emitting a diagnostic.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  if (Parser.getTok().is(AsmToken::Minus)) {
    // Range form: "{ vA.k - vB.k }".
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any Kind suffixes must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Number of registers spanned by the range, wrapping at 32.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  }
  else {
    // List form: "{ vA.k, vB.k, ... }".
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any Kind suffixes must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
       return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  // Note: errors below emit a diagnostic but return false (success) so the
  // caller does not re-parse the tokens already consumed.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
2923
2924AArch64AsmParser::OperandMatchResultTy
2925AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2926  const AsmToken &Tok = Parser.getTok();
2927  if (!Tok.is(AsmToken::Identifier))
2928    return MatchOperand_NoMatch;
2929
2930  unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2931
2932  MCContext &Ctx = getContext();
2933  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2934  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2935    return MatchOperand_NoMatch;
2936
2937  SMLoc S = getLoc();
2938  Parser.Lex(); // Eat register
2939
2940  if (Parser.getTok().isNot(AsmToken::Comma)) {
2941    Operands.push_back(
2942        AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2943    return MatchOperand_Success;
2944  }
2945  Parser.Lex(); // Eat comma.
2946
2947  if (Parser.getTok().is(AsmToken::Hash))
2948    Parser.Lex(); // Eat hash
2949
2950  if (Parser.getTok().isNot(AsmToken::Integer)) {
2951    Error(getLoc(), "index must be absent or #0");
2952    return MatchOperand_ParseFail;
2953  }
2954
2955  const MCExpr *ImmVal;
2956  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2957      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2958    Error(getLoc(), "index must be absent or #0");
2959    return MatchOperand_ParseFail;
2960  }
2961
2962  Operands.push_back(
2963      AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2964  return MatchOperand_Success;
2965}
2966
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
///
/// \param isCondCode      parse the operand as a condition code.
/// \param invertCondCode  invert the parsed condition code (used for aliases
///                        such as cset/cinc whose canonical form inverts it).
/// Returns true on failure after emitting a diagnostic.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocated) immediate
    // expression. NOTE(review): these S/E shadow the outer declarations.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    return parseVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    // Implicit enum->bool conversion: MatchOperand_Success (0) yields false
    // and MatchOperand_ParseFail yields true; NoMatch is excluded above.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();
    if (getLexer().is(AsmToken::Hash))
      Parser.Lex();

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Push "#0" and ".0" as two literal tokens, matching the
      // instruction's expected raw-token syntax.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Equal: {
    // "ldr r0, =val" pseudo: either fold into a movz or spill the value to
    // the constant pool.
    SMLoc Loc = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(Loc, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    MCContext& Ctx = getContext();
    E = SMLoc::getFromPointer(Loc.getPointer() - 1);
    // If the op is an imm and can be fit into a mov, then replace ldr with mov.
    // NOTE(review): S is never assigned on this path, so the immediate's
    // start location is a default-constructed SMLoc — confirm intended.
    if (isa<MCConstantExpr>(SubExprVal) && Operands.size() >= 2 &&
        static_cast<AArch64Operand &>(*Operands[1]).isReg()) {
      bool IsXReg =  AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            Operands[1]->getReg());
      uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
      // Shift the value right in 16-bit steps while its low 16 bits are
      // clear, so it fits a movz with an LSL #16/#32/#48 shift.
      uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
      while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
        ShiftAmt += 16;
        Imm >>= 16;
      }
      if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
          Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
          Operands.push_back(AArch64Operand::CreateImm(
                     MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
        if (ShiftAmt)
          Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
                     ShiftAmt, true, S, E, Ctx));
        return false;
      }
    }
    // If it is a label or an imm that cannot fit in a movz, put it into CP.
    const MCExpr *CPLoc = getTargetStreamer().addConstantPoolEntry(SubExprVal);
    Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
    return false;
  }
  }
}
3120
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalize the legacy "b<cond>" spellings to the "b.<cond>" form so
  // the condition-code handling below applies uniformly.
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // First check for the AArch64-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction."
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    // The '.' and the condition code become their own operands so the
    // matcher sees "b", ".", <cc>.
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    // N is the 1-based position of the operand being parsed; used to decide
    // when an operand is a condition code for the mnemonics above.
    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
3273
3274// FIXME: This entire function is a giant hack to provide us with decent
3275// operand range validation/diagnostics until TableGen/MC can be extended
3276// to support autogeneration of this kind of validation.
3277bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3278                                         SmallVectorImpl<SMLoc> &Loc) {
3279  const MCRegisterInfo *RI = getContext().getRegisterInfo();
3280  // Check for indexed addressing modes w/ the base register being the
3281  // same as a destination/source register or pair load where
3282  // the Rt == Rt2. All of those are undefined behaviour.
3283  switch (Inst.getOpcode()) {
3284  case AArch64::LDPSWpre:
3285  case AArch64::LDPWpost:
3286  case AArch64::LDPWpre:
3287  case AArch64::LDPXpost:
3288  case AArch64::LDPXpre: {
3289    unsigned Rt = Inst.getOperand(1).getReg();
3290    unsigned Rt2 = Inst.getOperand(2).getReg();
3291    unsigned Rn = Inst.getOperand(3).getReg();
3292    if (RI->isSubRegisterEq(Rn, Rt))
3293      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3294                           "is also a destination");
3295    if (RI->isSubRegisterEq(Rn, Rt2))
3296      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3297                           "is also a destination");
3298    // FALLTHROUGH
3299  }
3300  case AArch64::LDPDi:
3301  case AArch64::LDPQi:
3302  case AArch64::LDPSi:
3303  case AArch64::LDPSWi:
3304  case AArch64::LDPWi:
3305  case AArch64::LDPXi: {
3306    unsigned Rt = Inst.getOperand(0).getReg();
3307    unsigned Rt2 = Inst.getOperand(1).getReg();
3308    if (Rt == Rt2)
3309      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3310    break;
3311  }
3312  case AArch64::LDPDpost:
3313  case AArch64::LDPDpre:
3314  case AArch64::LDPQpost:
3315  case AArch64::LDPQpre:
3316  case AArch64::LDPSpost:
3317  case AArch64::LDPSpre:
3318  case AArch64::LDPSWpost: {
3319    unsigned Rt = Inst.getOperand(1).getReg();
3320    unsigned Rt2 = Inst.getOperand(2).getReg();
3321    if (Rt == Rt2)
3322      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3323    break;
3324  }
3325  case AArch64::STPDpost:
3326  case AArch64::STPDpre:
3327  case AArch64::STPQpost:
3328  case AArch64::STPQpre:
3329  case AArch64::STPSpost:
3330  case AArch64::STPSpre:
3331  case AArch64::STPWpost:
3332  case AArch64::STPWpre:
3333  case AArch64::STPXpost:
3334  case AArch64::STPXpre: {
3335    unsigned Rt = Inst.getOperand(1).getReg();
3336    unsigned Rt2 = Inst.getOperand(2).getReg();
3337    unsigned Rn = Inst.getOperand(3).getReg();
3338    if (RI->isSubRegisterEq(Rn, Rt))
3339      return Error(Loc[0], "unpredictable STP instruction, writeback base "
3340                           "is also a source");
3341    if (RI->isSubRegisterEq(Rn, Rt2))
3342      return Error(Loc[1], "unpredictable STP instruction, writeback base "
3343                           "is also a source");
3344    break;
3345  }
3346  case AArch64::LDRBBpre:
3347  case AArch64::LDRBpre:
3348  case AArch64::LDRHHpre:
3349  case AArch64::LDRHpre:
3350  case AArch64::LDRSBWpre:
3351  case AArch64::LDRSBXpre:
3352  case AArch64::LDRSHWpre:
3353  case AArch64::LDRSHXpre:
3354  case AArch64::LDRSWpre:
3355  case AArch64::LDRWpre:
3356  case AArch64::LDRXpre:
3357  case AArch64::LDRBBpost:
3358  case AArch64::LDRBpost:
3359  case AArch64::LDRHHpost:
3360  case AArch64::LDRHpost:
3361  case AArch64::LDRSBWpost:
3362  case AArch64::LDRSBXpost:
3363  case AArch64::LDRSHWpost:
3364  case AArch64::LDRSHXpost:
3365  case AArch64::LDRSWpost:
3366  case AArch64::LDRWpost:
3367  case AArch64::LDRXpost: {
3368    unsigned Rt = Inst.getOperand(1).getReg();
3369    unsigned Rn = Inst.getOperand(2).getReg();
3370    if (RI->isSubRegisterEq(Rn, Rt))
3371      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3372                           "is also a source");
3373    break;
3374  }
3375  case AArch64::STRBBpost:
3376  case AArch64::STRBpost:
3377  case AArch64::STRHHpost:
3378  case AArch64::STRHpost:
3379  case AArch64::STRWpost:
3380  case AArch64::STRXpost:
3381  case AArch64::STRBBpre:
3382  case AArch64::STRBpre:
3383  case AArch64::STRHHpre:
3384  case AArch64::STRHpre:
3385  case AArch64::STRWpre:
3386  case AArch64::STRXpre: {
3387    unsigned Rt = Inst.getOperand(1).getReg();
3388    unsigned Rn = Inst.getOperand(2).getReg();
3389    if (RI->isSubRegisterEq(Rn, Rt))
3390      return Error(Loc[0], "unpredictable STR instruction, writeback base "
3391                           "is also a source");
3392    break;
3393  }
3394  }
3395
3396  // Now check immediate ranges. Separate from the above as there is overlap
3397  // in the instructions being checked and this keeps the nested conditionals
3398  // to a minimum.
3399  switch (Inst.getOpcode()) {
3400  case AArch64::ADDSWri:
3401  case AArch64::ADDSXri:
3402  case AArch64::ADDWri:
3403  case AArch64::ADDXri:
3404  case AArch64::SUBSWri:
3405  case AArch64::SUBSXri:
3406  case AArch64::SUBWri:
3407  case AArch64::SUBXri: {
3408    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3409    // some slight duplication here.
3410    if (Inst.getOperand(2).isExpr()) {
3411      const MCExpr *Expr = Inst.getOperand(2).getExpr();
3412      AArch64MCExpr::VariantKind ELFRefKind;
3413      MCSymbolRefExpr::VariantKind DarwinRefKind;
3414      int64_t Addend;
3415      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3416        return Error(Loc[2], "invalid immediate expression");
3417      }
3418
3419      // Only allow these with ADDXri.
3420      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3421          DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3422          Inst.getOpcode() == AArch64::ADDXri)
3423        return false;
3424
3425      // Only allow these with ADDXri/ADDWri
3426      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3427          ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3428          ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3429          ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3430          ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3431          ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3432          ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3433          ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3434          (Inst.getOpcode() == AArch64::ADDXri ||
3435          Inst.getOpcode() == AArch64::ADDWri))
3436        return false;
3437
3438      // Don't allow expressions in the immediate field otherwise
3439      return Error(Loc[2], "invalid immediate expression");
3440    }
3441    return false;
3442  }
3443  default:
3444    return false;
3445  }
3446}
3447
/// Emit a diagnostic for a failed match at \p Loc, translating the matcher
/// error code \p ErrCode into a human-readable message.  Every branch forwards
/// the result of Error(), so callers can simply `return showMatchError(...)`.
/// NOTE: the exact message text is load-bearing — assembler tests match it.
bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  switch (ErrCode) {
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  // Shifted/extended register-operand diagnostics.
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  // Memory-operand index/extend diagnostics.
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Immediate-range diagnostics.
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // Vector-lane diagnostics.
  case Match_InvalidIndex1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexB:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexH:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexS:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexD:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  case Match_MRS:
    return Error(Loc, "expected readable system register");
  case Match_MSR:
    return Error(Loc, "expected writable system register or pstate");
  case Match_MnemonicFail:
    return Error(Loc, "unrecognized instruction mnemonic");
  default:
    llvm_unreachable("unexpected error code!");
  }
}
3573
3574static const char *getSubtargetFeatureName(unsigned Val);
3575
3576bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3577                                               OperandVector &Operands,
3578                                               MCStreamer &Out,
3579                                               unsigned &ErrorInfo,
3580                                               bool MatchingInlineAsm) {
3581  assert(!Operands.empty() && "Unexpect empty operand list!");
3582  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3583  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3584
3585  StringRef Tok = Op.getToken();
3586  unsigned NumOperands = Operands.size();
3587
3588  if (NumOperands == 4 && Tok == "lsl") {
3589    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3590    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3591    if (Op2.isReg() && Op3.isImm()) {
3592      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3593      if (Op3CE) {
3594        uint64_t Op3Val = Op3CE->getValue();
3595        uint64_t NewOp3Val = 0;
3596        uint64_t NewOp4Val = 0;
3597        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3598                Op2.getReg())) {
3599          NewOp3Val = (32 - Op3Val) & 0x1f;
3600          NewOp4Val = 31 - Op3Val;
3601        } else {
3602          NewOp3Val = (64 - Op3Val) & 0x3f;
3603          NewOp4Val = 63 - Op3Val;
3604        }
3605
3606        const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3607        const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3608
3609        Operands[0] = AArch64Operand::CreateToken(
3610            "ubfm", false, Op.getStartLoc(), getContext());
3611        Operands.push_back(AArch64Operand::CreateImm(
3612            NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3613        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3614                                                Op3.getEndLoc(), getContext());
3615      }
3616    }
3617  } else if (NumOperands == 5) {
3618    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3619    // UBFIZ -> UBFM aliases.
3620    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3621      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3622      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3623      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3624
3625      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3626        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3627        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3628
3629        if (Op3CE && Op4CE) {
3630          uint64_t Op3Val = Op3CE->getValue();
3631          uint64_t Op4Val = Op4CE->getValue();
3632
3633          uint64_t RegWidth = 0;
3634          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3635                  Op1.getReg()))
3636            RegWidth = 64;
3637          else
3638            RegWidth = 32;
3639
3640          if (Op3Val >= RegWidth)
3641            return Error(Op3.getStartLoc(),
3642                         "expected integer in range [0, 31]");
3643          if (Op4Val < 1 || Op4Val > RegWidth)
3644            return Error(Op4.getStartLoc(),
3645                         "expected integer in range [1, 32]");
3646
3647          uint64_t NewOp3Val = 0;
3648          if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3649                  Op1.getReg()))
3650            NewOp3Val = (32 - Op3Val) & 0x1f;
3651          else
3652            NewOp3Val = (64 - Op3Val) & 0x3f;
3653
3654          uint64_t NewOp4Val = Op4Val - 1;
3655
3656          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3657            return Error(Op4.getStartLoc(),
3658                         "requested insert overflows register");
3659
3660          const MCExpr *NewOp3 =
3661              MCConstantExpr::Create(NewOp3Val, getContext());
3662          const MCExpr *NewOp4 =
3663              MCConstantExpr::Create(NewOp4Val, getContext());
3664          Operands[3] = AArch64Operand::CreateImm(
3665              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3666          Operands[4] = AArch64Operand::CreateImm(
3667              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3668          if (Tok == "bfi")
3669            Operands[0] = AArch64Operand::CreateToken(
3670                "bfm", false, Op.getStartLoc(), getContext());
3671          else if (Tok == "sbfiz")
3672            Operands[0] = AArch64Operand::CreateToken(
3673                "sbfm", false, Op.getStartLoc(), getContext());
3674          else if (Tok == "ubfiz")
3675            Operands[0] = AArch64Operand::CreateToken(
3676                "ubfm", false, Op.getStartLoc(), getContext());
3677          else
3678            llvm_unreachable("No valid mnemonic for alias?");
3679        }
3680      }
3681
3682      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3683      // UBFX -> UBFM aliases.
3684    } else if (NumOperands == 5 &&
3685               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3686      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3687      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3688      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3689
3690      if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3691        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3692        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3693
3694        if (Op3CE && Op4CE) {
3695          uint64_t Op3Val = Op3CE->getValue();
3696          uint64_t Op4Val = Op4CE->getValue();
3697
3698          uint64_t RegWidth = 0;
3699          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3700                  Op1.getReg()))
3701            RegWidth = 64;
3702          else
3703            RegWidth = 32;
3704
3705          if (Op3Val >= RegWidth)
3706            return Error(Op3.getStartLoc(),
3707                         "expected integer in range [0, 31]");
3708          if (Op4Val < 1 || Op4Val > RegWidth)
3709            return Error(Op4.getStartLoc(),
3710                         "expected integer in range [1, 32]");
3711
3712          uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3713
3714          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3715            return Error(Op4.getStartLoc(),
3716                         "requested extract overflows register");
3717
3718          const MCExpr *NewOp4 =
3719              MCConstantExpr::Create(NewOp4Val, getContext());
3720          Operands[4] = AArch64Operand::CreateImm(
3721              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3722          if (Tok == "bfxil")
3723            Operands[0] = AArch64Operand::CreateToken(
3724                "bfm", false, Op.getStartLoc(), getContext());
3725          else if (Tok == "sbfx")
3726            Operands[0] = AArch64Operand::CreateToken(
3727                "sbfm", false, Op.getStartLoc(), getContext());
3728          else if (Tok == "ubfx")
3729            Operands[0] = AArch64Operand::CreateToken(
3730                "ubfm", false, Op.getStartLoc(), getContext());
3731          else
3732            llvm_unreachable("No valid mnemonic for alias?");
3733        }
3734      }
3735    }
3736  }
3737  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3738  //        InstAlias can't quite handle this since the reg classes aren't
3739  //        subclasses.
3740  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3741    // The source register can be Wn here, but the matcher expects a
3742    // GPR64. Twiddle it here if necessary.
3743    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3744    if (Op.isReg()) {
3745      unsigned Reg = getXRegFromWReg(Op.getReg());
3746      Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3747                                              Op.getEndLoc(), getContext());
3748    }
3749  }
3750  // FIXME: Likewise for sxt[bh] with a Xd dst operand
3751  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3752    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3753    if (Op.isReg() &&
3754        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3755            Op.getReg())) {
3756      // The source register can be Wn here, but the matcher expects a
3757      // GPR64. Twiddle it here if necessary.
3758      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3759      if (Op.isReg()) {
3760        unsigned Reg = getXRegFromWReg(Op.getReg());
3761        Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3762                                                Op.getEndLoc(), getContext());
3763      }
3764    }
3765  }
3766  // FIXME: Likewise for uxt[bh] with a Xd dst operand
3767  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3768    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3769    if (Op.isReg() &&
3770        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3771            Op.getReg())) {
3772      // The source register can be Wn here, but the matcher expects a
3773      // GPR32. Twiddle it here if necessary.
3774      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3775      if (Op.isReg()) {
3776        unsigned Reg = getWRegFromXReg(Op.getReg());
3777        Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3778                                                Op.getEndLoc(), getContext());
3779      }
3780    }
3781  }
3782
3783  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3784  if (NumOperands == 3 && Tok == "fmov") {
3785    AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3786    AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3787    if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3788      unsigned zreg =
3789          AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3790              RegOp.getReg())
3791              ? AArch64::WZR
3792              : AArch64::XZR;
3793      Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3794                                              Op.getEndLoc(), getContext());
3795    }
3796  }
3797
3798  MCInst Inst;
3799  // First try to match against the secondary set of tables containing the
3800  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3801  unsigned MatchResult =
3802      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3803
3804  // If that fails, try against the alternate table containing long-form NEON:
3805  // "fadd v0.2s, v1.2s, v2.2s"
3806  if (MatchResult != Match_Success)
3807    MatchResult =
3808        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3809
3810  switch (MatchResult) {
3811  case Match_Success: {
3812    // Perform range checking and other semantic validations
3813    SmallVector<SMLoc, 8> OperandLocs;
3814    NumOperands = Operands.size();
3815    for (unsigned i = 1; i < NumOperands; ++i)
3816      OperandLocs.push_back(Operands[i]->getStartLoc());
3817    if (validateInstruction(Inst, OperandLocs))
3818      return true;
3819
3820    Inst.setLoc(IDLoc);
3821    Out.EmitInstruction(Inst, STI);
3822    return false;
3823  }
3824  case Match_MissingFeature: {
3825    assert(ErrorInfo && "Unknown missing feature!");
3826    // Special case the error message for the very common case where only
3827    // a single subtarget feature is missing (neon, e.g.).
3828    std::string Msg = "instruction requires:";
3829    unsigned Mask = 1;
3830    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3831      if (ErrorInfo & Mask) {
3832        Msg += " ";
3833        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3834      }
3835      Mask <<= 1;
3836    }
3837    return Error(IDLoc, Msg);
3838  }
3839  case Match_MnemonicFail:
3840    return showMatchError(IDLoc, MatchResult);
3841  case Match_InvalidOperand: {
3842    SMLoc ErrorLoc = IDLoc;
3843    if (ErrorInfo != ~0U) {
3844      if (ErrorInfo >= Operands.size())
3845        return Error(IDLoc, "too few operands for instruction");
3846
3847      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3848      if (ErrorLoc == SMLoc())
3849        ErrorLoc = IDLoc;
3850    }
3851    // If the match failed on a suffix token operand, tweak the diagnostic
3852    // accordingly.
3853    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3854        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3855      MatchResult = Match_InvalidSuffix;
3856
3857    return showMatchError(ErrorLoc, MatchResult);
3858  }
3859  case Match_InvalidMemoryIndexed1:
3860  case Match_InvalidMemoryIndexed2:
3861  case Match_InvalidMemoryIndexed4:
3862  case Match_InvalidMemoryIndexed8:
3863  case Match_InvalidMemoryIndexed16:
3864  case Match_InvalidCondCode:
3865  case Match_AddSubRegExtendSmall:
3866  case Match_AddSubRegExtendLarge:
3867  case Match_AddSubSecondSource:
3868  case Match_LogicalSecondSource:
3869  case Match_AddSubRegShift32:
3870  case Match_AddSubRegShift64:
3871  case Match_InvalidMovImm32Shift:
3872  case Match_InvalidMovImm64Shift:
3873  case Match_InvalidFPImm:
3874  case Match_InvalidMemoryWExtend8:
3875  case Match_InvalidMemoryWExtend16:
3876  case Match_InvalidMemoryWExtend32:
3877  case Match_InvalidMemoryWExtend64:
3878  case Match_InvalidMemoryWExtend128:
3879  case Match_InvalidMemoryXExtend8:
3880  case Match_InvalidMemoryXExtend16:
3881  case Match_InvalidMemoryXExtend32:
3882  case Match_InvalidMemoryXExtend64:
3883  case Match_InvalidMemoryXExtend128:
3884  case Match_InvalidMemoryIndexed4SImm7:
3885  case Match_InvalidMemoryIndexed8SImm7:
3886  case Match_InvalidMemoryIndexed16SImm7:
3887  case Match_InvalidMemoryIndexedSImm9:
3888  case Match_InvalidImm0_7:
3889  case Match_InvalidImm0_15:
3890  case Match_InvalidImm0_31:
3891  case Match_InvalidImm0_63:
3892  case Match_InvalidImm0_127:
3893  case Match_InvalidImm0_65535:
3894  case Match_InvalidImm1_8:
3895  case Match_InvalidImm1_16:
3896  case Match_InvalidImm1_32:
3897  case Match_InvalidImm1_64:
3898  case Match_InvalidIndex1:
3899  case Match_InvalidIndexB:
3900  case Match_InvalidIndexH:
3901  case Match_InvalidIndexS:
3902  case Match_InvalidIndexD:
3903  case Match_InvalidLabel:
3904  case Match_MSR:
3905  case Match_MRS: {
3906    if (ErrorInfo >= Operands.size())
3907      return Error(IDLoc, "too few operands for instruction");
3908    // Any time we get here, there's nothing fancy to do. Just get the
3909    // operand SMLoc and display the diagnostic.
3910    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3911    if (ErrorLoc == SMLoc())
3912      ErrorLoc = IDLoc;
3913    return showMatchError(ErrorLoc, MatchResult);
3914  }
3915  }
3916
3917  llvm_unreachable("Implement any new match types added!");
3918  return true;
3919}
3920
3921/// ParseDirective parses the arm specific directives
3922bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3923  StringRef IDVal = DirectiveID.getIdentifier();
3924  SMLoc Loc = DirectiveID.getLoc();
3925  if (IDVal == ".hword")
3926    return parseDirectiveWord(2, Loc);
3927  if (IDVal == ".word")
3928    return parseDirectiveWord(4, Loc);
3929  if (IDVal == ".xword")
3930    return parseDirectiveWord(8, Loc);
3931  if (IDVal == ".tlsdesccall")
3932    return parseDirectiveTLSDescCall(Loc);
3933  if (IDVal == ".ltorg" || IDVal == ".pool")
3934    return parseDirectiveLtorg(Loc);
3935  if (IDVal == ".unreq")
3936    return parseDirectiveUnreq(DirectiveID.getLoc());
3937
3938  return parseDirectiveLOH(IDVal, Loc);
3939}
3940
3941/// parseDirectiveWord
3942///  ::= .word [ expression (, expression)* ]
3943bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3944  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3945    for (;;) {
3946      const MCExpr *Value;
3947      if (getParser().parseExpression(Value))
3948        return true;
3949
3950      getParser().getStreamer().EmitValue(Value, Size);
3951
3952      if (getLexer().is(AsmToken::EndOfStatement))
3953        break;
3954
3955      // FIXME: Improve diagnostic.
3956      if (getLexer().isNot(AsmToken::Comma))
3957        return Error(L, "unexpected token in directive");
3958      Parser.Lex();
3959    }
3960  }
3961
3962  Parser.Lex();
3963  return false;
3964}
3965
3966// parseDirectiveTLSDescCall:
3967//   ::= .tlsdesccall symbol
3968bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3969  StringRef Name;
3970  if (getParser().parseIdentifier(Name))
3971    return Error(L, "expected symbol after directive");
3972
3973  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3974  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3975  Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3976
3977  MCInst Inst;
3978  Inst.setOpcode(AArch64::TLSDESCCALL);
3979  Inst.addOperand(MCOperand::CreateExpr(Expr));
3980
3981  getParser().getStreamer().EmitInstruction(Inst, STI);
3982  return false;
3983}
3984
3985/// ::= .loh <lohName | lohId> label1, ..., labelN
3986/// The number of arguments depends on the loh identifier.
3987bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3988  if (IDVal != MCLOHDirectiveName())
3989    return true;
3990  MCLOHType Kind;
3991  if (getParser().getTok().isNot(AsmToken::Identifier)) {
3992    if (getParser().getTok().isNot(AsmToken::Integer))
3993      return TokError("expected an identifier or a number in directive");
3994    // We successfully get a numeric value for the identifier.
3995    // Check if it is valid.
3996    int64_t Id = getParser().getTok().getIntVal();
3997    Kind = (MCLOHType)Id;
3998    // Check that Id does not overflow MCLOHType.
3999    if (!isValidMCLOHType(Kind) || Id != Kind)
4000      return TokError("invalid numeric identifier in directive");
4001  } else {
4002    StringRef Name = getTok().getIdentifier();
4003    // We successfully parse an identifier.
4004    // Check if it is a recognized one.
4005    int Id = MCLOHNameToId(Name);
4006
4007    if (Id == -1)
4008      return TokError("invalid identifier in directive");
4009    Kind = (MCLOHType)Id;
4010  }
4011  // Consume the identifier.
4012  Lex();
4013  // Get the number of arguments of this LOH.
4014  int NbArgs = MCLOHIdToNbArgs(Kind);
4015
4016  assert(NbArgs != -1 && "Invalid number of arguments");
4017
4018  SmallVector<MCSymbol *, 3> Args;
4019  for (int Idx = 0; Idx < NbArgs; ++Idx) {
4020    StringRef Name;
4021    if (getParser().parseIdentifier(Name))
4022      return TokError("expected identifier in directive");
4023    Args.push_back(getContext().GetOrCreateSymbol(Name));
4024
4025    if (Idx + 1 == NbArgs)
4026      break;
4027    if (getLexer().isNot(AsmToken::Comma))
4028      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4029    Lex();
4030  }
4031  if (getLexer().isNot(AsmToken::EndOfStatement))
4032    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4033
4034  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4035  return false;
4036}
4037
/// parseDirectiveLtorg
///  ::= .ltorg | .pool
/// Emits the constant pool accumulated so far at the current position.
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}
4044
4045/// parseDirectiveReq
4046///  ::= name .req registername
4047bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4048  Parser.Lex(); // Eat the '.req' token.
4049  SMLoc SRegLoc = getLoc();
4050  unsigned RegNum = tryParseRegister();
4051  bool IsVector = false;
4052
4053  if (RegNum == static_cast<unsigned>(-1)) {
4054    StringRef Kind;
4055    RegNum = tryMatchVectorRegister(Kind, false);
4056    if (!Kind.empty()) {
4057      Error(SRegLoc, "vector register without type specifier expected");
4058      return false;
4059    }
4060    IsVector = true;
4061  }
4062
4063  if (RegNum == static_cast<unsigned>(-1)) {
4064    Parser.eatToEndOfStatement();
4065    Error(SRegLoc, "register name or alias expected");
4066    return false;
4067  }
4068
4069  // Shouldn't be anything else.
4070  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4071    Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4072    Parser.eatToEndOfStatement();
4073    return false;
4074  }
4075
4076  Parser.Lex(); // Consume the EndOfStatement
4077
4078  auto pair = std::make_pair(IsVector, RegNum);
4079  if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
4080    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4081
4082  return true;
4083}
4084
4085/// parseDirectiveUneq
4086///  ::= .unreq registername
4087bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4088  if (Parser.getTok().isNot(AsmToken::Identifier)) {
4089    Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4090    Parser.eatToEndOfStatement();
4091    return false;
4092  }
4093  RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4094  Parser.Lex(); // Eat the identifier.
4095  return false;
4096}
4097
4098bool
4099AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4100                                    AArch64MCExpr::VariantKind &ELFRefKind,
4101                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
4102                                    int64_t &Addend) {
4103  ELFRefKind = AArch64MCExpr::VK_INVALID;
4104  DarwinRefKind = MCSymbolRefExpr::VK_None;
4105  Addend = 0;
4106
4107  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4108    ELFRefKind = AE->getKind();
4109    Expr = AE->getSubExpr();
4110  }
4111
4112  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4113  if (SE) {
4114    // It's a simple symbol reference with no addend.
4115    DarwinRefKind = SE->getKind();
4116    return true;
4117  }
4118
4119  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4120  if (!BE)
4121    return false;
4122
4123  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4124  if (!SE)
4125    return false;
4126  DarwinRefKind = SE->getKind();
4127
4128  if (BE->getOpcode() != MCBinaryExpr::Add &&
4129      BE->getOpcode() != MCBinaryExpr::Sub)
4130    return false;
4131
4132  // See if the addend is is a constant, otherwise there's more going
4133  // on here than we can deal with.
4134  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4135  if (!AddendExpr)
4136    return false;
4137
4138  Addend = AddendExpr->getValue();
4139  if (BE->getOpcode() == MCBinaryExpr::Sub)
4140    Addend = -Addend;
4141
4142  // It's some symbol reference + a constant addend, but really
4143  // shouldn't use both Darwin and ELF syntax.
4144  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4145         DarwinRefKind == MCSymbolRefExpr::VK_None;
4146}
4147
4148/// Force static initialization.
4149extern "C" void LLVMInitializeAArch64AsmParser() {
4150  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4151  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4152
4153  RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
4154  RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
4155}
4156
4157#define GET_REGISTER_MATCHER
4158#define GET_SUBTARGET_FEATURE_NAME
4159#define GET_MATCHER_IMPLEMENTATION
4160#include "AArch64GenAsmMatcher.inc"
4161
4162// Define this matcher function after the auto-generated include so we
4163// have the match class enum definitions.
4164unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4165                                                      unsigned Kind) {
4166  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4167  // If the kind is a token for a literal immediate, check if our asm
4168  // operand matches. This is for InstAliases which have a fixed-value
4169  // immediate in the syntax.
4170  int64_t ExpectedVal;
4171  switch (Kind) {
4172  default:
4173    return Match_InvalidOperand;
4174  case MCK__35_0:
4175    ExpectedVal = 0;
4176    break;
4177  case MCK__35_1:
4178    ExpectedVal = 1;
4179    break;
4180  case MCK__35_12:
4181    ExpectedVal = 12;
4182    break;
4183  case MCK__35_16:
4184    ExpectedVal = 16;
4185    break;
4186  case MCK__35_2:
4187    ExpectedVal = 2;
4188    break;
4189  case MCK__35_24:
4190    ExpectedVal = 24;
4191    break;
4192  case MCK__35_3:
4193    ExpectedVal = 3;
4194    break;
4195  case MCK__35_32:
4196    ExpectedVal = 32;
4197    break;
4198  case MCK__35_4:
4199    ExpectedVal = 4;
4200    break;
4201  case MCK__35_48:
4202    ExpectedVal = 48;
4203    break;
4204  case MCK__35_6:
4205    ExpectedVal = 6;
4206    break;
4207  case MCK__35_64:
4208    ExpectedVal = 64;
4209    break;
4210  case MCK__35_8:
4211    ExpectedVal = 8;
4212    break;
4213  }
4214  if (!Op.isImm())
4215    return Match_InvalidOperand;
4216  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4217  if (!CE)
4218    return Match_InvalidOperand;
4219  if (CE->getValue() == ExpectedVal)
4220    return Match_Success;
4221  return Match_InvalidOperand;
4222}
4223