AArch64AsmParser.cpp revision dce4a407a24b04eebc6a376f8e62b41aaa7b071f
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AArch64AddressingModes.h"
11#include "MCTargetDesc/AArch64MCExpr.h"
12#include "Utils/AArch64BaseInfo.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCContext.h"
17#include "llvm/MC/MCExpr.h"
18#include "llvm/MC/MCInst.h"
19#include "llvm/MC/MCRegisterInfo.h"
20#include "llvm/MC/MCStreamer.h"
21#include "llvm/MC/MCSubtargetInfo.h"
22#include "llvm/MC/MCSymbol.h"
23#include "llvm/MC/MCTargetAsmParser.h"
24#include "llvm/Support/SourceMgr.h"
25#include "llvm/Support/TargetRegistry.h"
26#include "llvm/Support/ErrorHandling.h"
27#include "llvm/Support/raw_ostream.h"
28#include "llvm/ADT/SmallString.h"
29#include "llvm/ADT/SmallVector.h"
30#include "llvm/ADT/STLExtras.h"
31#include "llvm/ADT/StringSwitch.h"
32#include "llvm/ADT/Twine.h"
33#include <cstdio>
34using namespace llvm;
35
namespace {

class AArch64Operand;

/// AArch64AsmParser - Target assembly parser for AArch64. Holds the current
/// mnemonic plus references to the subtarget and the generic MC parser, and
/// implements the parse/match entry points the generic assembler driver
/// invokes. Operand parsing produces AArch64Operand instances (defined below).
class AArch64AsmParser : public MCTargetAsmParser {
public:
  typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;

private:
  StringRef Mnemonic; ///< Instruction mnemonic.
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Location of the token currently under the lexer cursor.
  SMLoc getLoc() const { return Parser.getTok().getLoc(); }

  // Hand-written parsers for individual syntactic pieces. Per MCAsmParser
  // convention, the bool-returning ones return true on error.
  bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
  AArch64CC::CondCode parseCondCodeString(StringRef Cond);
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  int tryParseRegister();
  int tryMatchVectorRegister(StringRef &Kind, bool expected);
  bool parseRegister(OperandVector &Operands);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
  bool parseVectorList(OperandVector &Operands);
  bool parseOperand(OperandVector &Operands, bool isCondCode,
                    bool invertCondCode);

  // Diagnostics, forwarded to the underlying generic parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
  bool showMatchError(SMLoc Loc, unsigned ErrCode);

  // Target-specific directive handlers.
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);

  bool parseDirectiveLOH(StringRef LOH, SMLoc L);

  bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               unsigned &ErrorInfo,
                               bool MatchingInlineAsm) override;
/// @name Auto-generated Match Functions
/// {

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  /// }

  // Custom operand parsers invoked from the generated matcher above.
  OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
  OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
  OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
  OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
  OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
  OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
  OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
  OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
  OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
  bool tryParseVectorRegister(OperandVector &Operands);

public:
  // Target-specific match-result codes, extending the generic set with the
  // generated operand diagnostics.
  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };
  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                 const MCInstrInfo &MII,
                 const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
                                      unsigned Kind) override;

  // Decompose Expr into an AArch64/Darwin relocation modifier plus an
  // integer addend; returns false if the expression is not understood.
  static bool classifySymbolRef(const MCExpr *Expr,
                                AArch64MCExpr::VariantKind &ELFRefKind,
                                MCSymbolRefExpr::VariantKind &DarwinRefKind,
                                int64_t &Addend);
};
} // end anonymous namespace
129
130namespace {
131
132/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
133/// instruction.
134class AArch64Operand : public MCParsedAsmOperand {
135private:
  // Discriminator for the payload union below; exactly one union member is
  // live at a time, selected by this tag.
  enum KindTy {
    k_Immediate,
    k_ShiftedImm,
    k_CondCode,
    k_Register,
    k_VectorList,
    k_VectorIndex,
    k_Token,
    k_SysReg,
    k_SysCR,
    k_Prefetch,
    k_ShiftExtend,
    k_FPImm,
    k_Barrier
  } Kind;

  // Source range covered by this operand's tokens.
  SMLoc StartLoc, EndLoc;

  struct TokOp {
    const char *Data;
    unsigned Length;
    bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
  };

  struct RegOp {
    unsigned RegNum;
    bool isVector;
  };

  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned NumElements;
    unsigned ElementKind;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct ShiftedImmOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
  };

  struct CondCodeOp {
    AArch64CC::CondCode Code;
  };

  struct FPImmOp {
    unsigned Val; // Encoded 8-bit representation.
  };

  struct BarrierOp {
    unsigned Val; // Not the enum since not all values have names.
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
    uint64_t FeatureBits; // We need to pass through information about which
                          // core we are compiling for so that the SysReg
                          // Mappers can appropriately conditionalize.
  };

  struct SysCRImmOp {
    unsigned Val;
  };

  struct PrefetchOp {
    unsigned Val;
  };

  struct ShiftExtendOp {
    AArch64_AM::ShiftExtendType Type;
    unsigned Amount;
    bool HasExplicitAmount;
  };

  // NOTE(review): ExtendOp is not a member of the union below and is not
  // referenced anywhere visible in this file — confirm it is dead before
  // removing.
  struct ExtendOp {
    unsigned Val;
  };

  union {
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct ShiftExtendOp ShiftExtend;
  };

  // Keep the MCContext around as the MCExprs may need manipulated during
  // the add<>Operands() calls.
  MCContext &Ctx;

  // Private: instances are created through the static factory helpers, which
  // set the appropriate union member after construction.
  AArch64Operand(KindTy K, MCContext &_Ctx)
      : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}

public:
  // Copy constructor: copies only the union member selected by o.Kind, so the
  // non-trivial union stays well-defined.
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_Token:
      Tok = o.Tok;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_ShiftedImm:
      ShiftedImm = o.ShiftedImm;
      break;
    case k_CondCode:
      CondCode = o.CondCode;
      break;
    case k_FPImm:
      FPImm = o.FPImm;
      break;
    case k_Barrier:
      Barrier = o.Barrier;
      break;
    case k_Register:
      Reg = o.Reg;
      break;
    case k_VectorList:
      VectorList = o.VectorList;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    case k_SysReg:
      SysReg = o.SysReg;
      break;
    case k_SysCR:
      SysCRImm = o.SysCRImm;
      break;
    case k_Prefetch:
      Prefetch = o.Prefetch;
      break;
    case k_ShiftExtend:
      ShiftExtend = o.ShiftExtend;
      break;
    }
  }
293
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  // Payload accessors. Each asserts that Kind selects the union member being
  // read; calling one with the wrong Kind is a programming error.

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  bool isTokenSuffix() const {
    assert(Kind == k_Token && "Invalid access!");
    return Tok.IsSuffix;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getShiftedImmVal() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.Val;
  }

  unsigned getShiftedImmShift() const {
    assert(Kind == k_ShiftedImm && "Invalid access!");
    return ShiftedImm.ShiftAmount;
  }

  AArch64CC::CondCode getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  // Returns the encoded 8-bit FP immediate, not the floating-point value.
  unsigned getFPImm() const {
    assert(Kind == k_FPImm && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getBarrier() const {
    assert(Kind == k_Barrier && "Invalid access!");
    return Barrier.Val;
  }

  unsigned getReg() const override {
    assert(Kind == k_Register && "Invalid access!");
    return Reg.RegNum;
  }

  // First register of a vector list (the list is Count consecutive regs).
  unsigned getVectorListStart() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.RegNum;
  }

  unsigned getVectorListCount() const {
    assert(Kind == k_VectorList && "Invalid access!");
    return VectorList.Count;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  StringRef getSysReg() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return StringRef(SysReg.Data, SysReg.Length);
  }

  // Feature bits captured at parse time; used by the SysReg mappers.
  uint64_t getSysRegFeatureBits() const {
    assert(Kind == k_SysReg && "Invalid access!");
    return SysReg.FeatureBits;
  }

  unsigned getSysCR() const {
    assert(Kind == k_SysCR && "Invalid access!");
    return SysCRImm.Val;
  }

  unsigned getPrefetch() const {
    assert(Kind == k_Prefetch && "Invalid access!");
    return Prefetch.Val;
  }

  AArch64_AM::ShiftExtendType getShiftExtendType() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Type;
  }

  unsigned getShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.Amount;
  }

  // Whether an amount was written explicitly in the assembly source.
  bool hasShiftExtendAmount() const {
    assert(Kind == k_ShiftExtend && "Invalid access!");
    return ShiftExtend.HasExplicitAmount;
  }
393
394  bool isImm() const override { return Kind == k_Immediate; }
395  bool isMem() const override { return false; }
396  bool isSImm9() const {
397    if (!isImm())
398      return false;
399    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
400    if (!MCE)
401      return false;
402    int64_t Val = MCE->getValue();
403    return (Val >= -256 && Val < 256);
404  }
405  bool isSImm7s4() const {
406    if (!isImm())
407      return false;
408    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
409    if (!MCE)
410      return false;
411    int64_t Val = MCE->getValue();
412    return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
413  }
414  bool isSImm7s8() const {
415    if (!isImm())
416      return false;
417    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
418    if (!MCE)
419      return false;
420    int64_t Val = MCE->getValue();
421    return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
422  }
423  bool isSImm7s16() const {
424    if (!isImm())
425      return false;
426    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
427    if (!MCE)
428      return false;
429    int64_t Val = MCE->getValue();
430    return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
431  }
432
  /// isSymbolicUImm12Offset - Check whether Expr is a symbolic expression
  /// usable as a scaled unsigned 12-bit load/store offset: a page-offset
  /// style modifier whose addend (if any) is a non-negative multiple of
  /// Scale.
  bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
    AArch64MCExpr::VariantKind ELFRefKind;
    MCSymbolRefExpr::VariantKind DarwinRefKind;
    int64_t Addend;
    if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
                                           Addend)) {
      // If we don't understand the expression, assume the best and
      // let the fixup and relocation code deal with it.
      return true;
    }

    // LO12-style modifiers (and Darwin @pageoff) are allowed with an addend.
    if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
        ELFRefKind == AArch64MCExpr::VK_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
        ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
        ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
      // Note that we don't range-check the addend. It's adjusted modulo page
      // size when converted, so there is no "out of range" condition when using
      // @pageoff.
      return Addend >= 0 && (Addend % Scale) == 0;
    } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
               DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
      // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
      return Addend == 0;
    }

    return false;
  }

  /// Unsigned 12-bit offset scaled by the access size: either a symbolic
  /// LO12-style offset (see above) or a constant multiple of Scale whose
  /// scaled value fits in 12 bits.
  template <int Scale> bool isUImm12Offset() const {
    if (!isImm())
      return false;

    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return isSymbolicUImm12Offset(getImm(), Scale);

    int64_t Val = MCE->getValue();
    return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
  }
477
478  bool isImm0_7() const {
479    if (!isImm())
480      return false;
481    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
482    if (!MCE)
483      return false;
484    int64_t Val = MCE->getValue();
485    return (Val >= 0 && Val < 8);
486  }
487  bool isImm1_8() const {
488    if (!isImm())
489      return false;
490    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
491    if (!MCE)
492      return false;
493    int64_t Val = MCE->getValue();
494    return (Val > 0 && Val < 9);
495  }
496  bool isImm0_15() const {
497    if (!isImm())
498      return false;
499    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
500    if (!MCE)
501      return false;
502    int64_t Val = MCE->getValue();
503    return (Val >= 0 && Val < 16);
504  }
505  bool isImm1_16() const {
506    if (!isImm())
507      return false;
508    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
509    if (!MCE)
510      return false;
511    int64_t Val = MCE->getValue();
512    return (Val > 0 && Val < 17);
513  }
514  bool isImm0_31() const {
515    if (!isImm())
516      return false;
517    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
518    if (!MCE)
519      return false;
520    int64_t Val = MCE->getValue();
521    return (Val >= 0 && Val < 32);
522  }
523  bool isImm1_31() const {
524    if (!isImm())
525      return false;
526    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
527    if (!MCE)
528      return false;
529    int64_t Val = MCE->getValue();
530    return (Val >= 1 && Val < 32);
531  }
532  bool isImm1_32() const {
533    if (!isImm())
534      return false;
535    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
536    if (!MCE)
537      return false;
538    int64_t Val = MCE->getValue();
539    return (Val >= 1 && Val < 33);
540  }
541  bool isImm0_63() const {
542    if (!isImm())
543      return false;
544    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
545    if (!MCE)
546      return false;
547    int64_t Val = MCE->getValue();
548    return (Val >= 0 && Val < 64);
549  }
550  bool isImm1_63() const {
551    if (!isImm())
552      return false;
553    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
554    if (!MCE)
555      return false;
556    int64_t Val = MCE->getValue();
557    return (Val >= 1 && Val < 64);
558  }
559  bool isImm1_64() const {
560    if (!isImm())
561      return false;
562    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
563    if (!MCE)
564      return false;
565    int64_t Val = MCE->getValue();
566    return (Val >= 1 && Val < 65);
567  }
568  bool isImm0_127() const {
569    if (!isImm())
570      return false;
571    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
572    if (!MCE)
573      return false;
574    int64_t Val = MCE->getValue();
575    return (Val >= 0 && Val < 128);
576  }
577  bool isImm0_255() const {
578    if (!isImm())
579      return false;
580    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
581    if (!MCE)
582      return false;
583    int64_t Val = MCE->getValue();
584    return (Val >= 0 && Val < 256);
585  }
586  bool isImm0_65535() const {
587    if (!isImm())
588      return false;
589    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
590    if (!MCE)
591      return false;
592    int64_t Val = MCE->getValue();
593    return (Val >= 0 && Val < 65536);
594  }
595  bool isImm32_63() const {
596    if (!isImm())
597      return false;
598    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
599    if (!MCE)
600      return false;
601    int64_t Val = MCE->getValue();
602    return (Val >= 32 && Val < 64);
603  }
604  bool isLogicalImm32() const {
605    if (!isImm())
606      return false;
607    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
608    if (!MCE)
609      return false;
610    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 32);
611  }
612  bool isLogicalImm64() const {
613    if (!isImm())
614      return false;
615    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
616    if (!MCE)
617      return false;
618    return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
619  }
620  bool isShiftedImm() const { return Kind == k_ShiftedImm; }
621  bool isAddSubImm() const {
622    if (!isShiftedImm() && !isImm())
623      return false;
624
625    const MCExpr *Expr;
626
627    // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
628    if (isShiftedImm()) {
629      unsigned Shift = ShiftedImm.ShiftAmount;
630      Expr = ShiftedImm.Val;
631      if (Shift != 0 && Shift != 12)
632        return false;
633    } else {
634      Expr = getImm();
635    }
636
637    AArch64MCExpr::VariantKind ELFRefKind;
638    MCSymbolRefExpr::VariantKind DarwinRefKind;
639    int64_t Addend;
640    if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
641                                          DarwinRefKind, Addend)) {
642      return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
643          || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
644          || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
645          || ELFRefKind == AArch64MCExpr::VK_LO12
646          || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
647          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
648          || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
649          || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
650          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
651          || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
652          || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
653    }
654
655    // Otherwise it should be a real immediate in range:
656    const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
657    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
658  }
  bool isCondCode() const { return Kind == k_CondCode; }

  /// Constant encodable as an AdvSIMD modified immediate of "type 10"
  /// (delegates to AArch64_AM::isAdvSIMDModImmType10).
  bool isSIMDImmType10() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return false;
    return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
  }

  // PC-relative branch targets. A non-constant expression is accepted as-is
  // (returns true) and left for the fixup/relocation machinery; a constant
  // must be 4-byte aligned and fit the signed, 4-scaled immediate field.

  /// 26-bit branch target (B/BL).
  bool isBranchTarget26() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
  }

  /// 19-bit PC-relative label (conditional branch, CBZ/CBNZ, LDR literal).
  bool isPCRelLabel19() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
  }

  /// 14-bit branch target (TBZ/TBNZ).
  bool isBranchTarget14() const {
    if (!isImm())
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      return true;
    int64_t Val = MCE->getValue();
    if (Val & 0x3)
      return false;
    return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
  }
701
702  bool
703  isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
704    if (!isImm())
705      return false;
706
707    AArch64MCExpr::VariantKind ELFRefKind;
708    MCSymbolRefExpr::VariantKind DarwinRefKind;
709    int64_t Addend;
710    if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
711                                             DarwinRefKind, Addend)) {
712      return false;
713    }
714    if (DarwinRefKind != MCSymbolRefExpr::VK_None)
715      return false;
716
717    for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
718      if (ELFRefKind == AllowedModifiers[i])
719        return Addend == 0;
720    }
721
722    return false;
723  }
724
  // MOVZ/MOVK symbol-modifier predicates: each accepts exactly the :modifier:
  // set valid for a MOVZ or MOVK targeting the given 16-bit slice of the
  // value (G3 = highest 16 bits, G0 = lowest). The _NC variants (no overflow
  // check) are only valid for MOVK.

  bool isMovZSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
        AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G1,      AArch64MCExpr::VK_ABS_G1_S,
        AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
        AArch64MCExpr::VK_DTPREL_G1,
    };
    return isMovWSymbol(Variants);
  }

  bool isMovZSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
        AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG3() const {
    static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG2() const {
    static AArch64MCExpr::VariantKind Variants[] = {
        AArch64MCExpr::VK_ABS_G2_NC};
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG1() const {
    static AArch64MCExpr::VariantKind Variants[] = {
      AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
      AArch64MCExpr::VK_DTPREL_G1_NC
    };
    return isMovWSymbol(Variants);
  }

  bool isMovKSymbolG0() const {
    static AArch64MCExpr::VariantKind Variants[] = {
      AArch64MCExpr::VK_ABS_G0_NC,   AArch64MCExpr::VK_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
    };
    return isMovWSymbol(Variants);
  }
779
780  template<int RegWidth, int Shift>
781  bool isMOVZMovAlias() const {
782    if (!isImm()) return false;
783
784    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785    if (!CE) return false;
786    uint64_t Value = CE->getValue();
787
788    if (RegWidth == 32)
789      Value &= 0xffffffffULL;
790
791    // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
792    if (Value == 0 && Shift != 0)
793      return false;
794
795    return (Value & ~(0xffffULL << Shift)) == 0;
796  }
797
798  template<int RegWidth, int Shift>
799  bool isMOVNMovAlias() const {
800    if (!isImm()) return false;
801
802    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
803    if (!CE) return false;
804    uint64_t Value = CE->getValue();
805
806    // MOVZ takes precedence over MOVN.
807    for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
808      if ((Value & ~(0xffffULL << MOVZShift)) == 0)
809        return false;
810
811    Value = ~Value;
812    if (RegWidth == 32)
813      Value &= 0xffffffffULL;
814
815    return (Value & ~(0xffffULL << Shift)) == 0;
816  }
817
  bool isFPImm() const { return Kind == k_FPImm; }
  bool isBarrier() const { return Kind == k_Barrier; }
  bool isSysReg() const { return Kind == k_SysReg; }

  /// True if the name is recognized by the MRS system-register mapper for
  /// the feature bits captured when the operand was parsed.
  bool isMRSSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  /// MSR counterpart of isMRSSystemRegister.
  bool isMSRSystemRegister() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
    Mapper.fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }
  /// True if the name is a known PSTATE field (for MSR immediate forms).
  bool isSystemPStateField() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);

    return IsKnownRegister;
  }

  // Register-class predicates. Scalar and vector registers share k_Register;
  // Reg.isVector distinguishes them.
  bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
  bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
  bool isVectorRegLo() const {
    return Kind == k_Register && Reg.isVector &&
           AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
               Reg.RegNum);
  }
  // A 64-bit GPR in a position where the 32-bit sub-register is meant.
  bool isGPR32as64() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
  }

  bool isGPR64sp0() const {
    return Kind == k_Register && !Reg.isVector &&
      AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
  }
863
  /// Is this a vector list with the type implicit (presumably attached to the
  /// instruction itself)?
  template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
    return Kind == k_VectorList && VectorList.Count == NumRegs &&
           !VectorList.ElementKind;
  }

  /// Matches an explicitly-typed vector list (e.g. "{ v0.8b, v1.8b }"):
  /// register count, element count and element kind must all agree.
  template <unsigned NumRegs, unsigned NumElements, char ElementKind>
  bool isTypedVectorList() const {
    if (Kind != k_VectorList)
      return false;
    if (VectorList.Count != NumRegs)
      return false;
    if (VectorList.ElementKind != ElementKind)
      return false;
    return VectorList.NumElements == NumElements;
  }

  // Vector lane indices, bounded by the lane count for each element size
  // (B: 16 lanes, H: 8, S: 4, D: 2).
  bool isVectorIndex1() const {
    return Kind == k_VectorIndex && VectorIndex.Val == 1;
  }
  bool isVectorIndexB() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 16;
  }
  bool isVectorIndexH() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 8;
  }
  bool isVectorIndexS() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 4;
  }
  bool isVectorIndexD() const {
    return Kind == k_VectorIndex && VectorIndex.Val < 2;
  }
  bool isToken() const override { return Kind == k_Token; }
  bool isTokenEqual(StringRef Str) const {
    return Kind == k_Token && getToken() == Str;
  }
  bool isSysCR() const { return Kind == k_SysCR; }
  bool isPrefetch() const { return Kind == k_Prefetch; }
  bool isShiftExtend() const { return Kind == k_ShiftExtend; }

  /// A "shifter": one of the shift operators, including MSL.
  bool isShifter() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
            ST == AArch64_AM::MSL);
  }

  /// An "extend": one of the register-extend operators (LSL is also accepted
  /// in extend position) with an amount of at most 4.
  bool isExtend() const {
    if (!isShiftExtend())
      return false;

    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
            ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
            ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
            ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }
925
  /// Extend valid for a 32-bit source register: any extend except the
  /// 64-bit-only ones.
  bool isExtend64() const {
    if (!isExtend())
      return false;
    // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
  }
  /// Extend valid for a 64-bit source register (UXTX/SXTX/LSL, amount <= 4).
  bool isExtendLSL64() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
            ET == AArch64_AM::LSL) &&
           getShiftExtendAmount() <= 4;
  }

  // Memory-operand extends: Width is the access width in bits, and the
  // shift amount must be either 0 or log2 of the access size in bytes.

  /// 64-bit index register form (LSL or SXTX).
  template<int Width> bool isMemXExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }

  /// 32-bit index register form (UXTW or SXTW).
  template<int Width> bool isMemWExtend() const {
    if (!isExtend())
      return false;
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
           (getShiftExtendAmount() == Log2_32(Width / 8) ||
            getShiftExtendAmount() == 0);
  }
959
960  template <unsigned width>
961  bool isArithmeticShifter() const {
962    if (!isShifter())
963      return false;
964
965    // An arithmetic shifter is LSL, LSR, or ASR.
966    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
967    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
968            ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
969  }
970
971  template <unsigned width>
972  bool isLogicalShifter() const {
973    if (!isShifter())
974      return false;
975
976    // A logical shifter is LSL, LSR, ASR or ROR.
977    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
978    return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
979            ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
980           getShiftExtendAmount() < width;
981  }
982
  bool isMovImm32Shifter() const {
    if (!isShifter())
      return false;

    // A 32-bit MOVi shifter is LSL of 0 or 16.
    // (The previous comment listed the 64-bit set {0,16,32,48}; the code
    // below has always checked only 0 and 16.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16);
  }
994
  bool isMovImm64Shifter() const {
    if (!isShifter())
      return false;

    // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
    // (The previous comment listed the 32-bit set {0,16}; the code below
    // has always checked all four amounts.)
    AArch64_AM::ShiftExtendType ST = getShiftExtendType();
    if (ST != AArch64_AM::LSL)
      return false;
    uint64_t Val = getShiftExtendAmount();
    return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
  }
1006
1007  bool isLogicalVecShifter() const {
1008    if (!isShifter())
1009      return false;
1010
1011    // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1012    unsigned Shift = getShiftExtendAmount();
1013    return getShiftExtendType() == AArch64_AM::LSL &&
1014           (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1015  }
1016
  bool isLogicalVecHalfWordShifter() const {
    if (!isLogicalVecShifter())
      return false;

    // A logical vector halfword shifter is a left shift by 0 or 8.
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::LSL &&
           (Shift == 0 || Shift == 8);
  }
1026
  bool isMoveVecShifter() const {
    if (!isShiftExtend())
      return false;

    // A move vector shifter is an MSL shift by 8 or 16 (the previous
    // comment incorrectly described this as a logical/LSL shifter).
    unsigned Shift = getShiftExtendAmount();
    return getShiftExtendType() == AArch64_AM::MSL &&
           (Shift == 8 || Shift == 16);
  }
1036
1037  // Fallback unscaled operands are for aliases of LDR/STR that fall back
1038  // to LDUR/STUR when the offset is not legal for the former but is for
1039  // the latter. As such, in addition to checking for being a legal unscaled
1040  // address, also check that it is not a legal scaled address. This avoids
1041  // ambiguity in the matcher.
1042  template<int Width>
1043  bool isSImm9OffsetFB() const {
1044    return isSImm9() && !isUImm12Offset<Width / 8>();
1045  }
1046
1047  bool isAdrpLabel() const {
1048    // Validation was handled during parsing, so we just sanity check that
1049    // something didn't go haywire.
1050    if (!isImm())
1051        return false;
1052
1053    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1054      int64_t Val = CE->getValue();
1055      int64_t Min = - (4096 * (1LL << (21 - 1)));
1056      int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1057      return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1058    }
1059
1060    return true;
1061  }
1062
1063  bool isAdrLabel() const {
1064    // Validation was handled during parsing, so we just sanity check that
1065    // something didn't go haywire.
1066    if (!isImm())
1067        return false;
1068
1069    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1070      int64_t Val = CE->getValue();
1071      int64_t Min = - (1LL << (21 - 1));
1072      int64_t Max = ((1LL << (21 - 1)) - 1);
1073      return Val >= Min && Val <= Max;
1074    }
1075
1076    return true;
1077  }
1078
1079  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1080    // Add as immediates when possible.  Null MCExpr = 0.
1081    if (!Expr)
1082      Inst.addOperand(MCOperand::CreateImm(0));
1083    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1084      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1085    else
1086      Inst.addOperand(MCOperand::CreateExpr(Expr));
1087  }
1088
  // Emit the parsed register number as a single register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1093
  // The operand was parsed as a 64-bit (X/GPR64) register but the
  // instruction wants the 32-bit (W/GPR32) register with the same
  // hardware encoding; translate via the register info tables.
  void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));

    const MCRegisterInfo *RI = Ctx.getRegisterInfo();
    uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
        RI->getEncodingValue(getReg()));

    Inst.addOperand(MCOperand::CreateReg(Reg));
  }
1105
  // Vector registers are stored internally as Q registers; emit the
  // same-numbered D register (Q0+i -> D0+i).
  void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
  }
1112
  // The operand is already a Q (128-bit) register; emit it unchanged.
  void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(
        AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1119
  // Emit the register as parsed; no range assertion is done here.
  void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1124
1125  template <unsigned NumRegs>
1126  void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1127    assert(N == 1 && "Invalid number of operands!");
1128    static unsigned FirstRegs[] = { AArch64::D0,       AArch64::D0_D1,
1129                                    AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1130    unsigned FirstReg = FirstRegs[NumRegs - 1];
1131
1132    Inst.addOperand(
1133        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1134  }
1135
1136  template <unsigned NumRegs>
1137  void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1138    assert(N == 1 && "Invalid number of operands!");
1139    static unsigned FirstRegs[] = { AArch64::Q0,       AArch64::Q0_Q1,
1140                                    AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1141    unsigned FirstReg = FirstRegs[NumRegs - 1];
1142
1143    Inst.addOperand(
1144        MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
1145  }
1146
  // Emit the vector lane index as an immediate.
  void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1151
  // Emit the byte-element lane index as an immediate.
  void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1156
  // Emit the halfword-element lane index as an immediate.
  void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1161
  // Emit the word-element lane index as an immediate.
  void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1166
  // Emit the doubleword-element lane index as an immediate.
  void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1171
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The expression is handed to addExpr, which folds constants into plain
    // immediates and passes other expressions through unchanged.
    // NOTE(review): a previous comment here claimed pageoff-symref addends
    // are adjusted, but no such adjustment happens in this function —
    // confirm it is handled elsewhere.
    addExpr(Inst, getImm());
  }
1179
  // ADD/SUB immediates are a (value, shift) pair; plain immediates get an
  // implicit shift of 0.
  void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (isShiftedImm()) {
      addExpr(Inst, getShiftedImmVal());
      Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
    } else {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
    }
  }
1190
  // Emit the condition code as an immediate.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }
1195
  // ADRP encodes a page index: constants are shifted down by 12 bits;
  // non-constant expressions are added as-is via addExpr.
  void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    if (!MCE)
      addExpr(Inst, getImm());
    else
      Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
  }
1204
  // ADR labels need no scaling; reuse the plain immediate path.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    addImmOperands(Inst, N);
  }
1208
  // Scaled unsigned 12-bit offset: constants are divided by Scale (bytes
  // down to access-size units); non-constant expressions are passed through
  // unscaled.
  template<int Scale>
  void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());

    if (!MCE) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      return;
    }
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
  }
1220
  // Signed 9-bit offset: must be a constant here; emitted unchanged.
  void addSImm9Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1227
  // Signed 7-bit scaled offset: the stored byte offset is scaled down by 4.
  void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
  }
1234
  // Signed 7-bit scaled offset: the stored byte offset is scaled down by 8.
  void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
  }
1241
  // Signed 7-bit scaled offset: the stored byte offset is scaled down by 16.
  void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
  }
1248
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1255
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm1_8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1262
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1269
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1276
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1283
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm1_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1290
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1297
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm0_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1304
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm1_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1311
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm1_64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1318
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm0_127Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1325
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1332
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1339
  // Requires a constant immediate (enforced by the assert); emitted as-is.
  void addImm32_63Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid constant immediate operand!");
    Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
  }
1346
  // Re-encode the constant into the 32-bit logical-immediate form used by
  // the instruction encoding.
  void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid logical immediate operand!");
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1354
  // Re-encode the constant into the 64-bit logical-immediate form used by
  // the instruction encoding.
  void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid logical immediate operand!");
    uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1362
  // Re-encode the constant as an AdvSIMD modified immediate (type 10).
  void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
    assert(MCE && "Invalid immediate operand!");
    uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
    Inst.addOperand(MCOperand::CreateImm(encoding));
  }
1370
1371  void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1372    // Branch operands don't encode the low bits, so shift them off
1373    // here. If it's a label, however, just put it on directly as there's
1374    // not enough information now to do anything.
1375    assert(N == 1 && "Invalid number of operands!");
1376    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1377    if (!MCE) {
1378      addExpr(Inst, getImm());
1379      return;
1380    }
1381    assert(MCE && "Invalid constant immediate operand!");
1382    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1383  }
1384
1385  void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1386    // Branch operands don't encode the low bits, so shift them off
1387    // here. If it's a label, however, just put it on directly as there's
1388    // not enough information now to do anything.
1389    assert(N == 1 && "Invalid number of operands!");
1390    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1391    if (!MCE) {
1392      addExpr(Inst, getImm());
1393      return;
1394    }
1395    assert(MCE && "Invalid constant immediate operand!");
1396    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1397  }
1398
1399  void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1400    // Branch operands don't encode the low bits, so shift them off
1401    // here. If it's a label, however, just put it on directly as there's
1402    // not enough information now to do anything.
1403    assert(N == 1 && "Invalid number of operands!");
1404    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1405    if (!MCE) {
1406      addExpr(Inst, getImm());
1407      return;
1408    }
1409    assert(MCE && "Invalid constant immediate operand!");
1410    Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1411  }
1412
  // Emit the stored FP immediate value as an immediate operand.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }
1417
  // Emit the barrier option value as an immediate.
  void addBarrierOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getBarrier()));
  }
1422
1423  void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1424    assert(N == 1 && "Invalid number of operands!");
1425
1426    bool Valid;
1427    auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1428    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1429
1430    Inst.addOperand(MCOperand::CreateImm(Bits));
1431  }
1432
1433  void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1434    assert(N == 1 && "Invalid number of operands!");
1435
1436    bool Valid;
1437    auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1438    uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1439
1440    Inst.addOperand(MCOperand::CreateImm(Bits));
1441  }
1442
1443  void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1444    assert(N == 1 && "Invalid number of operands!");
1445
1446    bool Valid;
1447    uint32_t Bits =
1448        AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1449
1450    Inst.addOperand(MCOperand::CreateImm(Bits));
1451  }
1452
  // Emit the system Cn/Cm field value as an immediate.
  void addSysCROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getSysCR()));
  }
1457
  // Emit the prefetch operation value as an immediate.
  void addPrefetchOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
  }
1462
  // Pack the shift type and amount into a single shifter immediate.
  void addShifterOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Imm =
        AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1469
  // Pack an arithmetic extend into its immediate form; an explicit LSL is
  // encoded as UXTW for this (32-bit source) variant.
  void addExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1477
  // Pack an arithmetic extend into its immediate form; an explicit LSL is
  // encoded as UXTX for this (64-bit source) variant.
  void addExtend64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
    unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1485
  // Memory-operand extends are two immediates: a signed flag (SXTW/SXTX)
  // and whether the shift amount is non-zero.
  void addMemExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
  }
1493
  // For 8-bit load/store instructions with a register offset, both the
  // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
  // they're disambiguated by whether the shift was explicit or implicit rather
  // than its size.
  void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    AArch64_AM::ShiftExtendType ET = getShiftExtendType();
    bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
    Inst.addOperand(MCOperand::CreateImm(IsSigned));
    // Second operand: was the (zero) shift amount written explicitly?
    Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
  }
1505
  // MOVZ alias: emit the 16-bit chunk of the value at the given shift.
  template<int Shift>
  void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
  }
1514
  // MOVN alias: MOVN materialises the bitwise-NOT of its operand, so emit
  // the 16-bit chunk of ~Value at the given shift.
  template<int Shift>
  void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
  }
1523
1524  void print(raw_ostream &OS) const override;
1525
1526  static AArch64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1527                                   MCContext &Ctx) {
1528    AArch64Operand *Op = new AArch64Operand(k_Token, Ctx);
1529    Op->Tok.Data = Str.data();
1530    Op->Tok.Length = Str.size();
1531    Op->Tok.IsSuffix = IsSuffix;
1532    Op->StartLoc = S;
1533    Op->EndLoc = S;
1534    return Op;
1535  }
1536
1537  static AArch64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1538                                 SMLoc E, MCContext &Ctx) {
1539    AArch64Operand *Op = new AArch64Operand(k_Register, Ctx);
1540    Op->Reg.RegNum = RegNum;
1541    Op->Reg.isVector = isVector;
1542    Op->StartLoc = S;
1543    Op->EndLoc = E;
1544    return Op;
1545  }
1546
1547  static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1548                                        unsigned NumElements, char ElementKind,
1549                                        SMLoc S, SMLoc E, MCContext &Ctx) {
1550    AArch64Operand *Op = new AArch64Operand(k_VectorList, Ctx);
1551    Op->VectorList.RegNum = RegNum;
1552    Op->VectorList.Count = Count;
1553    Op->VectorList.NumElements = NumElements;
1554    Op->VectorList.ElementKind = ElementKind;
1555    Op->StartLoc = S;
1556    Op->EndLoc = E;
1557    return Op;
1558  }
1559
1560  static AArch64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1561                                         MCContext &Ctx) {
1562    AArch64Operand *Op = new AArch64Operand(k_VectorIndex, Ctx);
1563    Op->VectorIndex.Val = Idx;
1564    Op->StartLoc = S;
1565    Op->EndLoc = E;
1566    return Op;
1567  }
1568
1569  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1570                                 MCContext &Ctx) {
1571    AArch64Operand *Op = new AArch64Operand(k_Immediate, Ctx);
1572    Op->Imm.Val = Val;
1573    Op->StartLoc = S;
1574    Op->EndLoc = E;
1575    return Op;
1576  }
1577
1578  static AArch64Operand *CreateShiftedImm(const MCExpr *Val,
1579                                          unsigned ShiftAmount, SMLoc S,
1580                                          SMLoc E, MCContext &Ctx) {
1581    AArch64Operand *Op = new AArch64Operand(k_ShiftedImm, Ctx);
1582    Op->ShiftedImm .Val = Val;
1583    Op->ShiftedImm.ShiftAmount = ShiftAmount;
1584    Op->StartLoc = S;
1585    Op->EndLoc = E;
1586    return Op;
1587  }
1588
1589  static AArch64Operand *CreateCondCode(AArch64CC::CondCode Code, SMLoc S,
1590                                        SMLoc E, MCContext &Ctx) {
1591    AArch64Operand *Op = new AArch64Operand(k_CondCode, Ctx);
1592    Op->CondCode.Code = Code;
1593    Op->StartLoc = S;
1594    Op->EndLoc = E;
1595    return Op;
1596  }
1597
1598  static AArch64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1599    AArch64Operand *Op = new AArch64Operand(k_FPImm, Ctx);
1600    Op->FPImm.Val = Val;
1601    Op->StartLoc = S;
1602    Op->EndLoc = S;
1603    return Op;
1604  }
1605
1606  static AArch64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1607    AArch64Operand *Op = new AArch64Operand(k_Barrier, Ctx);
1608    Op->Barrier.Val = Val;
1609    Op->StartLoc = S;
1610    Op->EndLoc = S;
1611    return Op;
1612  }
1613
1614  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S,
1615                                    uint64_t FeatureBits, MCContext &Ctx) {
1616    AArch64Operand *Op = new AArch64Operand(k_SysReg, Ctx);
1617    Op->SysReg.Data = Str.data();
1618    Op->SysReg.Length = Str.size();
1619    Op->SysReg.FeatureBits = FeatureBits;
1620    Op->StartLoc = S;
1621    Op->EndLoc = S;
1622    return Op;
1623  }
1624
1625  static AArch64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1626                                   MCContext &Ctx) {
1627    AArch64Operand *Op = new AArch64Operand(k_SysCR, Ctx);
1628    Op->SysCRImm.Val = Val;
1629    Op->StartLoc = S;
1630    Op->EndLoc = E;
1631    return Op;
1632  }
1633
1634  static AArch64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1635    AArch64Operand *Op = new AArch64Operand(k_Prefetch, Ctx);
1636    Op->Prefetch.Val = Val;
1637    Op->StartLoc = S;
1638    Op->EndLoc = S;
1639    return Op;
1640  }
1641
1642  static AArch64Operand *CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp,
1643                                         unsigned Val, bool HasExplicitAmount,
1644                                         SMLoc S, SMLoc E, MCContext &Ctx) {
1645    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, Ctx);
1646    Op->ShiftExtend.Type = ShOp;
1647    Op->ShiftExtend.Amount = Val;
1648    Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1649    Op->StartLoc = S;
1650    Op->EndLoc = E;
1651    return Op;
1652  }
1653};
1654
1655} // end anonymous namespace.
1656
/// Debug dump of a parsed operand; each kind prints a bracketed description.
void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImm:
    // Show both the encoded value and the FP number it represents.
    OS << "<fpimm " << getFPImm() << "("
       << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
    break;
  case k_Barrier: {
    bool Valid;
    StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
    if (Valid)
      OS << "<barrier " << Name << ">";
    else
      OS << "<barrier invalid #" << getBarrier() << ">";
    break;
  }
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_ShiftedImm: {
    unsigned Shift = getShiftedImmShift();
    OS << "<shiftedimm ";
    getShiftedImmVal()->print(OS);
    OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
    break;
  }
  case k_CondCode:
    OS << "<condcode " << getCondCode() << ">";
    break;
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_VectorList: {
    // Print every register number in the list.
    OS << "<vectorlist ";
    unsigned Reg = getVectorListStart();
    for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
      OS << Reg + i << " ";
    OS << ">";
    break;
  }
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  case k_SysReg:
    OS << "<sysreg: " << getSysReg() << '>';
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_SysCR:
    OS << "c" << getSysCR();
    break;
  case k_Prefetch: {
    bool Valid;
    StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
    if (Valid)
      OS << "<prfop " << Name << ">";
    else
      OS << "<prfop invalid #" << getPrefetch() << ">";
    break;
  }
  case k_ShiftExtend: {
    OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
       << getShiftExtendAmount();
    // Mark amounts that were implied rather than written by the user.
    if (!hasShiftExtendAmount())
      OS << "<imp>";
    OS << '>';
    break;
  }
  }
}
1727
1728/// @name Auto-generated Match Functions
1729/// {
1730
1731static unsigned MatchRegisterName(StringRef Name);
1732
1733/// }
1734
1735static unsigned matchVectorRegName(StringRef Name) {
1736  return StringSwitch<unsigned>(Name)
1737      .Case("v0", AArch64::Q0)
1738      .Case("v1", AArch64::Q1)
1739      .Case("v2", AArch64::Q2)
1740      .Case("v3", AArch64::Q3)
1741      .Case("v4", AArch64::Q4)
1742      .Case("v5", AArch64::Q5)
1743      .Case("v6", AArch64::Q6)
1744      .Case("v7", AArch64::Q7)
1745      .Case("v8", AArch64::Q8)
1746      .Case("v9", AArch64::Q9)
1747      .Case("v10", AArch64::Q10)
1748      .Case("v11", AArch64::Q11)
1749      .Case("v12", AArch64::Q12)
1750      .Case("v13", AArch64::Q13)
1751      .Case("v14", AArch64::Q14)
1752      .Case("v15", AArch64::Q15)
1753      .Case("v16", AArch64::Q16)
1754      .Case("v17", AArch64::Q17)
1755      .Case("v18", AArch64::Q18)
1756      .Case("v19", AArch64::Q19)
1757      .Case("v20", AArch64::Q20)
1758      .Case("v21", AArch64::Q21)
1759      .Case("v22", AArch64::Q22)
1760      .Case("v23", AArch64::Q23)
1761      .Case("v24", AArch64::Q24)
1762      .Case("v25", AArch64::Q25)
1763      .Case("v26", AArch64::Q26)
1764      .Case("v27", AArch64::Q27)
1765      .Case("v28", AArch64::Q28)
1766      .Case("v29", AArch64::Q29)
1767      .Case("v30", AArch64::Q30)
1768      .Case("v31", AArch64::Q31)
1769      .Default(0);
1770}
1771
1772static bool isValidVectorKind(StringRef Name) {
1773  return StringSwitch<bool>(Name.lower())
1774      .Case(".8b", true)
1775      .Case(".16b", true)
1776      .Case(".4h", true)
1777      .Case(".8h", true)
1778      .Case(".2s", true)
1779      .Case(".4s", true)
1780      .Case(".1d", true)
1781      .Case(".2d", true)
1782      .Case(".1q", true)
1783      // Accept the width neutral ones, too, for verbose syntax. If those
1784      // aren't used in the right places, the token operand won't match so
1785      // all will work out.
1786      .Case(".b", true)
1787      .Case(".h", true)
1788      .Case(".s", true)
1789      .Case(".d", true)
1790      .Default(false);
1791}
1792
1793static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1794                                 char &ElementKind) {
1795  assert(isValidVectorKind(Name));
1796
1797  ElementKind = Name.lower()[Name.size() - 1];
1798  NumElements = 0;
1799
1800  if (Name.size() == 2)
1801    return;
1802
1803  // Parse the lane count
1804  Name = Name.drop_front();
1805  while (isdigit(Name.front())) {
1806    NumElements = 10 * NumElements + (Name.front() - '0');
1807    Name = Name.drop_front();
1808  }
1809}
1810
1811bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1812                                     SMLoc &EndLoc) {
1813  StartLoc = getLoc();
1814  RegNo = tryParseRegister();
1815  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1816  return (RegNo == (unsigned)-1);
1817}
1818
1819/// tryParseRegister - Try to parse a register name. The token must be an
1820/// Identifier when called, and if it is a register name the token is eaten and
1821/// the register is added to the operand list.
1822int AArch64AsmParser::tryParseRegister() {
1823  const AsmToken &Tok = Parser.getTok();
1824  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1825
1826  std::string lowerCase = Tok.getString().lower();
1827  unsigned RegNum = MatchRegisterName(lowerCase);
1828  // Also handle a few aliases of registers.
1829  if (RegNum == 0)
1830    RegNum = StringSwitch<unsigned>(lowerCase)
1831                 .Case("fp",  AArch64::FP)
1832                 .Case("lr",  AArch64::LR)
1833                 .Case("x31", AArch64::XZR)
1834                 .Case("w31", AArch64::WZR)
1835                 .Default(0);
1836
1837  if (RegNum == 0)
1838    return -1;
1839
1840  Parser.Lex(); // Eat identifier token.
1841  return RegNum;
1842}
1843
1844/// tryMatchVectorRegister - Try to parse a vector register name with optional
1845/// kind specifier. If it is a register specifier, eat the token and return it.
1846int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1847  if (Parser.getTok().isNot(AsmToken::Identifier)) {
1848    TokError("vector register expected");
1849    return -1;
1850  }
1851
1852  StringRef Name = Parser.getTok().getString();
1853  // If there is a kind specifier, it's separated from the register name by
1854  // a '.'.
1855  size_t Start = 0, Next = Name.find('.');
1856  StringRef Head = Name.slice(Start, Next);
1857  unsigned RegNum = matchVectorRegName(Head);
1858  if (RegNum) {
1859    if (Next != StringRef::npos) {
1860      Kind = Name.slice(Next, StringRef::npos);
1861      if (!isValidVectorKind(Kind)) {
1862        TokError("invalid vector kind qualifier");
1863        return -1;
1864      }
1865    }
1866    Parser.Lex(); // Eat the register token.
1867    return RegNum;
1868  }
1869
1870  if (expected)
1871    TokError("vector register expected");
1872  return -1;
1873}
1874
1875/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1876AArch64AsmParser::OperandMatchResultTy
1877AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1878  SMLoc S = getLoc();
1879
1880  if (Parser.getTok().isNot(AsmToken::Identifier)) {
1881    Error(S, "Expected cN operand where 0 <= N <= 15");
1882    return MatchOperand_ParseFail;
1883  }
1884
1885  StringRef Tok = Parser.getTok().getIdentifier();
1886  if (Tok[0] != 'c' && Tok[0] != 'C') {
1887    Error(S, "Expected cN operand where 0 <= N <= 15");
1888    return MatchOperand_ParseFail;
1889  }
1890
1891  uint32_t CRNum;
1892  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1893  if (BadNum || CRNum > 15) {
1894    Error(S, "Expected cN operand where 0 <= N <= 15");
1895    return MatchOperand_ParseFail;
1896  }
1897
1898  Parser.Lex(); // Eat identifier token.
1899  Operands.push_back(
1900      AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1901  return MatchOperand_Success;
1902}
1903
1904/// tryParsePrefetch - Try to parse a prefetch operand.
1905AArch64AsmParser::OperandMatchResultTy
1906AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1907  SMLoc S = getLoc();
1908  const AsmToken &Tok = Parser.getTok();
1909  // Either an identifier for named values or a 5-bit immediate.
1910  bool Hash = Tok.is(AsmToken::Hash);
1911  if (Hash || Tok.is(AsmToken::Integer)) {
1912    if (Hash)
1913      Parser.Lex(); // Eat hash token.
1914    const MCExpr *ImmVal;
1915    if (getParser().parseExpression(ImmVal))
1916      return MatchOperand_ParseFail;
1917
1918    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1919    if (!MCE) {
1920      TokError("immediate value expected for prefetch operand");
1921      return MatchOperand_ParseFail;
1922    }
1923    unsigned prfop = MCE->getValue();
1924    if (prfop > 31) {
1925      TokError("prefetch operand out of range, [0,31] expected");
1926      return MatchOperand_ParseFail;
1927    }
1928
1929    Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1930    return MatchOperand_Success;
1931  }
1932
1933  if (Tok.isNot(AsmToken::Identifier)) {
1934    TokError("pre-fetch hint expected");
1935    return MatchOperand_ParseFail;
1936  }
1937
1938  bool Valid;
1939  unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1940  if (!Valid) {
1941    TokError("pre-fetch hint expected");
1942    return MatchOperand_ParseFail;
1943  }
1944
1945  Parser.Lex(); // Eat identifier token.
1946  Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1947  return MatchOperand_Success;
1948}
1949
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction. Accepts a plain label (treated as an ELF @page reference),
/// an explicitly page-qualified symbol reference, or a plain immediate.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
  SMLoc S = getLoc();
  const MCExpr *Expr;

  // An optional '#' may introduce the operand.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.
  }

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  // Classify the expression and validate that any modifier is one the ADRP
  // relocation machinery can represent.
  AArch64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  int64_t Addend;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == AArch64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr =
          AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
               Addend != 0) {
      // Darwin GOT/TLVP page references cannot carry an addend.
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;
    }
  }

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
}
1999
2000/// tryParseAdrLabel - Parse and validate a source label for the ADR
2001/// instruction.
2002AArch64AsmParser::OperandMatchResultTy
2003AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2004  SMLoc S = getLoc();
2005  const MCExpr *Expr;
2006
2007  if (Parser.getTok().is(AsmToken::Hash)) {
2008    Parser.Lex(); // Eat hash token.
2009  }
2010
2011  if (getParser().parseExpression(Expr))
2012    return MatchOperand_ParseFail;
2013
2014  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2015  Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2016
2017  return MatchOperand_Success;
2018}
2019
/// tryParseFPImm - A floating point immediate expression operand. Produces
/// the 8-bit AArch64 FP immediate encoding (or -1 for the special-cased
/// value 0.0, resolved later to the zero register).
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
  SMLoc S = getLoc();

  // Remember whether a '#' introduced the operand: without one, a
  // non-numeric token is "no match" rather than a hard error.
  bool Hash = false;
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'
    Hash = true;
  }

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm yields -1 when the value has no 8-bit encoding.
    int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val;
    if (!isNegative && Tok.getString().startswith("0x")) {
      // Hex spelling: the byte is taken as the already-encoded immediate.
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
      }
    } else {
      // Decimal integer spelling of an FP value (e.g. "#1" meaning 1.0).
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // If we had a '-' in front, toggle the sign bit.
      IntVal ^= (uint64_t)isNegative << 63;
      // NOTE(review): unlike the Real branch above, an unencodable value
      // (getFP64Imm == -1) is not diagnosed here — confirm intended.
      Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
    }
    Parser.Lex(); // Eat the token.
    Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  // Without a leading '#' this simply wasn't an FP immediate operand.
  if (!Hash)
    return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
2081
2082/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2083AArch64AsmParser::OperandMatchResultTy
2084AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2085  SMLoc S = getLoc();
2086
2087  if (Parser.getTok().is(AsmToken::Hash))
2088    Parser.Lex(); // Eat '#'
2089  else if (Parser.getTok().isNot(AsmToken::Integer))
2090    // Operand should start from # or should be integer, emit error otherwise.
2091    return MatchOperand_NoMatch;
2092
2093  const MCExpr *Imm;
2094  if (parseSymbolicImmVal(Imm))
2095    return MatchOperand_ParseFail;
2096  else if (Parser.getTok().isNot(AsmToken::Comma)) {
2097    uint64_t ShiftAmount = 0;
2098    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2099    if (MCE) {
2100      int64_t Val = MCE->getValue();
2101      if (Val > 0xfff && (Val & 0xfff) == 0) {
2102        Imm = MCConstantExpr::Create(Val >> 12, getContext());
2103        ShiftAmount = 12;
2104      }
2105    }
2106    SMLoc E = Parser.getTok().getLoc();
2107    Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2108                                                        getContext()));
2109    return MatchOperand_Success;
2110  }
2111
2112  // Eat ','
2113  Parser.Lex();
2114
2115  // The optional operand must be "lsl #N" where N is non-negative.
2116  if (!Parser.getTok().is(AsmToken::Identifier) ||
2117      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2118    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2119    return MatchOperand_ParseFail;
2120  }
2121
2122  // Eat 'lsl'
2123  Parser.Lex();
2124
2125  if (Parser.getTok().is(AsmToken::Hash)) {
2126    Parser.Lex();
2127  }
2128
2129  if (Parser.getTok().isNot(AsmToken::Integer)) {
2130    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2131    return MatchOperand_ParseFail;
2132  }
2133
2134  int64_t ShiftAmount = Parser.getTok().getIntVal();
2135
2136  if (ShiftAmount < 0) {
2137    Error(Parser.getTok().getLoc(), "positive shift amount required");
2138    return MatchOperand_ParseFail;
2139  }
2140  Parser.Lex(); // Eat the number
2141
2142  SMLoc E = Parser.getTok().getLoc();
2143  Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2144                                                      S, E, getContext()));
2145  return MatchOperand_Success;
2146}
2147
2148/// parseCondCodeString - Parse a Condition Code string.
2149AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2150  AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2151                    .Case("eq", AArch64CC::EQ)
2152                    .Case("ne", AArch64CC::NE)
2153                    .Case("cs", AArch64CC::HS)
2154                    .Case("hs", AArch64CC::HS)
2155                    .Case("cc", AArch64CC::LO)
2156                    .Case("lo", AArch64CC::LO)
2157                    .Case("mi", AArch64CC::MI)
2158                    .Case("pl", AArch64CC::PL)
2159                    .Case("vs", AArch64CC::VS)
2160                    .Case("vc", AArch64CC::VC)
2161                    .Case("hi", AArch64CC::HI)
2162                    .Case("ls", AArch64CC::LS)
2163                    .Case("ge", AArch64CC::GE)
2164                    .Case("lt", AArch64CC::LT)
2165                    .Case("gt", AArch64CC::GT)
2166                    .Case("le", AArch64CC::LE)
2167                    .Case("al", AArch64CC::AL)
2168                    .Case("nv", AArch64CC::NV)
2169                    .Default(AArch64CC::Invalid);
2170  return CC;
2171}
2172
2173/// parseCondCode - Parse a Condition Code operand.
2174bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2175                                     bool invertCondCode) {
2176  SMLoc S = getLoc();
2177  const AsmToken &Tok = Parser.getTok();
2178  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2179
2180  StringRef Cond = Tok.getString();
2181  AArch64CC::CondCode CC = parseCondCodeString(Cond);
2182  if (CC == AArch64CC::Invalid)
2183    return TokError("invalid condition code");
2184  Parser.Lex(); // Eat identifier token.
2185
2186  if (invertCondCode)
2187    CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2188
2189  Operands.push_back(
2190      AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2191  return false;
2192}
2193
2194/// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2195/// them if present.
2196AArch64AsmParser::OperandMatchResultTy
2197AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2198  const AsmToken &Tok = Parser.getTok();
2199  std::string LowerID = Tok.getString().lower();
2200  AArch64_AM::ShiftExtendType ShOp =
2201      StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2202          .Case("lsl", AArch64_AM::LSL)
2203          .Case("lsr", AArch64_AM::LSR)
2204          .Case("asr", AArch64_AM::ASR)
2205          .Case("ror", AArch64_AM::ROR)
2206          .Case("msl", AArch64_AM::MSL)
2207          .Case("uxtb", AArch64_AM::UXTB)
2208          .Case("uxth", AArch64_AM::UXTH)
2209          .Case("uxtw", AArch64_AM::UXTW)
2210          .Case("uxtx", AArch64_AM::UXTX)
2211          .Case("sxtb", AArch64_AM::SXTB)
2212          .Case("sxth", AArch64_AM::SXTH)
2213          .Case("sxtw", AArch64_AM::SXTW)
2214          .Case("sxtx", AArch64_AM::SXTX)
2215          .Default(AArch64_AM::InvalidShiftExtend);
2216
2217  if (ShOp == AArch64_AM::InvalidShiftExtend)
2218    return MatchOperand_NoMatch;
2219
2220  SMLoc S = Tok.getLoc();
2221  Parser.Lex();
2222
2223  bool Hash = getLexer().is(AsmToken::Hash);
2224  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2225    if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2226        ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2227        ShOp == AArch64_AM::MSL) {
2228      // We expect a number here.
2229      TokError("expected #imm after shift specifier");
2230      return MatchOperand_ParseFail;
2231    }
2232
2233    // "extend" type operatoins don't need an immediate, #0 is implicit.
2234    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2235    Operands.push_back(
2236        AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2237    return MatchOperand_Success;
2238  }
2239
2240  if (Hash)
2241    Parser.Lex(); // Eat the '#'.
2242
2243  // Make sure we do actually have a number
2244  if (!Parser.getTok().is(AsmToken::Integer)) {
2245    Error(Parser.getTok().getLoc(),
2246          "expected integer shift amount");
2247    return MatchOperand_ParseFail;
2248  }
2249
2250  const MCExpr *ImmVal;
2251  if (getParser().parseExpression(ImmVal))
2252    return MatchOperand_ParseFail;
2253
2254  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2255  if (!MCE) {
2256    TokError("expected #imm after shift specifier");
2257    return MatchOperand_ParseFail;
2258  }
2259
2260  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2261  Operands.push_back(AArch64Operand::CreateShiftExtend(
2262      ShOp, MCE->getValue(), true, S, E, getContext()));
2263  return MatchOperand_Success;
2264}
2265
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
                                   OperandVector &Operands) {
  // None of these aliases accept a '.'-suffixed (condition-code) spelling.
  if (Name.find('.') != StringRef::npos)
    return TokError("invalid operand");

  Mnemonic = Name;
  Operands.push_back(
      AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));

  const AsmToken &Tok = Parser.getTok();
  StringRef Op = Tok.getString();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;

// Expand an alias into the four SYS operands: op1, Cn, Cm, op2.
#define SYS_ALIAS(op1, Cn, Cm, op2)                                            \
  do {                                                                         \
    Expr = MCConstantExpr::Create(op1, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));           \
    Operands.push_back(                                                        \
        AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));           \
    Expr = MCConstantExpr::Create(op2, getContext());                          \
    Operands.push_back(                                                        \
        AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));           \
  } while (0)

  if (Mnemonic == "ic") {
    if (!Op.compare_lower("ialluis")) {
      // SYS #0, C7, C1, #0
      SYS_ALIAS(0, 7, 1, 0);
    } else if (!Op.compare_lower("iallu")) {
      // SYS #0, C7, C5, #0
      SYS_ALIAS(0, 7, 5, 0);
    } else if (!Op.compare_lower("ivau")) {
      // SYS #3, C7, C5, #1
      SYS_ALIAS(3, 7, 5, 1);
    } else {
      return TokError("invalid operand for IC instruction");
    }
  } else if (Mnemonic == "dc") {
    if (!Op.compare_lower("zva")) {
      // SYS #3, C7, C4, #1
      SYS_ALIAS(3, 7, 4, 1);
    } else if (!Op.compare_lower("ivac")) {
      // SYS #0, C7, C6, #1
      SYS_ALIAS(0, 7, 6, 1);
    } else if (!Op.compare_lower("isw")) {
      // SYS #0, C7, C6, #2
      SYS_ALIAS(0, 7, 6, 2);
    } else if (!Op.compare_lower("cvac")) {
      // SYS #3, C7, C10, #1
      SYS_ALIAS(3, 7, 10, 1);
    } else if (!Op.compare_lower("csw")) {
      // SYS #0, C7, C10, #2
      SYS_ALIAS(0, 7, 10, 2);
    } else if (!Op.compare_lower("cvau")) {
      // SYS #3, C7, C11, #1
      SYS_ALIAS(3, 7, 11, 1);
    } else if (!Op.compare_lower("civac")) {
      // SYS #3, C7, C14, #1
      SYS_ALIAS(3, 7, 14, 1);
    } else if (!Op.compare_lower("cisw")) {
      // SYS #0, C7, C14, #2
      SYS_ALIAS(0, 7, 14, 2);
    } else {
      return TokError("invalid operand for DC instruction");
    }
  } else if (Mnemonic == "at") {
    if (!Op.compare_lower("s1e1r")) {
      // SYS #0, C7, C8, #0
      SYS_ALIAS(0, 7, 8, 0);
    } else if (!Op.compare_lower("s1e2r")) {
      // SYS #4, C7, C8, #0
      SYS_ALIAS(4, 7, 8, 0);
    } else if (!Op.compare_lower("s1e3r")) {
      // SYS #6, C7, C8, #0
      SYS_ALIAS(6, 7, 8, 0);
    } else if (!Op.compare_lower("s1e1w")) {
      // SYS #0, C7, C8, #1
      SYS_ALIAS(0, 7, 8, 1);
    } else if (!Op.compare_lower("s1e2w")) {
      // SYS #4, C7, C8, #1
      SYS_ALIAS(4, 7, 8, 1);
    } else if (!Op.compare_lower("s1e3w")) {
      // SYS #6, C7, C8, #1
      SYS_ALIAS(6, 7, 8, 1);
    } else if (!Op.compare_lower("s1e0r")) {
      // SYS #0, C7, C8, #2
      SYS_ALIAS(0, 7, 8, 2);
    } else if (!Op.compare_lower("s1e0w")) {
      // SYS #0, C7, C8, #3
      SYS_ALIAS(0, 7, 8, 3);
    } else if (!Op.compare_lower("s12e1r")) {
      // SYS #4, C7, C8, #4
      SYS_ALIAS(4, 7, 8, 4);
    } else if (!Op.compare_lower("s12e1w")) {
      // SYS #4, C7, C8, #5
      SYS_ALIAS(4, 7, 8, 5);
    } else if (!Op.compare_lower("s12e0r")) {
      // SYS #4, C7, C8, #6
      SYS_ALIAS(4, 7, 8, 6);
    } else if (!Op.compare_lower("s12e0w")) {
      // SYS #4, C7, C8, #7
      SYS_ALIAS(4, 7, 8, 7);
    } else {
      return TokError("invalid operand for AT instruction");
    }
  } else if (Mnemonic == "tlbi") {
    if (!Op.compare_lower("vmalle1is")) {
      // SYS #0, C8, C3, #0
      SYS_ALIAS(0, 8, 3, 0);
    } else if (!Op.compare_lower("alle2is")) {
      // SYS #4, C8, C3, #0
      SYS_ALIAS(4, 8, 3, 0);
    } else if (!Op.compare_lower("alle3is")) {
      // SYS #6, C8, C3, #0
      SYS_ALIAS(6, 8, 3, 0);
    } else if (!Op.compare_lower("vae1is")) {
      // SYS #0, C8, C3, #1
      SYS_ALIAS(0, 8, 3, 1);
    } else if (!Op.compare_lower("vae2is")) {
      // SYS #4, C8, C3, #1
      SYS_ALIAS(4, 8, 3, 1);
    } else if (!Op.compare_lower("vae3is")) {
      // SYS #6, C8, C3, #1
      SYS_ALIAS(6, 8, 3, 1);
    } else if (!Op.compare_lower("aside1is")) {
      // SYS #0, C8, C3, #2
      SYS_ALIAS(0, 8, 3, 2);
    } else if (!Op.compare_lower("vaae1is")) {
      // SYS #0, C8, C3, #3
      SYS_ALIAS(0, 8, 3, 3);
    } else if (!Op.compare_lower("alle1is")) {
      // SYS #4, C8, C3, #4
      SYS_ALIAS(4, 8, 3, 4);
    } else if (!Op.compare_lower("vale1is")) {
      // SYS #0, C8, C3, #5
      SYS_ALIAS(0, 8, 3, 5);
    } else if (!Op.compare_lower("vaale1is")) {
      // SYS #0, C8, C3, #7
      SYS_ALIAS(0, 8, 3, 7);
    } else if (!Op.compare_lower("vmalle1")) {
      // SYS #0, C8, C7, #0
      SYS_ALIAS(0, 8, 7, 0);
    } else if (!Op.compare_lower("alle2")) {
      // SYS #4, C8, C7, #0
      SYS_ALIAS(4, 8, 7, 0);
    } else if (!Op.compare_lower("vale2is")) {
      // SYS #4, C8, C3, #5
      SYS_ALIAS(4, 8, 3, 5);
    } else if (!Op.compare_lower("vale3is")) {
      // SYS #6, C8, C3, #5
      SYS_ALIAS(6, 8, 3, 5);
    } else if (!Op.compare_lower("alle3")) {
      // SYS #6, C8, C7, #0
      SYS_ALIAS(6, 8, 7, 0);
    } else if (!Op.compare_lower("vae1")) {
      // SYS #0, C8, C7, #1
      SYS_ALIAS(0, 8, 7, 1);
    } else if (!Op.compare_lower("vae2")) {
      // SYS #4, C8, C7, #1
      SYS_ALIAS(4, 8, 7, 1);
    } else if (!Op.compare_lower("vae3")) {
      // SYS #6, C8, C7, #1
      SYS_ALIAS(6, 8, 7, 1);
    } else if (!Op.compare_lower("aside1")) {
      // SYS #0, C8, C7, #2
      SYS_ALIAS(0, 8, 7, 2);
    } else if (!Op.compare_lower("vaae1")) {
      // SYS #0, C8, C7, #3
      SYS_ALIAS(0, 8, 7, 3);
    } else if (!Op.compare_lower("alle1")) {
      // SYS #4, C8, C7, #4
      SYS_ALIAS(4, 8, 7, 4);
    } else if (!Op.compare_lower("vale1")) {
      // SYS #0, C8, C7, #5
      SYS_ALIAS(0, 8, 7, 5);
    } else if (!Op.compare_lower("vale2")) {
      // SYS #4, C8, C7, #5
      SYS_ALIAS(4, 8, 7, 5);
    } else if (!Op.compare_lower("vale3")) {
      // SYS #6, C8, C7, #5
      SYS_ALIAS(6, 8, 7, 5);
    } else if (!Op.compare_lower("vaale1")) {
      // SYS #0, C8, C7, #7
      SYS_ALIAS(0, 8, 7, 7);
    } else if (!Op.compare_lower("ipas2e1")) {
      // SYS #4, C8, C4, #1
      SYS_ALIAS(4, 8, 4, 1);
    } else if (!Op.compare_lower("ipas2le1")) {
      // SYS #4, C8, C4, #5
      SYS_ALIAS(4, 8, 4, 5);
    } else if (!Op.compare_lower("ipas2e1is")) {
      // SYS #4, C8, C0, #1
      SYS_ALIAS(4, 8, 0, 1);
    } else if (!Op.compare_lower("ipas2le1is")) {
      // SYS #4, C8, C0, #5
      SYS_ALIAS(4, 8, 0, 5);
    } else if (!Op.compare_lower("vmalls12e1")) {
      // SYS #4, C8, C7, #6
      SYS_ALIAS(4, 8, 7, 6);
    } else if (!Op.compare_lower("vmalls12e1is")) {
      // SYS #4, C8, C3, #6
      SYS_ALIAS(4, 8, 3, 6);
    } else {
      return TokError("invalid operand for TLBI instruction");
    }
  }

#undef SYS_ALIAS

  Parser.Lex(); // Eat operand.

  // Ops whose name contains "all" act on every entry and take no register;
  // everything else expects one.
  bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
  bool HasRegister = false;

  // Check for the optional register operand.
  if (getLexer().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat comma.

    if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
      return TokError("expected register operand");

    HasRegister = true;
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    Parser.eatToEndOfStatement();
    return TokError("unexpected token in argument list");
  }

  if (ExpectRegister && !HasRegister) {
    return TokError("specified " + Mnemonic + " op requires a register");
  }
  else if (!ExpectRegister && HasRegister) {
    return TokError("specified " + Mnemonic + " op does not use a register");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
2512
2513AArch64AsmParser::OperandMatchResultTy
2514AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2515  const AsmToken &Tok = Parser.getTok();
2516
2517  // Can be either a #imm style literal or an option name
2518  bool Hash = Tok.is(AsmToken::Hash);
2519  if (Hash || Tok.is(AsmToken::Integer)) {
2520    // Immediate operand.
2521    if (Hash)
2522      Parser.Lex(); // Eat the '#'
2523    const MCExpr *ImmVal;
2524    SMLoc ExprLoc = getLoc();
2525    if (getParser().parseExpression(ImmVal))
2526      return MatchOperand_ParseFail;
2527    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2528    if (!MCE) {
2529      Error(ExprLoc, "immediate value expected for barrier operand");
2530      return MatchOperand_ParseFail;
2531    }
2532    if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2533      Error(ExprLoc, "barrier operand out of range");
2534      return MatchOperand_ParseFail;
2535    }
2536    Operands.push_back(
2537        AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2538    return MatchOperand_Success;
2539  }
2540
2541  if (Tok.isNot(AsmToken::Identifier)) {
2542    TokError("invalid operand for instruction");
2543    return MatchOperand_ParseFail;
2544  }
2545
2546  bool Valid;
2547  unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2548  if (!Valid) {
2549    TokError("invalid barrier option name");
2550    return MatchOperand_ParseFail;
2551  }
2552
2553  // The only valid named option for ISB is 'sy'
2554  if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2555    TokError("'sy' or #imm operand expected");
2556    return MatchOperand_ParseFail;
2557  }
2558
2559  Operands.push_back(
2560      AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2561  Parser.Lex(); // Consume the option
2562
2563  return MatchOperand_Success;
2564}
2565
2566AArch64AsmParser::OperandMatchResultTy
2567AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2568  const AsmToken &Tok = Parser.getTok();
2569
2570  if (Tok.isNot(AsmToken::Identifier))
2571    return MatchOperand_NoMatch;
2572
2573  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2574                     STI.getFeatureBits(), getContext()));
2575  Parser.Lex(); // Eat identifier
2576
2577  return MatchOperand_Success;
2578}
2579
/// tryParseVectorRegister - Parse a vector register operand, optionally
/// followed by a kind qualifier (".8b" etc.) and/or a "[lane]" index.
/// Returns true when no vector register was recognized (the caller may try
/// another operand form). Once tokens have been consumed it returns false
/// even if a diagnostic was emitted via TokError/Error — the error state is
/// already recorded on the parser.
bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return true;

  SMLoc S = getLoc();
  // Check for a vector register specifier first.
  StringRef Kind;
  int64_t Reg = tryMatchVectorRegister(Kind, false);
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
  // If there was an explicit qualifier, that goes on as a literal text
  // operand.
  if (!Kind.empty())
    Operands.push_back(
        AArch64Operand::CreateToken(Kind, false, S, getContext()));

  // If there is an index specifier following the register, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      // Diagnose, but return false: the register operand was consumed.
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }

  return false;
}
2627
/// parseRegister - Parse a non-vector register operand.
/// Tries a vector register first, then a scalar register; also recognises
/// the literal "[1]" suffix a few instructions (e.g. FMOVXDhighr) carry in
/// their syntax, emitting it as three separate token operands.
/// \return true if no register operand could be parsed.
bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
  SMLoc S = getLoc();
  // Try for a vector register.
  if (!tryParseVectorRegister(Operands))
    return false;

  // Try for a scalar register.
  int64_t Reg = tryParseRegister();
  if (Reg == -1)
    return true;
  Operands.push_back(
      AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));

  // A small number of instructions (FMOVXDhighr, for example) have "[1]"
  // as a string token in the instruction itself.
  if (getLexer().getKind() == AsmToken::LBrac) {
    SMLoc LBracS = getLoc();
    Parser.Lex();
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Integer)) {
      SMLoc IntS = getLoc();
      int64_t Val = Tok.getIntVal();
      if (Val == 1) {
        Parser.Lex();
        if (getLexer().getKind() == AsmToken::RBrac) {
          SMLoc RBracS = getLoc();
          Parser.Lex();
          // Emit "[", "1", "]" as literal tokens for the matcher.
          Operands.push_back(
              AArch64Operand::CreateToken("[", false, LBracS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("1", false, IntS, getContext()));
          Operands.push_back(
              AArch64Operand::CreateToken("]", false, RBracS, getContext()));
          return false;
        }
      }
    }
    // NOTE(review): when the bracketed text is not exactly "[1]", the '['
    // (and possibly the integer) has already been consumed but no token
    // operands are emitted — confirm this fall-through is intended.
  }

  return false;
}
2670
/// parseSymbolicImmVal - Parse an immediate expression, optionally prefixed
/// by an ELF relocation specifier of the form ":spec:", e.g. ":lo12:sym".
///
/// \param ImmVal [out] Receives the parsed expression; if a specifier was
///        present it is wrapped in an AArch64MCExpr carrying the kind.
/// \return true on error (a diagnostic has been emitted), false on success.
bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
  bool HasELFModifier = false;
  // Only meaningful when HasELFModifier is set; never read otherwise.
  AArch64MCExpr::VariantKind RefKind;

  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat ':'
    HasELFModifier = true;

    if (Parser.getTok().isNot(AsmToken::Identifier)) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    // Specifier names are matched case-insensitively.
    std::string LowerCase = Parser.getTok().getIdentifier().lower();
    RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
                  .Case("lo12", AArch64MCExpr::VK_LO12)
                  .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
                  .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
                  .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
                  .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
                  .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
                  .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
                  .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
                  .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
                  .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
                  .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
                  .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
                  .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
                  .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
                  .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
                  .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
                  .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
                  .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
                  .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
                  .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
                  .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
                  .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
                  .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
                  .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
                  .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
                  .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
                  .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
                  .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
                  .Case("got", AArch64MCExpr::VK_GOT_PAGE)
                  .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
                  .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
                  .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
                  .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
                  .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
                  .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
                  .Default(AArch64MCExpr::VK_INVALID);

    if (RefKind == AArch64MCExpr::VK_INVALID) {
      Error(Parser.getTok().getLoc(),
            "expect relocation specifier in operand after ':'");
      return true;
    }

    Parser.Lex(); // Eat identifier

    // The specifier must be closed by a second ':' before the expression.
    if (Parser.getTok().isNot(AsmToken::Colon)) {
      Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
      return true;
    }
    Parser.Lex(); // Eat ':'
  }

  if (getParser().parseExpression(ImmVal))
    return true;

  // Wrap the expression so the relocation kind survives to encoding.
  if (HasELFModifier)
    ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());

  return false;
}
2747
/// parseVectorList - Parse a vector list operand for AdvSIMD instructions,
/// e.g. "{ v0.8b, v1.8b }" or a range "{ v0.8b - v3.8b }", optionally
/// followed by a lane index "[n]". All registers in the list must share the
/// same layout suffix and be sequential (wrapping at v31).
/// \return true on error (a diagnostic has been emitted), false on success.
/// NOTE(review): the lane-index error paths near the end emit a diagnostic
/// but return false (success) — confirm callers depend on this behaviour.
bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
  SMLoc S = getLoc();
  Parser.Lex(); // Eat left bracket token.
  StringRef Kind;
  int64_t FirstReg = tryMatchVectorRegister(Kind, true);
  if (FirstReg == -1)
    return true;
  int64_t PrevReg = FirstReg;
  unsigned Count = 1;

  // Range form: "{ vA - vB }".
  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    SMLoc Loc = getLoc();
    StringRef NextKind;
    int64_t Reg = tryMatchVectorRegister(NextKind, true);
    if (Reg == -1)
      return true;
    // Any Kind suffices must match on all regs in the list.
    if (Kind != NextKind)
      return Error(Loc, "mismatched register size suffix");

    // Distance from the first to the last register, wrapping past v31.
    unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);

    if (Space == 0 || Space > 3) {
      return Error(Loc, "invalid number of vectors");
    }

    Count += Space;
  }
  else {
    // List form: "{ vA, vB, ... }".
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma token.

      SMLoc Loc = getLoc();
      StringRef NextKind;
      int64_t Reg = tryMatchVectorRegister(NextKind, true);
      if (Reg == -1)
        return true;
      // Any Kind suffices must match on all regs in the list.
      if (Kind != NextKind)
        return Error(Loc, "mismatched register size suffix");

      // Registers must be incremental (with wraparound at 31)
      if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
          (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
       return Error(Loc, "registers must be sequential");

      PrevReg = Reg;
      ++Count;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(getLoc(), "'}' expected");
  Parser.Lex(); // Eat the '}' token.

  // AdvSIMD lists hold at most four registers.
  if (Count > 4)
    return Error(S, "invalid number of vectors");

  unsigned NumElements = 0;
  char ElementKind = 0;
  if (!Kind.empty())
    parseValidVectorKind(Kind, NumElements, ElementKind);

  Operands.push_back(AArch64Operand::CreateVectorList(
      FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));

  // If there is an index specifier following the list, parse that too.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return false;
    // The lane index must fold to a compile-time constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE) {
      TokError("immediate value expected for vector index");
      return false;
    }

    SMLoc E = getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(E, "']' expected");
      return false;
    }

    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
                                                         E, getContext()));
  }
  return false;
}
2845
2846AArch64AsmParser::OperandMatchResultTy
2847AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2848  const AsmToken &Tok = Parser.getTok();
2849  if (!Tok.is(AsmToken::Identifier))
2850    return MatchOperand_NoMatch;
2851
2852  unsigned RegNum = MatchRegisterName(Tok.getString().lower());
2853
2854  MCContext &Ctx = getContext();
2855  const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2856  if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2857    return MatchOperand_NoMatch;
2858
2859  SMLoc S = getLoc();
2860  Parser.Lex(); // Eat register
2861
2862  if (Parser.getTok().isNot(AsmToken::Comma)) {
2863    Operands.push_back(
2864        AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2865    return MatchOperand_Success;
2866  }
2867  Parser.Lex(); // Eat comma.
2868
2869  if (Parser.getTok().is(AsmToken::Hash))
2870    Parser.Lex(); // Eat hash
2871
2872  if (Parser.getTok().isNot(AsmToken::Integer)) {
2873    Error(getLoc(), "index must be absent or #0");
2874    return MatchOperand_ParseFail;
2875  }
2876
2877  const MCExpr *ImmVal;
2878  if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2879      cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2880    Error(getLoc(), "index must be absent or #0");
2881    return MatchOperand_ParseFail;
2882  }
2883
2884  Operands.push_back(
2885      AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2886  return MatchOperand_Success;
2887}
2888
/// parseOperand - Parse an AArch64 instruction operand. For now this parses
/// the operand regardless of the mnemonic.
///
/// \param isCondCode true when this operand position expects a condition
///        code (decided by the mnemonic in ParseInstruction).
/// \param invertCondCode forwarded to parseCondCode; set for aliases such as
///        cset/cinc whose underlying instruction takes the inverted code.
/// \return true on error, false on success.
bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
                                  bool invertCondCode) {
  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Nothing custom, so do general case parsing.
  SMLoc S, E;
  switch (getLexer().getKind()) {
  default: {
    // Anything else is treated as a (possibly relocation-decorated)
    // expression and becomes an immediate operand.
    SMLoc S = getLoc();
    const MCExpr *Expr;
    if (parseSymbolicImmVal(Expr))
      return Error(S, "invalid operand");

    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
    return false;
  }
  case AsmToken::LBrac: {
    // Start of a memory operand: emit the '[' as a literal token.
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
                                                   getContext()));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return parseOperand(Operands, false, false);
  }
  case AsmToken::LCurly:
    // '{' starts an AdvSIMD vector list.
    return parseVectorList(Operands);
  case AsmToken::Identifier: {
    // If we're expecting a Condition Code operand, then just parse that.
    if (isCondCode)
      return parseCondCode(Operands, invertCondCode);

    // If it's a register name, parse it.
    if (!parseRegister(Operands))
      return false;

    // This could be an optional "shift" or "extend" operand.
    OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = getLoc();
    if (getParser().parseExpression(IdVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
    return false;
  }
  case AsmToken::Integer:
  case AsmToken::Real:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = getLoc();
    if (getLexer().is(AsmToken::Hash))
      Parser.Lex();

    // Parse a negative sign
    bool isNegative = false;
    if (Parser.getTok().is(AsmToken::Minus)) {
      isNegative = true;
      // We need to consume this token only when we have a Real, otherwise
      // we let parseSymbolicImmVal take care of it
      if (Parser.getLexer().peekTok().is(AsmToken::Real))
        Parser.Lex();
    }

    // The only Real that should come through here is a literal #0.0 for
    // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
    // so convert the value.
    const AsmToken &Tok = Parser.getTok();
    if (Tok.is(AsmToken::Real)) {
      APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
      uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
      // Only the floating-point compare mnemonics may take "#0.0".
      if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
          Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
          Mnemonic != "fcmlt")
        return TokError("unexpected floating point literal");
      else if (IntVal != 0 || isNegative)
        return TokError("expected floating-point constant #0.0");
      Parser.Lex(); // Eat the token.

      // Emit "#0" and ".0" as the raw tokens the matcher expects.
      Operands.push_back(
          AArch64Operand::CreateToken("#0", false, S, getContext()));
      Operands.push_back(
          AArch64Operand::CreateToken(".0", false, S, getContext()));
      return false;
    }

    const MCExpr *ImmVal;
    if (parseSymbolicImmVal(ImmVal))
      return true;

    E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
    return false;
  }
  }
}
3005
/// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands.
///
/// Rewrites legacy "b<cond>" spellings to "b.<cond>", splits the mnemonic at
/// '.' into separate token operands, routes IC/DC/AT/TLBI to the SYS alias
/// parser, and decides (by mnemonic) which operand position, if any, is a
/// condition code. Also absorbs the ']' and '!' pseudo-operands emitted by
/// memory addressing forms.
/// \return true on error (tokens up to end of statement are consumed).
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                                        OperandVector &Operands) {
  // Canonicalise the legacy "bcc"-style spellings to "b.cc".
  Name = StringSwitch<StringRef>(Name.lower())
             .Case("beq", "b.eq")
             .Case("bne", "b.ne")
             .Case("bhs", "b.hs")
             .Case("bcs", "b.cs")
             .Case("blo", "b.lo")
             .Case("bcc", "b.cc")
             .Case("bmi", "b.mi")
             .Case("bpl", "b.pl")
             .Case("bvs", "b.vs")
             .Case("bvc", "b.vc")
             .Case("bhi", "b.hi")
             .Case("bls", "b.ls")
             .Case("bge", "b.ge")
             .Case("blt", "b.lt")
             .Case("bgt", "b.gt")
             .Case("ble", "b.le")
             .Case("bal", "b.al")
             .Case("bnv", "b.nv")
             .Default(Name);

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);

  // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
  if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
    bool IsError = parseSysAlias(Head, NameLoc, Operands);
    if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
      Parser.eatToEndOfStatement();
    return IsError;
  }

  Operands.push_back(
      AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
  Mnemonic = Head;

  // Handle condition codes for a branch mnemonic
  if (Head == "b" && Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start + 1, Next);

    // Point the diagnostic at the suffix within the original mnemonic text.
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()));
    AArch64CC::CondCode CC = parseCondCodeString(Head);
    if (CC == AArch64CC::Invalid)
      return Error(SuffixLoc, "invalid condition code");
    Operands.push_back(
        AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
    Operands.push_back(
        AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    Head = Name.slice(Start, Next);
    SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                            (Head.data() - Name.data()) + 1);
    Operands.push_back(
        AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
  }

  // Conditional compare instructions have a Condition Code operand, which needs
  // to be parsed and an immediate operand created.
  bool condCodeFourthOperand =
      (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
       Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
       Head == "csinc" || Head == "csinv" || Head == "csneg");

  // These instructions are aliases to some of the conditional select
  // instructions. However, the condition code is inverted in the aliased
  // instruction.
  //
  // FIXME: Is this the correct way to handle these? Or should the parser
  //        generate the aliased instructions directly?
  bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
  bool condCodeThirdOperand =
      (Head == "cinc" || Head == "cinv" || Head == "cneg");

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, false, false)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    unsigned N = 2;
    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
                                     (N == 3 && condCodeThirdOperand) ||
                                     (N == 2 && condCodeSecondOperand),
                       condCodeSecondOperand || condCodeThirdOperand)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
                                                       getContext()));
        Parser.Lex();
      }

      ++N;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = Parser.getTok().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement
  return false;
}
3149
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
//
// Checks a matched MCInst for operand combinations the architecture leaves
// unpredictable (writeback base overlapping a transfer register, LDP with
// Rt == Rt2) and for symbolic add/sub immediates that only certain opcodes
// may carry. \p Loc holds the source locations of the instruction's operands
// for diagnostics. Returns true (after emitting an error) when invalid.
bool AArch64AsmParser::validateInstruction(MCInst &Inst,
                                         SmallVectorImpl<SMLoc> &Loc) {
  const MCRegisterInfo *RI = getContext().getRegisterInfo();
  // Check for indexed addressing modes w/ the base register being the
  // same as a destination/source register or pair load where
  // the Rt == Rt2. All of those are undefined behaviour.
  switch (Inst.getOpcode()) {
  case AArch64::LDPSWpre:
  case AArch64::LDPWpost:
  case AArch64::LDPWpre:
  case AArch64::LDPXpost:
  case AArch64::LDPXpre: {
    // Writeback forms: operand 0 is the updated base, so Rt/Rt2/Rn start
    // at operand 1.
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable LDP instruction, writeback base "
                           "is also a destination");
    // FALLTHROUGH
    // NOTE(review): the case below reads operands 0/1 as Rt/Rt2, but for
    // these writeback forms those are (writeback base, Rt) — confirm the
    // Rt == Rt2 check is intended to apply to those indices here.
  }
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPWi:
  case AArch64::LDPXi: {
    unsigned Rt = Inst.getOperand(0).getReg();
    unsigned Rt2 = Inst.getOperand(1).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::LDPDpost:
  case AArch64::LDPDpre:
  case AArch64::LDPQpost:
  case AArch64::LDPQpre:
  case AArch64::LDPSpost:
  case AArch64::LDPSpre:
  case AArch64::LDPSWpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    if (Rt == Rt2)
      return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
    break;
  }
  case AArch64::STPDpost:
  case AArch64::STPDpre:
  case AArch64::STPQpost:
  case AArch64::STPQpre:
  case AArch64::STPSpost:
  case AArch64::STPSpre:
  case AArch64::STPWpost:
  case AArch64::STPWpre:
  case AArch64::STPXpost:
  case AArch64::STPXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rt2 = Inst.getOperand(2).getReg();
    unsigned Rn = Inst.getOperand(3).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STP instruction, writeback base "
                           "is also a source");
    if (RI->isSubRegisterEq(Rn, Rt2))
      return Error(Loc[1], "unpredictable STP instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::LDRBBpre:
  case AArch64::LDRBpre:
  case AArch64::LDRHHpre:
  case AArch64::LDRHpre:
  case AArch64::LDRSBWpre:
  case AArch64::LDRSBXpre:
  case AArch64::LDRSHWpre:
  case AArch64::LDRSHXpre:
  case AArch64::LDRSWpre:
  case AArch64::LDRWpre:
  case AArch64::LDRXpre:
  case AArch64::LDRBBpost:
  case AArch64::LDRBpost:
  case AArch64::LDRHHpost:
  case AArch64::LDRHpost:
  case AArch64::LDRSBWpost:
  case AArch64::LDRSBXpost:
  case AArch64::LDRSHWpost:
  case AArch64::LDRSHXpost:
  case AArch64::LDRSWpost:
  case AArch64::LDRWpost:
  case AArch64::LDRXpost: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable LDR instruction, writeback base "
                           "is also a source");
    break;
  }
  case AArch64::STRBBpost:
  case AArch64::STRBpost:
  case AArch64::STRHHpost:
  case AArch64::STRHpost:
  case AArch64::STRWpost:
  case AArch64::STRXpost:
  case AArch64::STRBBpre:
  case AArch64::STRBpre:
  case AArch64::STRHHpre:
  case AArch64::STRHpre:
  case AArch64::STRWpre:
  case AArch64::STRXpre: {
    unsigned Rt = Inst.getOperand(1).getReg();
    unsigned Rn = Inst.getOperand(2).getReg();
    if (RI->isSubRegisterEq(Rn, Rt))
      return Error(Loc[0], "unpredictable STR instruction, writeback base "
                           "is also a source");
    break;
  }
  }

  // Now check immediate ranges. Separate from the above as there is overlap
  // in the instructions being checked and this keeps the nested conditionals
  // to a minimum.
  switch (Inst.getOpcode()) {
  case AArch64::ADDSWri:
  case AArch64::ADDSXri:
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri: {
    // Annoyingly we can't do this in the isAddSubImm predicate, so there is
    // some slight duplication here.
    if (Inst.getOperand(2).isExpr()) {
      const MCExpr *Expr = Inst.getOperand(2).getExpr();
      AArch64MCExpr::VariantKind ELFRefKind;
      MCSymbolRefExpr::VariantKind DarwinRefKind;
      int64_t Addend;
      if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
        return Error(Loc[2], "invalid immediate expression");
      }

      // Only allow these with ADDXri.
      if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
          DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
          Inst.getOpcode() == AArch64::ADDXri)
        return false;

      // Only allow these with ADDXri/ADDWri
      if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
          ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
          ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
          ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
          ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
          ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
          ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
          ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
          (Inst.getOpcode() == AArch64::ADDXri ||
          Inst.getOpcode() == AArch64::ADDWri))
        return false;

      // Don't allow expressions in the immediate field otherwise
      return Error(Loc[2], "invalid immediate expression");
    }
    return false;
  }
  default:
    return false;
  }
}
3323
3324bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3325  switch (ErrCode) {
3326  case Match_MissingFeature:
3327    return Error(Loc,
3328                 "instruction requires a CPU feature not currently enabled");
3329  case Match_InvalidOperand:
3330    return Error(Loc, "invalid operand for instruction");
3331  case Match_InvalidSuffix:
3332    return Error(Loc, "invalid type suffix for instruction");
3333  case Match_InvalidCondCode:
3334    return Error(Loc, "expected AArch64 condition code");
3335  case Match_AddSubRegExtendSmall:
3336    return Error(Loc,
3337      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3338  case Match_AddSubRegExtendLarge:
3339    return Error(Loc,
3340      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3341  case Match_AddSubSecondSource:
3342    return Error(Loc,
3343      "expected compatible register, symbol or integer in range [0, 4095]");
3344  case Match_LogicalSecondSource:
3345    return Error(Loc, "expected compatible register or logical immediate");
3346  case Match_InvalidMovImm32Shift:
3347    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3348  case Match_InvalidMovImm64Shift:
3349    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3350  case Match_AddSubRegShift32:
3351    return Error(Loc,
3352       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3353  case Match_AddSubRegShift64:
3354    return Error(Loc,
3355       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3356  case Match_InvalidFPImm:
3357    return Error(Loc,
3358                 "expected compatible register or floating-point constant");
3359  case Match_InvalidMemoryIndexedSImm9:
3360    return Error(Loc, "index must be an integer in range [-256, 255].");
3361  case Match_InvalidMemoryIndexed4SImm7:
3362    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3363  case Match_InvalidMemoryIndexed8SImm7:
3364    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3365  case Match_InvalidMemoryIndexed16SImm7:
3366    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3367  case Match_InvalidMemoryWExtend8:
3368    return Error(Loc,
3369                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3370  case Match_InvalidMemoryWExtend16:
3371    return Error(Loc,
3372                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3373  case Match_InvalidMemoryWExtend32:
3374    return Error(Loc,
3375                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3376  case Match_InvalidMemoryWExtend64:
3377    return Error(Loc,
3378                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3379  case Match_InvalidMemoryWExtend128:
3380    return Error(Loc,
3381                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3382  case Match_InvalidMemoryXExtend8:
3383    return Error(Loc,
3384                 "expected 'lsl' or 'sxtx' with optional shift of #0");
3385  case Match_InvalidMemoryXExtend16:
3386    return Error(Loc,
3387                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3388  case Match_InvalidMemoryXExtend32:
3389    return Error(Loc,
3390                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3391  case Match_InvalidMemoryXExtend64:
3392    return Error(Loc,
3393                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3394  case Match_InvalidMemoryXExtend128:
3395    return Error(Loc,
3396                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3397  case Match_InvalidMemoryIndexed1:
3398    return Error(Loc, "index must be an integer in range [0, 4095].");
3399  case Match_InvalidMemoryIndexed2:
3400    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3401  case Match_InvalidMemoryIndexed4:
3402    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3403  case Match_InvalidMemoryIndexed8:
3404    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3405  case Match_InvalidMemoryIndexed16:
3406    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3407  case Match_InvalidImm0_7:
3408    return Error(Loc, "immediate must be an integer in range [0, 7].");
3409  case Match_InvalidImm0_15:
3410    return Error(Loc, "immediate must be an integer in range [0, 15].");
3411  case Match_InvalidImm0_31:
3412    return Error(Loc, "immediate must be an integer in range [0, 31].");
3413  case Match_InvalidImm0_63:
3414    return Error(Loc, "immediate must be an integer in range [0, 63].");
3415  case Match_InvalidImm0_127:
3416    return Error(Loc, "immediate must be an integer in range [0, 127].");
3417  case Match_InvalidImm0_65535:
3418    return Error(Loc, "immediate must be an integer in range [0, 65535].");
3419  case Match_InvalidImm1_8:
3420    return Error(Loc, "immediate must be an integer in range [1, 8].");
3421  case Match_InvalidImm1_16:
3422    return Error(Loc, "immediate must be an integer in range [1, 16].");
3423  case Match_InvalidImm1_32:
3424    return Error(Loc, "immediate must be an integer in range [1, 32].");
3425  case Match_InvalidImm1_64:
3426    return Error(Loc, "immediate must be an integer in range [1, 64].");
3427  case Match_InvalidIndex1:
3428    return Error(Loc, "expected lane specifier '[1]'");
3429  case Match_InvalidIndexB:
3430    return Error(Loc, "vector lane must be an integer in range [0, 15].");
3431  case Match_InvalidIndexH:
3432    return Error(Loc, "vector lane must be an integer in range [0, 7].");
3433  case Match_InvalidIndexS:
3434    return Error(Loc, "vector lane must be an integer in range [0, 3].");
3435  case Match_InvalidIndexD:
3436    return Error(Loc, "vector lane must be an integer in range [0, 1].");
3437  case Match_InvalidLabel:
3438    return Error(Loc, "expected label or encodable integer pc offset");
3439  case Match_MRS:
3440    return Error(Loc, "expected readable system register");
3441  case Match_MSR:
3442    return Error(Loc, "expected writable system register or pstate");
3443  case Match_MnemonicFail:
3444    return Error(Loc, "unrecognized instruction mnemonic");
3445  default:
3446    assert(0 && "unexpected error code!");
3447    return Error(Loc, "invalid instruction format");
3448  }
3449}
3450
3451static const char *getSubtargetFeatureName(unsigned Val);
3452
3453bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3454                                               OperandVector &Operands,
3455                                               MCStreamer &Out,
3456                                               unsigned &ErrorInfo,
3457                                               bool MatchingInlineAsm) {
3458  assert(!Operands.empty() && "Unexpect empty operand list!");
3459  AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[0]);
3460  assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3461
3462  StringRef Tok = Op->getToken();
3463  unsigned NumOperands = Operands.size();
3464
3465  if (NumOperands == 4 && Tok == "lsl") {
3466    AArch64Operand *Op2 = static_cast<AArch64Operand *>(Operands[2]);
3467    AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
3468    if (Op2->isReg() && Op3->isImm()) {
3469      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3470      if (Op3CE) {
3471        uint64_t Op3Val = Op3CE->getValue();
3472        uint64_t NewOp3Val = 0;
3473        uint64_t NewOp4Val = 0;
3474        if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3475                Op2->getReg())) {
3476          NewOp3Val = (32 - Op3Val) & 0x1f;
3477          NewOp4Val = 31 - Op3Val;
3478        } else {
3479          NewOp3Val = (64 - Op3Val) & 0x3f;
3480          NewOp4Val = 63 - Op3Val;
3481        }
3482
3483        const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3484        const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3485
3486        Operands[0] = AArch64Operand::CreateToken(
3487            "ubfm", false, Op->getStartLoc(), getContext());
3488        Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
3489                                                Op3->getEndLoc(), getContext());
3490        Operands.push_back(AArch64Operand::CreateImm(
3491            NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
3492        delete Op3;
3493        delete Op;
3494      }
3495    }
3496  } else if (NumOperands == 5) {
3497    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3498    // UBFIZ -> UBFM aliases.
3499    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3500      AArch64Operand *Op1 = static_cast<AArch64Operand *>(Operands[1]);
3501      AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
3502      AArch64Operand *Op4 = static_cast<AArch64Operand *>(Operands[4]);
3503
3504      if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
3505        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3506        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
3507
3508        if (Op3CE && Op4CE) {
3509          uint64_t Op3Val = Op3CE->getValue();
3510          uint64_t Op4Val = Op4CE->getValue();
3511
3512          uint64_t RegWidth = 0;
3513          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3514              Op1->getReg()))
3515            RegWidth = 64;
3516          else
3517            RegWidth = 32;
3518
3519          if (Op3Val >= RegWidth)
3520            return Error(Op3->getStartLoc(),
3521                         "expected integer in range [0, 31]");
3522          if (Op4Val < 1 || Op4Val > RegWidth)
3523            return Error(Op4->getStartLoc(),
3524                         "expected integer in range [1, 32]");
3525
3526          uint64_t NewOp3Val = 0;
3527          if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3528                  Op1->getReg()))
3529            NewOp3Val = (32 - Op3Val) & 0x1f;
3530          else
3531            NewOp3Val = (64 - Op3Val) & 0x3f;
3532
3533          uint64_t NewOp4Val = Op4Val - 1;
3534
3535          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3536            return Error(Op4->getStartLoc(),
3537                         "requested insert overflows register");
3538
3539          const MCExpr *NewOp3 =
3540              MCConstantExpr::Create(NewOp3Val, getContext());
3541          const MCExpr *NewOp4 =
3542              MCConstantExpr::Create(NewOp4Val, getContext());
3543          Operands[3] = AArch64Operand::CreateImm(
3544              NewOp3, Op3->getStartLoc(), Op3->getEndLoc(), getContext());
3545          Operands[4] = AArch64Operand::CreateImm(
3546              NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
3547          if (Tok == "bfi")
3548            Operands[0] = AArch64Operand::CreateToken(
3549                "bfm", false, Op->getStartLoc(), getContext());
3550          else if (Tok == "sbfiz")
3551            Operands[0] = AArch64Operand::CreateToken(
3552                "sbfm", false, Op->getStartLoc(), getContext());
3553          else if (Tok == "ubfiz")
3554            Operands[0] = AArch64Operand::CreateToken(
3555                "ubfm", false, Op->getStartLoc(), getContext());
3556          else
3557            llvm_unreachable("No valid mnemonic for alias?");
3558
3559          delete Op;
3560          delete Op3;
3561          delete Op4;
3562        }
3563      }
3564
3565      // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3566      // UBFX -> UBFM aliases.
3567    } else if (NumOperands == 5 &&
3568               (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3569      AArch64Operand *Op1 = static_cast<AArch64Operand *>(Operands[1]);
3570      AArch64Operand *Op3 = static_cast<AArch64Operand *>(Operands[3]);
3571      AArch64Operand *Op4 = static_cast<AArch64Operand *>(Operands[4]);
3572
3573      if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
3574        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3575        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
3576
3577        if (Op3CE && Op4CE) {
3578          uint64_t Op3Val = Op3CE->getValue();
3579          uint64_t Op4Val = Op4CE->getValue();
3580
3581          uint64_t RegWidth = 0;
3582          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3583              Op1->getReg()))
3584            RegWidth = 64;
3585          else
3586            RegWidth = 32;
3587
3588          if (Op3Val >= RegWidth)
3589            return Error(Op3->getStartLoc(),
3590                         "expected integer in range [0, 31]");
3591          if (Op4Val < 1 || Op4Val > RegWidth)
3592            return Error(Op4->getStartLoc(),
3593                         "expected integer in range [1, 32]");
3594
3595          uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3596
3597          if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3598            return Error(Op4->getStartLoc(),
3599                         "requested extract overflows register");
3600
3601          const MCExpr *NewOp4 =
3602              MCConstantExpr::Create(NewOp4Val, getContext());
3603          Operands[4] = AArch64Operand::CreateImm(
3604              NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
3605          if (Tok == "bfxil")
3606            Operands[0] = AArch64Operand::CreateToken(
3607                "bfm", false, Op->getStartLoc(), getContext());
3608          else if (Tok == "sbfx")
3609            Operands[0] = AArch64Operand::CreateToken(
3610                "sbfm", false, Op->getStartLoc(), getContext());
3611          else if (Tok == "ubfx")
3612            Operands[0] = AArch64Operand::CreateToken(
3613                "ubfm", false, Op->getStartLoc(), getContext());
3614          else
3615            llvm_unreachable("No valid mnemonic for alias?");
3616
3617          delete Op;
3618          delete Op4;
3619        }
3620      }
3621    }
3622  }
3623  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3624  //        InstAlias can't quite handle this since the reg classes aren't
3625  //        subclasses.
3626  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3627    // The source register can be Wn here, but the matcher expects a
3628    // GPR64. Twiddle it here if necessary.
3629    AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[2]);
3630    if (Op->isReg()) {
3631      unsigned Reg = getXRegFromWReg(Op->getReg());
3632      Operands[2] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
3633                                              Op->getEndLoc(), getContext());
3634      delete Op;
3635    }
3636  }
3637  // FIXME: Likewise for sxt[bh] with a Xd dst operand
3638  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3639    AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
3640    if (Op->isReg() &&
3641        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3642            Op->getReg())) {
3643      // The source register can be Wn here, but the matcher expects a
3644      // GPR64. Twiddle it here if necessary.
3645      AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[2]);
3646      if (Op->isReg()) {
3647        unsigned Reg = getXRegFromWReg(Op->getReg());
3648        Operands[2] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
3649                                                Op->getEndLoc(), getContext());
3650        delete Op;
3651      }
3652    }
3653  }
3654  // FIXME: Likewise for uxt[bh] with a Xd dst operand
3655  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3656    AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
3657    if (Op->isReg() &&
3658        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3659            Op->getReg())) {
3660      // The source register can be Wn here, but the matcher expects a
3661      // GPR32. Twiddle it here if necessary.
3662      AArch64Operand *Op = static_cast<AArch64Operand *>(Operands[1]);
3663      if (Op->isReg()) {
3664        unsigned Reg = getWRegFromXReg(Op->getReg());
3665        Operands[1] = AArch64Operand::CreateReg(Reg, false, Op->getStartLoc(),
3666                                                Op->getEndLoc(), getContext());
3667        delete Op;
3668      }
3669    }
3670  }
3671
3672  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3673  if (NumOperands == 3 && Tok == "fmov") {
3674    AArch64Operand *RegOp = static_cast<AArch64Operand *>(Operands[1]);
3675    AArch64Operand *ImmOp = static_cast<AArch64Operand *>(Operands[2]);
3676    if (RegOp->isReg() && ImmOp->isFPImm() &&
3677        ImmOp->getFPImm() == (unsigned)-1) {
3678      unsigned zreg =
3679          AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3680              RegOp->getReg())
3681              ? AArch64::WZR
3682              : AArch64::XZR;
3683      Operands[2] = AArch64Operand::CreateReg(zreg, false, Op->getStartLoc(),
3684                                              Op->getEndLoc(), getContext());
3685      delete ImmOp;
3686    }
3687  }
3688
3689  MCInst Inst;
3690  // First try to match against the secondary set of tables containing the
3691  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3692  unsigned MatchResult =
3693      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3694
3695  // If that fails, try against the alternate table containing long-form NEON:
3696  // "fadd v0.2s, v1.2s, v2.2s"
3697  if (MatchResult != Match_Success)
3698    MatchResult =
3699        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3700
3701  switch (MatchResult) {
3702  case Match_Success: {
3703    // Perform range checking and other semantic validations
3704    SmallVector<SMLoc, 8> OperandLocs;
3705    NumOperands = Operands.size();
3706    for (unsigned i = 1; i < NumOperands; ++i)
3707      OperandLocs.push_back(Operands[i]->getStartLoc());
3708    if (validateInstruction(Inst, OperandLocs))
3709      return true;
3710
3711    Inst.setLoc(IDLoc);
3712    Out.EmitInstruction(Inst, STI);
3713    return false;
3714  }
3715  case Match_MissingFeature: {
3716    assert(ErrorInfo && "Unknown missing feature!");
3717    // Special case the error message for the very common case where only
3718    // a single subtarget feature is missing (neon, e.g.).
3719    std::string Msg = "instruction requires:";
3720    unsigned Mask = 1;
3721    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3722      if (ErrorInfo & Mask) {
3723        Msg += " ";
3724        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3725      }
3726      Mask <<= 1;
3727    }
3728    return Error(IDLoc, Msg);
3729  }
3730  case Match_MnemonicFail:
3731    return showMatchError(IDLoc, MatchResult);
3732  case Match_InvalidOperand: {
3733    SMLoc ErrorLoc = IDLoc;
3734    if (ErrorInfo != ~0U) {
3735      if (ErrorInfo >= Operands.size())
3736        return Error(IDLoc, "too few operands for instruction");
3737
3738      ErrorLoc = ((AArch64Operand *)Operands[ErrorInfo])->getStartLoc();
3739      if (ErrorLoc == SMLoc())
3740        ErrorLoc = IDLoc;
3741    }
3742    // If the match failed on a suffix token operand, tweak the diagnostic
3743    // accordingly.
3744    if (((AArch64Operand *)Operands[ErrorInfo])->isToken() &&
3745        ((AArch64Operand *)Operands[ErrorInfo])->isTokenSuffix())
3746      MatchResult = Match_InvalidSuffix;
3747
3748    return showMatchError(ErrorLoc, MatchResult);
3749  }
3750  case Match_InvalidMemoryIndexed1:
3751  case Match_InvalidMemoryIndexed2:
3752  case Match_InvalidMemoryIndexed4:
3753  case Match_InvalidMemoryIndexed8:
3754  case Match_InvalidMemoryIndexed16:
3755  case Match_InvalidCondCode:
3756  case Match_AddSubRegExtendSmall:
3757  case Match_AddSubRegExtendLarge:
3758  case Match_AddSubSecondSource:
3759  case Match_LogicalSecondSource:
3760  case Match_AddSubRegShift32:
3761  case Match_AddSubRegShift64:
3762  case Match_InvalidMovImm32Shift:
3763  case Match_InvalidMovImm64Shift:
3764  case Match_InvalidFPImm:
3765  case Match_InvalidMemoryWExtend8:
3766  case Match_InvalidMemoryWExtend16:
3767  case Match_InvalidMemoryWExtend32:
3768  case Match_InvalidMemoryWExtend64:
3769  case Match_InvalidMemoryWExtend128:
3770  case Match_InvalidMemoryXExtend8:
3771  case Match_InvalidMemoryXExtend16:
3772  case Match_InvalidMemoryXExtend32:
3773  case Match_InvalidMemoryXExtend64:
3774  case Match_InvalidMemoryXExtend128:
3775  case Match_InvalidMemoryIndexed4SImm7:
3776  case Match_InvalidMemoryIndexed8SImm7:
3777  case Match_InvalidMemoryIndexed16SImm7:
3778  case Match_InvalidMemoryIndexedSImm9:
3779  case Match_InvalidImm0_7:
3780  case Match_InvalidImm0_15:
3781  case Match_InvalidImm0_31:
3782  case Match_InvalidImm0_63:
3783  case Match_InvalidImm0_127:
3784  case Match_InvalidImm0_65535:
3785  case Match_InvalidImm1_8:
3786  case Match_InvalidImm1_16:
3787  case Match_InvalidImm1_32:
3788  case Match_InvalidImm1_64:
3789  case Match_InvalidIndex1:
3790  case Match_InvalidIndexB:
3791  case Match_InvalidIndexH:
3792  case Match_InvalidIndexS:
3793  case Match_InvalidIndexD:
3794  case Match_InvalidLabel:
3795  case Match_MSR:
3796  case Match_MRS: {
3797    // Any time we get here, there's nothing fancy to do. Just get the
3798    // operand SMLoc and display the diagnostic.
3799    SMLoc ErrorLoc = ((AArch64Operand *)Operands[ErrorInfo])->getStartLoc();
3800    if (ErrorLoc == SMLoc())
3801      ErrorLoc = IDLoc;
3802    return showMatchError(ErrorLoc, MatchResult);
3803  }
3804  }
3805
3806  llvm_unreachable("Implement any new match types added!");
3807  return true;
3808}
3809
3810/// ParseDirective parses the arm specific directives
3811bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3812  StringRef IDVal = DirectiveID.getIdentifier();
3813  SMLoc Loc = DirectiveID.getLoc();
3814  if (IDVal == ".hword")
3815    return parseDirectiveWord(2, Loc);
3816  if (IDVal == ".word")
3817    return parseDirectiveWord(4, Loc);
3818  if (IDVal == ".xword")
3819    return parseDirectiveWord(8, Loc);
3820  if (IDVal == ".tlsdesccall")
3821    return parseDirectiveTLSDescCall(Loc);
3822
3823  return parseDirectiveLOH(IDVal, Loc);
3824}
3825
3826/// parseDirectiveWord
3827///  ::= .word [ expression (, expression)* ]
3828bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3829  if (getLexer().isNot(AsmToken::EndOfStatement)) {
3830    for (;;) {
3831      const MCExpr *Value;
3832      if (getParser().parseExpression(Value))
3833        return true;
3834
3835      getParser().getStreamer().EmitValue(Value, Size);
3836
3837      if (getLexer().is(AsmToken::EndOfStatement))
3838        break;
3839
3840      // FIXME: Improve diagnostic.
3841      if (getLexer().isNot(AsmToken::Comma))
3842        return Error(L, "unexpected token in directive");
3843      Parser.Lex();
3844    }
3845  }
3846
3847  Parser.Lex();
3848  return false;
3849}
3850
3851// parseDirectiveTLSDescCall:
3852//   ::= .tlsdesccall symbol
3853bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3854  StringRef Name;
3855  if (getParser().parseIdentifier(Name))
3856    return Error(L, "expected symbol after directive");
3857
3858  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3859  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3860  Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3861
3862  MCInst Inst;
3863  Inst.setOpcode(AArch64::TLSDESCCALL);
3864  Inst.addOperand(MCOperand::CreateExpr(Expr));
3865
3866  getParser().getStreamer().EmitInstruction(Inst, STI);
3867  return false;
3868}
3869
3870/// ::= .loh <lohName | lohId> label1, ..., labelN
3871/// The number of arguments depends on the loh identifier.
3872bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3873  if (IDVal != MCLOHDirectiveName())
3874    return true;
3875  MCLOHType Kind;
3876  if (getParser().getTok().isNot(AsmToken::Identifier)) {
3877    if (getParser().getTok().isNot(AsmToken::Integer))
3878      return TokError("expected an identifier or a number in directive");
3879    // We successfully get a numeric value for the identifier.
3880    // Check if it is valid.
3881    int64_t Id = getParser().getTok().getIntVal();
3882    Kind = (MCLOHType)Id;
3883    // Check that Id does not overflow MCLOHType.
3884    if (!isValidMCLOHType(Kind) || Id != Kind)
3885      return TokError("invalid numeric identifier in directive");
3886  } else {
3887    StringRef Name = getTok().getIdentifier();
3888    // We successfully parse an identifier.
3889    // Check if it is a recognized one.
3890    int Id = MCLOHNameToId(Name);
3891
3892    if (Id == -1)
3893      return TokError("invalid identifier in directive");
3894    Kind = (MCLOHType)Id;
3895  }
3896  // Consume the identifier.
3897  Lex();
3898  // Get the number of arguments of this LOH.
3899  int NbArgs = MCLOHIdToNbArgs(Kind);
3900
3901  assert(NbArgs != -1 && "Invalid number of arguments");
3902
3903  SmallVector<MCSymbol *, 3> Args;
3904  for (int Idx = 0; Idx < NbArgs; ++Idx) {
3905    StringRef Name;
3906    if (getParser().parseIdentifier(Name))
3907      return TokError("expected identifier in directive");
3908    Args.push_back(getContext().GetOrCreateSymbol(Name));
3909
3910    if (Idx + 1 == NbArgs)
3911      break;
3912    if (getLexer().isNot(AsmToken::Comma))
3913      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3914    Lex();
3915  }
3916  if (getLexer().isNot(AsmToken::EndOfStatement))
3917    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
3918
3919  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
3920  return false;
3921}
3922
3923bool
3924AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
3925                                    AArch64MCExpr::VariantKind &ELFRefKind,
3926                                    MCSymbolRefExpr::VariantKind &DarwinRefKind,
3927                                    int64_t &Addend) {
3928  ELFRefKind = AArch64MCExpr::VK_INVALID;
3929  DarwinRefKind = MCSymbolRefExpr::VK_None;
3930  Addend = 0;
3931
3932  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
3933    ELFRefKind = AE->getKind();
3934    Expr = AE->getSubExpr();
3935  }
3936
3937  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
3938  if (SE) {
3939    // It's a simple symbol reference with no addend.
3940    DarwinRefKind = SE->getKind();
3941    return true;
3942  }
3943
3944  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
3945  if (!BE)
3946    return false;
3947
3948  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
3949  if (!SE)
3950    return false;
3951  DarwinRefKind = SE->getKind();
3952
3953  if (BE->getOpcode() != MCBinaryExpr::Add &&
3954      BE->getOpcode() != MCBinaryExpr::Sub)
3955    return false;
3956
3957  // See if the addend is is a constant, otherwise there's more going
3958  // on here than we can deal with.
3959  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
3960  if (!AddendExpr)
3961    return false;
3962
3963  Addend = AddendExpr->getValue();
3964  if (BE->getOpcode() == MCBinaryExpr::Sub)
3965    Addend = -Addend;
3966
3967  // It's some symbol reference + a constant addend, but really
3968  // shouldn't use both Darwin and ELF syntax.
3969  return ELFRefKind == AArch64MCExpr::VK_INVALID ||
3970         DarwinRefKind == MCSymbolRefExpr::VK_None;
3971}
3972
3973/// Force static initialization.
3974extern "C" void LLVMInitializeAArch64AsmParser() {
3975  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
3976  RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
3977
3978  RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64leTarget);
3979  RegisterMCAsmParser<AArch64AsmParser> W(TheARM64beTarget);
3980}
3981
3982#define GET_REGISTER_MATCHER
3983#define GET_SUBTARGET_FEATURE_NAME
3984#define GET_MATCHER_IMPLEMENTATION
3985#include "AArch64GenAsmMatcher.inc"
3986
3987// Define this matcher function after the auto-generated include so we
3988// have the match class enum definitions.
3989unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
3990                                                      unsigned Kind) {
3991  AArch64Operand *Op = static_cast<AArch64Operand *>(AsmOp);
3992  // If the kind is a token for a literal immediate, check if our asm
3993  // operand matches. This is for InstAliases which have a fixed-value
3994  // immediate in the syntax.
3995  int64_t ExpectedVal;
3996  switch (Kind) {
3997  default:
3998    return Match_InvalidOperand;
3999  case MCK__35_0:
4000    ExpectedVal = 0;
4001    break;
4002  case MCK__35_1:
4003    ExpectedVal = 1;
4004    break;
4005  case MCK__35_12:
4006    ExpectedVal = 12;
4007    break;
4008  case MCK__35_16:
4009    ExpectedVal = 16;
4010    break;
4011  case MCK__35_2:
4012    ExpectedVal = 2;
4013    break;
4014  case MCK__35_24:
4015    ExpectedVal = 24;
4016    break;
4017  case MCK__35_3:
4018    ExpectedVal = 3;
4019    break;
4020  case MCK__35_32:
4021    ExpectedVal = 32;
4022    break;
4023  case MCK__35_4:
4024    ExpectedVal = 4;
4025    break;
4026  case MCK__35_48:
4027    ExpectedVal = 48;
4028    break;
4029  case MCK__35_6:
4030    ExpectedVal = 6;
4031    break;
4032  case MCK__35_64:
4033    ExpectedVal = 64;
4034    break;
4035  case MCK__35_8:
4036    ExpectedVal = 8;
4037    break;
4038  }
4039  if (!Op->isImm())
4040    return Match_InvalidOperand;
4041  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4042  if (!CE)
4043    return Match_InvalidOperand;
4044  if (CE->getValue() == ExpectedVal)
4045    return Match_Success;
4046  return Match_InvalidOperand;
4047}
4048