//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the (GNU-style) assembly parser for the AArch64
// architecture.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64MCExpr.h"
#include "MCTargetDesc/AArch64MCTargetDesc.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class AArch64Operand;

class AArch64AsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

public:
  enum AArch64MatchResultTy {
    Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"
  };

  AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
                   const MCInstrInfo &MII)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  // These methods form the public interface of MCTargetAsmParser.
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool ParseDirective(AsmToken DirectiveID);
  bool ParseDirectiveTLSDescCall(SMLoc L);
  bool ParseDirectiveWord(unsigned Size, SMLoc L);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);

  // The rest of the sub-parsers have more freedom over interface: they return
  // an OperandMatchResultTy because it's less ambiguous than true/false or
  // -1/0/1, even if it is more verbose.
  OperandMatchResultTy
  ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
               StringRef Mnemonic);

  OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);

  OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);

  OperandMatchResultTy
  ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t NumLanes);

  OperandMatchResultTy
  ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                uint32_t &NumLanes);

  OperandMatchResultTy
  ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  template<typename SomeNamedImmMapper> OperandMatchResultTy
  ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
    return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
  }

  OperandMatchResultTy
  ParseNamedImmOperand(const NamedImmMapper &Mapper,
                       SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  OperandMatchResultTy
  ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
                      SMLoc &LayoutLoc);

  OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);

  bool validateInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands);

  /// Scan the next token (which had better be an identifier) and determine
  /// whether it represents a general-purpose or vector register. It returns
  /// true if an identifier was found and populates its reference arguments. It
  /// does not consume the token.
  bool
  IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
                   SMLoc &LayoutLoc) const;
};

} // end anonymous namespace

namespace {

/// Instances of this class represent a parsed operand of an AArch64 machine
/// instruction.
class AArch64Operand : public MCParsedAsmOperand {
private:
  enum KindTy {
    k_ImmWithLSL,     // #uimm {, LSL #amt }
    k_CondCode,       // eq/ne/...
    k_FPImmediate,    // Limited-precision floating-point imm
    k_Immediate,      // Including expressions referencing symbols
    k_Register,
    k_ShiftExtend,
    k_VectorList,     // A sequential list of 1 to 4 registers.
    k_SysReg,         // The register operand of MRS and MSR instructions
    k_Token,          // The mnemonic; other raw tokens the auto-generated
                      // matcher handles directly.
    k_WrappedRegister // Load/store exclusive permit a wrapped register.
  } Kind;

  SMLoc StartLoc, EndLoc;

  struct ImmWithLSLOp {
    const MCExpr *Val;
    unsigned ShiftAmount;
    bool ImplicitAmount;
  };

  struct CondCodeOp {
    A64CC::CondCodes Code;
  };

  struct FPImmOp {
    double Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  struct RegOp {
    unsigned RegNum;
  };

  struct ShiftExtendOp {
    A64SE::ShiftExtSpecifiers ShiftType;
    unsigned Amount;
    bool ImplicitAmount;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    A64Layout::VectorLayout Layout;
  };

  struct SysRegOp {
    const char *Data;
    unsigned Length;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  union {
    struct ImmWithLSLOp ImmWithLSL;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct ImmOp Imm;
    struct RegOp Reg;
    struct ShiftExtendOp ShiftExtend;
    struct VectorListOp VectorList;
    struct SysRegOp SysReg;
    struct TokOp Tok;
  };

  AArch64Operand(KindTy K, SMLoc S, SMLoc E)
    : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}

public:
  AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
  }

  SMLoc getStartLoc() const { return StartLoc; }
  SMLoc getEndLoc() const { return EndLoc; }
  void print(raw_ostream&) const;
  void dump() const;

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_WrappedRegister)
           && "Invalid access!");
    return Reg.RegNum;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  A64CC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CondCode.Code;
  }

  static bool isNonConstantExpr(const MCExpr *E,
                                AArch64MCExpr::VariantKind &Variant) {
    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
      Variant = A64E->getKind();
      return true;
    } else if (!isa<MCConstantExpr>(E)) {
      Variant = AArch64MCExpr::VK_AARCH64_None;
      return true;
    }

    return false;
  }

  bool isCondCode() const { return Kind == k_CondCode; }
  bool isToken() const { return Kind == k_Token; }
  bool isReg() const { return Kind == k_Register; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isMem() const { return false; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
  bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
  bool isSysReg() const { return Kind == k_SysReg; }
  bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
  bool isWrappedReg() const { return Kind == k_WrappedRegister; }

  bool isAddSubImmLSL0() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 0) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
          || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  bool isAddSubImmLSL12() const {
    if (!isImmWithLSL()) return false;
    if (ImmWithLSL.ShiftAmount != 12) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
          || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
    }

    // Otherwise it should be a real immediate in range:
    const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
  }

  template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
    if (!isShiftOrExtend()) return false;

    A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
    if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
      return false;

    if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
      return false;

    return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
  }
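
  // Illustrative example: with MemSize == 8 and RmSize == 64,
  // "[x0, x1, lsl #3]" is accepted (Amount == Log2_32(8) == 3), as is
  // "[x0, x1, sxtx]" assuming an implicit extend is recorded with Amount 0;
  // "lsl #2" would be rejected.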

  bool isAdrpLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
        || Variant == AArch64MCExpr::VK_AARCH64_GOT
        || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
        || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
    }

    return isLabel<21, 4096>();
  }

  template<unsigned RegWidth> bool isBitfieldWidth() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  template<int RegWidth>
  bool isCVTFixedPos() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
  }

  bool isFMOVImm() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    return A64Imms::isFPImm(RealVal, ImmVal);
  }

  bool isFPZero() const {
    if (!isFPImm()) return false;

    APFloat RealVal(FPImm.Val);
    return RealVal.isPosZero();
  }

  template<unsigned field_width, unsigned scale>
  bool isLabel() const {
    if (!isImm()) return false;

    if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
      return true;
    } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Min = - (scale * (1LL << (field_width - 1)));
      int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
      return (Val % scale) == 0 && Val >= Min && Val <= Max;
    }

    // N.b. this disallows explicit relocation specifications via an
    // AArch64MCExpr; only bare symbol references and plain constants are
    // accepted here.
    return false;
  }
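
  // Worked example: for isLabel<19, 4> (used by the load-literal check
  // below), the accepted constant range is -(4 << 18) = -1048576 to
  // 4 * ((1 << 18) - 1) = 1048572 inclusive, in multiples of 4.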

  bool isLane1() const {
    if (!isImm()) return false;

    // Because it's come through custom assembly parsing, it must always be a
    // constant expression.
    return cast<MCConstantExpr>(getImm())->getValue() == 1;
  }

  bool isLoadLitLabel() const {
    if (!isImm()) return false;

    AArch64MCExpr::VariantKind Variant;
    if (isNonConstantExpr(getImm(), Variant)) {
      return Variant == AArch64MCExpr::VK_AARCH64_None
          || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
    }

    return isLabel<19, 4>();
  }

  template<unsigned RegWidth> bool isLogicalImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;

    uint32_t Bits;
    return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
  }

  template<unsigned RegWidth> bool isLogicalImmMOV() const {
    if (!isLogicalImm<RegWidth>()) return false;

    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    // The move alias for ORR is only valid if the immediate cannot be
    // represented with a move (immediate) instruction; they take priority.
    int UImm16, Shift;
    return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
      && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
  }
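
  // Illustrative example: "mov x0, #0xff00ff00ff00ff00" matches
  // isLogicalImmMOV<64> (the value is a valid bitmask immediate, and neither
  // MOVZ nor MOVN can produce it), so the alias falls through to ORR.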

  template<int MemSize>
  bool isOffsetUImm12() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    // Assume they know what they're doing for now if they've given us a
    // non-constant expression. In principle we could check for ridiculous
    // things that can't possibly work or relocations that would almost
    // certainly break resulting code.
    if (!CE)
      return true;

    int64_t Val = CE->getValue();

    // Must be a multiple of the access size in bytes.
    if ((Val & (MemSize - 1)) != 0) return false;

    // Must be 12-bit unsigned
    return Val >= 0 && Val <= 0xfff * MemSize;
  }
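
  // Illustrative example: with MemSize == 4 (e.g. "ldr w0, [x1, #off]"),
  // valid constant offsets are 0, 4, 8, ..., 0xfff * 4 = 16380.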

  template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
  bool isShift() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
  }

  bool isMOVN32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVN64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVZ64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0,
      AArch64MCExpr::VK_AARCH64_ABS_G1,
      AArch64MCExpr::VK_AARCH64_ABS_G2,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_SABS_G0,
      AArch64MCExpr::VK_AARCH64_SABS_G1,
      AArch64MCExpr::VK_AARCH64_SABS_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G2,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G2,
      AArch64MCExpr::VK_AARCH64_TPREL_G1,
      AArch64MCExpr::VK_AARCH64_TPREL_G0,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMOVK32Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(32, PermittedModifiers, NumModifiers);
  }

  bool isMOVK64Imm() const {
    static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
      AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
      AArch64MCExpr::VK_AARCH64_ABS_G3,
      AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
    };
    const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);

    return isMoveWideImm(64, PermittedModifiers, NumModifiers);
  }

  bool isMoveWideImm(unsigned RegWidth,
                     const AArch64MCExpr::VariantKind *PermittedModifiers,
                     unsigned NumModifiers) const {
    if (!isImmWithLSL()) return false;

    if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
    if (ImmWithLSL.ShiftAmount >= RegWidth) return false;

    AArch64MCExpr::VariantKind Modifier;
    if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
      // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
      if (!ImmWithLSL.ImplicitAmount) return false;

      for (unsigned i = 0; i < NumModifiers; ++i)
        if (PermittedModifiers[i] == Modifier) return true;

      return false;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
    return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
  }
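
  // Illustrative examples: "movz x0, #:abs_g1:sym" is accepted (the modifier
  // implies the shift), whereas "movz x0, #:abs_g1:sym, lsl #16" is rejected
  // because an explicit shift makes no sense alongside a relocation modifier.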

  template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
  bool isMoveWideMovAlias() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int UImm16, Shift;
    uint64_t Value = CE->getValue();

    // If this is a 32-bit instruction then all bits above 32 should be the
    // same: either of these is fine because signed/unsigned values should be
    // permitted.
    if (RegWidth == 32) {
      if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
        return false;

      Value &= 0xffffffffULL;
    }

    return isValidImm(RegWidth, Value, UImm16, Shift);
  }
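
  // Illustrative example: instantiated with a validator such as
  // A64Imms::isMOVZImm, "mov w0, #0x10000" passes this check and is later
  // encoded as "movz w0, #1, lsl #16" by addMoveWideMovAliasOperands.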

  bool isMSRWithReg() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMSRPState() const {
    if (!isSysReg()) return false;

    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64PState::PStateMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isMRS() const {
    if (!isSysReg()) return false;

    // Check against the registers known to be readable via MRS.
    bool IsKnownRegister;
    StringRef Name(SysReg.Data, SysReg.Length);
    A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);

    return IsKnownRegister;
  }

  bool isPRFM() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());

    if (!CE)
      return false;

    return CE->getValue() >= 0 && CE->getValue() <= 31;
  }

  template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != SHKind)
      return false;

    return ShiftExtend.Amount <= 4;
  }

  bool isRegExtendLSL() const {
    if (!isShiftOrExtend()) return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
  }

  // if 0 < value <= w, return true
  bool isShrFixedWidth(int w) const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= w;
  }

  bool isShrImm8() const { return isShrFixedWidth(8); }

  bool isShrImm16() const { return isShrFixedWidth(16); }

  bool isShrImm32() const { return isShrFixedWidth(32); }

  bool isShrImm64() const { return isShrFixedWidth(64); }

  // if 0 <= value < w, return true
  bool isShlFixedWidth(int w) const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < w;
  }

  bool isShlImm8() const { return isShlFixedWidth(8); }

  bool isShlImm16() const { return isShlFixedWidth(16); }

  bool isShlImm32() const { return isShlFixedWidth(32); }

  bool isShlImm64() const { return isShlFixedWidth(64); }

  bool isNeonMovImmShiftLSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0, 8, 16 and 24.
    return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
  }

  bool isNeonMovImmShiftLSLH() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::LSL)
      return false;

    // Valid shift amount is 0 and 8.
    return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
  }

  bool isNeonMovImmShiftMSL() const {
    if (!isShiftOrExtend())
      return false;

    if (ShiftExtend.ShiftType != A64SE::MSL)
      return false;

    // Valid shift amount is 8 and 16.
    return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
  }

  template <A64Layout::VectorLayout Layout, unsigned Count>
  bool isVectorList() const {
    return Kind == k_VectorList && VectorList.Layout == Layout &&
           VectorList.Count == Count;
  }

  template <int MemSize> bool isSImm7Scaled() const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    int64_t Val = CE->getValue();
    if (Val % MemSize != 0) return false;

    Val /= MemSize;

    return Val >= -64 && Val < 64;
  }

  template<int BitWidth>
  bool isSImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= -(1LL << (BitWidth - 1))
      && CE->getValue() < (1LL << (BitWidth - 1));
  }

  template<int bitWidth>
  bool isUImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
  }

  bool isUImm() const {
    if (!isImm()) return false;

    return isa<MCConstantExpr>(getImm());
  }

  bool isNeonUImm64Mask() const {
    if (!isImm())
      return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE)
      return false;

    uint64_t Value = CE->getValue();

    // i64 value with each byte being either 0x00 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
        return false;
    return true;
  }

  // if value == N, return true
  template<int N>
  bool isExactImm() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;

    return CE->getValue() == N;
  }

  static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
                                          unsigned ShiftAmount,
                                          bool ImplicitAmount,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
    Op->ImmWithLSL.Val = Val;
    Op->ImmWithLSL.ShiftAmount = ShiftAmount;
    Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
                                        SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
    Op->CondCode.Code = Code;
    return Op;
  }

  static AArch64Operand *CreateFPImm(double Val,
                                     SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
    Op->FPImm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
    Op->Imm.Val = Val;
    return Op;
  }

  static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
    Op->Reg.RegNum = RegNum;
    return Op;
  }

  static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
                                           unsigned Amount,
                                           bool ImplicitAmount,
                                           SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
    Op->ShiftExtend.ShiftType = ShiftTyp;
    Op->ShiftExtend.Amount = Amount;
    Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
    return Op;
  }

  static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
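    // Note: SysRegOp and TokOp have identical layouts, so storing through the
    // Tok member below reads back correctly via SysReg.Data/SysReg.Length.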
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
                                          A64Layout::VectorLayout Layout,
                                          SMLoc S, SMLoc E) {
    AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.Layout = Layout;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
    AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    return Op;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  template<unsigned RegWidth>
  void addBFILSBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
    Inst.addOperand(MCOperand::CreateImm(EncodedVal));
  }
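
  // Worked example: for "bfi x0, x1, #8, #4" on a 64-bit register, the lsb
  // operand 8 is encoded as (64 - 8) % 64 == 56 (BFI is an alias of BFM,
  // whose immr field holds the negated rotation).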

  void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());

    Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCondCode()));
  }

  void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
  }

  void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    APFloat RealVal(FPImm.Val);
    uint32_t ImmVal;
    A64Imms::isFPImm(RealVal, ImmVal);

    Inst.addOperand(MCOperand::CreateImm(ImmVal));
  }

  void addFPZeroOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    Inst.addOperand(MCOperand::CreateImm(0));
  }

  void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    unsigned Encoded = A64InvertCondCode(getCondCode());
    Inst.addOperand(MCOperand::CreateImm(Encoded));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  template<int MemSize>
  void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue() / MemSize;
    Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
  }

  template<int BitWidth>
  void addSImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
  }

  void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);
  }

  template<unsigned field_width, unsigned scale>
  void addLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);

    if (!CE) {
      addExpr(Inst, Imm.Val);
      return;
    }

    int64_t Val = CE->getValue();
    assert(Val % scale == 0 && "Unaligned immediate in instruction");
    Val /= scale;

    Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
  }

  template<int MemSize>
  void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
    } else {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
    }
  }

  template<unsigned RegWidth>
  void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands");
    const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);

    uint32_t Bits;
    A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMRSOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    bool Valid;
    StringRef Name(SysReg.Data, SysReg.Length);
    uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);

    Inst.addOperand(MCOperand::CreateImm(Bits));
  }

  void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");

    addExpr(Inst, ImmWithLSL.Val);

    AArch64MCExpr::VariantKind Variant;
    if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
      Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
      return;
    }

    // We know it's relocated
    switch (Variant) {
    case AArch64MCExpr::VK_AARCH64_ABS_G0:
    case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0:
    case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
      Inst.addOperand(MCOperand::CreateImm(0));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G1:
    case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
    case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1:
    case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
      Inst.addOperand(MCOperand::CreateImm(1));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G2:
    case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
    case AArch64MCExpr::VK_AARCH64_SABS_G2:
    case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
    case AArch64MCExpr::VK_AARCH64_TPREL_G2:
      Inst.addOperand(MCOperand::CreateImm(2));
      break;
    case AArch64MCExpr::VK_AARCH64_ABS_G3:
      Inst.addOperand(MCOperand::CreateImm(3));
      break;
    default: llvm_unreachable("Inappropriate move wide relocation");
    }
  }
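
  // Illustrative example: for "movz x0, #:abs_g2:sym" the switch above adds a
  // shift operand of 2, i.e. the relocation targets bits [47:32] (lsl #32).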

  template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
  void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int UImm16, Shift;

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    if (RegWidth == 32) {
      Value &= 0xffffffffULL;
    }

    bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
    (void)Valid;
    assert(Valid && "Invalid immediates should have been weeded out by now");

    Inst.addOperand(MCOperand::CreateImm(UImm16));
    Inst.addOperand(MCOperand::CreateImm(Shift));
  }

  void addPRFMOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    assert(CE->getValue() >= 0 && CE->getValue() <= 31
           && "PRFM operand should be 5 bits");

    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // For Add-sub (extended register) operands.
  void addRegExtendOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  // For vector-immediate shifted-imm operands.
  void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode LSLH shift amount 0, 8 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
      llvm_unreachable("Invalid shift amount for vector immediate inst.");

    // Encode MSL shift amount 8, 16 as 0, 1.
    int64_t Imm = ShiftExtend.Amount / 8 - 1;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // For the extend in load-store (register offset) instructions.
  template<unsigned MemSize>
  void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
    addAddrRegExtendOperands(Inst, N, MemSize);
  }

  void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
                                unsigned MemSize) const {
    assert(N == 1 && "Invalid number of operands!");

    // First bit of Option is set in instruction classes, the high two bits are
    // as follows:
    unsigned OptionHi = 0;
    switch (ShiftExtend.ShiftType) {
    case A64SE::UXTW:
    case A64SE::LSL:
      OptionHi = 1;
      break;
    case A64SE::SXTW:
    case A64SE::SXTX:
      OptionHi = 3;
      break;
    default:
      llvm_unreachable("Invalid extend type for register offset");
    }

    unsigned S = 0;
    if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
      S = 1;
    else if (MemSize != 1 && ShiftExtend.Amount != 0)
      S = 1;

    Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
  }
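
  // Worked example: "ldr w0, [x1, w2, sxtw #2]" with MemSize == 4 gives
  // OptionHi == 3 (SXTW) and S == 1 (explicit non-zero shift), so the operand
  // is encoded as (3 << 1) | 1 == 7.
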
  void addShiftOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
  }

  void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // A bit from each byte in the constant forms the encoded immediate.
    // isNeonUImm64Mask has already guaranteed a constant expression here.
    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();

    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
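
  // Worked example: Value == 0xff00ff00ff00ff00 has bytes (LSB first)
  // 00,ff,00,ff,00,ff,00,ff, so the encoded immediate is 0b10101010 == 0xaa.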

  void addVectorListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }
};

} // end anonymous namespace.

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               StringRef Mnemonic) {
  // See if the operand has a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // It could either succeed, fail or just not care.
  if (ResTy != MatchOperand_NoMatch)
    return ResTy;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return MatchOperand_ParseFail;
  case AsmToken::Identifier: {
    // It might be in the LSL/UXTB family ...
    OperandMatchResultTy GotShift = ParseShiftExtend(Operands);

    // We can only continue if no tokens were eaten.
    if (GotShift != MatchOperand_NoMatch)
      return GotShift;

    // ... or it might be a register ...
    uint32_t NumLanes = 0;
    OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
    assert(GotReg != MatchOperand_ParseFail
           && "register parsing shouldn't partially succeed");

    if (GotReg == MatchOperand_Success) {
      if (Parser.getTok().is(AsmToken::LBrac))
        return ParseNEONLane(Operands, NumLanes);
      else
        return MatchOperand_Success;
    }
    // ... or it might be a symbolish thing
  }
    // Fall through
  case AsmToken::LParen:  // E.g. (strcmp-4)
  case AsmToken::Integer: // 1f, 2b labels
  case AsmToken::String:  // quoted labels
  case AsmToken::Dot:     // . is Current location
  case AsmToken::Dollar:  // $ is PC
  case AsmToken::Colon: {
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::Hash: {   // Immediates
    SMLoc StartLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    const MCExpr *ImmVal = 0;
    Parser.Lex();

    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac: {
    SMLoc Loc = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateToken("[", Loc));
    Parser.Lex(); // Eat '['

    // There's no comma after a '[', so we can parse the next operand
    // immediately.
    return ParseOperand(Operands, Mnemonic);
  }
  // The following will likely be useful later, but not in very early cases
  case AsmToken::LCurly: // SIMD vector list is not parsed here
    llvm_unreachable("Don't know how to deal with '{' in operand");
    return MatchOperand_ParseFail;
  }
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
  if (getLexer().is(AsmToken::Colon)) {
    AArch64MCExpr::VariantKind RefKind;

    OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
    if (ResTy != MatchOperand_Success)
      return ResTy;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return MatchOperand_ParseFail;

    ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
    return MatchOperand_Success;
  }

  // No weird AArch64MCExpr prefix
  return getParser().parseExpression(ExprVal)
    ? MatchOperand_ParseFail : MatchOperand_Success;
}

// A lane attached to a NEON register. "[N]", which should yield three tokens:
// '[', N, ']'. A hash is not allowed to precede the immediate here.
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t NumLanes) {
  SMLoc Loc = Parser.getTok().getLoc();

  assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
  Operands.push_back(AArch64Operand::CreateToken("[", Loc));
  Parser.Lex(); // Eat '['

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "expected lane number");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().getIntVal() >= NumLanes) {
    Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
    return MatchOperand_ParseFail;
  }

  const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
                                              getContext());
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat actual lane
  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));

  if (Parser.getTok().isNot(AsmToken::RBrac)) {
    Error(Parser.getTok().getLoc(), "expected ']' after lane");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AArch64Operand::CreateToken("]", Loc));
  Parser.Lex(); // Eat ']'

  return MatchOperand_Success;
}

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
  assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }

  std::string LowerCase = Parser.getTok().getIdentifier().lower();
  RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
    .Case("got",              AArch64MCExpr::VK_AARCH64_GOT)
    .Case("got_lo12",         AArch64MCExpr::VK_AARCH64_GOT_LO12)
    .Case("lo12",             AArch64MCExpr::VK_AARCH64_LO12)
    .Case("abs_g0",           AArch64MCExpr::VK_AARCH64_ABS_G0)
    .Case("abs_g0_nc",        AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
    .Case("abs_g1",           AArch64MCExpr::VK_AARCH64_ABS_G1)
    .Case("abs_g1_nc",        AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
    .Case("abs_g2",           AArch64MCExpr::VK_AARCH64_ABS_G2)
    .Case("abs_g2_nc",        AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
    .Case("abs_g3",           AArch64MCExpr::VK_AARCH64_ABS_G3)
    .Case("abs_g0_s",         AArch64MCExpr::VK_AARCH64_SABS_G0)
    .Case("abs_g1_s",         AArch64MCExpr::VK_AARCH64_SABS_G1)
    .Case("abs_g2_s",         AArch64MCExpr::VK_AARCH64_SABS_G2)
    .Case("dtprel_g2",        AArch64MCExpr::VK_AARCH64_DTPREL_G2)
    .Case("dtprel_g1",        AArch64MCExpr::VK_AARCH64_DTPREL_G1)
    .Case("dtprel_g1_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
    .Case("dtprel_g0",        AArch64MCExpr::VK_AARCH64_DTPREL_G0)
    .Case("dtprel_g0_nc",     AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
    .Case("dtprel_hi12",      AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
    .Case("dtprel_lo12",      AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
    .Case("dtprel_lo12_nc",   AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
    .Case("gottprel_g1",      AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
    .Case("gottprel_g0_nc",   AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
    .Case("gottprel",         AArch64MCExpr::VK_AARCH64_GOTTPREL)
    .Case("gottprel_lo12",    AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
    .Case("tprel_g2",         AArch64MCExpr::VK_AARCH64_TPREL_G2)
    .Case("tprel_g1",         AArch64MCExpr::VK_AARCH64_TPREL_G1)
    .Case("tprel_g1_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
    .Case("tprel_g0",         AArch64MCExpr::VK_AARCH64_TPREL_G0)
    .Case("tprel_g0_nc",      AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
    .Case("tprel_hi12",       AArch64MCExpr::VK_AARCH64_TPREL_HI12)
    .Case("tprel_lo12",       AArch64MCExpr::VK_AARCH64_TPREL_LO12)
    .Case("tprel_lo12_nc",    AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
    .Case("tlsdesc",          AArch64MCExpr::VK_AARCH64_TLSDESC)
    .Case("tlsdesc_lo12",     AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
    .Default(AArch64MCExpr::VK_AARCH64_None);

  if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
    Error(Parser.getTok().getLoc(),
          "expected relocation specifier in operand after ':'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat identifier

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(),
          "expected ':' after relocation specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();
  return MatchOperand_Success;
}
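
// Illustrative use: in "add x0, x0, #:lo12:var" the ":lo12:" prefix is
// consumed here and yields VK_AARCH64_LO12, which ParseImmediate then wraps
// around the expression for 'var'.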

AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseImmWithLSLOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  const MCExpr *Imm;
  if (ParseImmediate(Imm) != MatchOperand_Success)
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
    return MatchOperand_Success;
  }

  // Eat ','
  Parser.Lex();

  // The optional operand must be "lsl #N" where N is non-negative.
  if (Parser.getTok().is(AsmToken::Identifier)
      && Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Parser.Lex();

    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex();
  }

  // Whichever path was taken above, the shift amount itself must now be an
  // integer token; anything else cannot be handed to getIntVal() below.
  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;
  }

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
                                                      false, S, E));
  return MatchOperand_Success;
}
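
// Illustrative inputs: "#42" yields an implicit LSL of 0, while
// "#1, lsl #16" yields ShiftAmount == 16 with ImplicitAmount == false.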
1520
1521
1522AArch64AsmParser::OperandMatchResultTy
1523AArch64AsmParser::ParseCondCodeOperand(
1524                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1525  if (Parser.getTok().isNot(AsmToken::Identifier))
1526    return MatchOperand_NoMatch;
1527
1528  StringRef Tok = Parser.getTok().getIdentifier();
1529  A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
1530
1531  if (CondCode == A64CC::Invalid)
1532    return MatchOperand_NoMatch;
1533
1534  SMLoc S = Parser.getTok().getLoc();
1535  Parser.Lex(); // Eat condition code
1536  SMLoc E = Parser.getTok().getLoc();
1537
1538  Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
1539  return MatchOperand_Success;
1540}
1541
1542AArch64AsmParser::OperandMatchResultTy
1543AArch64AsmParser::ParseCRxOperand(
1544                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1545  SMLoc S = Parser.getTok().getLoc();
1546  if (Parser.getTok().isNot(AsmToken::Identifier)) {
1547    Error(S, "Expected cN operand where 0 <= N <= 15");
1548    return MatchOperand_ParseFail;
1549  }
1550
1551  StringRef Tok = Parser.getTok().getIdentifier();
1552  if (Tok[0] != 'c' && Tok[0] != 'C') {
1553    Error(S, "Expected cN operand where 0 <= N <= 15");
1554    return MatchOperand_ParseFail;
1555  }
1556
1557  uint32_t CRNum;
1558  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1559  if (BadNum || CRNum > 15) {
1560    Error(S, "Expected cN operand where 0 <= N <= 15");
1561    return MatchOperand_ParseFail;
1562  }
1563
1564  const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
1565
1566  Parser.Lex();
1567  SMLoc E = Parser.getTok().getLoc();
1568
1569  Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
1570  return MatchOperand_Success;
1571}
1572
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseFPImmOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {

  // FIXME?: I want to live in a world where immediates must start with
  // #. Please don't dash my hopes (well, do if you have a good reason).
  if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '#'

  bool Negative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    Negative = true;
    Parser.Lex(); // Eat '-'
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    Parser.Lex(); // Eat '+'
  }

  if (Parser.getTok().isNot(AsmToken::Real)) {
    Error(S, "Expected floating-point immediate");
    return MatchOperand_ParseFail;
  }

  APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
  if (Negative) RealVal.changeSign();
  double DblVal = RealVal.convertToDouble();

  Parser.Lex(); // Eat real number
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
  return MatchOperand_Success;
}


// Automatically generated
static unsigned MatchRegisterName(StringRef Name);

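// For example, "v0.8b" identifies AArch64::D0 with layout ".8b", while
// "v0.16b" identifies AArch64::Q0 with ".16b": the layout determines whether
// the 64-bit or the 128-bit view of the vector register is meant.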
bool
AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
                                   StringRef &Layout,
                                   SMLoc &LayoutLoc) const {
  const AsmToken &Tok = Parser.getTok();

  if (Tok.isNot(AsmToken::Identifier))
    return false;

  std::string LowerReg = Tok.getString().lower();
  size_t DotPos = LowerReg.find('.');

  bool IsVec128 = false;
  SMLoc S = Tok.getLoc();
  RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);

  if (DotPos == std::string::npos) {
    Layout = StringRef();
  } else {
    // Everything afterwards needs to be a literal token, expected to be
    // '.2d', '.b' etc for vector registers.

    // This StringSwitch validates the input and (perhaps more importantly)
    // gives us a permanent string to use in the token (a pointer into LowerReg
    // would go out of scope when we return).
    LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
    StringRef LayoutText = StringRef(LowerReg).substr(DotPos);

    // See if it's a 128-bit layout first.
    Layout = StringSwitch<const char *>(LayoutText)
      .Case(".q", ".q").Case(".1q", ".1q")
      .Case(".d", ".d").Case(".2d", ".2d")
      .Case(".s", ".s").Case(".4s", ".4s")
      .Case(".h", ".h").Case(".8h", ".8h")
      .Case(".b", ".b").Case(".16b", ".16b")
      .Default("");

    if (Layout.size() != 0) {
      IsVec128 = true;
    } else {
      Layout = StringSwitch<const char *>(LayoutText)
                   .Case(".1d", ".1d")
                   .Case(".2s", ".2s")
                   .Case(".4h", ".4h")
                   .Case(".8b", ".8b")
                   .Default("");
    }

    if (Layout.size() == 0) {
      // If we've still not pinned it down the register is malformed.
      return false;
    }
  }

  RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
  if (RegNum == AArch64::NoRegister) {
    RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
      .Case("ip0", AArch64::X16)
      .Case("ip1", AArch64::X17)
      .Case("fp", AArch64::X29)
      .Case("lr", AArch64::X30)
      .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
      .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
      .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
      .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
      .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
      .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
      .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
      .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
      .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
      .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
      .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
      .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
      .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
      .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
      .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
      .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
      .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
      .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
      .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
      .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
      .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
      .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
      .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
      .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
      .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
      .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
      .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
      .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
      .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
      .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
      .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
      .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
      .Default(AArch64::NoRegister);
  }
  if (RegNum == AArch64::NoRegister)
    return false;

  return true;
}

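// Parses a register operand together with any trailing layout token, deriving
// the lane count from it: e.g. "v3.s" is a 128-bit register viewed as four
// 32-bit lanes, so NumLanes is set to 4.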
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                uint32_t &NumLanes) {
  unsigned RegNum;
  StringRef Layout;
  SMLoc RegEndLoc, LayoutLoc;
  SMLoc S = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    return MatchOperand_NoMatch;

  Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));

  if (Layout.size() != 0) {
    unsigned long long TmpLanes = 0;
    llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
    if (TmpLanes != 0) {
      NumLanes = TmpLanes;
    } else {
      // If the number of lanes isn't specified explicitly, a valid instruction
      // will have an element specifier and be capable of acting on the entire
      // vector register.
      switch (Layout.back()) {
      default: llvm_unreachable("Invalid layout specifier");
      case 'b': NumLanes = 16; break;
      case 'h': NumLanes = 8; break;
      case 's': NumLanes = 4; break;
      case 'd': NumLanes = 2; break;
      case 'q': NumLanes = 1; break;
      }
    }

    Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
  }

  Parser.Lex();
  return MatchOperand_Success;
}

bool
AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                SMLoc &EndLoc) {
  // This callback is used for things like DWARF frame directives in
  // assembly. They don't care about things like NEON layouts or lanes, they
  // just want to be able to produce the DWARF register number.
  StringRef LayoutSpec;
  SMLoc RegEndLoc, LayoutLoc;
  StartLoc = Parser.getTok().getLoc();

  if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
    return true;

  Parser.Lex();
  EndLoc = Parser.getTok().getLoc();

  return false;
}

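// Parses a named immediate, e.g. the barrier operand of "dmb ish"; the
// numeric form "dmb #11" is equivalent, since "ish" encodes as 11.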
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Since these operands occur in very limited circumstances, without
  // alternatives, we actually signal an error if there is no match. If relaxing
  // this, beware of unintended consequences: an immediate will be accepted
  // during matching, no matter how it gets into the AArch64Operand.
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  if (Tok.is(AsmToken::Identifier)) {
    bool ValidName;
    uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);

    if (!ValidName) {
      Error(S, "operand specifier not recognised");
      return MatchOperand_ParseFail;
    }

    Parser.Lex(); // We're done with the identifier. Eat it

    SMLoc E = Parser.getTok().getLoc();
    const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
    Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
    return MatchOperand_Success;
  } else if (Tok.is(AsmToken::Hash)) {
    Parser.Lex();

    const MCExpr *ImmVal;
    if (ParseImmediate(ImmVal) != MatchOperand_Success)
      return MatchOperand_ParseFail;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
      Error(S, "Invalid immediate for instruction");
      return MatchOperand_ParseFail;
    }

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
    return MatchOperand_Success;
  }

  Error(S, "unexpected operand for instruction");
  return MatchOperand_ParseFail;
}

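// Parses a system-register operand, e.g. the "nzcv" in "mrs x0, nzcv". The
// name is kept as a string here and resolved to an encoding during matching.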
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseSysRegOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();

  // Any MSR/MRS operand will be an identifier, and we want to store it as some
  // kind of string: SPSel is valid for two different forms of MSR with two
  // different encodings. There's no collision at the moment, but the potential
  // is there.
  if (!Tok.is(AsmToken::Identifier)) {
    return MatchOperand_NoMatch;
  }

  SMLoc S = Tok.getLoc();
  Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
  Parser.Lex(); // Eat identifier

  return MatchOperand_Success;
}

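// Parses the address of a load/store-exclusive instruction, e.g. the "[x0]"
// (equivalently "[x0, #0]") in "ldxr w1, [x0]".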
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseLSXAddressOperand(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  unsigned RegNum;
  SMLoc RegEndLoc, LayoutLoc;
  StringRef Layout;
  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
      || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
      || Layout.size() != 0) {
    // Check Layout.size because we don't want to let "x3.4s" or similar
    // through.
    return MatchOperand_NoMatch;
  }
  Parser.Lex(); // Eat register

  if (Parser.getTok().is(AsmToken::RBrac)) {
    // We're done
    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
    return MatchOperand_Success;
  }

  // Otherwise, only ", #0" is valid

  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat ','

  if (Parser.getTok().isNot(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '#'

  if (Parser.getTok().isNot(AsmToken::Integer)
      || Parser.getTok().getIntVal() != 0) {
    Error(Parser.getTok().getLoc(), "expected '#0'");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '0'

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
  return MatchOperand_Success;
}

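// Parses a shift or extend specifier, e.g. the "lsl #12" in
// "add x0, x1, #1, lsl #12" or the "uxtb" in "add x0, x1, w2, uxtb".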
AArch64AsmParser::OperandMatchResultTy
AArch64AsmParser::ParseShiftExtend(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  StringRef IDVal = Parser.getTok().getIdentifier();
  std::string LowerID = IDVal.lower();

  A64SE::ShiftExtSpecifiers Spec =
      StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
        .Case("lsl", A64SE::LSL)
        .Case("msl", A64SE::MSL)
        .Case("lsr", A64SE::LSR)
        .Case("asr", A64SE::ASR)
        .Case("ror", A64SE::ROR)
        .Case("uxtb", A64SE::UXTB)
        .Case("uxth", A64SE::UXTH)
        .Case("uxtw", A64SE::UXTW)
        .Case("uxtx", A64SE::UXTX)
        .Case("sxtb", A64SE::SXTB)
        .Case("sxth", A64SE::SXTH)
        .Case("sxtw", A64SE::SXTW)
        .Case("sxtx", A64SE::SXTX)
        .Default(A64SE::Invalid);

  if (Spec == A64SE::Invalid)
    return MatchOperand_NoMatch;

  // Eat the shift
  SMLoc S, E;
  S = Parser.getTok().getLoc();
  Parser.Lex();

  if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
      Spec != A64SE::ROR && Spec != A64SE::MSL) {
    // The shift amount can be omitted for the extending versions, but not real
    // shifts:
    //     add x0, x0, x0, uxtb
    // is valid, and equivalent to
    //     add x0, x0, x0, uxtb #0

    if (Parser.getTok().is(AsmToken::Comma) ||
        Parser.getTok().is(AsmToken::EndOfStatement) ||
        Parser.getTok().is(AsmToken::RBrac)) {
      Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
                                                           S, E));
      return MatchOperand_Success;
    }
  }

  // Eat # at beginning of immediate
  if (!Parser.getTok().is(AsmToken::Hash)) {
    Error(Parser.getTok().getLoc(),
          "expected #imm after shift specifier");
    return MatchOperand_ParseFail;
  }
  Parser.Lex();

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;
  }
  unsigned Amount = Parser.getTok().getIntVal();
  Parser.Lex();
  E = Parser.getTok().getLoc();

  Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
                                                       S, E));

  return MatchOperand_Success;
}

/// Try to parse a vector register token. The token is consumed either way;
/// returns true if it was a valid vector register, and false (after emitting
/// an error) otherwise.
bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
                                      StringRef &Layout, SMLoc &LayoutLoc) {
  bool IsVector = true;

  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
    IsVector = false;
  else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
                .contains(RegNum) &&
           !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
                .contains(RegNum))
    IsVector = false;
  else if (Layout.size() == 0)
    IsVector = false;

  if (!IsVector)
    Error(Parser.getTok().getLoc(), "expected vector type register");

  Parser.Lex(); // Eat this token.
  return IsVector;
}


// A vector list contains 1-4 consecutive registers.
// When more than one register is listed, two forms are accepted:
//   (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
//   (2) {Vn.layout - Vm.layout}
// If the layout is one of .b/.h/.s/.d, a lane index may also follow.
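// For example, both of the following denote the same four-register list:
//   ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0]
//   ld4 {v0.4s - v3.4s}, [x0]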
AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
    SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
  if (Parser.getTok().isNot(AsmToken::LCurly)) {
    Error(Parser.getTok().getLoc(), "'{' expected");
    return MatchOperand_ParseFail;
  }
  SMLoc SLoc = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.

  unsigned Reg, Count = 1;
  StringRef LayoutStr;
  SMLoc RegEndLoc, LayoutLoc;
  if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
    return MatchOperand_ParseFail;

  if (Parser.getTok().is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the minus.

    unsigned Reg2;
    StringRef LayoutStr2;
    SMLoc RegEndLoc2, LayoutLoc2;
    SMLoc RegLoc2 = Parser.getTok().getLoc();

    if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
      return MatchOperand_ParseFail;
    unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);

    if (LayoutStr != LayoutStr2) {
      Error(LayoutLoc2, "expected the same vector layout");
      return MatchOperand_ParseFail;
    }
    if (Space == 0 || Space > 3) {
      Error(RegLoc2, "invalid number of vectors");
      return MatchOperand_ParseFail;
    }

    Count += Space;
  } else {
    unsigned LastReg = Reg;
    while (Parser.getTok().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.
      unsigned Reg2;
      StringRef LayoutStr2;
      SMLoc RegEndLoc2, LayoutLoc2;
      SMLoc RegLoc2 = Parser.getTok().getLoc();

      if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
        return MatchOperand_ParseFail;
      unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
                                        : (Reg2 + 32 - LastReg);
      Count++;

      // Consecutive vectors must be exactly one register apart and share the
      // same layout, and the total count must not exceed 4.
      if (Space != 1) {
        Error(RegLoc2, "invalid space between two vectors");
        return MatchOperand_ParseFail;
      }
      if (LayoutStr != LayoutStr2) {
        Error(LayoutLoc2, "expected the same vector layout");
        return MatchOperand_ParseFail;
      }
      if (Count > 4) {
        Error(RegLoc2, "invalid number of vectors");
        return MatchOperand_ParseFail;
      }

      LastReg = Reg2;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  SMLoc ELoc = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '}' token.

  A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
  if (Count > 1) { // If count > 1, create vector list using super register.
    bool IsVec64 = (Layout < A64Layout::_16B);
    static unsigned SupRegIDs[3][2] = {
      { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
      { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
      { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
    };
    unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
    unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
    const MCRegisterInfo *MRI = getContext().getRegisterInfo();
    Reg = MRI->getMatchingSuperReg(Reg, Sub0,
                                   &AArch64MCRegisterClasses[SupRegID]);
  }
  Operands.push_back(
      AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));

  if (Parser.getTok().is(AsmToken::LBrac)) {
    uint32_t NumLanes = 0;
    switch (Layout) {
    case A64Layout::_B: NumLanes = 16; break;
    case A64Layout::_H: NumLanes = 8; break;
    case A64Layout::_S: NumLanes = 4; break;
    case A64Layout::_D: NumLanes = 2; break;
    default:
      SMLoc Loc = getLexer().getLoc();
      Error(Loc, "expected comma before next operand");
      return MatchOperand_ParseFail;
    }
    return ParseNEONLane(Operands, NumLanes);
  } else {
    return MatchOperand_Success;
  }
}

// FIXME: We would really like to be able to tablegen'erate this.
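// For example, "ubfiz w0, w1, #31, #2" reaches here with ImmR == 1 and
// ImmS == 1 (assuming the operands follow the underlying UBFM encoding), and
// is rejected below because lsb + width would exceed the 32-bit register.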
bool AArch64AsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  case AArch64::BFIwwii:
  case AArch64::BFIxxii:
  case AArch64::SBFIZwwii:
  case AArch64::SBFIZxxii:
  case AArch64::UBFIZwwii:
  case AArch64::UBFIZxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();

    if (ImmR != 0 && ImmS >= ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested insert overflows register");
    }
    return false;
  }
  case AArch64::BFXILwwii:
  case AArch64::BFXILxxii:
  case AArch64::SBFXwwii:
  case AArch64::SBFXxxii:
  case AArch64::UBFXwwii:
  case AArch64::UBFXxxii: {
    unsigned ImmOps = Inst.getNumOperands() - 2;
    int64_t ImmR = Inst.getOperand(ImmOps).getImm();
    int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
    int64_t RegWidth = 0;
    switch (Inst.getOpcode()) {
    case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
      RegWidth = 64;
      break;
    case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
      RegWidth = 32;
      break;
    }

    if (ImmS >= RegWidth || ImmS < ImmR) {
      return Error(Operands[4]->getStartLoc(),
                   "requested extract overflows register");
    }
    return false;
  }
  case AArch64::ICix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (!A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op does not use a register");
    }
    return false;
  }
  case AArch64::ICi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
    if (A64IC::NeedsRegister(ICOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified IC op requires a register");
    }
    return false;
  }
  case AArch64::TLBIix: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (!A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op does not use a register");
    }
    return false;
  }
  case AArch64::TLBIi: {
    int64_t ImmVal = Inst.getOperand(0).getImm();
    A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
    if (A64TLBI::NeedsRegister(TLBIOp)) {
      return Error(Operands[1]->getStartLoc(),
                   "specified TLBI op requires a register");
    }
    return false;
  }
  }

  return false;
}


// Parses the instruction *together with* all operands, appending each parsed
// operand to the "Operands" list.
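// For example, "b.ne .Ltarget" arrives with Name == "b.ne": the mnemonic is
// split at the '.' into a "b" token, a "." token and a condition-code operand
// before the remaining operands are parsed.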
bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                        StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  size_t CondCodePos = Name.find('.');

  StringRef Mnemonic = Name.substr(0, CondCodePos);
  Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));

  if (CondCodePos != StringRef::npos) {
    // We have a condition code
    SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
    StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
    A64CC::CondCodes Code;

    Code = A64StringToCondCode(CondStr);

    if (Code == A64CC::Invalid) {
      Error(S, "invalid condition code");
      Parser.eatToEndOfStatement();
      return true;
    }

    SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);

    Operands.push_back(AArch64Operand::CreateToken(".", DotL));
    SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
    Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
  }

  // Now we parse the operands of this instruction
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (ParseOperand(Operands, Mnemonic)) {
      Parser.eatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (ParseOperand(Operands, Mnemonic)) {
        Parser.eatToEndOfStatement();
        return true;
      }

      // After successfully parsing some operands there are two special cases to
      // consider (i.e. notional operands not separated by commas). Both are due
      // to memory specifiers:
      //  + An RBrac will end an address for load/store/prefetch
      //  + An '!' will indicate a pre-indexed operation.
      //
      // It's someone else's responsibility to make sure these tokens are sane
      // in the given context!
      if (Parser.getTok().is(AsmToken::RBrac)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("]", Loc));
        Parser.Lex();
      }

      if (Parser.getTok().is(AsmToken::Exclaim)) {
        SMLoc Loc = Parser.getTok().getLoc();
        Operands.push_back(AArch64Operand::CreateToken("!", Loc));
        Parser.Lex();
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.eatToEndOfStatement();
    return Error(Loc, "expected comma before next operand");
  }

  // Eat the EndOfStatement
  Parser.Lex();

  return false;
}

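// Dispatches target-specific directives; e.g. ".hword 0x1234" emits a
// two-byte value. Returning true for anything unrecognised hands the
// directive back to the generic parser.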
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".hword")
    return ParseDirectiveWord(2, DirectiveID.getLoc());
  else if (IDVal == ".word")
    return ParseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".xword")
    return ParseDirectiveWord(8, DirectiveID.getLoc());
  else if (IDVal == ".tlsdesccall")
    return ParseDirectiveTLSDescCall(DirectiveID.getLoc());

  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

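// For example, a TLS descriptor access ends with:
//   .tlsdesccall var
//   blr x1
// where the directive emits a marker instruction carrying the relocation for
// the following indirect call.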
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");

  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());

  MCInst Inst;
  Inst.setOpcode(AArch64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));

  getParser().getStreamer().EmitInstruction(Inst);
  return false;
}


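// Drives matching and emission: the tablegen'erated MatchInstructionImpl
// selects an encoding, validateInstruction (above) applies the extra checks,
// and any failure is mapped to one of the diagnostics below.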
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                 MCStreamer &Out, unsigned &ErrorInfo,
                                 bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
                                     MatchingInlineAsm);

  if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
    return Error(IDLoc, "too few operands for instruction");

  switch (MatchResult) {
  default: break;
  case Match_Success:
    if (validateInstruction(Inst, Operands))
      return true;

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");

  case Match_AddSubRegExtendSmall:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegShift32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
       "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_AddSubSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
        "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_CVTFixedPos32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_CVTFixedPos64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_CondCode:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected AArch64 condition code");
  case Match_FPImm:
    // Any situation which allows a nontrivial floating-point constant also
    // allows a register.
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or floating-point constant");
  case Match_FPZero:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected floating-point constant #0.0 or invalid register type");
  case Match_Label:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected label or encodable integer pc offset");
  case Match_Lane1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected lane specifier '[1]'");
  case Match_LoadStoreExtend32_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_LoadStoreExtend32_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_LoadStoreExtend32_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_LoadStoreExtend32_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_LoadStoreExtend32_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
  case Match_LoadStoreExtend64_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_LoadStoreExtend64_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_LoadStoreExtend64_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_LoadStoreExtend64_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_LoadStoreExtend64_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_LoadStoreSImm7_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 4 in range [-256, 252]");
  case Match_LoadStoreSImm7_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 8 in range [-512, 508]");
  case Match_LoadStoreSImm7_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer multiple of 16 in range [-1024, 1016]");
  case Match_LoadStoreSImm9:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [-256, 255]");
  case Match_LoadStoreUImm12_1:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 4095]");
  case Match_LoadStoreUImm12_2:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 8190]");
  case Match_LoadStoreUImm12_4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 16380]");
  case Match_LoadStoreUImm12_8:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 32760]");
  case Match_LoadStoreUImm12_16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic reference or integer in range [0, 65520]");
  case Match_LogicalSecondSource:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected compatible register or logical immediate");
  case Match_MOVWUImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected relocated symbol or integer in range [0, 65535]");
  case Match_MRS:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected readable system register");
  case Match_MSR:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected writable system register or pstate");
  case Match_NamedImm_at:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
  case Match_NamedImm_dbarrier:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
             "expected integer in range [0, 15] or symbolic barrier operand");
  case Match_NamedImm_dc:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected symbolic 'dc' operand");
  case Match_NamedImm_ic:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
  case Match_NamedImm_isb:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15] or 'sy'");
  case Match_NamedImm_prefetch:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
  case Match_NamedImm_tlbi:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected translation buffer invalidation operand");
  case Match_UImm16:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 65535]");
  case Match_UImm3:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_UImm4:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_UImm5:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_UImm6:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  case Match_UImm7:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 127]");
  case Match_Width32:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 31]");
  case Match_Width64:
    return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [<lsb>, 63]");
  case Match_ShrImm8:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 8]");
  case Match_ShrImm16:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 16]");
  case Match_ShrImm32:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 32]");
  case Match_ShrImm64:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [1, 64]");
  case Match_ShlImm8:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 7]");
  case Match_ShlImm16:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 15]");
  case Match_ShlImm32:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 31]");
  case Match_ShlImm64:
    return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
                 "expected integer in range [0, 63]");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

void AArch64Operand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<CondCode: " << CondCode.Code << ">";
    break;
  case k_FPImmediate:
    OS << "<fpimm: " << FPImm.Val << ">";
    break;
  case k_ImmWithLSL:
    OS << "<immwithlsl: imm=" << ImmWithLSL.Val
       << ", shift=" << ImmWithLSL.ShiftAmount << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_Register:
    OS << "<register " << getReg() << '>';
    break;
  case k_Token:
    OS << '\'' << getToken() << '\'';
    break;
  case k_ShiftExtend:
    OS << "<shift: type=" << ShiftExtend.ShiftType
       << ", amount=" << ShiftExtend.Amount << ">";
    break;
  case k_SysReg: {
    StringRef Name(SysReg.Data, SysReg.Length);
    OS << "<sysreg: " << Name << '>';
    break;
  }
  default:
    llvm_unreachable("No idea how to print this kind of operand");
    break;
  }
}

void AArch64Operand::dump() const {
  print(errs());
}


/// Force static initialization.
extern "C" void LLVMInitializeAArch64AsmParser() {
  RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AArch64GenAsmMatcher.inc"
