ARMAsmParser.cpp revision cdd776d13f799da1aff4b2c9c58a236bee74ea2e
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "ARMFeatures.h"
11#include "llvm/MC/MCTargetAsmParser.h"
12#include "MCTargetDesc/ARMAddressingModes.h"
13#include "MCTargetDesc/ARMBaseInfo.h"
14#include "MCTargetDesc/ARMMCExpr.h"
15#include "llvm/ADT/BitVector.h"
16#include "llvm/ADT/OwningPtr.h"
17#include "llvm/ADT/STLExtras.h"
18#include "llvm/ADT/SmallVector.h"
19#include "llvm/ADT/StringSwitch.h"
20#include "llvm/ADT/Twine.h"
21#include "llvm/MC/MCAsmInfo.h"
22#include "llvm/MC/MCAssembler.h"
23#include "llvm/MC/MCContext.h"
24#include "llvm/MC/MCELFStreamer.h"
25#include "llvm/MC/MCExpr.h"
26#include "llvm/MC/MCInst.h"
27#include "llvm/MC/MCInstrDesc.h"
28#include "llvm/MC/MCInstrInfo.h"
29#include "llvm/MC/MCParser/MCAsmLexer.h"
30#include "llvm/MC/MCParser/MCAsmParser.h"
31#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
32#include "llvm/MC/MCRegisterInfo.h"
33#include "llvm/MC/MCStreamer.h"
34#include "llvm/MC/MCSubtargetInfo.h"
35#include "llvm/Support/ELF.h"
36#include "llvm/Support/MathExtras.h"
37#include "llvm/Support/SourceMgr.h"
38#include "llvm/Support/TargetRegistry.h"
39#include "llvm/Support/raw_ostream.h"
40
41using namespace llvm;
42
43namespace {
44
45class ARMOperand;
46
47enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
48
49class ARMAsmParser : public MCTargetAsmParser {
50  MCSubtargetInfo &STI;
51  MCAsmParser &Parser;
52  const MCInstrInfo &MII;
53  const MCRegisterInfo *MRI;
54
55  ARMTargetStreamer &getTargetStreamer() {
56    MCTargetStreamer &TS = getParser().getStreamer().getTargetStreamer();
57    return static_cast<ARMTargetStreamer &>(TS);
58  }
59
60  // Unwind directives state
61  SMLoc FnStartLoc;
62  SMLoc CantUnwindLoc;
63  SMLoc PersonalityLoc;
64  SMLoc HandlerDataLoc;
65  int FPReg;
66  void resetUnwindDirectiveParserState() {
67    FnStartLoc = SMLoc();
68    CantUnwindLoc = SMLoc();
69    PersonalityLoc = SMLoc();
70    HandlerDataLoc = SMLoc();
71    FPReg = -1;
72  }
73
74  // Map of register aliases registers via the .req directive.
75  StringMap<unsigned> RegisterReqs;
76
77  struct {
78    ARMCC::CondCodes Cond;    // Condition for IT block.
79    unsigned Mask:4;          // Condition mask for instructions.
80                              // Starting at first 1 (from lsb).
81                              //   '1'  condition as indicated in IT.
82                              //   '0'  inverse of condition (else).
83                              // Count of instructions in IT block is
84                              // 4 - trailingzeroes(mask)
85
86    bool FirstCond;           // Explicit flag for when we're parsing the
87                              // First instruction in the IT block. It's
88                              // implied in the mask, so needs special
89                              // handling.
90
91    unsigned CurPosition;     // Current position in parsing of IT
92                              // block. In range [0,3]. Initialized
93                              // according to count of instructions in block.
94                              // ~0U if no active IT block.
95  } ITState;
96  bool inITBlock() { return ITState.CurPosition != ~0U;}
97  void forwardITPosition() {
98    if (!inITBlock()) return;
99    // Move to the next instruction in the IT block, if there is one. If not,
100    // mark the block as done.
101    unsigned TZ = countTrailingZeros(ITState.Mask);
102    if (++ITState.CurPosition == 5 - TZ)
103      ITState.CurPosition = ~0U; // Done with the IT block after this.
104  }
105
106
107  MCAsmParser &getParser() const { return Parser; }
108  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
109
110  bool Warning(SMLoc L, const Twine &Msg,
111               ArrayRef<SMRange> Ranges = None) {
112    return Parser.Warning(L, Msg, Ranges);
113  }
114  bool Error(SMLoc L, const Twine &Msg,
115             ArrayRef<SMRange> Ranges = None) {
116    return Parser.Error(L, Msg, Ranges);
117  }
118
119  int tryParseRegister();
120  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
121  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
122  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
123  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
124  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
125  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
126  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
127                              unsigned &ShiftAmount);
128  bool parseDirectiveWord(unsigned Size, SMLoc L);
129  bool parseDirectiveThumb(SMLoc L);
130  bool parseDirectiveARM(SMLoc L);
131  bool parseDirectiveThumbFunc(SMLoc L);
132  bool parseDirectiveCode(SMLoc L);
133  bool parseDirectiveSyntax(SMLoc L);
134  bool parseDirectiveReq(StringRef Name, SMLoc L);
135  bool parseDirectiveUnreq(SMLoc L);
136  bool parseDirectiveArch(SMLoc L);
137  bool parseDirectiveEabiAttr(SMLoc L);
138  bool parseDirectiveFnStart(SMLoc L);
139  bool parseDirectiveFnEnd(SMLoc L);
140  bool parseDirectiveCantUnwind(SMLoc L);
141  bool parseDirectivePersonality(SMLoc L);
142  bool parseDirectiveHandlerData(SMLoc L);
143  bool parseDirectiveSetFP(SMLoc L);
144  bool parseDirectivePad(SMLoc L);
145  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
146
147  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
148                          bool &CarrySetting, unsigned &ProcessorIMod,
149                          StringRef &ITMask);
150  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
151                             bool &CanAcceptCarrySet,
152                             bool &CanAcceptPredicationCode);
153
154  bool isThumb() const {
155    // FIXME: Can tablegen auto-generate this?
156    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
157  }
158  bool isThumbOne() const {
159    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
160  }
161  bool isThumbTwo() const {
162    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
163  }
164  bool hasThumb() const {
165    return STI.getFeatureBits() & ARM::HasV4TOps;
166  }
167  bool hasV6Ops() const {
168    return STI.getFeatureBits() & ARM::HasV6Ops;
169  }
170  bool hasV6MOps() const {
171    return STI.getFeatureBits() & ARM::HasV6MOps;
172  }
173  bool hasV7Ops() const {
174    return STI.getFeatureBits() & ARM::HasV7Ops;
175  }
176  bool hasV8Ops() const {
177    return STI.getFeatureBits() & ARM::HasV8Ops;
178  }
179  bool hasARM() const {
180    return !(STI.getFeatureBits() & ARM::FeatureNoARM);
181  }
182
183  void SwitchMode() {
184    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
185    setAvailableFeatures(FB);
186  }
187  bool isMClass() const {
188    return STI.getFeatureBits() & ARM::FeatureMClass;
189  }
190
191  /// @name Auto-generated Match Functions
192  /// {
193
194#define GET_ASSEMBLER_HEADER
195#include "ARMGenAsmMatcher.inc"
196
197  /// }
198
199  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
200  OperandMatchResultTy parseCoprocNumOperand(
201    SmallVectorImpl<MCParsedAsmOperand*>&);
202  OperandMatchResultTy parseCoprocRegOperand(
203    SmallVectorImpl<MCParsedAsmOperand*>&);
204  OperandMatchResultTy parseCoprocOptionOperand(
205    SmallVectorImpl<MCParsedAsmOperand*>&);
206  OperandMatchResultTy parseMemBarrierOptOperand(
207    SmallVectorImpl<MCParsedAsmOperand*>&);
208  OperandMatchResultTy parseInstSyncBarrierOptOperand(
209    SmallVectorImpl<MCParsedAsmOperand*>&);
210  OperandMatchResultTy parseProcIFlagsOperand(
211    SmallVectorImpl<MCParsedAsmOperand*>&);
212  OperandMatchResultTy parseMSRMaskOperand(
213    SmallVectorImpl<MCParsedAsmOperand*>&);
214  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
215                                   StringRef Op, int Low, int High);
216  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
217    return parsePKHImm(O, "lsl", 0, 31);
218  }
219  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
220    return parsePKHImm(O, "asr", 1, 32);
221  }
222  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
223  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
224  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
225  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
226  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
227  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
228  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
229  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
230  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
231                                       SMLoc &EndLoc);
232
233  // Asm Match Converter Methods
234  void cvtThumbMultiply(MCInst &Inst,
235                        const SmallVectorImpl<MCParsedAsmOperand*> &);
236  void cvtThumbBranches(MCInst &Inst,
237                        const SmallVectorImpl<MCParsedAsmOperand*> &);
238
239  bool validateInstruction(MCInst &Inst,
240                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
241  bool processInstruction(MCInst &Inst,
242                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
243  bool shouldOmitCCOutOperand(StringRef Mnemonic,
244                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
245  bool shouldOmitPredicateOperand(StringRef Mnemonic,
246                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
247public:
248  enum ARMMatchResultTy {
249    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
250    Match_RequiresNotITBlock,
251    Match_RequiresV6,
252    Match_RequiresThumb2,
253#define GET_OPERAND_DIAGNOSTIC_TYPES
254#include "ARMGenAsmMatcher.inc"
255
256  };
257
258  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
259               const MCInstrInfo &MII)
260      : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), FPReg(-1) {
261    MCAsmParserExtension::Initialize(_Parser);
262
263    // Cache the MCRegisterInfo.
264    MRI = getContext().getRegisterInfo();
265
266    // Initialize the set of available features.
267    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
268
269    // Not in an ITBlock to start with.
270    ITState.CurPosition = ~0U;
271  }
272
273  // Implementation of the MCTargetAsmParser interface:
274  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
275  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
276                        SMLoc NameLoc,
277                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
278  bool ParseDirective(AsmToken DirectiveID);
279
280  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
281  unsigned checkTargetMatchPredicate(MCInst &Inst);
282
283  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
284                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
285                               MCStreamer &Out, unsigned &ErrorInfo,
286                               bool MatchingInlineAsm);
287};
288} // end anonymous namespace
289
290namespace {
291
292/// ARMOperand - Instances of this class represent a parsed ARM machine
293/// operand.
294class ARMOperand : public MCParsedAsmOperand {
295  enum KindTy {
296    k_CondCode,
297    k_CCOut,
298    k_ITCondMask,
299    k_CoprocNum,
300    k_CoprocReg,
301    k_CoprocOption,
302    k_Immediate,
303    k_MemBarrierOpt,
304    k_InstSyncBarrierOpt,
305    k_Memory,
306    k_PostIndexRegister,
307    k_MSRMask,
308    k_ProcIFlags,
309    k_VectorIndex,
310    k_Register,
311    k_RegisterList,
312    k_DPRRegisterList,
313    k_SPRRegisterList,
314    k_VectorList,
315    k_VectorListAllLanes,
316    k_VectorListIndexed,
317    k_ShiftedRegister,
318    k_ShiftedImmediate,
319    k_ShifterImmediate,
320    k_RotateImmediate,
321    k_BitfieldDescriptor,
322    k_Token
323  } Kind;
324
325  SMLoc StartLoc, EndLoc;
326  SmallVector<unsigned, 8> Registers;
327
328  struct CCOp {
329    ARMCC::CondCodes Val;
330  };
331
332  struct CopOp {
333    unsigned Val;
334  };
335
336  struct CoprocOptionOp {
337    unsigned Val;
338  };
339
340  struct ITMaskOp {
341    unsigned Mask:4;
342  };
343
344  struct MBOptOp {
345    ARM_MB::MemBOpt Val;
346  };
347
348  struct ISBOptOp {
349    ARM_ISB::InstSyncBOpt Val;
350  };
351
352  struct IFlagsOp {
353    ARM_PROC::IFlags Val;
354  };
355
356  struct MMaskOp {
357    unsigned Val;
358  };
359
360  struct TokOp {
361    const char *Data;
362    unsigned Length;
363  };
364
365  struct RegOp {
366    unsigned RegNum;
367  };
368
369  // A vector register list is a sequential list of 1 to 4 registers.
370  struct VectorListOp {
371    unsigned RegNum;
372    unsigned Count;
373    unsigned LaneIndex;
374    bool isDoubleSpaced;
375  };
376
377  struct VectorIndexOp {
378    unsigned Val;
379  };
380
381  struct ImmOp {
382    const MCExpr *Val;
383  };
384
385  /// Combined record for all forms of ARM address expressions.
386  struct MemoryOp {
387    unsigned BaseRegNum;
388    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
389    // was specified.
390    const MCConstantExpr *OffsetImm;  // Offset immediate value
391    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
392    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
393    unsigned ShiftImm;        // shift for OffsetReg.
394    unsigned Alignment;       // 0 = no alignment specified
395    // n = alignment in bytes (2, 4, 8, 16, or 32)
396    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
397  };
398
399  struct PostIdxRegOp {
400    unsigned RegNum;
401    bool isAdd;
402    ARM_AM::ShiftOpc ShiftTy;
403    unsigned ShiftImm;
404  };
405
406  struct ShifterImmOp {
407    bool isASR;
408    unsigned Imm;
409  };
410
411  struct RegShiftedRegOp {
412    ARM_AM::ShiftOpc ShiftTy;
413    unsigned SrcReg;
414    unsigned ShiftReg;
415    unsigned ShiftImm;
416  };
417
418  struct RegShiftedImmOp {
419    ARM_AM::ShiftOpc ShiftTy;
420    unsigned SrcReg;
421    unsigned ShiftImm;
422  };
423
424  struct RotImmOp {
425    unsigned Imm;
426  };
427
428  struct BitfieldOp {
429    unsigned LSB;
430    unsigned Width;
431  };
432
433  union {
434    struct CCOp CC;
435    struct CopOp Cop;
436    struct CoprocOptionOp CoprocOption;
437    struct MBOptOp MBOpt;
438    struct ISBOptOp ISBOpt;
439    struct ITMaskOp ITMask;
440    struct IFlagsOp IFlags;
441    struct MMaskOp MMask;
442    struct TokOp Tok;
443    struct RegOp Reg;
444    struct VectorListOp VectorList;
445    struct VectorIndexOp VectorIndex;
446    struct ImmOp Imm;
447    struct MemoryOp Memory;
448    struct PostIdxRegOp PostIdxReg;
449    struct ShifterImmOp ShifterImm;
450    struct RegShiftedRegOp RegShiftedReg;
451    struct RegShiftedImmOp RegShiftedImm;
452    struct RotImmOp RotImm;
453    struct BitfieldOp Bitfield;
454  };
455
456  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
457public:
458  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
459    Kind = o.Kind;
460    StartLoc = o.StartLoc;
461    EndLoc = o.EndLoc;
462    switch (Kind) {
463    case k_CondCode:
464      CC = o.CC;
465      break;
466    case k_ITCondMask:
467      ITMask = o.ITMask;
468      break;
469    case k_Token:
470      Tok = o.Tok;
471      break;
472    case k_CCOut:
473    case k_Register:
474      Reg = o.Reg;
475      break;
476    case k_RegisterList:
477    case k_DPRRegisterList:
478    case k_SPRRegisterList:
479      Registers = o.Registers;
480      break;
481    case k_VectorList:
482    case k_VectorListAllLanes:
483    case k_VectorListIndexed:
484      VectorList = o.VectorList;
485      break;
486    case k_CoprocNum:
487    case k_CoprocReg:
488      Cop = o.Cop;
489      break;
490    case k_CoprocOption:
491      CoprocOption = o.CoprocOption;
492      break;
493    case k_Immediate:
494      Imm = o.Imm;
495      break;
496    case k_MemBarrierOpt:
497      MBOpt = o.MBOpt;
498      break;
499    case k_InstSyncBarrierOpt:
500      ISBOpt = o.ISBOpt;
501    case k_Memory:
502      Memory = o.Memory;
503      break;
504    case k_PostIndexRegister:
505      PostIdxReg = o.PostIdxReg;
506      break;
507    case k_MSRMask:
508      MMask = o.MMask;
509      break;
510    case k_ProcIFlags:
511      IFlags = o.IFlags;
512      break;
513    case k_ShifterImmediate:
514      ShifterImm = o.ShifterImm;
515      break;
516    case k_ShiftedRegister:
517      RegShiftedReg = o.RegShiftedReg;
518      break;
519    case k_ShiftedImmediate:
520      RegShiftedImm = o.RegShiftedImm;
521      break;
522    case k_RotateImmediate:
523      RotImm = o.RotImm;
524      break;
525    case k_BitfieldDescriptor:
526      Bitfield = o.Bitfield;
527      break;
528    case k_VectorIndex:
529      VectorIndex = o.VectorIndex;
530      break;
531    }
532  }
533
534  /// getStartLoc - Get the location of the first token of this operand.
535  SMLoc getStartLoc() const { return StartLoc; }
536  /// getEndLoc - Get the location of the last token of this operand.
537  SMLoc getEndLoc() const { return EndLoc; }
538  /// getLocRange - Get the range between the first and last token of this
539  /// operand.
540  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
541
542  ARMCC::CondCodes getCondCode() const {
543    assert(Kind == k_CondCode && "Invalid access!");
544    return CC.Val;
545  }
546
547  unsigned getCoproc() const {
548    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
549    return Cop.Val;
550  }
551
552  StringRef getToken() const {
553    assert(Kind == k_Token && "Invalid access!");
554    return StringRef(Tok.Data, Tok.Length);
555  }
556
557  unsigned getReg() const {
558    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
559    return Reg.RegNum;
560  }
561
562  const SmallVectorImpl<unsigned> &getRegList() const {
563    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
564            Kind == k_SPRRegisterList) && "Invalid access!");
565    return Registers;
566  }
567
568  const MCExpr *getImm() const {
569    assert(isImm() && "Invalid access!");
570    return Imm.Val;
571  }
572
573  unsigned getVectorIndex() const {
574    assert(Kind == k_VectorIndex && "Invalid access!");
575    return VectorIndex.Val;
576  }
577
578  ARM_MB::MemBOpt getMemBarrierOpt() const {
579    assert(Kind == k_MemBarrierOpt && "Invalid access!");
580    return MBOpt.Val;
581  }
582
583  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
584    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
585    return ISBOpt.Val;
586  }
587
588  ARM_PROC::IFlags getProcIFlags() const {
589    assert(Kind == k_ProcIFlags && "Invalid access!");
590    return IFlags.Val;
591  }
592
593  unsigned getMSRMask() const {
594    assert(Kind == k_MSRMask && "Invalid access!");
595    return MMask.Val;
596  }
597
598  bool isCoprocNum() const { return Kind == k_CoprocNum; }
599  bool isCoprocReg() const { return Kind == k_CoprocReg; }
600  bool isCoprocOption() const { return Kind == k_CoprocOption; }
601  bool isCondCode() const { return Kind == k_CondCode; }
602  bool isCCOut() const { return Kind == k_CCOut; }
603  bool isITMask() const { return Kind == k_ITCondMask; }
604  bool isITCondCode() const { return Kind == k_CondCode; }
605  bool isImm() const { return Kind == k_Immediate; }
606  // checks whether this operand is an unsigned offset which fits is a field
607  // of specified width and scaled by a specific number of bits
608  template<unsigned width, unsigned scale>
609  bool isUnsignedOffset() const {
610    if (!isImm()) return false;
611    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
612    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
613      int64_t Val = CE->getValue();
614      int64_t Align = 1LL << scale;
615      int64_t Max = Align * ((1LL << width) - 1);
616      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
617    }
618    return false;
619  }
620  // checks whether this operand is an signed offset which fits is a field
621  // of specified width and scaled by a specific number of bits
622  template<unsigned width, unsigned scale>
623  bool isSignedOffset() const {
624    if (!isImm()) return false;
625    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
626    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
627      int64_t Val = CE->getValue();
628      int64_t Align = 1LL << scale;
629      int64_t Max = Align * ((1LL << (width-1)) - 1);
630      int64_t Min = -Align * (1LL << (width-1));
631      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
632    }
633    return false;
634  }
635
636  // checks whether this operand is a memory operand computed as an offset
637  // applied to PC. the offset may have 8 bits of magnitude and is represented
638  // with two bits of shift. textually it may be either [pc, #imm], #imm or
639  // relocable expression...
640  bool isThumbMemPC() const {
641    int64_t Val = 0;
642    if (isImm()) {
643      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
644      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
645      if (!CE) return false;
646      Val = CE->getValue();
647    }
648    else if (isMem()) {
649      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
650      if(Memory.BaseRegNum != ARM::PC) return false;
651      Val = Memory.OffsetImm->getValue();
652    }
653    else return false;
654    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
655  }
656  bool isFPImm() const {
657    if (!isImm()) return false;
658    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
659    if (!CE) return false;
660    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
661    return Val != -1;
662  }
663  bool isFBits16() const {
664    if (!isImm()) return false;
665    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
666    if (!CE) return false;
667    int64_t Value = CE->getValue();
668    return Value >= 0 && Value <= 16;
669  }
670  bool isFBits32() const {
671    if (!isImm()) return false;
672    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
673    if (!CE) return false;
674    int64_t Value = CE->getValue();
675    return Value >= 1 && Value <= 32;
676  }
677  bool isImm8s4() const {
678    if (!isImm()) return false;
679    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
680    if (!CE) return false;
681    int64_t Value = CE->getValue();
682    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
683  }
684  bool isImm0_1020s4() const {
685    if (!isImm()) return false;
686    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
687    if (!CE) return false;
688    int64_t Value = CE->getValue();
689    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
690  }
691  bool isImm0_508s4() const {
692    if (!isImm()) return false;
693    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
694    if (!CE) return false;
695    int64_t Value = CE->getValue();
696    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
697  }
698  bool isImm0_508s4Neg() const {
699    if (!isImm()) return false;
700    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
701    if (!CE) return false;
702    int64_t Value = -CE->getValue();
703    // explicitly exclude zero. we want that to use the normal 0_508 version.
704    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
705  }
706  bool isImm0_239() const {
707    if (!isImm()) return false;
708    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
709    if (!CE) return false;
710    int64_t Value = CE->getValue();
711    return Value >= 0 && Value < 240;
712  }
713  bool isImm0_255() const {
714    if (!isImm()) return false;
715    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
716    if (!CE) return false;
717    int64_t Value = CE->getValue();
718    return Value >= 0 && Value < 256;
719  }
720  bool isImm0_4095() const {
721    if (!isImm()) return false;
722    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
723    if (!CE) return false;
724    int64_t Value = CE->getValue();
725    return Value >= 0 && Value < 4096;
726  }
727  bool isImm0_4095Neg() const {
728    if (!isImm()) return false;
729    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
730    if (!CE) return false;
731    int64_t Value = -CE->getValue();
732    return Value > 0 && Value < 4096;
733  }
734  bool isImm0_1() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value < 2;
740  }
741  bool isImm0_3() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value >= 0 && Value < 4;
747  }
748  bool isImm0_7() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value >= 0 && Value < 8;
754  }
755  bool isImm0_15() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value >= 0 && Value < 16;
761  }
762  bool isImm0_31() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return Value >= 0 && Value < 32;
768  }
769  bool isImm0_63() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return Value >= 0 && Value < 64;
775  }
776  bool isImm8() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return Value == 8;
782  }
783  bool isImm16() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return Value == 16;
789  }
790  bool isImm32() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return Value == 32;
796  }
797  bool isShrImm8() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return Value > 0 && Value <= 8;
803  }
804  bool isShrImm16() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value > 0 && Value <= 16;
810  }
811  bool isShrImm32() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = CE->getValue();
816    return Value > 0 && Value <= 32;
817  }
818  bool isShrImm64() const {
819    if (!isImm()) return false;
820    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
821    if (!CE) return false;
822    int64_t Value = CE->getValue();
823    return Value > 0 && Value <= 64;
824  }
825  bool isImm1_7() const {
826    if (!isImm()) return false;
827    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828    if (!CE) return false;
829    int64_t Value = CE->getValue();
830    return Value > 0 && Value < 8;
831  }
832  bool isImm1_15() const {
833    if (!isImm()) return false;
834    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
835    if (!CE) return false;
836    int64_t Value = CE->getValue();
837    return Value > 0 && Value < 16;
838  }
839  bool isImm1_31() const {
840    if (!isImm()) return false;
841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
842    if (!CE) return false;
843    int64_t Value = CE->getValue();
844    return Value > 0 && Value < 32;
845  }
846  bool isImm1_16() const {
847    if (!isImm()) return false;
848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
849    if (!CE) return false;
850    int64_t Value = CE->getValue();
851    return Value > 0 && Value < 17;
852  }
853  bool isImm1_32() const {
854    if (!isImm()) return false;
855    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
856    if (!CE) return false;
857    int64_t Value = CE->getValue();
858    return Value > 0 && Value < 33;
859  }
860  bool isImm0_32() const {
861    if (!isImm()) return false;
862    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
863    if (!CE) return false;
864    int64_t Value = CE->getValue();
865    return Value >= 0 && Value < 33;
866  }
867  bool isImm0_65535() const {
868    if (!isImm()) return false;
869    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
870    if (!CE) return false;
871    int64_t Value = CE->getValue();
872    return Value >= 0 && Value < 65536;
873  }
874  bool isImm256_65535Expr() const {
875    if (!isImm()) return false;
876    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
877    // If it's not a constant expression, it'll generate a fixup and be
878    // handled later.
879    if (!CE) return true;
880    int64_t Value = CE->getValue();
881    return Value >= 256 && Value < 65536;
882  }
883  bool isImm0_65535Expr() const {
884    if (!isImm()) return false;
885    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
886    // If it's not a constant expression, it'll generate a fixup and be
887    // handled later.
888    if (!CE) return true;
889    int64_t Value = CE->getValue();
890    return Value >= 0 && Value < 65536;
891  }
892  bool isImm24bit() const {
893    if (!isImm()) return false;
894    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
895    if (!CE) return false;
896    int64_t Value = CE->getValue();
897    return Value >= 0 && Value <= 0xffffff;
898  }
899  bool isImmThumbSR() const {
900    if (!isImm()) return false;
901    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
902    if (!CE) return false;
903    int64_t Value = CE->getValue();
904    return Value > 0 && Value < 33;
905  }
906  bool isPKHLSLImm() const {
907    if (!isImm()) return false;
908    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
909    if (!CE) return false;
910    int64_t Value = CE->getValue();
911    return Value >= 0 && Value < 32;
912  }
913  bool isPKHASRImm() const {
914    if (!isImm()) return false;
915    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
916    if (!CE) return false;
917    int64_t Value = CE->getValue();
918    return Value > 0 && Value <= 32;
919  }
920  bool isAdrLabel() const {
921    // If we have an immediate that's not a constant, treat it as a label
922    // reference needing a fixup. If it is a constant, but it can't fit
923    // into shift immediate encoding, we reject it.
924    if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
925    else return (isARMSOImm() || isARMSOImmNeg());
926  }
927  bool isARMSOImm() const {
928    if (!isImm()) return false;
929    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
930    if (!CE) return false;
931    int64_t Value = CE->getValue();
932    return ARM_AM::getSOImmVal(Value) != -1;
933  }
934  bool isARMSOImmNot() const {
935    if (!isImm()) return false;
936    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
937    if (!CE) return false;
938    int64_t Value = CE->getValue();
939    return ARM_AM::getSOImmVal(~Value) != -1;
940  }
941  bool isARMSOImmNeg() const {
942    if (!isImm()) return false;
943    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
944    if (!CE) return false;
945    int64_t Value = CE->getValue();
946    // Only use this when not representable as a plain so_imm.
947    return ARM_AM::getSOImmVal(Value) == -1 &&
948      ARM_AM::getSOImmVal(-Value) != -1;
949  }
950  bool isT2SOImm() const {
951    if (!isImm()) return false;
952    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
953    if (!CE) return false;
954    int64_t Value = CE->getValue();
955    return ARM_AM::getT2SOImmVal(Value) != -1;
956  }
957  bool isT2SOImmNot() const {
958    if (!isImm()) return false;
959    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
960    if (!CE) return false;
961    int64_t Value = CE->getValue();
962    return ARM_AM::getT2SOImmVal(Value) == -1 &&
963      ARM_AM::getT2SOImmVal(~Value) != -1;
964  }
965  bool isT2SOImmNeg() const {
966    if (!isImm()) return false;
967    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
968    if (!CE) return false;
969    int64_t Value = CE->getValue();
970    // Only use this when not representable as a plain so_imm.
971    return ARM_AM::getT2SOImmVal(Value) == -1 &&
972      ARM_AM::getT2SOImmVal(-Value) != -1;
973  }
974  bool isSetEndImm() const {
975    if (!isImm()) return false;
976    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
977    if (!CE) return false;
978    int64_t Value = CE->getValue();
979    return Value == 1 || Value == 0;
980  }
981  bool isReg() const { return Kind == k_Register; }
982  bool isRegList() const { return Kind == k_RegisterList; }
983  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
984  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
985  bool isToken() const { return Kind == k_Token; }
986  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
987  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
988  bool isMem() const { return Kind == k_Memory; }
989  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
990  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
991  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
992  bool isRotImm() const { return Kind == k_RotateImmediate; }
993  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
994  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
995  bool isPostIdxReg() const {
996    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
997  }
998  bool isMemNoOffset(bool alignOK = false) const {
999    if (!isMem())
1000      return false;
1001    // No offset of any kind.
1002    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
1003     (alignOK || Memory.Alignment == 0);
1004  }
1005  bool isMemPCRelImm12() const {
1006    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1007      return false;
1008    // Base register must be PC.
1009    if (Memory.BaseRegNum != ARM::PC)
1010      return false;
1011    // Immediate offset in range [-4095, 4095].
1012    if (!Memory.OffsetImm) return true;
1013    int64_t Val = Memory.OffsetImm->getValue();
1014    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1015  }
1016  bool isAlignedMemory() const {
1017    return isMemNoOffset(true);
1018  }
1019  bool isAddrMode2() const {
1020    if (!isMem() || Memory.Alignment != 0) return false;
1021    // Check for register offset.
1022    if (Memory.OffsetRegNum) return true;
1023    // Immediate offset in range [-4095, 4095].
1024    if (!Memory.OffsetImm) return true;
1025    int64_t Val = Memory.OffsetImm->getValue();
1026    return Val > -4096 && Val < 4096;
1027  }
1028  bool isAM2OffsetImm() const {
1029    if (!isImm()) return false;
1030    // Immediate offset in range [-4095, 4095].
1031    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1032    if (!CE) return false;
1033    int64_t Val = CE->getValue();
1034    return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1035  }
1036  bool isAddrMode3() const {
1037    // If we have an immediate that's not a constant, treat it as a label
1038    // reference needing a fixup. If it is a constant, it's something else
1039    // and we reject it.
1040    if (isImm() && !isa<MCConstantExpr>(getImm()))
1041      return true;
1042    if (!isMem() || Memory.Alignment != 0) return false;
1043    // No shifts are legal for AM3.
1044    if (Memory.ShiftType != ARM_AM::no_shift) return false;
1045    // Check for register offset.
1046    if (Memory.OffsetRegNum) return true;
1047    // Immediate offset in range [-255, 255].
1048    if (!Memory.OffsetImm) return true;
1049    int64_t Val = Memory.OffsetImm->getValue();
1050    // The #-0 offset is encoded as INT32_MIN, and we have to check
1051    // for this too.
1052    return (Val > -256 && Val < 256) || Val == INT32_MIN;
1053  }
1054  bool isAM3Offset() const {
1055    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1056      return false;
1057    if (Kind == k_PostIndexRegister)
1058      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1059    // Immediate offset in range [-255, 255].
1060    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1061    if (!CE) return false;
1062    int64_t Val = CE->getValue();
1063    // Special case, #-0 is INT32_MIN.
1064    return (Val > -256 && Val < 256) || Val == INT32_MIN;
1065  }
1066  bool isAddrMode5() const {
1067    // If we have an immediate that's not a constant, treat it as a label
1068    // reference needing a fixup. If it is a constant, it's something else
1069    // and we reject it.
1070    if (isImm() && !isa<MCConstantExpr>(getImm()))
1071      return true;
1072    if (!isMem() || Memory.Alignment != 0) return false;
1073    // Check for register offset.
1074    if (Memory.OffsetRegNum) return false;
1075    // Immediate offset in range [-1020, 1020] and a multiple of 4.
1076    if (!Memory.OffsetImm) return true;
1077    int64_t Val = Memory.OffsetImm->getValue();
1078    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1079      Val == INT32_MIN;
1080  }
1081  bool isMemTBB() const {
1082    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1083        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1084      return false;
1085    return true;
1086  }
1087  bool isMemTBH() const {
1088    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1089        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1090        Memory.Alignment != 0 )
1091      return false;
1092    return true;
1093  }
1094  bool isMemRegOffset() const {
1095    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1096      return false;
1097    return true;
1098  }
1099  bool isT2MemRegOffset() const {
1100    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1101        Memory.Alignment != 0)
1102      return false;
1103    // Only lsl #{0, 1, 2, 3} allowed.
1104    if (Memory.ShiftType == ARM_AM::no_shift)
1105      return true;
1106    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1107      return false;
1108    return true;
1109  }
1110  bool isMemThumbRR() const {
1111    // Thumb reg+reg addressing is simple. Just two registers, a base and
1112    // an offset. No shifts, negations or any other complicating factors.
1113    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1114        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1115      return false;
1116    return isARMLowRegister(Memory.BaseRegNum) &&
1117      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1118  }
1119  bool isMemThumbRIs4() const {
1120    if (!isMem() || Memory.OffsetRegNum != 0 ||
1121        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1122      return false;
1123    // Immediate offset, multiple of 4 in range [0, 124].
1124    if (!Memory.OffsetImm) return true;
1125    int64_t Val = Memory.OffsetImm->getValue();
1126    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1127  }
1128  bool isMemThumbRIs2() const {
1129    if (!isMem() || Memory.OffsetRegNum != 0 ||
1130        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1131      return false;
1132    // Immediate offset, multiple of 4 in range [0, 62].
1133    if (!Memory.OffsetImm) return true;
1134    int64_t Val = Memory.OffsetImm->getValue();
1135    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1136  }
1137  bool isMemThumbRIs1() const {
1138    if (!isMem() || Memory.OffsetRegNum != 0 ||
1139        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1140      return false;
1141    // Immediate offset in range [0, 31].
1142    if (!Memory.OffsetImm) return true;
1143    int64_t Val = Memory.OffsetImm->getValue();
1144    return Val >= 0 && Val <= 31;
1145  }
1146  bool isMemThumbSPI() const {
1147    if (!isMem() || Memory.OffsetRegNum != 0 ||
1148        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1149      return false;
1150    // Immediate offset, multiple of 4 in range [0, 1020].
1151    if (!Memory.OffsetImm) return true;
1152    int64_t Val = Memory.OffsetImm->getValue();
1153    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1154  }
1155  bool isMemImm8s4Offset() const {
1156    // If we have an immediate that's not a constant, treat it as a label
1157    // reference needing a fixup. If it is a constant, it's something else
1158    // and we reject it.
1159    if (isImm() && !isa<MCConstantExpr>(getImm()))
1160      return true;
1161    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1162      return false;
1163    // Immediate offset a multiple of 4 in range [-1020, 1020].
1164    if (!Memory.OffsetImm) return true;
1165    int64_t Val = Memory.OffsetImm->getValue();
1166    // Special case, #-0 is INT32_MIN.
1167    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1168  }
1169  bool isMemImm0_1020s4Offset() const {
1170    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1171      return false;
1172    // Immediate offset a multiple of 4 in range [0, 1020].
1173    if (!Memory.OffsetImm) return true;
1174    int64_t Val = Memory.OffsetImm->getValue();
1175    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1176  }
1177  bool isMemImm8Offset() const {
1178    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1179      return false;
1180    // Base reg of PC isn't allowed for these encodings.
1181    if (Memory.BaseRegNum == ARM::PC) return false;
1182    // Immediate offset in range [-255, 255].
1183    if (!Memory.OffsetImm) return true;
1184    int64_t Val = Memory.OffsetImm->getValue();
1185    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1186  }
1187  bool isMemPosImm8Offset() const {
1188    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1189      return false;
1190    // Immediate offset in range [0, 255].
1191    if (!Memory.OffsetImm) return true;
1192    int64_t Val = Memory.OffsetImm->getValue();
1193    return Val >= 0 && Val < 256;
1194  }
1195  bool isMemNegImm8Offset() const {
1196    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1197      return false;
1198    // Base reg of PC isn't allowed for these encodings.
1199    if (Memory.BaseRegNum == ARM::PC) return false;
1200    // Immediate offset in range [-255, -1].
1201    if (!Memory.OffsetImm) return false;
1202    int64_t Val = Memory.OffsetImm->getValue();
1203    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1204  }
1205  bool isMemUImm12Offset() const {
1206    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1207      return false;
1208    // Immediate offset in range [0, 4095].
1209    if (!Memory.OffsetImm) return true;
1210    int64_t Val = Memory.OffsetImm->getValue();
1211    return (Val >= 0 && Val < 4096);
1212  }
1213  bool isMemImm12Offset() const {
1214    // If we have an immediate that's not a constant, treat it as a label
1215    // reference needing a fixup. If it is a constant, it's something else
1216    // and we reject it.
1217    if (isImm() && !isa<MCConstantExpr>(getImm()))
1218      return true;
1219
1220    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1221      return false;
1222    // Immediate offset in range [-4095, 4095].
1223    if (!Memory.OffsetImm) return true;
1224    int64_t Val = Memory.OffsetImm->getValue();
1225    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1226  }
1227  bool isPostIdxImm8() const {
1228    if (!isImm()) return false;
1229    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1230    if (!CE) return false;
1231    int64_t Val = CE->getValue();
1232    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1233  }
1234  bool isPostIdxImm8s4() const {
1235    if (!isImm()) return false;
1236    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1237    if (!CE) return false;
1238    int64_t Val = CE->getValue();
1239    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1240      (Val == INT32_MIN);
1241  }
1242
1243  bool isMSRMask() const { return Kind == k_MSRMask; }
1244  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1245
1246  // NEON operands.
1247  bool isSingleSpacedVectorList() const {
1248    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1249  }
1250  bool isDoubleSpacedVectorList() const {
1251    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1252  }
1253  bool isVecListOneD() const {
1254    if (!isSingleSpacedVectorList()) return false;
1255    return VectorList.Count == 1;
1256  }
1257
1258  bool isVecListDPair() const {
1259    if (!isSingleSpacedVectorList()) return false;
1260    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1261              .contains(VectorList.RegNum));
1262  }
1263
1264  bool isVecListThreeD() const {
1265    if (!isSingleSpacedVectorList()) return false;
1266    return VectorList.Count == 3;
1267  }
1268
1269  bool isVecListFourD() const {
1270    if (!isSingleSpacedVectorList()) return false;
1271    return VectorList.Count == 4;
1272  }
1273
1274  bool isVecListDPairSpaced() const {
1275    if (isSingleSpacedVectorList()) return false;
1276    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1277              .contains(VectorList.RegNum));
1278  }
1279
1280  bool isVecListThreeQ() const {
1281    if (!isDoubleSpacedVectorList()) return false;
1282    return VectorList.Count == 3;
1283  }
1284
1285  bool isVecListFourQ() const {
1286    if (!isDoubleSpacedVectorList()) return false;
1287    return VectorList.Count == 4;
1288  }
1289
1290  bool isSingleSpacedVectorAllLanes() const {
1291    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1292  }
1293  bool isDoubleSpacedVectorAllLanes() const {
1294    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1295  }
1296  bool isVecListOneDAllLanes() const {
1297    if (!isSingleSpacedVectorAllLanes()) return false;
1298    return VectorList.Count == 1;
1299  }
1300
1301  bool isVecListDPairAllLanes() const {
1302    if (!isSingleSpacedVectorAllLanes()) return false;
1303    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1304              .contains(VectorList.RegNum));
1305  }
1306
1307  bool isVecListDPairSpacedAllLanes() const {
1308    if (!isDoubleSpacedVectorAllLanes()) return false;
1309    return VectorList.Count == 2;
1310  }
1311
1312  bool isVecListThreeDAllLanes() const {
1313    if (!isSingleSpacedVectorAllLanes()) return false;
1314    return VectorList.Count == 3;
1315  }
1316
1317  bool isVecListThreeQAllLanes() const {
1318    if (!isDoubleSpacedVectorAllLanes()) return false;
1319    return VectorList.Count == 3;
1320  }
1321
1322  bool isVecListFourDAllLanes() const {
1323    if (!isSingleSpacedVectorAllLanes()) return false;
1324    return VectorList.Count == 4;
1325  }
1326
1327  bool isVecListFourQAllLanes() const {
1328    if (!isDoubleSpacedVectorAllLanes()) return false;
1329    return VectorList.Count == 4;
1330  }
1331
1332  bool isSingleSpacedVectorIndexed() const {
1333    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1334  }
1335  bool isDoubleSpacedVectorIndexed() const {
1336    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1337  }
1338  bool isVecListOneDByteIndexed() const {
1339    if (!isSingleSpacedVectorIndexed()) return false;
1340    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1341  }
1342
1343  bool isVecListOneDHWordIndexed() const {
1344    if (!isSingleSpacedVectorIndexed()) return false;
1345    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1346  }
1347
1348  bool isVecListOneDWordIndexed() const {
1349    if (!isSingleSpacedVectorIndexed()) return false;
1350    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1351  }
1352
1353  bool isVecListTwoDByteIndexed() const {
1354    if (!isSingleSpacedVectorIndexed()) return false;
1355    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1356  }
1357
1358  bool isVecListTwoDHWordIndexed() const {
1359    if (!isSingleSpacedVectorIndexed()) return false;
1360    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1361  }
1362
1363  bool isVecListTwoQWordIndexed() const {
1364    if (!isDoubleSpacedVectorIndexed()) return false;
1365    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1366  }
1367
1368  bool isVecListTwoQHWordIndexed() const {
1369    if (!isDoubleSpacedVectorIndexed()) return false;
1370    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1371  }
1372
1373  bool isVecListTwoDWordIndexed() const {
1374    if (!isSingleSpacedVectorIndexed()) return false;
1375    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1376  }
1377
1378  bool isVecListThreeDByteIndexed() const {
1379    if (!isSingleSpacedVectorIndexed()) return false;
1380    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1381  }
1382
1383  bool isVecListThreeDHWordIndexed() const {
1384    if (!isSingleSpacedVectorIndexed()) return false;
1385    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1386  }
1387
1388  bool isVecListThreeQWordIndexed() const {
1389    if (!isDoubleSpacedVectorIndexed()) return false;
1390    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1391  }
1392
1393  bool isVecListThreeQHWordIndexed() const {
1394    if (!isDoubleSpacedVectorIndexed()) return false;
1395    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1396  }
1397
1398  bool isVecListThreeDWordIndexed() const {
1399    if (!isSingleSpacedVectorIndexed()) return false;
1400    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1401  }
1402
1403  bool isVecListFourDByteIndexed() const {
1404    if (!isSingleSpacedVectorIndexed()) return false;
1405    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1406  }
1407
1408  bool isVecListFourDHWordIndexed() const {
1409    if (!isSingleSpacedVectorIndexed()) return false;
1410    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1411  }
1412
1413  bool isVecListFourQWordIndexed() const {
1414    if (!isDoubleSpacedVectorIndexed()) return false;
1415    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1416  }
1417
1418  bool isVecListFourQHWordIndexed() const {
1419    if (!isDoubleSpacedVectorIndexed()) return false;
1420    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1421  }
1422
1423  bool isVecListFourDWordIndexed() const {
1424    if (!isSingleSpacedVectorIndexed()) return false;
1425    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1426  }
1427
1428  bool isVectorIndex8() const {
1429    if (Kind != k_VectorIndex) return false;
1430    return VectorIndex.Val < 8;
1431  }
1432  bool isVectorIndex16() const {
1433    if (Kind != k_VectorIndex) return false;
1434    return VectorIndex.Val < 4;
1435  }
1436  bool isVectorIndex32() const {
1437    if (Kind != k_VectorIndex) return false;
1438    return VectorIndex.Val < 2;
1439  }
1440
1441  bool isNEONi8splat() const {
1442    if (!isImm()) return false;
1443    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1444    // Must be a constant.
1445    if (!CE) return false;
1446    int64_t Value = CE->getValue();
1447    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1448    // value.
1449    return Value >= 0 && Value < 256;
1450  }
1451
1452  bool isNEONi16splat() const {
1453    if (!isImm()) return false;
1454    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1455    // Must be a constant.
1456    if (!CE) return false;
1457    int64_t Value = CE->getValue();
1458    // i16 value in the range [0,255] or [0x0100, 0xff00]
1459    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1460  }
1461
1462  bool isNEONi32splat() const {
1463    if (!isImm()) return false;
1464    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1465    // Must be a constant.
1466    if (!CE) return false;
1467    int64_t Value = CE->getValue();
1468    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1469    return (Value >= 0 && Value < 256) ||
1470      (Value >= 0x0100 && Value <= 0xff00) ||
1471      (Value >= 0x010000 && Value <= 0xff0000) ||
1472      (Value >= 0x01000000 && Value <= 0xff000000);
1473  }
1474
1475  bool isNEONi32vmov() const {
1476    if (!isImm()) return false;
1477    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1478    // Must be a constant.
1479    if (!CE) return false;
1480    int64_t Value = CE->getValue();
1481    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1482    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1483    return (Value >= 0 && Value < 256) ||
1484      (Value >= 0x0100 && Value <= 0xff00) ||
1485      (Value >= 0x010000 && Value <= 0xff0000) ||
1486      (Value >= 0x01000000 && Value <= 0xff000000) ||
1487      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1488      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1489  }
1490  bool isNEONi32vmovNeg() const {
1491    if (!isImm()) return false;
1492    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1493    // Must be a constant.
1494    if (!CE) return false;
1495    int64_t Value = ~CE->getValue();
1496    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1497    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1498    return (Value >= 0 && Value < 256) ||
1499      (Value >= 0x0100 && Value <= 0xff00) ||
1500      (Value >= 0x010000 && Value <= 0xff0000) ||
1501      (Value >= 0x01000000 && Value <= 0xff000000) ||
1502      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1503      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1504  }
1505
1506  bool isNEONi64splat() const {
1507    if (!isImm()) return false;
1508    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1509    // Must be a constant.
1510    if (!CE) return false;
1511    uint64_t Value = CE->getValue();
1512    // i64 value with each byte being either 0 or 0xff.
1513    for (unsigned i = 0; i < 8; ++i)
1514      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1515    return true;
1516  }
1517
1518  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1519    // Add as immediates when possible.  Null MCExpr = 0.
1520    if (Expr == 0)
1521      Inst.addOperand(MCOperand::CreateImm(0));
1522    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1523      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1524    else
1525      Inst.addOperand(MCOperand::CreateExpr(Expr));
1526  }
1527
1528  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1529    assert(N == 2 && "Invalid number of operands!");
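        // The predicate is two operands: the condition code itself and the
        // condition register (CPSR, or register 0 when the condition is AL).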
1530    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1531    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1532    Inst.addOperand(MCOperand::CreateReg(RegNum));
1533  }
1534
1535  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1536    assert(N == 1 && "Invalid number of operands!");
1537    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1538  }
1539
1540  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1541    assert(N == 1 && "Invalid number of operands!");
1542    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1543  }
1544
1545  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1546    assert(N == 1 && "Invalid number of operands!");
1547    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1548  }
1549
1550  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1551    assert(N == 1 && "Invalid number of operands!");
1552    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1553  }
1554
1555  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1556    assert(N == 1 && "Invalid number of operands!");
1557    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1558  }
1559
1560  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1561    assert(N == 1 && "Invalid number of operands!");
1562    Inst.addOperand(MCOperand::CreateReg(getReg()));
1563  }
1564
1565  void addRegOperands(MCInst &Inst, unsigned N) const {
1566    assert(N == 1 && "Invalid number of operands!");
1567    Inst.addOperand(MCOperand::CreateReg(getReg()));
1568  }
1569
1570  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1571    assert(N == 3 && "Invalid number of operands!");
1572    assert(isRegShiftedReg() &&
1573           "addRegShiftedRegOperands() on non RegShiftedReg!");
1574    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1575    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1576    Inst.addOperand(MCOperand::CreateImm(
1577      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1578  }
1579
1580  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1581    assert(N == 2 && "Invalid number of operands!");
1582    assert(isRegShiftedImm() &&
1583           "addRegShiftedImmOperands() on non RegShiftedImm!");
1584    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1585    // Shift of #32 is encoded as 0 where permitted
1586    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1587    Inst.addOperand(MCOperand::CreateImm(
1588      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1589  }
1590
1591  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1592    assert(N == 1 && "Invalid number of operands!");
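        // For example, "asr #3" is encoded as (1 << 5) | 3 = 35, while
        // "lsl #3" is encoded as just 3.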
1593    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1594                                         ShifterImm.Imm));
1595  }
1596
1597  void addRegListOperands(MCInst &Inst, unsigned N) const {
1598    assert(N == 1 && "Invalid number of operands!");
1599    const SmallVectorImpl<unsigned> &RegList = getRegList();
1600    for (SmallVectorImpl<unsigned>::const_iterator
1601           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1602      Inst.addOperand(MCOperand::CreateReg(*I));
1603  }
1604
1605  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1606    addRegListOperands(Inst, N);
1607  }
1608
1609  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1610    addRegListOperands(Inst, N);
1611  }
1612
1613  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1614    assert(N == 1 && "Invalid number of operands!");
1615    // Encoded as val>>3. The printer handles display as 8, 16, 24.
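        // For example, "ror #16" is stored as 16 >> 3 = 2.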
1616    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1617  }
1618
1619  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1620    assert(N == 1 && "Invalid number of operands!");
1621    // Munge the lsb/width into a bitfield mask.
1622    unsigned lsb = Bitfield.LSB;
1623    unsigned width = Bitfield.Width;
1624    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
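        // For example, lsb=8 and width=4 give Mask = 0xfffff0ff (bits [11:8]
        // clear).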
1625    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1626                      (32 - (lsb + width)));
1627    Inst.addOperand(MCOperand::CreateImm(Mask));
1628  }
1629
1630  void addImmOperands(MCInst &Inst, unsigned N) const {
1631    assert(N == 1 && "Invalid number of operands!");
1632    addExpr(Inst, getImm());
1633  }
1634
1635  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1636    assert(N == 1 && "Invalid number of operands!");
1637    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1638    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1639  }
1640
1641  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1642    assert(N == 1 && "Invalid number of operands!");
1643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1644    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1645  }
1646
1647  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1648    assert(N == 1 && "Invalid number of operands!");
1649    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1650    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1651    Inst.addOperand(MCOperand::CreateImm(Val));
1652  }
1653
1654  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1655    assert(N == 1 && "Invalid number of operands!");
1656    // FIXME: We really want to scale the value here, but the LDRD/STRD
1657    // instructions don't encode operands that way yet.
1658    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1659    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1660  }
1661
1662  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1663    assert(N == 1 && "Invalid number of operands!");
1664    // The immediate is scaled by four in the encoding and is stored
1665    // in the MCInst as such. Lop off the low two bits here.
1666    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1667    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1668  }
1669
1670  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1671    assert(N == 1 && "Invalid number of operands!");
1672    // The immediate is scaled by four in the encoding and is stored
1673    // in the MCInst as such. Lop off the low two bits here.
1674    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1675    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1676  }
1677
1678  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1679    assert(N == 1 && "Invalid number of operands!");
1680    // The immediate is scaled by four in the encoding and is stored
1681    // in the MCInst as such. Lop off the low two bits here.
1682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1683    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1684  }
1685
1686  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1687    assert(N == 1 && "Invalid number of operands!");
1688    // The constant encodes as the immediate-1, and we store in the instruction
1689    // the bits as encoded, so subtract off one here.
1690    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1691    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1692  }
1693
1694  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1695    assert(N == 1 && "Invalid number of operands!");
1696    // The constant encodes as the immediate-1, and we store in the instruction
1697    // the bits as encoded, so subtract off one here.
1698    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1699    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1700  }
1701
1702  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1703    assert(N == 1 && "Invalid number of operands!");
1704    // The constant encodes as the immediate, except for 32, which encodes as
1705    // zero.
1706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1707    unsigned Imm = CE->getValue();
1708    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1709  }
1710
1711  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1712    assert(N == 1 && "Invalid number of operands!");
1713    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1714    // the instruction as well.
1715    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1716    int Val = CE->getValue();
1717    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1718  }
1719
1720  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1721    assert(N == 1 && "Invalid number of operands!");
1722    // The operand is actually a t2_so_imm, but we have its bitwise
1723    // negation in the assembly source, so twiddle it here.
1724    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1725    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1726  }
1727
1728  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1729    assert(N == 1 && "Invalid number of operands!");
1730    // The operand is actually a t2_so_imm, but we have its
1731    // negation in the assembly source, so twiddle it here.
1732    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1733    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1734  }
1735
1736  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1737    assert(N == 1 && "Invalid number of operands!");
1738    // The operand is actually an imm0_4095, but we have its
1739    // negation in the assembly source, so twiddle it here.
1740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1741    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1742  }
1743
1744  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
1745    if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
1746      Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2));
1747      return;
1748    }
1749
1750    const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1751    assert(SR && "Unknown value type!");
1752    Inst.addOperand(MCOperand::CreateExpr(SR));
1753  }
1754
1755  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
1756    assert(N == 1 && "Invalid number of operands!");
1757    if (isImm()) {
1758      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1759      if (CE) {
1760        Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1761        return;
1762      }
1763
1764      const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
1765      assert(SR && "Unknown value type!");
1766      Inst.addOperand(MCOperand::CreateExpr(SR));
1767      return;
1768    }
1769
1770    assert(isMem()  && "Unknown value type!");
1771    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
1772    Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue()));
1773  }
1774
1775  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1776    assert(N == 1 && "Invalid number of operands!");
1777    // The operand is actually a so_imm, but we have its bitwise
1778    // negation in the assembly source, so twiddle it here.
1779    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1780    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1781  }
1782
1783  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1784    assert(N == 1 && "Invalid number of operands!");
1785    // The operand is actually a so_imm, but we have its
1786    // negation in the assembly source, so twiddle it here.
1787    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1788    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1789  }
1790
1791  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1792    assert(N == 1 && "Invalid number of operands!");
1793    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1794  }
1795
1796  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
1797    assert(N == 1 && "Invalid number of operands!");
1798    Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt())));
1799  }
1800
1801  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1802    assert(N == 1 && "Invalid number of operands!");
1803    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1804  }
1805
1806  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1807    assert(N == 1 && "Invalid number of operands!");
1808    int32_t Imm = Memory.OffsetImm->getValue();
1809    Inst.addOperand(MCOperand::CreateImm(Imm));
1810  }
1811
1812  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1813    assert(N == 1 && "Invalid number of operands!");
1814    assert(isImm() && "Not an immediate!");
1815
1816    // If we have an immediate that's not a constant, treat it as a label
1817    // reference needing a fixup.
1818    if (!isa<MCConstantExpr>(getImm())) {
1819      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1820      return;
1821    }
1822
1823    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1824    int Val = CE->getValue();
1825    Inst.addOperand(MCOperand::CreateImm(Val));
1826  }
1827
1828  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1829    assert(N == 2 && "Invalid number of operands!");
1830    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1831    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1832  }
1833
1834  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1835    assert(N == 3 && "Invalid number of operands!");
1836    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1837    if (!Memory.OffsetRegNum) {
1838      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1839      // Special case for #-0
1840      if (Val == INT32_MIN) Val = 0;
1841      if (Val < 0) Val = -Val;
1842      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1843    } else {
1844      // For register offset, we encode the shift type and negation flag
1845      // here.
1846      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1847                              Memory.ShiftImm, Memory.ShiftType);
1848    }
1849    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1850    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1851    Inst.addOperand(MCOperand::CreateImm(Val));
1852  }
1853
1854  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1855    assert(N == 2 && "Invalid number of operands!");
1856    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1857    assert(CE && "non-constant AM2OffsetImm operand!");
1858    int32_t Val = CE->getValue();
1859    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1860    // Special case for #-0
1861    if (Val == INT32_MIN) Val = 0;
1862    if (Val < 0) Val = -Val;
1863    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1864    Inst.addOperand(MCOperand::CreateReg(0));
1865    Inst.addOperand(MCOperand::CreateImm(Val));
1866  }
1867
1868  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1869    assert(N == 3 && "Invalid number of operands!");
1870    // If we have an immediate that's not a constant, treat it as a label
1871    // reference needing a fixup. If it is a constant, it's something else
1872    // and we reject it.
1873    if (isImm()) {
1874      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1875      Inst.addOperand(MCOperand::CreateReg(0));
1876      Inst.addOperand(MCOperand::CreateImm(0));
1877      return;
1878    }
1879
1880    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1881    if (!Memory.OffsetRegNum) {
1882      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1883      // Special case for #-0
1884      if (Val == INT32_MIN) Val = 0;
1885      if (Val < 0) Val = -Val;
1886      Val = ARM_AM::getAM3Opc(AddSub, Val);
1887    } else {
1888      // For register offset, we encode the shift type and negation flag
1889      // here.
1890      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1891    }
1892    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1893    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1894    Inst.addOperand(MCOperand::CreateImm(Val));
1895  }
1896
1897  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1898    assert(N == 2 && "Invalid number of operands!");
1899    if (Kind == k_PostIndexRegister) {
1900      int32_t Val =
1901        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1902      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1903      Inst.addOperand(MCOperand::CreateImm(Val));
1904      return;
1905    }
1906
1907    // Constant offset.
1908    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1909    int32_t Val = CE->getValue();
1910    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1911    // Special case for #-0
1912    if (Val == INT32_MIN) Val = 0;
1913    if (Val < 0) Val = -Val;
1914    Val = ARM_AM::getAM3Opc(AddSub, Val);
1915    Inst.addOperand(MCOperand::CreateReg(0));
1916    Inst.addOperand(MCOperand::CreateImm(Val));
1917  }
1918
1919  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1920    assert(N == 2 && "Invalid number of operands!");
1921    // If we have an immediate that's not a constant, treat it as a label
1922    // reference needing a fixup. If it is a constant, it's something else
1923    // and we reject it.
1924    if (isImm()) {
1925      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1926      Inst.addOperand(MCOperand::CreateImm(0));
1927      return;
1928    }
1929
1930    // The lower two bits are always zero and as such are not encoded.
1931    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1932    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1933    // Special case for #-0
1934    if (Val == INT32_MIN) Val = 0;
1935    if (Val < 0) Val = -Val;
1936    Val = ARM_AM::getAM5Opc(AddSub, Val);
1937    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1938    Inst.addOperand(MCOperand::CreateImm(Val));
1939  }
1940
1941  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1942    assert(N == 2 && "Invalid number of operands!");
1943    // If we have an immediate that's not a constant, treat it as a label
1944    // reference needing a fixup. If it is a constant, it's something else
1945    // and we reject it.
1946    if (isImm()) {
1947      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1948      Inst.addOperand(MCOperand::CreateImm(0));
1949      return;
1950    }
1951
1952    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1953    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1954    Inst.addOperand(MCOperand::CreateImm(Val));
1955  }
1956
1957  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1958    assert(N == 2 && "Invalid number of operands!");
1959    // The lower two bits are always zero and as such are not encoded.
1960    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1961    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1962    Inst.addOperand(MCOperand::CreateImm(Val));
1963  }
1964
1965  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1966    assert(N == 2 && "Invalid number of operands!");
1967    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1968    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1969    Inst.addOperand(MCOperand::CreateImm(Val));
1970  }
1971
1972  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1973    addMemImm8OffsetOperands(Inst, N);
1974  }
1975
1976  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1977    addMemImm8OffsetOperands(Inst, N);
1978  }
1979
1980  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1981    assert(N == 2 && "Invalid number of operands!");
1982    // If this is an immediate, it's a label reference.
1983    if (isImm()) {
1984      addExpr(Inst, getImm());
1985      Inst.addOperand(MCOperand::CreateImm(0));
1986      return;
1987    }
1988
1989    // Otherwise, it's a normal memory reg+offset.
1990    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1991    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1992    Inst.addOperand(MCOperand::CreateImm(Val));
1993  }
1994
1995  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1996    assert(N == 2 && "Invalid number of operands!");
1997    // If this is an immediate, it's a label reference.
1998    if (isImm()) {
1999      addExpr(Inst, getImm());
2000      Inst.addOperand(MCOperand::CreateImm(0));
2001      return;
2002    }
2003
2004    // Otherwise, it's a normal memory reg+offset.
2005    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2006    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2007    Inst.addOperand(MCOperand::CreateImm(Val));
2008  }
2009
2010  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2011    assert(N == 2 && "Invalid number of operands!");
2012    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2013    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2014  }
2015
2016  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2017    assert(N == 2 && "Invalid number of operands!");
2018    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2019    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2020  }
2021
2022  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2023    assert(N == 3 && "Invalid number of operands!");
2024    unsigned Val =
2025      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2026                        Memory.ShiftImm, Memory.ShiftType);
2027    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2028    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2029    Inst.addOperand(MCOperand::CreateImm(Val));
2030  }
2031
2032  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2033    assert(N == 3 && "Invalid number of operands!");
2034    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2035    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2036    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
2037  }
2038
2039  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2040    assert(N == 2 && "Invalid number of operands!");
2041    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2042    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
2043  }
2044
2045  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2046    assert(N == 2 && "Invalid number of operands!");
2047    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2048    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2049    Inst.addOperand(MCOperand::CreateImm(Val));
2050  }
2051
2052  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2053    assert(N == 2 && "Invalid number of operands!");
2054    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2055    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2056    Inst.addOperand(MCOperand::CreateImm(Val));
2057  }
2058
2059  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2060    assert(N == 2 && "Invalid number of operands!");
2061    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2062    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2063    Inst.addOperand(MCOperand::CreateImm(Val));
2064  }
2065
2066  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2067    assert(N == 2 && "Invalid number of operands!");
2068    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2069    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
2070    Inst.addOperand(MCOperand::CreateImm(Val));
2071  }
2072
2073  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2074    assert(N == 1 && "Invalid number of operands!");
2075    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2076    assert(CE && "non-constant post-idx-imm8 operand!");
2077    int Imm = CE->getValue();
2078    bool isAdd = Imm >= 0;
2079    if (Imm == INT32_MIN) Imm = 0;
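        // The magnitude goes in the low bits with the add/sub flag in bit 8,
        // e.g. #4 becomes 4 | (1 << 8) = 0x104 and #-4 becomes just 4.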
2080    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2081    Inst.addOperand(MCOperand::CreateImm(Imm));
2082  }
2083
2084  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2085    assert(N == 1 && "Invalid number of operands!");
2086    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2087    assert(CE && "non-constant post-idx-imm8s4 operand!");
2088    int Imm = CE->getValue();
2089    bool isAdd = Imm >= 0;
2090    if (Imm == INT32_MIN) Imm = 0;
2091    // Immediate is scaled by 4.
2092    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2093    Inst.addOperand(MCOperand::CreateImm(Imm));
2094  }
2095
2096  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2097    assert(N == 2 && "Invalid number of operands!");
2098    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2099    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
2100  }
2101
2102  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2103    assert(N == 2 && "Invalid number of operands!");
2104    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2105    // The sign, shift type, and shift amount are encoded in a single operand
2106    // using the AM2 encoding helpers.
2107    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2108    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2109                                     PostIdxReg.ShiftTy);
2110    Inst.addOperand(MCOperand::CreateImm(Imm));
2111  }
2112
2113  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2114    assert(N == 1 && "Invalid number of operands!");
2115    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
2116  }
2117
2118  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2119    assert(N == 1 && "Invalid number of operands!");
2120    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2121  }
2122
2123  void addVecListOperands(MCInst &Inst, unsigned N) const {
2124    assert(N == 1 && "Invalid number of operands!");
2125    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2126  }
2127
2128  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2129    assert(N == 2 && "Invalid number of operands!");
2130    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2131    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
2132  }
2133
2134  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2135    assert(N == 1 && "Invalid number of operands!");
2136    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2137  }
2138
2139  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2140    assert(N == 1 && "Invalid number of operands!");
2141    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2142  }
2143
2144  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2145    assert(N == 1 && "Invalid number of operands!");
2146    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2147  }
2148
2149  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2150    assert(N == 1 && "Invalid number of operands!");
2151    // The immediate encodes the type of constant as well as the value.
2152    // Mask in that this is an i8 splat.
2153    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2154    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2155  }
2156
2157  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2158    assert(N == 1 && "Invalid number of operands!");
2159    // The immediate encodes the type of constant as well as the value.
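        // For example, #0x1200 becomes (0x1200 >> 8) | 0xa00 = 0xa12, while
        // #0x12 becomes 0x12 | 0x800 = 0x812.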
2160    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2161    unsigned Value = CE->getValue();
2162    if (Value >= 256)
2163      Value = (Value >> 8) | 0xa00;
2164    else
2165      Value |= 0x800;
2166    Inst.addOperand(MCOperand::CreateImm(Value));
2167  }
2168
2169  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2170    assert(N == 1 && "Invalid number of operands!");
2171    // The immediate encodes the type of constant as well as the value.
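        // For example, #0x1200 becomes 0x212 and #0x120000 becomes 0x412.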
2172    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2173    unsigned Value = CE->getValue();
2174    if (Value >= 256 && Value <= 0xff00)
2175      Value = (Value >> 8) | 0x200;
2176    else if (Value > 0xffff && Value <= 0xff0000)
2177      Value = (Value >> 16) | 0x400;
2178    else if (Value > 0xffffff)
2179      Value = (Value >> 24) | 0x600;
2180    Inst.addOperand(MCOperand::CreateImm(Value));
2181  }
2182
2183  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2184    assert(N == 1 && "Invalid number of operands!");
2185    // The immediate encodes the type of constant as well as the value.
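        // For example, #0x1200 becomes 0x212, while #0x12ff (trailing ones
        // form) becomes 0xc12.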
2186    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2187    unsigned Value = CE->getValue();
2188    if (Value >= 256 && Value <= 0xffff)
2189      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2190    else if (Value > 0xffff && Value <= 0xffffff)
2191      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2192    else if (Value > 0xffffff)
2193      Value = (Value >> 24) | 0x600;
2194    Inst.addOperand(MCOperand::CreateImm(Value));
2195  }
2196
2197  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2198    assert(N == 1 && "Invalid number of operands!");
2199    // The immediate encodes the type of constant as well as the value.
2200    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2201    unsigned Value = ~CE->getValue();
2202    if (Value >= 256 && Value <= 0xffff)
2203      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2204    else if (Value > 0xffff && Value <= 0xffffff)
2205      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2206    else if (Value > 0xffffff)
2207      Value = (Value >> 24) | 0x600;
2208    Inst.addOperand(MCOperand::CreateImm(Value));
2209  }
2210
2211  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2212    assert(N == 1 && "Invalid number of operands!");
2213    // The immediate encodes the type of constant as well as the value.
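        // Each byte of the value that is 0xff sets one bit of the 8-bit mask,
        // e.g. 0x00ff00ff00ff00ff yields Imm = 0x55, emitted as 0x55 | 0x1e00.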
2214    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2215    uint64_t Value = CE->getValue();
2216    unsigned Imm = 0;
2217    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2218      Imm |= (Value & 1) << i;
2219    }
2220    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2221  }
2222
2223  virtual void print(raw_ostream &OS) const;
2224
2225  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2226    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2227    Op->ITMask.Mask = Mask;
2228    Op->StartLoc = S;
2229    Op->EndLoc = S;
2230    return Op;
2231  }
2232
2233  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2234    ARMOperand *Op = new ARMOperand(k_CondCode);
2235    Op->CC.Val = CC;
2236    Op->StartLoc = S;
2237    Op->EndLoc = S;
2238    return Op;
2239  }
2240
2241  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2242    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2243    Op->Cop.Val = CopVal;
2244    Op->StartLoc = S;
2245    Op->EndLoc = S;
2246    return Op;
2247  }
2248
2249  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2250    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2251    Op->Cop.Val = CopVal;
2252    Op->StartLoc = S;
2253    Op->EndLoc = S;
2254    return Op;
2255  }
2256
2257  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2258    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2259    Op->Cop.Val = Val;
2260    Op->StartLoc = S;
2261    Op->EndLoc = E;
2262    return Op;
2263  }
2264
2265  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2266    ARMOperand *Op = new ARMOperand(k_CCOut);
2267    Op->Reg.RegNum = RegNum;
2268    Op->StartLoc = S;
2269    Op->EndLoc = S;
2270    return Op;
2271  }
2272
2273  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2274    ARMOperand *Op = new ARMOperand(k_Token);
2275    Op->Tok.Data = Str.data();
2276    Op->Tok.Length = Str.size();
2277    Op->StartLoc = S;
2278    Op->EndLoc = S;
2279    return Op;
2280  }
2281
2282  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2283    ARMOperand *Op = new ARMOperand(k_Register);
2284    Op->Reg.RegNum = RegNum;
2285    Op->StartLoc = S;
2286    Op->EndLoc = E;
2287    return Op;
2288  }
2289
2290  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2291                                           unsigned SrcReg,
2292                                           unsigned ShiftReg,
2293                                           unsigned ShiftImm,
2294                                           SMLoc S, SMLoc E) {
2295    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2296    Op->RegShiftedReg.ShiftTy = ShTy;
2297    Op->RegShiftedReg.SrcReg = SrcReg;
2298    Op->RegShiftedReg.ShiftReg = ShiftReg;
2299    Op->RegShiftedReg.ShiftImm = ShiftImm;
2300    Op->StartLoc = S;
2301    Op->EndLoc = E;
2302    return Op;
2303  }
2304
2305  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2306                                            unsigned SrcReg,
2307                                            unsigned ShiftImm,
2308                                            SMLoc S, SMLoc E) {
2309    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2310    Op->RegShiftedImm.ShiftTy = ShTy;
2311    Op->RegShiftedImm.SrcReg = SrcReg;
2312    Op->RegShiftedImm.ShiftImm = ShiftImm;
2313    Op->StartLoc = S;
2314    Op->EndLoc = E;
2315    return Op;
2316  }
2317
2318  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2319                                   SMLoc S, SMLoc E) {
2320    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2321    Op->ShifterImm.isASR = isASR;
2322    Op->ShifterImm.Imm = Imm;
2323    Op->StartLoc = S;
2324    Op->EndLoc = E;
2325    return Op;
2326  }
2327
2328  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2329    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2330    Op->RotImm.Imm = Imm;
2331    Op->StartLoc = S;
2332    Op->EndLoc = E;
2333    return Op;
2334  }
2335
2336  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2337                                    SMLoc S, SMLoc E) {
2338    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2339    Op->Bitfield.LSB = LSB;
2340    Op->Bitfield.Width = Width;
2341    Op->StartLoc = S;
2342    Op->EndLoc = E;
2343    return Op;
2344  }
2345
2346  static ARMOperand *
2347  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned> > &Regs,
2348                SMLoc StartLoc, SMLoc EndLoc) {
2349    assert (Regs.size() > 0 && "RegList contains no registers?");
2350    KindTy Kind = k_RegisterList;
2351
2352    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2353      Kind = k_DPRRegisterList;
2354    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2355             contains(Regs.front().second))
2356      Kind = k_SPRRegisterList;
2357
2358    // Sort based on the register encoding values.
2359    array_pod_sort(Regs.begin(), Regs.end());
2360
2361    ARMOperand *Op = new ARMOperand(Kind);
2362    for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2363           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2364      Op->Registers.push_back(I->second);
2365    Op->StartLoc = StartLoc;
2366    Op->EndLoc = EndLoc;
2367    return Op;
2368  }
2369
2370  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2371                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2372    ARMOperand *Op = new ARMOperand(k_VectorList);
2373    Op->VectorList.RegNum = RegNum;
2374    Op->VectorList.Count = Count;
2375    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2376    Op->StartLoc = S;
2377    Op->EndLoc = E;
2378    return Op;
2379  }
2380
2381  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2382                                              bool isDoubleSpaced,
2383                                              SMLoc S, SMLoc E) {
2384    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2385    Op->VectorList.RegNum = RegNum;
2386    Op->VectorList.Count = Count;
2387    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2388    Op->StartLoc = S;
2389    Op->EndLoc = E;
2390    return Op;
2391  }
2392
2393  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2394                                             unsigned Index,
2395                                             bool isDoubleSpaced,
2396                                             SMLoc S, SMLoc E) {
2397    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2398    Op->VectorList.RegNum = RegNum;
2399    Op->VectorList.Count = Count;
2400    Op->VectorList.LaneIndex = Index;
2401    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2402    Op->StartLoc = S;
2403    Op->EndLoc = E;
2404    return Op;
2405  }
2406
2407  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2408                                       MCContext &Ctx) {
2409    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2410    Op->VectorIndex.Val = Idx;
2411    Op->StartLoc = S;
2412    Op->EndLoc = E;
2413    return Op;
2414  }
2415
2416  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2417    ARMOperand *Op = new ARMOperand(k_Immediate);
2418    Op->Imm.Val = Val;
2419    Op->StartLoc = S;
2420    Op->EndLoc = E;
2421    return Op;
2422  }
2423
2424  static ARMOperand *CreateMem(unsigned BaseRegNum,
2425                               const MCConstantExpr *OffsetImm,
2426                               unsigned OffsetRegNum,
2427                               ARM_AM::ShiftOpc ShiftType,
2428                               unsigned ShiftImm,
2429                               unsigned Alignment,
2430                               bool isNegative,
2431                               SMLoc S, SMLoc E) {
2432    ARMOperand *Op = new ARMOperand(k_Memory);
2433    Op->Memory.BaseRegNum = BaseRegNum;
2434    Op->Memory.OffsetImm = OffsetImm;
2435    Op->Memory.OffsetRegNum = OffsetRegNum;
2436    Op->Memory.ShiftType = ShiftType;
2437    Op->Memory.ShiftImm = ShiftImm;
2438    Op->Memory.Alignment = Alignment;
2439    Op->Memory.isNegative = isNegative;
2440    Op->StartLoc = S;
2441    Op->EndLoc = E;
2442    return Op;
2443  }
2444
2445  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2446                                      ARM_AM::ShiftOpc ShiftTy,
2447                                      unsigned ShiftImm,
2448                                      SMLoc S, SMLoc E) {
2449    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2450    Op->PostIdxReg.RegNum = RegNum;
2451    Op->PostIdxReg.isAdd = isAdd;
2452    Op->PostIdxReg.ShiftTy = ShiftTy;
2453    Op->PostIdxReg.ShiftImm = ShiftImm;
2454    Op->StartLoc = S;
2455    Op->EndLoc = E;
2456    return Op;
2457  }
2458
2459  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2460    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2461    Op->MBOpt.Val = Opt;
2462    Op->StartLoc = S;
2463    Op->EndLoc = S;
2464    return Op;
2465  }
2466
2467  static ARMOperand *CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,
2468                                              SMLoc S) {
2469    ARMOperand *Op = new ARMOperand(k_InstSyncBarrierOpt);
2470    Op->ISBOpt.Val = Opt;
2471    Op->StartLoc = S;
2472    Op->EndLoc = S;
2473    return Op;
2474  }
2475
2476  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2477    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2478    Op->IFlags.Val = IFlags;
2479    Op->StartLoc = S;
2480    Op->EndLoc = S;
2481    return Op;
2482  }
2483
2484  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2485    ARMOperand *Op = new ARMOperand(k_MSRMask);
2486    Op->MMask.Val = MMask;
2487    Op->StartLoc = S;
2488    Op->EndLoc = S;
2489    return Op;
2490  }
2491};
2492
2493} // end anonymous namespace.
2494
2495void ARMOperand::print(raw_ostream &OS) const {
2496  switch (Kind) {
2497  case k_CondCode:
2498    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2499    break;
2500  case k_CCOut:
2501    OS << "<ccout " << getReg() << ">";
2502    break;
2503  case k_ITCondMask: {
2504    static const char *const MaskStr[] = {
2505      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2506      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2507    };
2508    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2509    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2510    break;
2511  }
2512  case k_CoprocNum:
2513    OS << "<coprocessor number: " << getCoproc() << ">";
2514    break;
2515  case k_CoprocReg:
2516    OS << "<coprocessor register: " << getCoproc() << ">";
2517    break;
2518  case k_CoprocOption:
2519    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2520    break;
2521  case k_MSRMask:
2522    OS << "<mask: " << getMSRMask() << ">";
2523    break;
2524  case k_Immediate:
2525    getImm()->print(OS);
2526    break;
2527  case k_MemBarrierOpt:
2528    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
2529    break;
2530  case k_InstSyncBarrierOpt:
2531    OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
2532    break;
2533  case k_Memory:
2534    OS << "<memory "
2535       << " base:" << Memory.BaseRegNum;
2536    OS << ">";
2537    break;
2538  case k_PostIndexRegister:
2539    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2540       << PostIdxReg.RegNum;
2541    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2542      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2543         << PostIdxReg.ShiftImm;
2544    OS << ">";
2545    break;
2546  case k_ProcIFlags: {
2547    OS << "<ARM_PROC::";
2548    unsigned IFlags = getProcIFlags();
2549    for (int i=2; i >= 0; --i)
2550      if (IFlags & (1 << i))
2551        OS << ARM_PROC::IFlagsToString(1 << i);
2552    OS << ">";
2553    break;
2554  }
2555  case k_Register:
2556    OS << "<register " << getReg() << ">";
2557    break;
2558  case k_ShifterImmediate:
2559    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2560       << " #" << ShifterImm.Imm << ">";
2561    break;
2562  case k_ShiftedRegister:
2563    OS << "<so_reg_reg "
2564       << RegShiftedReg.SrcReg << " "
2565       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2566       << " " << RegShiftedReg.ShiftReg << ">";
2567    break;
2568  case k_ShiftedImmediate:
2569    OS << "<so_reg_imm "
2570       << RegShiftedImm.SrcReg << " "
2571       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2572       << " #" << RegShiftedImm.ShiftImm << ">";
2573    break;
2574  case k_RotateImmediate:
2575    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2576    break;
2577  case k_BitfieldDescriptor:
2578    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2579       << ", width: " << Bitfield.Width << ">";
2580    break;
2581  case k_RegisterList:
2582  case k_DPRRegisterList:
2583  case k_SPRRegisterList: {
2584    OS << "<register_list ";
2585
2586    const SmallVectorImpl<unsigned> &RegList = getRegList();
2587    for (SmallVectorImpl<unsigned>::const_iterator
2588           I = RegList.begin(), E = RegList.end(); I != E; ) {
2589      OS << *I;
2590      if (++I < E) OS << ", ";
2591    }
2592
2593    OS << ">";
2594    break;
2595  }
2596  case k_VectorList:
2597    OS << "<vector_list " << VectorList.Count << " * "
2598       << VectorList.RegNum << ">";
2599    break;
2600  case k_VectorListAllLanes:
2601    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2602       << VectorList.RegNum << ">";
2603    break;
2604  case k_VectorListIndexed:
2605    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2606       << VectorList.Count << " * " << VectorList.RegNum << ">";
2607    break;
2608  case k_Token:
2609    OS << "'" << getToken() << "'";
2610    break;
2611  case k_VectorIndex:
2612    OS << "<vectorindex " << getVectorIndex() << ">";
2613    break;
2614  }
2615}
2616
2617/// @name Auto-generated Match Functions
2618/// {
2619
2620static unsigned MatchRegisterName(StringRef Name);
2621
2622/// }
2623
2624bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2625                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2626  StartLoc = Parser.getTok().getLoc();
2627  EndLoc = Parser.getTok().getEndLoc();
2628  RegNo = tryParseRegister();
2629
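      // tryParseRegister() returns -1 when the token is not a register name,
      // so report failure (true) in that case.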
2630  return (RegNo == (unsigned)-1);
2631}
2632
2633/// Try to parse a register name.  The token must be an Identifier when called,
2634/// and if it is a register name the token is eaten and the register number is
2635/// returned.  Otherwise return -1.
2636///
2637int ARMAsmParser::tryParseRegister() {
2638  const AsmToken &Tok = Parser.getTok();
2639  if (Tok.isNot(AsmToken::Identifier)) return -1;
2640
2641  std::string lowerCase = Tok.getString().lower();
2642  unsigned RegNum = MatchRegisterName(lowerCase);
2643  if (!RegNum) {
2644    RegNum = StringSwitch<unsigned>(lowerCase)
2645      .Case("r13", ARM::SP)
2646      .Case("r14", ARM::LR)
2647      .Case("r15", ARM::PC)
2648      .Case("ip", ARM::R12)
2649      // Additional register name aliases for 'gas' compatibility.
2650      .Case("a1", ARM::R0)
2651      .Case("a2", ARM::R1)
2652      .Case("a3", ARM::R2)
2653      .Case("a4", ARM::R3)
2654      .Case("v1", ARM::R4)
2655      .Case("v2", ARM::R5)
2656      .Case("v3", ARM::R6)
2657      .Case("v4", ARM::R7)
2658      .Case("v5", ARM::R8)
2659      .Case("v6", ARM::R9)
2660      .Case("v7", ARM::R10)
2661      .Case("v8", ARM::R11)
2662      .Case("sb", ARM::R9)
2663      .Case("sl", ARM::R10)
2664      .Case("fp", ARM::R11)
2665      .Default(0);
2666  }
2667  if (!RegNum) {
2668    // Check for aliases registered via .req. Canonicalize to lower case.
2669    // That's more consistent since register names are case insensitive, and
2670    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2671    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2672    // If no match, return failure.
2673    if (Entry == RegisterReqs.end())
2674      return -1;
2675    Parser.Lex(); // Eat identifier token.
2676    return Entry->getValue();
2677  }
2678
2679  Parser.Lex(); // Eat identifier token.
2680
2681  return RegNum;
2682}
2683
2684// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2685// If a recoverable error occurs, return 1. If an irrecoverable error
2686// occurs, return -1. An irrecoverable error is one where tokens have been
2687// consumed in the process of trying to parse the shifter (i.e., when it is
2688// indeed a shifter operand, but malformed).
2689int ARMAsmParser::tryParseShiftRegister(
2690                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2691  SMLoc S = Parser.getTok().getLoc();
2692  const AsmToken &Tok = Parser.getTok();
2693  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2694
2695  std::string lowerCase = Tok.getString().lower();
2696  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2697      .Case("asl", ARM_AM::lsl)
2698      .Case("lsl", ARM_AM::lsl)
2699      .Case("lsr", ARM_AM::lsr)
2700      .Case("asr", ARM_AM::asr)
2701      .Case("ror", ARM_AM::ror)
2702      .Case("rrx", ARM_AM::rrx)
2703      .Default(ARM_AM::no_shift);
2704
2705  if (ShiftTy == ARM_AM::no_shift)
2706    return 1;
2707
2708  Parser.Lex(); // Eat the operator.
2709
2710  // The source register for the shift has already been added to the
2711  // operand list, so we need to pop it off and combine it into the shifted
2712  // register operand instead.
2713  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2714  if (!PrevOp->isReg())
2715    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2716  int SrcReg = PrevOp->getReg();
2717
2718  SMLoc EndLoc;
2719  int64_t Imm = 0;
2720  int ShiftReg = 0;
2721  if (ShiftTy == ARM_AM::rrx) {
2722    // RRX doesn't have an explicit shift amount. The encoder expects
2723    // the shift register to be the same as the source register. Seems odd,
2724    // but OK.
2725    ShiftReg = SrcReg;
2726  } else {
2727    // Figure out if this is shifted by a constant or a register (for non-RRX).
2728    if (Parser.getTok().is(AsmToken::Hash) ||
2729        Parser.getTok().is(AsmToken::Dollar)) {
2730      Parser.Lex(); // Eat hash.
2731      SMLoc ImmLoc = Parser.getTok().getLoc();
2732      const MCExpr *ShiftExpr = 0;
2733      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
2734        Error(ImmLoc, "invalid immediate shift value");
2735        return -1;
2736      }
2737      // The expression must be evaluatable as an immediate.
2738      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2739      if (!CE) {
2740        Error(ImmLoc, "invalid immediate shift value");
2741        return -1;
2742      }
2743      // Range check the immediate.
2744      // lsl, ror: 0 <= imm <= 31
2745      // lsr, asr: 0 <= imm <= 32
2746      Imm = CE->getValue();
2747      if (Imm < 0 ||
2748          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2749          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2750        Error(ImmLoc, "immediate shift value out of range");
2751        return -1;
2752      }
2753      // A shift by zero is a nop. Always send it through as lsl
2754      // ('as' compatibility).
2755      if (Imm == 0)
2756        ShiftTy = ARM_AM::lsl;
2757    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2758      SMLoc L = Parser.getTok().getLoc();
2759      EndLoc = Parser.getTok().getEndLoc();
2760      ShiftReg = tryParseRegister();
2761      if (ShiftReg == -1) {
2762        Error (L, "expected immediate or register in shift operand");
2763        return -1;
2764      }
2765    } else {
2766      Error (Parser.getTok().getLoc(),
2767                    "expected immediate or register in shift operand");
2768      return -1;
2769    }
2770  }
2771
2772  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2773    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2774                                                         ShiftReg, Imm,
2775                                                         S, EndLoc));
2776  else
2777    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2778                                                          S, EndLoc));
2779
2780  return 0;
2781}
2782
2783
2784/// Try to parse a register name. The token must be an Identifier when called.
2785/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2786/// if there is a writeback ('!'). Returns 'true' if it's not a register.
2787///
2788/// TODO this is likely to change to allow different register types and or to
2789/// parse for a specific register type.
2790bool ARMAsmParser::
2791tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2792  const AsmToken &RegTok = Parser.getTok();
2793  int RegNo = tryParseRegister();
2794  if (RegNo == -1)
2795    return true;
2796
2797  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
2798                                           RegTok.getEndLoc()));
2799
2800  const AsmToken &ExclaimTok = Parser.getTok();
2801  if (ExclaimTok.is(AsmToken::Exclaim)) {
2802    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2803                                               ExclaimTok.getLoc()));
2804    Parser.Lex(); // Eat exclaim token
2805    return false;
2806  }
2807
2808  // Also check for an index operand. This is only legal for vector registers,
2809  // but that'll get caught OK in operand matching, so we don't need to
2810  // explicitly filter everything else out here.
2811  if (Parser.getTok().is(AsmToken::LBrac)) {
2812    SMLoc SIdx = Parser.getTok().getLoc();
2813    Parser.Lex(); // Eat left bracket token.
2814
2815    const MCExpr *ImmVal;
2816    if (getParser().parseExpression(ImmVal))
2817      return true;
2818    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2819    if (!MCE)
2820      return TokError("immediate value expected for vector index");
2821
2822    if (Parser.getTok().isNot(AsmToken::RBrac))
2823      return Error(Parser.getTok().getLoc(), "']' expected");
2824
2825    SMLoc E = Parser.getTok().getEndLoc();
2826    Parser.Lex(); // Eat right bracket token.
2827
2828    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2829                                                     SIdx, E,
2830                                                     getContext()));
2831  }
2832
2833  return false;
2834}
2835
2836/// MatchCoprocessorOperandName - Try to match a symbolic coprocessor operand
2837/// name for a coprocessor-related instruction. Examples: "p1", "p7", "c3",
2838/// "c5", ...
2839static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2840  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2841  // but efficient.
2842  switch (Name.size()) {
2843  default: return -1;
2844  case 2:
2845    if (Name[0] != CoprocOp)
2846      return -1;
2847    switch (Name[1]) {
2848    default:  return -1;
2849    case '0': return 0;
2850    case '1': return 1;
2851    case '2': return 2;
2852    case '3': return 3;
2853    case '4': return 4;
2854    case '5': return 5;
2855    case '6': return 6;
2856    case '7': return 7;
2857    case '8': return 8;
2858    case '9': return 9;
2859    }
2860  case 3:
2861    if (Name[0] != CoprocOp || Name[1] != '1')
2862      return -1;
2863    switch (Name[2]) {
2864    default:  return -1;
2865    case '0': return 10;
2866    case '1': return 11;
2867    case '2': return 12;
2868    case '3': return 13;
2869    case '4': return 14;
2870    case '5': return 15;
2871    }
2872  }
2873}
2874
2875/// parseITCondCode - Try to parse a condition code for an IT instruction.
2876ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2877parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2878  SMLoc S = Parser.getTok().getLoc();
2879  const AsmToken &Tok = Parser.getTok();
2880  if (!Tok.is(AsmToken::Identifier))
2881    return MatchOperand_NoMatch;
2882  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2883    .Case("eq", ARMCC::EQ)
2884    .Case("ne", ARMCC::NE)
2885    .Case("hs", ARMCC::HS)
2886    .Case("cs", ARMCC::HS)
2887    .Case("lo", ARMCC::LO)
2888    .Case("cc", ARMCC::LO)
2889    .Case("mi", ARMCC::MI)
2890    .Case("pl", ARMCC::PL)
2891    .Case("vs", ARMCC::VS)
2892    .Case("vc", ARMCC::VC)
2893    .Case("hi", ARMCC::HI)
2894    .Case("ls", ARMCC::LS)
2895    .Case("ge", ARMCC::GE)
2896    .Case("lt", ARMCC::LT)
2897    .Case("gt", ARMCC::GT)
2898    .Case("le", ARMCC::LE)
2899    .Case("al", ARMCC::AL)
2900    .Default(~0U);
2901  if (CC == ~0U)
2902    return MatchOperand_NoMatch;
2903  Parser.Lex(); // Eat the token.
2904
2905  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2906
2907  return MatchOperand_Success;
2908}
2909
2910/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2911/// token must be an Identifier when called, and if it is a coprocessor
2912/// number, the token is eaten and the operand is added to the operand list.
2913ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2914parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2915  SMLoc S = Parser.getTok().getLoc();
2916  const AsmToken &Tok = Parser.getTok();
2917  if (Tok.isNot(AsmToken::Identifier))
2918    return MatchOperand_NoMatch;
2919
2920  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2921  if (Num == -1)
2922    return MatchOperand_NoMatch;
2923
2924  Parser.Lex(); // Eat identifier token.
2925  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2926  return MatchOperand_Success;
2927}
2928
2929/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2930/// token must be an Identifier when called, and if it is a coprocessor
2931/// register, the token is eaten and the operand is added to the operand list.
2932ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2933parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2934  SMLoc S = Parser.getTok().getLoc();
2935  const AsmToken &Tok = Parser.getTok();
2936  if (Tok.isNot(AsmToken::Identifier))
2937    return MatchOperand_NoMatch;
2938
2939  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2940  if (Reg == -1)
2941    return MatchOperand_NoMatch;
2942
2943  Parser.Lex(); // Eat identifier token.
2944  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2945  return MatchOperand_Success;
2946}
2947
2948/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2949/// coproc_option : '{' imm0_255 '}'
2950ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2951parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2952  SMLoc S = Parser.getTok().getLoc();
2953
2954  // If this isn't a '{', this isn't a coprocessor immediate operand.
2955  if (Parser.getTok().isNot(AsmToken::LCurly))
2956    return MatchOperand_NoMatch;
2957  Parser.Lex(); // Eat the '{'
2958
2959  const MCExpr *Expr;
2960  SMLoc Loc = Parser.getTok().getLoc();
2961  if (getParser().parseExpression(Expr)) {
2962    Error(Loc, "illegal expression");
2963    return MatchOperand_ParseFail;
2964  }
2965  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2966  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2967    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2968    return MatchOperand_ParseFail;
2969  }
2970  int Val = CE->getValue();
2971
2972  // Check for and consume the closing '}'
2973  if (Parser.getTok().isNot(AsmToken::RCurly))
2974    return MatchOperand_ParseFail;
2975  SMLoc E = Parser.getTok().getEndLoc();
2976  Parser.Lex(); // Eat the '}'
2977
2978  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2979  return MatchOperand_Success;
2980}
2981
2982// For register list parsing, we need to map from raw GPR register numbering
2983// to the enumeration values. The enumeration values aren't sorted by
2984// register number due to our using "sp", "lr" and "pc" as canonical names.
2985static unsigned getNextRegister(unsigned Reg) {
2986  // If this is a GPR, we need to do it manually; otherwise we can rely
2987  // on the sort ordering of the enumeration, since the other reg-classes
2988  // are sane.
2989  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2990    return Reg + 1;
2991  switch(Reg) {
2992  default: llvm_unreachable("Invalid GPR number!");
2993  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2994  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2995  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2996  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2997  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2998  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2999  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3000  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3001  }
3002}
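
// For example (following the GPR mapping above, which is used when expanding
// ranges like "r10-lr" in parseRegisterList below):
//   getNextRegister(ARM::R3)  == ARM::R4
//   getNextRegister(ARM::R12) == ARM::SP
//   getNextRegister(ARM::SP)  == ARM::LR
//   getNextRegister(ARM::LR)  == ARM::PC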
3003
3004// Return the low-subreg of a given Q register.
3005static unsigned getDRegFromQReg(unsigned QReg) {
3006  switch (QReg) {
3007  default: llvm_unreachable("expected a Q register!");
3008  case ARM::Q0:  return ARM::D0;
3009  case ARM::Q1:  return ARM::D2;
3010  case ARM::Q2:  return ARM::D4;
3011  case ARM::Q3:  return ARM::D6;
3012  case ARM::Q4:  return ARM::D8;
3013  case ARM::Q5:  return ARM::D10;
3014  case ARM::Q6:  return ARM::D12;
3015  case ARM::Q7:  return ARM::D14;
3016  case ARM::Q8:  return ARM::D16;
3017  case ARM::Q9:  return ARM::D18;
3018  case ARM::Q10: return ARM::D20;
3019  case ARM::Q11: return ARM::D22;
3020  case ARM::Q12: return ARM::D24;
3021  case ARM::Q13: return ARM::D26;
3022  case ARM::Q14: return ARM::D28;
3023  case ARM::Q15: return ARM::D30;
3024  }
3025}
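
// For example:
//   getDRegFromQReg(ARM::Q0) == ARM::D0   // Q0 covers {D0, D1}
//   getDRegFromQReg(ARM::Q3) == ARM::D6   // Q3 covers {D6, D7}
// This mapping is relied upon below whenever a Q register in a list is
// expanded into its two D sub-registers.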
3026
3027/// Parse a register list.
3028bool ARMAsmParser::
3029parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3030  assert(Parser.getTok().is(AsmToken::LCurly) &&
3031         "Token is not a Left Curly Brace");
3032  SMLoc S = Parser.getTok().getLoc();
3033  Parser.Lex(); // Eat '{' token.
3034  SMLoc RegLoc = Parser.getTok().getLoc();
3035
3036  // Check the first register in the list to see what register class
3037  // this is a list of.
3038  int Reg = tryParseRegister();
3039  if (Reg == -1)
3040    return Error(RegLoc, "register expected");
3041
3042  // The reglist instructions have at most 16 registers, so reserve
3043  // space for that many.
3044  int EReg = 0;
3045  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3046
3047  // Allow Q regs and just interpret them as the two D sub-registers.
3048  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3049    Reg = getDRegFromQReg(Reg);
3050    EReg = MRI->getEncodingValue(Reg);
3051    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3052    ++Reg;
3053  }
3054  const MCRegisterClass *RC;
3055  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3056    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3057  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3058    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3059  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3060    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3061  else
3062    return Error(RegLoc, "invalid register in register list");
3063
3064  // Store the register.
3065  EReg = MRI->getEncodingValue(Reg);
3066  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3067
3068  // This starts immediately after the first register token in the list,
3069  // so we can see either a comma or a minus (range separator) as a legal
3070  // next token.
3071  while (Parser.getTok().is(AsmToken::Comma) ||
3072         Parser.getTok().is(AsmToken::Minus)) {
3073    if (Parser.getTok().is(AsmToken::Minus)) {
3074      Parser.Lex(); // Eat the minus.
3075      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3076      int EndReg = tryParseRegister();
3077      if (EndReg == -1)
3078        return Error(AfterMinusLoc, "register expected");
3079      // Allow Q regs and just interpret them as the two D sub-registers.
3080      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3081        EndReg = getDRegFromQReg(EndReg) + 1;
3082      // If the register is the same as the start reg, there's nothing
3083      // more to do.
3084      if (Reg == EndReg)
3085        continue;
3086      // The register must be in the same register class as the first.
3087      if (!RC->contains(EndReg))
3088        return Error(AfterMinusLoc, "invalid register in register list");
3089      // Ranges must go from low to high.
3090      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3091        return Error(AfterMinusLoc, "bad range in register list");
3092
3093      // Add all the registers in the range to the register list.
3094      while (Reg != EndReg) {
3095        Reg = getNextRegister(Reg);
3096        EReg = MRI->getEncodingValue(Reg);
3097        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3098      }
3099      continue;
3100    }
3101    Parser.Lex(); // Eat the comma.
3102    RegLoc = Parser.getTok().getLoc();
3103    int OldReg = Reg;
3104    const AsmToken RegTok = Parser.getTok();
3105    Reg = tryParseRegister();
3106    if (Reg == -1)
3107      return Error(RegLoc, "register expected");
3108    // Allow Q regs and just interpret them as the two D sub-registers.
3109    bool isQReg = false;
3110    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3111      Reg = getDRegFromQReg(Reg);
3112      isQReg = true;
3113    }
3114    // The register must be in the same register class as the first.
3115    if (!RC->contains(Reg))
3116      return Error(RegLoc, "invalid register in register list");
3117    // List must be monotonically increasing.
3118    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3119      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3120        Warning(RegLoc, "register list not in ascending order");
3121      else
3122        return Error(RegLoc, "register list not in ascending order");
3123    }
3124    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3125      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3126              ") in register list");
3127      continue;
3128    }
3129    // VFP register lists must also be contiguous.
3130    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3131        Reg != OldReg + 1)
3132      return Error(RegLoc, "non-contiguous register range");
3133    EReg = MRI->getEncodingValue(Reg);
3134    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3135    if (isQReg) {
3136      EReg = MRI->getEncodingValue(++Reg);
3137      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3138    }
3139  }
3140
3141  if (Parser.getTok().isNot(AsmToken::RCurly))
3142    return Error(Parser.getTok().getLoc(), "'}' expected");
3143  SMLoc E = Parser.getTok().getEndLoc();
3144  Parser.Lex(); // Eat '}' token.
3145
3146  // Push the register list operand.
3147  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3148
3149  // The ARM system instruction variants for LDM/STM have a '^' token here.
3150  if (Parser.getTok().is(AsmToken::Caret)) {
3151    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3152    Parser.Lex(); // Eat '^' token.
3153  }
3154
3155  return false;
3156}
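
// A rough sketch of the register-list syntax accepted above (illustrative,
// not exhaustive):
//   {r0, r1, r2}        // simple GPR list
//   {r0-r3, r12, lr}    // ranges expand via getNextRegister()
//   {d0-d3}             // DPR list; must be contiguous
//   {q0, q1}            // Q regs are expanded to {d0, d1, d2, d3}
//   {r4-r11, lr}^       // trailing '^' for the LDM/STM system variants
// A non-ascending GPR list only produces a warning; for VFP lists it is an
// error, as is any non-contiguous VFP range.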
3157
3158// Helper function to parse the lane index for vector lists.
3159ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3160parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3161  Index = 0; // Always return a defined index value.
3162  if (Parser.getTok().is(AsmToken::LBrac)) {
3163    Parser.Lex(); // Eat the '['.
3164    if (Parser.getTok().is(AsmToken::RBrac)) {
3165      // "Dn[]" is the 'all lanes' syntax.
3166      LaneKind = AllLanes;
3167      EndLoc = Parser.getTok().getEndLoc();
3168      Parser.Lex(); // Eat the ']'.
3169      return MatchOperand_Success;
3170    }
3171
3172    // There's an optional '#' token here. Normally there wouldn't be, but
3173    // inline assembly puts one in, and it's friendly to accept that.
3174    if (Parser.getTok().is(AsmToken::Hash))
3175      Parser.Lex(); // Eat the '#'.
3176
3177    const MCExpr *LaneIndex;
3178    SMLoc Loc = Parser.getTok().getLoc();
3179    if (getParser().parseExpression(LaneIndex)) {
3180      Error(Loc, "illegal expression");
3181      return MatchOperand_ParseFail;
3182    }
3183    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3184    if (!CE) {
3185      Error(Loc, "lane index must be empty or an integer");
3186      return MatchOperand_ParseFail;
3187    }
3188    if (Parser.getTok().isNot(AsmToken::RBrac)) {
3189      Error(Parser.getTok().getLoc(), "']' expected");
3190      return MatchOperand_ParseFail;
3191    }
3192    EndLoc = Parser.getTok().getEndLoc();
3193    Parser.Lex(); // Eat the ']'.
3194    int64_t Val = CE->getValue();
3195
3196    // FIXME: Make this range check context sensitive for .8, .16, .32.
3197    if (Val < 0 || Val > 7) {
3198      Error(Parser.getTok().getLoc(), "lane index out of range");
3199      return MatchOperand_ParseFail;
3200    }
3201    Index = Val;
3202    LaneKind = IndexedLane;
3203    return MatchOperand_Success;
3204  }
3205  LaneKind = NoLanes;
3206  return MatchOperand_Success;
3207}
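
// For example, given a D register that has already been parsed, the text that
// may follow and the resulting LaneKind are (illustrative):
//   ""       -> NoLanes
//   "[]"     -> AllLanes
//   "[2]"    -> IndexedLane, Index == 2
//   "[#1]"   -> IndexedLane, Index == 1 (the optional '#' is accepted)
// Anything else inside the brackets is a parse failure.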
3208
3209// parse a vector register list
3210ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3211parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3212  VectorLaneTy LaneKind;
3213  unsigned LaneIndex;
3214  SMLoc S = Parser.getTok().getLoc();
3215  // As an extension (to match gas), support a plain D register or Q register
3216  // (without enclosing curly braces) as a single or double entry list,
3217  // respectively.
3218  if (Parser.getTok().is(AsmToken::Identifier)) {
3219    SMLoc E = Parser.getTok().getEndLoc();
3220    int Reg = tryParseRegister();
3221    if (Reg == -1)
3222      return MatchOperand_NoMatch;
3223    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3224      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3225      if (Res != MatchOperand_Success)
3226        return Res;
3227      switch (LaneKind) {
3228      case NoLanes:
3229        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3230        break;
3231      case AllLanes:
3232        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3233                                                                S, E));
3234        break;
3235      case IndexedLane:
3236        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3237                                                               LaneIndex,
3238                                                               false, S, E));
3239        break;
3240      }
3241      return MatchOperand_Success;
3242    }
3243    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3244      Reg = getDRegFromQReg(Reg);
3245      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3246      if (Res != MatchOperand_Success)
3247        return Res;
3248      switch (LaneKind) {
3249      case NoLanes:
3250        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3251                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3252        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3253        break;
3254      case AllLanes:
3255        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3256                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3257        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3258                                                                S, E));
3259        break;
3260      case IndexedLane:
3261        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3262                                                               LaneIndex,
3263                                                               false, S, E));
3264        break;
3265      }
3266      return MatchOperand_Success;
3267    }
3268    Error(S, "vector register expected");
3269    return MatchOperand_ParseFail;
3270  }
3271
3272  if (Parser.getTok().isNot(AsmToken::LCurly))
3273    return MatchOperand_NoMatch;
3274
3275  Parser.Lex(); // Eat '{' token.
3276  SMLoc RegLoc = Parser.getTok().getLoc();
3277
3278  int Reg = tryParseRegister();
3279  if (Reg == -1) {
3280    Error(RegLoc, "register expected");
3281    return MatchOperand_ParseFail;
3282  }
3283  unsigned Count = 1;
3284  int Spacing = 0;
3285  unsigned FirstReg = Reg;
3286  // The list is of D registers, but we also allow Q regs and just interpret
3287  // them as the two D sub-registers.
3288  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3289    FirstReg = Reg = getDRegFromQReg(Reg);
3290    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3291                 // it's ambiguous with four-register single spaced.
3292    ++Reg;
3293    ++Count;
3294  }
3295
3296  SMLoc E;
3297  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3298    return MatchOperand_ParseFail;
3299
3300  while (Parser.getTok().is(AsmToken::Comma) ||
3301         Parser.getTok().is(AsmToken::Minus)) {
3302    if (Parser.getTok().is(AsmToken::Minus)) {
3303      if (!Spacing)
3304        Spacing = 1; // Register range implies a single spaced list.
3305      else if (Spacing == 2) {
3306        Error(Parser.getTok().getLoc(),
3307              "sequential registers in double spaced list");
3308        return MatchOperand_ParseFail;
3309      }
3310      Parser.Lex(); // Eat the minus.
3311      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3312      int EndReg = tryParseRegister();
3313      if (EndReg == -1) {
3314        Error(AfterMinusLoc, "register expected");
3315        return MatchOperand_ParseFail;
3316      }
3317      // Allow Q regs and just interpret them as the two D sub-registers.
3318      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3319        EndReg = getDRegFromQReg(EndReg) + 1;
3320      // If the register is the same as the start reg, there's nothing
3321      // more to do.
3322      if (Reg == EndReg)
3323        continue;
3324      // The register must be in the same register class as the first.
3325      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3326        Error(AfterMinusLoc, "invalid register in register list");
3327        return MatchOperand_ParseFail;
3328      }
3329      // Ranges must go from low to high.
3330      if (Reg > EndReg) {
3331        Error(AfterMinusLoc, "bad range in register list");
3332        return MatchOperand_ParseFail;
3333      }
3334      // Parse the lane specifier if present.
3335      VectorLaneTy NextLaneKind;
3336      unsigned NextLaneIndex;
3337      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3338          MatchOperand_Success)
3339        return MatchOperand_ParseFail;
3340      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3341        Error(AfterMinusLoc, "mismatched lane index in register list");
3342        return MatchOperand_ParseFail;
3343      }
3344
3345      // Add all the registers in the range to the register list.
3346      Count += EndReg - Reg;
3347      Reg = EndReg;
3348      continue;
3349    }
3350    Parser.Lex(); // Eat the comma.
3351    RegLoc = Parser.getTok().getLoc();
3352    int OldReg = Reg;
3353    Reg = tryParseRegister();
3354    if (Reg == -1) {
3355      Error(RegLoc, "register expected");
3356      return MatchOperand_ParseFail;
3357    }
3358    // Vector register lists must be contiguous.
3359    // It's OK to use the enumeration values directly here, as the
3360    // VFP register classes have the enum sorted properly.
3361    //
3362    // The list is of D registers, but we also allow Q regs and just interpret
3363    // them as the two D sub-registers.
3364    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3365      if (!Spacing)
3366        Spacing = 1; // Register range implies a single spaced list.
3367      else if (Spacing == 2) {
3368        Error(RegLoc,
3369              "invalid register in double-spaced list (must be 'D' register')");
3370        return MatchOperand_ParseFail;
3371      }
3372      Reg = getDRegFromQReg(Reg);
3373      if (Reg != OldReg + 1) {
3374        Error(RegLoc, "non-contiguous register range");
3375        return MatchOperand_ParseFail;
3376      }
3377      ++Reg;
3378      Count += 2;
3379      // Parse the lane specifier if present.
3380      VectorLaneTy NextLaneKind;
3381      unsigned NextLaneIndex;
3382      SMLoc LaneLoc = Parser.getTok().getLoc();
3383      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3384          MatchOperand_Success)
3385        return MatchOperand_ParseFail;
3386      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3387        Error(LaneLoc, "mismatched lane index in register list");
3388        return MatchOperand_ParseFail;
3389      }
3390      continue;
3391    }
3392    // Normal D register.
3393    // Figure out the register spacing (single or double) of the list if
3394    // we don't know it already.
3395    if (!Spacing)
3396      Spacing = 1 + (Reg == OldReg + 2);
3397
3398    // Just check that it's contiguous and keep going.
3399    if (Reg != OldReg + Spacing) {
3400      Error(RegLoc, "non-contiguous register range");
3401      return MatchOperand_ParseFail;
3402    }
3403    ++Count;
3404    // Parse the lane specifier if present.
3405    VectorLaneTy NextLaneKind;
3406    unsigned NextLaneIndex;
3407    SMLoc EndLoc = Parser.getTok().getLoc();
3408    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3409      return MatchOperand_ParseFail;
3410    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3411      Error(EndLoc, "mismatched lane index in register list");
3412      return MatchOperand_ParseFail;
3413    }
3414  }
3415
3416  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3417    Error(Parser.getTok().getLoc(), "'}' expected");
3418    return MatchOperand_ParseFail;
3419  }
3420  E = Parser.getTok().getEndLoc();
3421  Parser.Lex(); // Eat '}' token.
3422
3423  switch (LaneKind) {
3424  case NoLanes:
3425    // Two-register operands have been converted to the
3426    // composite register classes.
3427    if (Count == 2) {
3428      const MCRegisterClass *RC = (Spacing == 1) ?
3429        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3430        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3431      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3432    }
3433
3434    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3435                                                    (Spacing == 2), S, E));
3436    break;
3437  case AllLanes:
3438    // Two-register operands have been converted to the
3439    // composite register classes.
3440    if (Count == 2) {
3441      const MCRegisterClass *RC = (Spacing == 1) ?
3442        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3443        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3444      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3445    }
3446    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3447                                                            (Spacing == 2),
3448                                                            S, E));
3449    break;
3450  case IndexedLane:
3451    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3452                                                           LaneIndex,
3453                                                           (Spacing == 2),
3454                                                           S, E));
3455    break;
3456  }
3457  return MatchOperand_Success;
3458}
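
// Some representative vector lists and how they are classified (a sketch,
// matching the logic above rather than the full NEON syntax):
//   d1              // gas extension: bare D reg == one-register list
//   q2              // bare Q reg == two-register list {d4, d5}
//   {d0, d1, d2}    // single-spaced, Count == 3
//   {d0, d2, d4}    // double-spaced (Spacing == 2)
//   {d0-d3}         // range form; implies single spacing
//   {d0[], d1[]}    // all-lanes form
//   {d0[1], d1[1]}  // indexed form; lane indices must match across the list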
3459
3460/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3461ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3462parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3463  SMLoc S = Parser.getTok().getLoc();
3464  const AsmToken &Tok = Parser.getTok();
3465  unsigned Opt;
3466
3467  if (Tok.is(AsmToken::Identifier)) {
3468    StringRef OptStr = Tok.getString();
3469
3470    Opt = StringSwitch<unsigned>(OptStr.lower())
3471      .Case("sy",    ARM_MB::SY)
3472      .Case("st",    ARM_MB::ST)
3473      .Case("ld",    ARM_MB::LD)
3474      .Case("sh",    ARM_MB::ISH)
3475      .Case("ish",   ARM_MB::ISH)
3476      .Case("shst",  ARM_MB::ISHST)
3477      .Case("ishst", ARM_MB::ISHST)
3478      .Case("ishld", ARM_MB::ISHLD)
3479      .Case("nsh",   ARM_MB::NSH)
3480      .Case("un",    ARM_MB::NSH)
3481      .Case("nshst", ARM_MB::NSHST)
3482      .Case("nshld", ARM_MB::NSHLD)
3483      .Case("unst",  ARM_MB::NSHST)
3484      .Case("osh",   ARM_MB::OSH)
3485      .Case("oshst", ARM_MB::OSHST)
3486      .Case("oshld", ARM_MB::OSHLD)
3487      .Default(~0U);
3488
3489    // ishld, oshld, nshld and ld are only available from ARMv8.
3490    if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
3491                        Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
3492      Opt = ~0U;
3493
3494    if (Opt == ~0U)
3495      return MatchOperand_NoMatch;
3496
3497    Parser.Lex(); // Eat identifier token.
3498  } else if (Tok.is(AsmToken::Hash) ||
3499             Tok.is(AsmToken::Dollar) ||
3500             Tok.is(AsmToken::Integer)) {
3501    if (Parser.getTok().isNot(AsmToken::Integer))
3502      Parser.Lex(); // Eat '#' or '$'.
3503    SMLoc Loc = Parser.getTok().getLoc();
3504
3505    const MCExpr *MemBarrierID;
3506    if (getParser().parseExpression(MemBarrierID)) {
3507      Error(Loc, "illegal expression");
3508      return MatchOperand_ParseFail;
3509    }
3510
3511    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3512    if (!CE) {
3513      Error(Loc, "constant expression expected");
3514      return MatchOperand_ParseFail;
3515    }
3516
3517    int Val = CE->getValue();
3518    if (Val & ~0xf) {
3519      Error(Loc, "immediate value out of range");
3520      return MatchOperand_ParseFail;
3521    }
3522
3523    Opt = ARM_MB::RESERVED_0 + Val;
3524  } else
3525    return MatchOperand_ParseFail;
3526
3527  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3528  return MatchOperand_Success;
3529}
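
// For example, all of the following are accepted as the option operand of
// DMB/DSB (a sketch; see the StringSwitch above for the full set):
//   dmb sy          // full-system barrier
//   dmb ish         // "sh" is accepted as an alias for "ish"
//   dsb ishld       // load-only variants require ARMv8
//   dmb #6          // a raw 4-bit immediate is also accepted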
3530
3531/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
3532ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3533parseInstSyncBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3534  SMLoc S = Parser.getTok().getLoc();
3535  const AsmToken &Tok = Parser.getTok();
3536  unsigned Opt;
3537
3538  if (Tok.is(AsmToken::Identifier)) {
3539    StringRef OptStr = Tok.getString();
3540
3541    if (OptStr.lower() == "sy")
3542      Opt = ARM_ISB::SY;
3543    else
3544      return MatchOperand_NoMatch;
3545
3546    Parser.Lex(); // Eat identifier token.
3547  } else if (Tok.is(AsmToken::Hash) ||
3548             Tok.is(AsmToken::Dollar) ||
3549             Tok.is(AsmToken::Integer)) {
3550    if (Parser.getTok().isNot(AsmToken::Integer))
3551      Parser.Lex(); // Eat '#' or '$'.
3552    SMLoc Loc = Parser.getTok().getLoc();
3553
3554    const MCExpr *ISBarrierID;
3555    if (getParser().parseExpression(ISBarrierID)) {
3556      Error(Loc, "illegal expression");
3557      return MatchOperand_ParseFail;
3558    }
3559
3560    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
3561    if (!CE) {
3562      Error(Loc, "constant expression expected");
3563      return MatchOperand_ParseFail;
3564    }
3565
3566    int Val = CE->getValue();
3567    if (Val & ~0xf) {
3568      Error(Loc, "immediate value out of range");
3569      return MatchOperand_ParseFail;
3570    }
3571
3572    Opt = ARM_ISB::RESERVED_0 + Val;
3573  } else
3574    return MatchOperand_ParseFail;
3575
3576  Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
3577          (ARM_ISB::InstSyncBOpt)Opt, S));
3578  return MatchOperand_Success;
3579}
3580
3581
3582/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3583ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3584parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3585  SMLoc S = Parser.getTok().getLoc();
3586  const AsmToken &Tok = Parser.getTok();
3587  if (!Tok.is(AsmToken::Identifier))
3588    return MatchOperand_NoMatch;
3589  StringRef IFlagsStr = Tok.getString();
3590
3591  // An iflags string of "none" is interpreted to mean that none of the AIF
3592  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3593  unsigned IFlags = 0;
3594  if (IFlagsStr != "none") {
3595    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3596      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3597        .Case("a", ARM_PROC::A)
3598        .Case("i", ARM_PROC::I)
3599        .Case("f", ARM_PROC::F)
3600        .Default(~0U);
3601
3602      // If some specific iflag is already set, it means that some letter is
3603      // present more than once, which is not acceptable.
3604      if (Flag == ~0U || (IFlags & Flag))
3605        return MatchOperand_NoMatch;
3606
3607      IFlags |= Flag;
3608    }
3609  }
3610
3611  Parser.Lex(); // Eat identifier token.
3612  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3613  return MatchOperand_Success;
3614}
3615
3616/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3617ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3618parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3619  SMLoc S = Parser.getTok().getLoc();
3620  const AsmToken &Tok = Parser.getTok();
3621  if (!Tok.is(AsmToken::Identifier))
3622    return MatchOperand_NoMatch;
3623  StringRef Mask = Tok.getString();
3624
3625  if (isMClass()) {
3626    // See ARMv6-M 10.1.1
3627    std::string Name = Mask.lower();
3628    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3629      // Note: in the documentation:
3630      //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3631      //  for MSR APSR_nzcvq.
3632      // but we do make it an alias here.  This is done to get the "mask encoding"
3633      // bits correct on MSR APSR writes.
3634      //
3635      // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3636      // should really only be allowed when writing a special register.  Note
3637      // they get dropped in the MRS instruction reading a special register as
3638      // the SYSm field is only 8 bits.
3639      //
3640      // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3641      // includes the DSP extension but that is not checked.
3642      .Case("apsr", 0x800)
3643      .Case("apsr_nzcvq", 0x800)
3644      .Case("apsr_g", 0x400)
3645      .Case("apsr_nzcvqg", 0xc00)
3646      .Case("iapsr", 0x801)
3647      .Case("iapsr_nzcvq", 0x801)
3648      .Case("iapsr_g", 0x401)
3649      .Case("iapsr_nzcvqg", 0xc01)
3650      .Case("eapsr", 0x802)
3651      .Case("eapsr_nzcvq", 0x802)
3652      .Case("eapsr_g", 0x402)
3653      .Case("eapsr_nzcvqg", 0xc02)
3654      .Case("xpsr", 0x803)
3655      .Case("xpsr_nzcvq", 0x803)
3656      .Case("xpsr_g", 0x403)
3657      .Case("xpsr_nzcvqg", 0xc03)
3658      .Case("ipsr", 0x805)
3659      .Case("epsr", 0x806)
3660      .Case("iepsr", 0x807)
3661      .Case("msp", 0x808)
3662      .Case("psp", 0x809)
3663      .Case("primask", 0x810)
3664      .Case("basepri", 0x811)
3665      .Case("basepri_max", 0x812)
3666      .Case("faultmask", 0x813)
3667      .Case("control", 0x814)
3668      .Default(~0U);
3669
3670    if (FlagsVal == ~0U)
3671      return MatchOperand_NoMatch;
3672
3673    if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3674      // basepri, basepri_max and faultmask are only valid for ARMv7-M.
3675      return MatchOperand_NoMatch;
3676
3677    Parser.Lex(); // Eat identifier token.
3678    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3679    return MatchOperand_Success;
3680  }
3681
3682  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3683  size_t Start = 0, Next = Mask.find('_');
3684  StringRef Flags = "";
3685  std::string SpecReg = Mask.slice(Start, Next).lower();
3686  if (Next != StringRef::npos)
3687    Flags = Mask.slice(Next+1, Mask.size());
3688
3689  // FlagsVal contains the complete mask:
3690  // 3-0: Mask
3691  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3692  unsigned FlagsVal = 0;
3693
3694  if (SpecReg == "apsr") {
3695    FlagsVal = StringSwitch<unsigned>(Flags)
3696    .Case("nzcvq",  0x8) // same as CPSR_f
3697    .Case("g",      0x4) // same as CPSR_s
3698    .Case("nzcvqg", 0xc) // same as CPSR_fs
3699    .Default(~0U);
3700
3701    if (FlagsVal == ~0U) {
3702      if (!Flags.empty())
3703        return MatchOperand_NoMatch;
3704      else
3705        FlagsVal = 8; // No flag
3706    }
3707  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3708    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3709    if (Flags == "all" || Flags == "")
3710      Flags = "fc";
3711    for (int i = 0, e = Flags.size(); i != e; ++i) {
3712      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3713      .Case("c", 1)
3714      .Case("x", 2)
3715      .Case("s", 4)
3716      .Case("f", 8)
3717      .Default(~0U);
3718
3719      // If some specific flag is already set, it means that some letter is
3720      // present more than once, which is not acceptable.
3721      if (FlagsVal == ~0U || (FlagsVal & Flag))
3722        return MatchOperand_NoMatch;
3723      FlagsVal |= Flag;
3724    }
3725  } else // No match for special register.
3726    return MatchOperand_NoMatch;
3727
3728  // Special register without flags is NOT equivalent to "fc" flags.
3729  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3730  // two lines would enable gas compatibility at the expense of breaking
3731  // round-tripping.
3732  //
3733  // if (!FlagsVal)
3734  //  FlagsVal = 0x9;
3735
3736  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3737  if (SpecReg == "spsr")
3738    FlagsVal |= 16;
3739
3740  Parser.Lex(); // Eat identifier token.
3741  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3742  return MatchOperand_Success;
3743}
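
// A few examples of the mask operands recognized above (illustrative):
//   M-class:   apsr_nzcvq -> 0x800, basepri -> 0x811 (ARMv7-M and later only)
//   A/R-class: cpsr_fc -> 0x9, plain cpsr (alias for cpsr_fc) -> 0x9,
//              apsr_nzcvq -> 0x8, spsr_fsxc -> 0x1f (bit 4 set for SPSR)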
3744
3745ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3746parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3747            int Low, int High) {
3748  const AsmToken &Tok = Parser.getTok();
3749  if (Tok.isNot(AsmToken::Identifier)) {
3750    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3751    return MatchOperand_ParseFail;
3752  }
3753  StringRef ShiftName = Tok.getString();
3754  std::string LowerOp = Op.lower();
3755  std::string UpperOp = Op.upper();
3756  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3757    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3758    return MatchOperand_ParseFail;
3759  }
3760  Parser.Lex(); // Eat shift type token.
3761
3762  // There must be a '#' and a shift amount.
3763  if (Parser.getTok().isNot(AsmToken::Hash) &&
3764      Parser.getTok().isNot(AsmToken::Dollar)) {
3765    Error(Parser.getTok().getLoc(), "'#' expected");
3766    return MatchOperand_ParseFail;
3767  }
3768  Parser.Lex(); // Eat hash token.
3769
3770  const MCExpr *ShiftAmount;
3771  SMLoc Loc = Parser.getTok().getLoc();
3772  SMLoc EndLoc;
3773  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3774    Error(Loc, "illegal expression");
3775    return MatchOperand_ParseFail;
3776  }
3777  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3778  if (!CE) {
3779    Error(Loc, "constant expression expected");
3780    return MatchOperand_ParseFail;
3781  }
3782  int Val = CE->getValue();
3783  if (Val < Low || Val > High) {
3784    Error(Loc, "immediate value out of range");
3785    return MatchOperand_ParseFail;
3786  }
3787
3788  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
3789
3790  return MatchOperand_Success;
3791}
3792
3793ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3794parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3795  const AsmToken &Tok = Parser.getTok();
3796  SMLoc S = Tok.getLoc();
3797  if (Tok.isNot(AsmToken::Identifier)) {
3798    Error(S, "'be' or 'le' operand expected");
3799    return MatchOperand_ParseFail;
3800  }
3801  int Val = StringSwitch<int>(Tok.getString().lower())
3802    .Case("be", 1)
3803    .Case("le", 0)
3804    .Default(-1);
3805  Parser.Lex(); // Eat the token.
3806
3807  if (Val == -1) {
3808    Error(S, "'be' or 'le' operand expected");
3809    return MatchOperand_ParseFail;
3810  }
3811  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3812                                                                  getContext()),
3813                                           S, Tok.getEndLoc()));
3814  return MatchOperand_Success;
3815}
3816
3817/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3818/// instructions. Legal values are:
3819///     lsl #n  'n' in [0,31]
3820///     asr #n  'n' in [1,32]
3821///             n == 32 encoded as n == 0.
3822ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3823parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3824  const AsmToken &Tok = Parser.getTok();
3825  SMLoc S = Tok.getLoc();
3826  if (Tok.isNot(AsmToken::Identifier)) {
3827    Error(S, "shift operator 'asr' or 'lsl' expected");
3828    return MatchOperand_ParseFail;
3829  }
3830  StringRef ShiftName = Tok.getString();
3831  bool isASR;
3832  if (ShiftName == "lsl" || ShiftName == "LSL")
3833    isASR = false;
3834  else if (ShiftName == "asr" || ShiftName == "ASR")
3835    isASR = true;
3836  else {
3837    Error(S, "shift operator 'asr' or 'lsl' expected");
3838    return MatchOperand_ParseFail;
3839  }
3840  Parser.Lex(); // Eat the operator.
3841
3842  // A '#' and a shift amount.
3843  if (Parser.getTok().isNot(AsmToken::Hash) &&
3844      Parser.getTok().isNot(AsmToken::Dollar)) {
3845    Error(Parser.getTok().getLoc(), "'#' expected");
3846    return MatchOperand_ParseFail;
3847  }
3848  Parser.Lex(); // Eat hash token.
3849  SMLoc ExLoc = Parser.getTok().getLoc();
3850
3851  const MCExpr *ShiftAmount;
3852  SMLoc EndLoc;
3853  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3854    Error(ExLoc, "malformed shift expression");
3855    return MatchOperand_ParseFail;
3856  }
3857  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3858  if (!CE) {
3859    Error(ExLoc, "shift amount must be an immediate");
3860    return MatchOperand_ParseFail;
3861  }
3862
3863  int64_t Val = CE->getValue();
3864  if (isASR) {
3865    // Shift amount must be in [1,32]
3866    if (Val < 1 || Val > 32) {
3867      Error(ExLoc, "'asr' shift amount must be in range [1,32]");
3868      return MatchOperand_ParseFail;
3869    }
3870    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3871    if (isThumb() && Val == 32) {
3872      Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
3873      return MatchOperand_ParseFail;
3874    }
3875    if (Val == 32) Val = 0;
3876  } else {
3877    // Shift amount must be in [0,31]
3878    if (Val < 0 || Val > 31) {
3879      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
3880      return MatchOperand_ParseFail;
3881    }
3882  }
3883
3884  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
3885
3886  return MatchOperand_Success;
3887}
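
// For example (a sketch of the SSAT/USAT shifter operands handled above):
//   lsl #0   -> (isASR == false, Val == 0)
//   lsl #31  -> (isASR == false, Val == 31)
//   asr #1   -> (isASR == true,  Val == 1)
//   asr #32  -> (isASR == true,  Val == 0)   // #32 encoded as 0; ARM mode only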
3888
3889/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3890/// of instructions. Legal values are:
3891///     ror #n  'n' in {0, 8, 16, 24}
3892ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3893parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3894  const AsmToken &Tok = Parser.getTok();
3895  SMLoc S = Tok.getLoc();
3896  if (Tok.isNot(AsmToken::Identifier))
3897    return MatchOperand_NoMatch;
3898  StringRef ShiftName = Tok.getString();
3899  if (ShiftName != "ror" && ShiftName != "ROR")
3900    return MatchOperand_NoMatch;
3901  Parser.Lex(); // Eat the operator.
3902
3903  // A '#' and a rotate amount.
3904  if (Parser.getTok().isNot(AsmToken::Hash) &&
3905      Parser.getTok().isNot(AsmToken::Dollar)) {
3906    Error(Parser.getTok().getLoc(), "'#' expected");
3907    return MatchOperand_ParseFail;
3908  }
3909  Parser.Lex(); // Eat hash token.
3910  SMLoc ExLoc = Parser.getTok().getLoc();
3911
3912  const MCExpr *ShiftAmount;
3913  SMLoc EndLoc;
3914  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3915    Error(ExLoc, "malformed rotate expression");
3916    return MatchOperand_ParseFail;
3917  }
3918  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3919  if (!CE) {
3920    Error(ExLoc, "rotate amount must be an immediate");
3921    return MatchOperand_ParseFail;
3922  }
3923
3924  int64_t Val = CE->getValue();
3925  // Shift amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3926  // normally, zero is represented in asm by omitting the rotate operand
3927  // entirely.
3928  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3929    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
3930    return MatchOperand_ParseFail;
3931  }
3932
3933  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
3934
3935  return MatchOperand_Success;
3936}
3937
3938ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3939parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3940  SMLoc S = Parser.getTok().getLoc();
3941  // The bitfield descriptor is really two operands, the LSB and the width.
3942  if (Parser.getTok().isNot(AsmToken::Hash) &&
3943      Parser.getTok().isNot(AsmToken::Dollar)) {
3944    Error(Parser.getTok().getLoc(), "'#' expected");
3945    return MatchOperand_ParseFail;
3946  }
3947  Parser.Lex(); // Eat hash token.
3948
3949  const MCExpr *LSBExpr;
3950  SMLoc E = Parser.getTok().getLoc();
3951  if (getParser().parseExpression(LSBExpr)) {
3952    Error(E, "malformed immediate expression");
3953    return MatchOperand_ParseFail;
3954  }
3955  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3956  if (!CE) {
3957    Error(E, "'lsb' operand must be an immediate");
3958    return MatchOperand_ParseFail;
3959  }
3960
3961  int64_t LSB = CE->getValue();
3962  // The LSB must be in the range [0,31]
3963  if (LSB < 0 || LSB > 31) {
3964    Error(E, "'lsb' operand must be in the range [0,31]");
3965    return MatchOperand_ParseFail;
3966  }
3967  E = Parser.getTok().getLoc();
3968
3969  // Expect another immediate operand.
3970  if (Parser.getTok().isNot(AsmToken::Comma)) {
3971    Error(Parser.getTok().getLoc(), "too few operands");
3972    return MatchOperand_ParseFail;
3973  }
3974  Parser.Lex(); // Eat comma token.
3975  if (Parser.getTok().isNot(AsmToken::Hash) &&
3976      Parser.getTok().isNot(AsmToken::Dollar)) {
3977    Error(Parser.getTok().getLoc(), "'#' expected");
3978    return MatchOperand_ParseFail;
3979  }
3980  Parser.Lex(); // Eat hash token.
3981
3982  const MCExpr *WidthExpr;
3983  SMLoc EndLoc;
3984  if (getParser().parseExpression(WidthExpr, EndLoc)) {
3985    Error(E, "malformed immediate expression");
3986    return MatchOperand_ParseFail;
3987  }
3988  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3989  if (!CE) {
3990    Error(E, "'width' operand must be an immediate");
3991    return MatchOperand_ParseFail;
3992  }
3993
3994  int64_t Width = CE->getValue();
3995  // The width must be in the range [1,32-lsb]
3996  if (Width < 1 || Width > 32 - LSB) {
3997    Error(E, "'width' operand must be in the range [1,32-lsb]");
3998    return MatchOperand_ParseFail;
3999  }
4000
4001  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4002
4003  return MatchOperand_Success;
4004}
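
// For example (using BFI as an illustration), in "bfi r0, r1, #8, #4" the
// bitfield descriptor parsed above is "#8, #4": LSB == 8 (must be in [0,31])
// and Width == 4 (must be in [1, 32-LSB]); "#24, #16" would therefore be
// rejected, while "#24, #8" is accepted.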
4005
4006ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4007parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4008  // Check for a post-index addressing register operand. Specifically:
4009  // postidx_reg := '+' register {, shift}
4010  //              | '-' register {, shift}
4011  //              | register {, shift}
4012
4013  // This method must return MatchOperand_NoMatch without consuming any tokens
4014  // in the case where there is no match, as other alternatives are handled by
4015  // other parse methods.
4016  AsmToken Tok = Parser.getTok();
4017  SMLoc S = Tok.getLoc();
4018  bool haveEaten = false;
4019  bool isAdd = true;
4020  if (Tok.is(AsmToken::Plus)) {
4021    Parser.Lex(); // Eat the '+' token.
4022    haveEaten = true;
4023  } else if (Tok.is(AsmToken::Minus)) {
4024    Parser.Lex(); // Eat the '-' token.
4025    isAdd = false;
4026    haveEaten = true;
4027  }
4028
4029  SMLoc E = Parser.getTok().getEndLoc();
4030  int Reg = tryParseRegister();
4031  if (Reg == -1) {
4032    if (!haveEaten)
4033      return MatchOperand_NoMatch;
4034    Error(Parser.getTok().getLoc(), "register expected");
4035    return MatchOperand_ParseFail;
4036  }
4037
4038  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4039  unsigned ShiftImm = 0;
4040  if (Parser.getTok().is(AsmToken::Comma)) {
4041    Parser.Lex(); // Eat the ','.
4042    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4043      return MatchOperand_ParseFail;
4044
4045    // FIXME: Only approximates end...may include intervening whitespace.
4046    E = Parser.getTok().getLoc();
4047  }
4048
4049  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4050                                                  ShiftImm, S, E));
4051
4052  return MatchOperand_Success;
4053}
4054
4055ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4056parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4057  // Check for a post-index addressing register operand. Specifically:
4058  // am3offset := '+' register
4059  //              | '-' register
4060  //              | register
4061  //              | # imm
4062  //              | # + imm
4063  //              | # - imm
4064
4065  // This method must return MatchOperand_NoMatch without consuming any tokens
4066  // in the case where there is no match, as other alternatives are handled by
4067  // other parse methods.
4068  AsmToken Tok = Parser.getTok();
4069  SMLoc S = Tok.getLoc();
4070
4071  // Do immediates first, as we always parse those if we have a '#'.
4072  if (Parser.getTok().is(AsmToken::Hash) ||
4073      Parser.getTok().is(AsmToken::Dollar)) {
4074    Parser.Lex(); // Eat '#' or '$'.
4075    // Explicitly look for a '-', as we need to encode negative zero
4076    // differently.
4077    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4078    const MCExpr *Offset;
4079    SMLoc E;
4080    if (getParser().parseExpression(Offset, E))
4081      return MatchOperand_ParseFail;
4082    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4083    if (!CE) {
4084      Error(S, "constant expression expected");
4085      return MatchOperand_ParseFail;
4086    }
4087    // Negative zero is encoded as the flag value INT32_MIN.
4088    int32_t Val = CE->getValue();
4089    if (isNegative && Val == 0)
4090      Val = INT32_MIN;
4091
4092    Operands.push_back(
4093      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
4094
4095    return MatchOperand_Success;
4096  }
4097
4098
4099  bool haveEaten = false;
4100  bool isAdd = true;
4101  if (Tok.is(AsmToken::Plus)) {
4102    Parser.Lex(); // Eat the '+' token.
4103    haveEaten = true;
4104  } else if (Tok.is(AsmToken::Minus)) {
4105    Parser.Lex(); // Eat the '-' token.
4106    isAdd = false;
4107    haveEaten = true;
4108  }
4109
4110  Tok = Parser.getTok();
4111  int Reg = tryParseRegister();
4112  if (Reg == -1) {
4113    if (!haveEaten)
4114      return MatchOperand_NoMatch;
4115    Error(Tok.getLoc(), "register expected");
4116    return MatchOperand_ParseFail;
4117  }
4118
4119  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4120                                                  0, S, Tok.getEndLoc()));
4121
4122  return MatchOperand_Success;
4123}
4124
4125/// Convert parsed operands to MCInst.  Needed here because this instruction
4126/// only has two register operands, but multiplication is commutative so
4127/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4128void ARMAsmParser::
4129cvtThumbMultiply(MCInst &Inst,
4130           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4131  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4132  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4133  // If we have a three-operand form, make sure to set Rn to be the operand
4134  // that isn't the same as Rd.
4135  unsigned RegOp = 4;
4136  if (Operands.size() == 6 &&
4137      ((ARMOperand*)Operands[4])->getReg() ==
4138        ((ARMOperand*)Operands[3])->getReg())
4139    RegOp = 5;
4140  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4141  Inst.addOperand(Inst.getOperand(0));
4142  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4143}
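
// For example, both "muls r2, r3, r2" and "muls r2, r2, r3" produce the same
// MCInst here: Rd == r2, Rn == r3, and Rd is appended again for the remaining
// register operand (via Inst.getOperand(0)).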
4144
4145void ARMAsmParser::
4146cvtThumbBranches(MCInst &Inst,
4147           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4148  int CondOp = -1, ImmOp = -1;
4149  switch(Inst.getOpcode()) {
4150    case ARM::tB:
4151    case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
4152
4153    case ARM::t2B:
4154    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4155
4156    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4157  }
4158  // First decide whether or not the branch should be conditional
4159  // by looking at its location relative to an IT block.
4160  if(inITBlock()) {
4161    // Inside an IT block we cannot have any conditional branches. Any
4162    // such instruction needs to be converted to unconditional form.
4163    switch(Inst.getOpcode()) {
4164      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4165      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4166    }
4167  } else {
4168    // Outside IT blocks we can only have unconditional branches with the AL
4169    // condition code, or conditional branches with a non-AL condition code.
4170    unsigned Cond = static_cast<ARMOperand*>(Operands[CondOp])->getCondCode();
4171    switch(Inst.getOpcode()) {
4172      case ARM::tB:
4173      case ARM::tBcc:
4174        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4175        break;
4176      case ARM::t2B:
4177      case ARM::t2Bcc:
4178        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4179        break;
4180    }
4181  }
4182
4183  // now decide on encoding size based on branch target range
4184  switch(Inst.getOpcode()) {
4185    // classify tB as either t2B or t1B based on range of immediate operand
4186    case ARM::tB: {
4187      ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
4188      if(!op->isSignedOffset<11, 1>() && isThumbTwo())
4189        Inst.setOpcode(ARM::t2B);
4190      break;
4191    }
4192    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
4193    case ARM::tBcc: {
4194      ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
4195      if(!op->isSignedOffset<8, 1>() && isThumbTwo())
4196        Inst.setOpcode(ARM::t2Bcc);
4197      break;
4198    }
4199  }
4200  ((ARMOperand*)Operands[ImmOp])->addImmOperands(Inst, 1);
4201  ((ARMOperand*)Operands[CondOp])->addCondCodeOperands(Inst, 2);
4202}
4203
4204/// Parse an ARM memory expression. Return false on success; on error, emit a
4205/// diagnostic and return true.  The first token must be a '[' when called.
4206bool ARMAsmParser::
4207parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4208  SMLoc S, E;
4209  assert(Parser.getTok().is(AsmToken::LBrac) &&
4210         "Token is not a Left Bracket");
4211  S = Parser.getTok().getLoc();
4212  Parser.Lex(); // Eat left bracket token.
4213
4214  const AsmToken &BaseRegTok = Parser.getTok();
4215  int BaseRegNum = tryParseRegister();
4216  if (BaseRegNum == -1)
4217    return Error(BaseRegTok.getLoc(), "register expected");
4218
4219  // The next token must either be a comma, a colon or a closing bracket.
4220  const AsmToken &Tok = Parser.getTok();
4221  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4222      !Tok.is(AsmToken::RBrac))
4223    return Error(Tok.getLoc(), "malformed memory operand");
4224
4225  if (Tok.is(AsmToken::RBrac)) {
4226    E = Tok.getEndLoc();
4227    Parser.Lex(); // Eat right bracket token.
4228
4229    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4230                                             0, 0, false, S, E));
4231
4232    // If there's a pre-indexing writeback marker, '!', just add it as a token
4233    // operand. It's rather odd, but syntactically valid.
4234    if (Parser.getTok().is(AsmToken::Exclaim)) {
4235      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4236      Parser.Lex(); // Eat the '!'.
4237    }
4238
4239    return false;
4240  }
4241
4242  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4243         "Lost colon or comma in memory operand?!");
4244  if (Tok.is(AsmToken::Comma)) {
4245    Parser.Lex(); // Eat the comma.
4246  }
4247
4248  // If we have a ':', it's an alignment specifier.
4249  if (Parser.getTok().is(AsmToken::Colon)) {
4250    Parser.Lex(); // Eat the ':'.
4251    E = Parser.getTok().getLoc();
4252
4253    const MCExpr *Expr;
4254    if (getParser().parseExpression(Expr))
4255     return true;
4256
4257    // The expression has to be a constant. Memory references with relocations
4258    // don't come through here, as they use the <label> forms of the relevant
4259    // instructions.
4260    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4261    if (!CE)
4262      return Error(E, "constant expression expected");
4263
4264    unsigned Align = 0;
4265    switch (CE->getValue()) {
4266    default:
4267      return Error(E,
4268                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4269    case 16:  Align = 2; break;
4270    case 32:  Align = 4; break;
4271    case 64:  Align = 8; break;
4272    case 128: Align = 16; break;
4273    case 256: Align = 32; break;
4274    }
4275
4276    // Now we should have the closing ']'
4277    if (Parser.getTok().isNot(AsmToken::RBrac))
4278      return Error(Parser.getTok().getLoc(), "']' expected");
4279    E = Parser.getTok().getEndLoc();
4280    Parser.Lex(); // Eat right bracket token.
4281
4282    // Don't worry about range checking the value here. That's handled by
4283    // the is*() predicates.
4284    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4285                                             ARM_AM::no_shift, 0, Align,
4286                                             false, S, E));
4287
4288    // If there's a pre-indexing writeback marker, '!', just add it as a token
4289    // operand.
4290    if (Parser.getTok().is(AsmToken::Exclaim)) {
4291      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4292      Parser.Lex(); // Eat the '!'.
4293    }
4294
4295    return false;
4296  }
4297
4298  // If we have a '#', it's an immediate offset, else assume it's a register
4299  // offset. Be friendly and also accept a plain integer (without a leading
4300  // hash) for gas compatibility.
4301  if (Parser.getTok().is(AsmToken::Hash) ||
4302      Parser.getTok().is(AsmToken::Dollar) ||
4303      Parser.getTok().is(AsmToken::Integer)) {
4304    if (Parser.getTok().isNot(AsmToken::Integer))
4305      Parser.Lex(); // Eat '#' or '$'.
4306    E = Parser.getTok().getLoc();
4307
4308    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4309    const MCExpr *Offset;
4310    if (getParser().parseExpression(Offset))
4311     return true;
4312
4313    // The expression has to be a constant. Memory references with relocations
4314    // don't come through here, as they use the <label> forms of the relevant
4315    // instructions.
4316    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4317    if (!CE)
4318      return Error(E, "constant expression expected");
4319
4320    // If the constant was #-0, represent it as INT32_MIN.
4321    int32_t Val = CE->getValue();
4322    if (isNegative && Val == 0)
4323      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4324
4325    // Now we should have the closing ']'
4326    if (Parser.getTok().isNot(AsmToken::RBrac))
4327      return Error(Parser.getTok().getLoc(), "']' expected");
4328    E = Parser.getTok().getEndLoc();
4329    Parser.Lex(); // Eat right bracket token.
4330
4331    // Don't worry about range checking the value here. That's handled by
4332    // the is*() predicates.
4333    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4334                                             ARM_AM::no_shift, 0, 0,
4335                                             false, S, E));
4336
4337    // If there's a pre-indexing writeback marker, '!', just add it as a token
4338    // operand.
4339    if (Parser.getTok().is(AsmToken::Exclaim)) {
4340      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4341      Parser.Lex(); // Eat the '!'.
4342    }
4343
4344    return false;
4345  }
4346
4347  // The register offset is optionally preceded by a '+' or '-'
4348  bool isNegative = false;
4349  if (Parser.getTok().is(AsmToken::Minus)) {
4350    isNegative = true;
4351    Parser.Lex(); // Eat the '-'.
4352  } else if (Parser.getTok().is(AsmToken::Plus)) {
4353    // Nothing to do.
4354    Parser.Lex(); // Eat the '+'.
4355  }
4356
4357  E = Parser.getTok().getLoc();
4358  int OffsetRegNum = tryParseRegister();
4359  if (OffsetRegNum == -1)
4360    return Error(E, "register expected");
4361
4362  // If there's a shift operator, handle it.
4363  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4364  unsigned ShiftImm = 0;
4365  if (Parser.getTok().is(AsmToken::Comma)) {
4366    Parser.Lex(); // Eat the ','.
4367    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4368      return true;
4369  }
4370
4371  // Now we should have the closing ']'
4372  if (Parser.getTok().isNot(AsmToken::RBrac))
4373    return Error(Parser.getTok().getLoc(), "']' expected");
4374  E = Parser.getTok().getEndLoc();
4375  Parser.Lex(); // Eat right bracket token.
4376
4377  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4378                                           ShiftType, ShiftImm, 0, isNegative,
4379                                           S, E));
4380
4381  // If there's a pre-indexing writeback marker, '!', just add it as a token
4382  // operand.
4383  if (Parser.getTok().is(AsmToken::Exclaim)) {
4384    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4385    Parser.Lex(); // Eat the '!'.
4386  }
4387
4388  return false;
4389}
4390
4391/// parseMemRegOffsetShift - one of these two:
4392///   ( lsl | lsr | asr | ror ) , # shift_amount
4393///   rrx
4394/// Returns true on failure, false if a shift was successfully parsed.
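/// For example, this parses the ", lsl #2" portion of
///   ldr r0, [r1, r2, lsl #2]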
4395bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4396                                          unsigned &Amount) {
4397  SMLoc Loc = Parser.getTok().getLoc();
4398  const AsmToken &Tok = Parser.getTok();
4399  if (Tok.isNot(AsmToken::Identifier))
4400    return true;
4401  StringRef ShiftName = Tok.getString();
4402  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4403      ShiftName == "asl" || ShiftName == "ASL")
4404    St = ARM_AM::lsl;
4405  else if (ShiftName == "lsr" || ShiftName == "LSR")
4406    St = ARM_AM::lsr;
4407  else if (ShiftName == "asr" || ShiftName == "ASR")
4408    St = ARM_AM::asr;
4409  else if (ShiftName == "ror" || ShiftName == "ROR")
4410    St = ARM_AM::ror;
4411  else if (ShiftName == "rrx" || ShiftName == "RRX")
4412    St = ARM_AM::rrx;
4413  else
4414    return Error(Loc, "illegal shift operator");
4415  Parser.Lex(); // Eat shift type token.
4416
4417  // rrx stands alone.
4418  Amount = 0;
4419  if (St != ARM_AM::rrx) {
4420    Loc = Parser.getTok().getLoc();
4421    // A '#' and a shift amount.
4422    const AsmToken &HashTok = Parser.getTok();
4423    if (HashTok.isNot(AsmToken::Hash) &&
4424        HashTok.isNot(AsmToken::Dollar))
4425      return Error(HashTok.getLoc(), "'#' expected");
4426    Parser.Lex(); // Eat hash token.
4427
4428    const MCExpr *Expr;
4429    if (getParser().parseExpression(Expr))
4430      return true;
4431    // Range check the immediate.
4432    // lsl, ror: 0 <= imm <= 31
4433    // lsr, asr: 0 <= imm <= 32
4434    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4435    if (!CE)
4436      return Error(Loc, "shift amount must be an immediate");
4437    int64_t Imm = CE->getValue();
4438    if (Imm < 0 ||
4439        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4440        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4441      return Error(Loc, "immediate shift value out of range");
4442    // If <ShiftTy> #0, normalize it to lsl #0 (i.e. no shift).
4443    if (Imm == 0)
4444      St = ARM_AM::lsl;
4445    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4446    if (Imm == 32)
4447      Imm = 0;
4448    Amount = Imm;
4449  }
4450
4451  return false;
4452}
4453
4454/// parseFPImm - A floating point immediate expression operand.
4455ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4456parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4457  // Anything that can accept a floating point constant as an operand
4458  // needs to go through here, as the regular parseExpression is
4459  // integer only.
4460  //
4461  // This routine still creates a generic Immediate operand, containing
4462  // a bitcast of the 64-bit floating point value. The various operands
4463  // that accept floats can check whether the value is valid for them
4464  // via the standard is*() predicates.
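  //
  // For example, both a real literal and a raw 0-255 encoded value are
  // accepted here (see below):
  //   vmov.f32 s0, #0.5
  //   vmov.f32 s0, #112    @ raw 8-bit encoding of an FP immediate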
4465
4466  SMLoc S = Parser.getTok().getLoc();
4467
4468  if (Parser.getTok().isNot(AsmToken::Hash) &&
4469      Parser.getTok().isNot(AsmToken::Dollar))
4470    return MatchOperand_NoMatch;
4471
4472  // Disambiguate the VMOV forms that can accept an FP immediate.
4473  // vmov.f32 <sreg>, #imm
4474  // vmov.f64 <dreg>, #imm
4475  // vmov.f32 <dreg>, #imm  @ vector f32x2
4476  // vmov.f32 <qreg>, #imm  @ vector f32x4
4477  //
4478  // There are also the NEON VMOV instructions which expect an
4479  // integer constant. Make sure we don't try to parse an FPImm
4480  // for these:
4481  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4482  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4483  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4484                           TyOp->getToken() != ".f64"))
4485    return MatchOperand_NoMatch;
4486
4487  Parser.Lex(); // Eat '#' or '$'.
4488
4489  // Handle negation, as that still comes through as a separate token.
4490  bool isNegative = false;
4491  if (Parser.getTok().is(AsmToken::Minus)) {
4492    isNegative = true;
4493    Parser.Lex();
4494  }
4495  const AsmToken &Tok = Parser.getTok();
4496  SMLoc Loc = Tok.getLoc();
4497  if (Tok.is(AsmToken::Real)) {
4498    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4499    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4500    // If we had a '-' in front, toggle the sign bit.
4501    IntVal ^= (uint64_t)isNegative << 31;
4502    Parser.Lex(); // Eat the token.
4503    Operands.push_back(ARMOperand::CreateImm(
4504          MCConstantExpr::Create(IntVal, getContext()),
4505          S, Parser.getTok().getLoc()));
4506    return MatchOperand_Success;
4507  }
4508  // Also handle plain integers. Instructions which allow floating point
4509  // immediates also allow a raw encoded 8-bit value.
4510  if (Tok.is(AsmToken::Integer)) {
4511    int64_t Val = Tok.getIntVal();
4512    Parser.Lex(); // Eat the token.
4513    if (Val > 255 || Val < 0) {
4514      Error(Loc, "encoded floating point value out of range");
4515      return MatchOperand_ParseFail;
4516    }
4517    double RealVal = ARM_AM::getFPImmFloat(Val);
4518    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4519    Operands.push_back(ARMOperand::CreateImm(
4520        MCConstantExpr::Create(Val, getContext()), S,
4521        Parser.getTok().getLoc()));
4522    return MatchOperand_Success;
4523  }
4524
4525  Error(Loc, "invalid floating point immediate");
4526  return MatchOperand_ParseFail;
4527}
4528
4529/// Parse an ARM instruction operand.  For now this parses the operand regardless
4530/// of the mnemonic.
4531bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4532                                StringRef Mnemonic) {
4533  SMLoc S, E;
4534
4535  // Check if the current operand has a custom associated parser, if so, try to
4536  // custom parse the operand, or fallback to the general approach.
4537  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4538  if (ResTy == MatchOperand_Success)
4539    return false;
4540  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4541  // there was a match, but an error occurred, in which case, just return that
4542  // the operand parsing failed.
4543  if (ResTy == MatchOperand_ParseFail)
4544    return true;
4545
4546  switch (getLexer().getKind()) {
4547  default:
4548    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4549    return true;
4550  case AsmToken::Identifier: {
4551    // If we've seen a branch mnemonic, the next operand must be a label.  This
4552    // is true even if the label is a register name.  So "b r1" means branch to
4553    // label "r1".
4554    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
4555    if (!ExpectLabel) {
4556      if (!tryParseRegisterWithWriteBack(Operands))
4557        return false;
4558      int Res = tryParseShiftRegister(Operands);
4559      if (Res == 0) // success
4560        return false;
4561      else if (Res == -1) // irrecoverable error
4562        return true;
4563      // If this is VMRS, check for the apsr_nzcv operand.
4564      if (Mnemonic == "vmrs" &&
4565          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4566        S = Parser.getTok().getLoc();
4567        Parser.Lex();
4568        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4569        return false;
4570      }
4571    }
4572
4573    // Fall through for the Identifier case that is not a register or a
4574    // special name.
4575  }
4576  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4577  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4578  case AsmToken::String:  // quoted label names.
4579  case AsmToken::Dot: {   // . as a branch target
4580    // This was not a register so parse other operands that start with an
4581    // identifier (like labels) as expressions and create them as immediates.
4582    const MCExpr *IdVal;
4583    S = Parser.getTok().getLoc();
4584    if (getParser().parseExpression(IdVal))
4585      return true;
4586    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4587    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4588    return false;
4589  }
4590  case AsmToken::LBrac:
4591    return parseMemory(Operands);
4592  case AsmToken::LCurly:
4593    return parseRegisterList(Operands);
4594  case AsmToken::Dollar:
4595  case AsmToken::Hash: {
4596    // #42 -> immediate.
4597    S = Parser.getTok().getLoc();
4598    Parser.Lex();
4599
4600    if (Parser.getTok().isNot(AsmToken::Colon)) {
4601      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4602      const MCExpr *ImmVal;
4603      if (getParser().parseExpression(ImmVal))
4604        return true;
4605      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4606      if (CE) {
4607        int32_t Val = CE->getValue();
4608        if (isNegative && Val == 0)
4609          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4610      }
4611      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4612      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4613
4614      // There can be a trailing '!' on operands that we want as a separate
4615      // '!' Token operand. Handle that here. For example, the compatibility
4616      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
4617      if (Parser.getTok().is(AsmToken::Exclaim)) {
4618        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
4619                                                   Parser.getTok().getLoc()));
4620        Parser.Lex(); // Eat exclaim token
4621      }
4622      return false;
4623    }
4624    // w/ a ':' after the '#', it's just like a plain ':'.
4625    // FALLTHROUGH
4626  }
4627  case AsmToken::Colon: {
4628    // ":lower16:" and ":upper16:" expression prefixes
4629    // FIXME: Check it's an expression prefix,
4630    // e.g. (FOO - :lower16:BAR) isn't legal.
4631    ARMMCExpr::VariantKind RefKind;
4632    if (parsePrefix(RefKind))
4633      return true;
4634
4635    const MCExpr *SubExprVal;
4636    if (getParser().parseExpression(SubExprVal))
4637      return true;
4638
4639    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4640                                              getContext());
4641    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4642    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4643    return false;
4644  }
4645  }
4646}
4647
4648// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4649//  :lower16: or :upper16:.
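// For example:
//   movw r0, :lower16:some_symbol
//   movt r0, :upper16:some_symbol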
4650bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4651  RefKind = ARMMCExpr::VK_ARM_None;
4652
4653  // :lower16: and :upper16: modifiers
4654  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4655  Parser.Lex(); // Eat ':'
4656
4657  if (getLexer().isNot(AsmToken::Identifier)) {
4658    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4659    return true;
4660  }
4661
4662  StringRef IDVal = Parser.getTok().getIdentifier();
4663  if (IDVal == "lower16") {
4664    RefKind = ARMMCExpr::VK_ARM_LO16;
4665  } else if (IDVal == "upper16") {
4666    RefKind = ARMMCExpr::VK_ARM_HI16;
4667  } else {
4668    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4669    return true;
4670  }
4671  Parser.Lex();
4672
4673  if (getLexer().isNot(AsmToken::Colon)) {
4674    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4675    return true;
4676  }
4677  Parser.Lex(); // Eat the last ':'
4678  return false;
4679}
4680
4681/// \brief Given a mnemonic, split out possible predication code and carry
4682/// setting letters to form a canonical mnemonic and flags.
4683//
4684// FIXME: Would be nice to autogen this.
4685// FIXME: This is a bit of a maze of special cases.
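// For example (derived from the logic below):
//   "addseq" -> mnemonic "add", carry setting 's', predication "eq"
//   "cpsie"  -> mnemonic "cps", processor imod IE
//   "ittet"  -> mnemonic "it",  ITMask "tet"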
4686StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4687                                      unsigned &PredicationCode,
4688                                      bool &CarrySetting,
4689                                      unsigned &ProcessorIMod,
4690                                      StringRef &ITMask) {
4691  PredicationCode = ARMCC::AL;
4692  CarrySetting = false;
4693  ProcessorIMod = 0;
4694
4695  // Ignore some mnemonics we know aren't predicated forms.
4696  //
4697  // FIXME: Would be nice to autogen this.
4698  if ((Mnemonic == "movs" && isThumb()) ||
4699      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4700      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4701      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4702      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4703      Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
4704      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4705      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4706      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4707      Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
4708      Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
4709      Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
4710      Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
4711    return Mnemonic;
4712
4713  // First, split out any predication code. Ignore mnemonics we know aren't
4714  // predicated but do have a carry-set and so weren't caught above.
4715  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4716      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4717      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4718      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4719    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4720      .Case("eq", ARMCC::EQ)
4721      .Case("ne", ARMCC::NE)
4722      .Case("hs", ARMCC::HS)
4723      .Case("cs", ARMCC::HS)
4724      .Case("lo", ARMCC::LO)
4725      .Case("cc", ARMCC::LO)
4726      .Case("mi", ARMCC::MI)
4727      .Case("pl", ARMCC::PL)
4728      .Case("vs", ARMCC::VS)
4729      .Case("vc", ARMCC::VC)
4730      .Case("hi", ARMCC::HI)
4731      .Case("ls", ARMCC::LS)
4732      .Case("ge", ARMCC::GE)
4733      .Case("lt", ARMCC::LT)
4734      .Case("gt", ARMCC::GT)
4735      .Case("le", ARMCC::LE)
4736      .Case("al", ARMCC::AL)
4737      .Default(~0U);
4738    if (CC != ~0U) {
4739      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4740      PredicationCode = CC;
4741    }
4742  }
4743
4744  // Next, determine if we have a carry setting bit. We explicitly ignore all
4745  // the instructions we know end in 's'.
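  // For example, "adds" becomes "add" with CarrySetting, while "vabs" (in the
  // list below) is left untouched.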
4746  if (Mnemonic.endswith("s") &&
4747      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4748        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4749        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4750        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4751        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4752        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4753        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4754        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4755        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4756        (Mnemonic == "movs" && isThumb()))) {
4757    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4758    CarrySetting = true;
4759  }
4760
4761  // The "cps" instruction can have an interrupt mode operand which is glued into
4762  // the mnemonic. Check if this is the case, split it out, and parse the imod operand.
4763  if (Mnemonic.startswith("cps")) {
4764    // Split out any imod code.
4765    unsigned IMod =
4766      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4767      .Case("ie", ARM_PROC::IE)
4768      .Case("id", ARM_PROC::ID)
4769      .Default(~0U);
4770    if (IMod != ~0U) {
4771      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4772      ProcessorIMod = IMod;
4773    }
4774  }
4775
4776  // The "it" instruction has the condition mask on the end of the mnemonic.
4777  if (Mnemonic.startswith("it")) {
4778    ITMask = Mnemonic.slice(2, Mnemonic.size());
4779    Mnemonic = Mnemonic.slice(0, 2);
4780  }
4781
4782  return Mnemonic;
4783}
4784
4785/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4786/// inclusion of carry set or predication code operands.
4787//
4788// FIXME: It would be nice to autogen this.
4789void ARMAsmParser::
4790getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
4791                     bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) {
4792  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4793      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4794      Mnemonic == "add" || Mnemonic == "adc" ||
4795      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4796      Mnemonic == "orr" || Mnemonic == "mvn" ||
4797      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4798      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4799      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4800      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4801                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4802                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4803    CanAcceptCarrySet = true;
4804  } else
4805    CanAcceptCarrySet = false;
4806
4807  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
4808      Mnemonic == "cps" ||  Mnemonic == "it" ||  Mnemonic == "cbz" ||
4809      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic.startswith("crc32") ||
4810      Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") ||
4811      Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
4812      Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
4813      Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
4814      Mnemonic == "vrintm" || Mnemonic.startswith("aes") ||
4815      Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
4816      (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
4817    // These mnemonics are never predicable
4818    CanAcceptPredicationCode = false;
4819  } else if (!isThumb()) {
4820    // Some instructions are only predicable in Thumb mode
4821    CanAcceptPredicationCode
4822      = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
4823        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
4824        Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
4825        Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
4826        Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
4827        Mnemonic != "stc2" && Mnemonic != "stc2l" &&
4828        !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
4829  } else if (isThumbOne()) {
4830    if (hasV6MOps())
4831      CanAcceptPredicationCode = Mnemonic != "movs";
4832    else
4833      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
4834  } else
4835    CanAcceptPredicationCode = true;
4836}
4837
4838bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4839                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4840  // FIXME: This is all horribly hacky. We really need a better way to deal
4841  // with optional operands like this in the matcher table.
4842
4843  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4844  // another does not. Specifically, the MOVW instruction does not. So we
4845  // special case it here and remove the defaulted (non-setting) cc_out
4846  // operand if that's the instruction we're trying to match.
4847  //
4848  // We do this as post-processing of the explicit operands rather than just
4849  // conditionally adding the cc_out in the first place because we need
4850  // to check the type of the parsed immediate operand.
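  // For example, in ARM mode "mov r0, #0x1234" can only be MOVW (the value is
  // not a valid modified-immediate but fits in 16 bits), so drop cc_out;
  // "mov r0, #1" keeps it.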
4851  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4852      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4853      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4854      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4855    return true;
4856
4857  // Register-register 'add' for thumb does not have a cc_out operand
4858  // when there are only two register operands.
4859  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4860      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4861      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4862      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4863    return true;
4864  // Register-register 'add' for thumb does not have a cc_out operand
4865  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4866  // have to check the immediate range here since Thumb2 has a variant
4867  // that can handle a different range and has a cc_out operand.
4868  if (((isThumb() && Mnemonic == "add") ||
4869       (isThumbTwo() && Mnemonic == "sub")) &&
4870      Operands.size() == 6 &&
4871      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4872      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4873      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4874      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4875      ((Mnemonic == "add" && static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4876       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4877    return true;
4878  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4879  // imm0_4095 variant. That's the least-preferred variant when
4880  // selecting via the generic "add" mnemonic, so to know that we
4881  // should remove the cc_out operand, we have to explicitly check that
4882  // it's not one of the other variants. Ugh.
4883  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4884      Operands.size() == 6 &&
4885      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4886      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4887      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4888    // Nest conditions rather than one big 'if' statement for readability.
4889    //
4890    // If both registers are low, we're in an IT block, and the immediate is
4891    // in range, we should use encoding T1 instead, which has a cc_out.
4892    if (inITBlock() &&
4893        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4894        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4895        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4896      return false;
4897    // Check against T3. If the second register is the PC, this is an
4898    // alternate form of ADR, which uses encoding T4, so check for that too.
4899    if (static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4900        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4901      return false;
4902
4903    // Otherwise, we use encoding T4, which does not have a cc_out
4904    // operand.
4905    return true;
4906  }
4907
4908  // The thumb2 multiply instruction doesn't have a CCOut register, so
4909  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4910  // use the 16-bit encoding or not.
4911  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4912      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4913      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4914      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4915      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4916      // If the registers aren't low regs, the destination reg isn't the
4917      // same as one of the source regs, or the cc_out operand is zero
4918      // outside of an IT block, we have to use the 32-bit encoding, so
4919      // remove the cc_out operand.
4920      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4921       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4922       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4923       !inITBlock() ||
4924       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4925        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4926        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4927        static_cast<ARMOperand*>(Operands[4])->getReg())))
4928    return true;
4929
4930  // Also check the 'mul' syntax variant that doesn't specify an explicit
4931  // destination register.
4932  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4933      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4934      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4935      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4936      // If the registers aren't low regs or the cc_out operand is zero
4937      // outside of an IT block, we have to use the 32-bit encoding, so
4938      // remove the cc_out operand.
4939      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4940       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4941       !inITBlock()))
4942    return true;
4943
4946  // Register-register 'add/sub' for thumb does not have a cc_out operand
4947  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4948  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4949  // right, this will result in better diagnostics (which operand is off)
4950  // anyway.
4951  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4952      (Operands.size() == 5 || Operands.size() == 6) &&
4953      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4954      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4955      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4956      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4957       (Operands.size() == 6 &&
4958        static_cast<ARMOperand*>(Operands[5])->isImm())))
4959    return true;
4960
4961  return false;
4962}
4963
4964bool ARMAsmParser::shouldOmitPredicateOperand(
4965    StringRef Mnemonic, SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
4966  // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
4967  unsigned RegIdx = 3;
4968  if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
4969      static_cast<ARMOperand *>(Operands[2])->getToken() == ".f32") {
4970    if (static_cast<ARMOperand *>(Operands[3])->isToken() &&
4971        static_cast<ARMOperand *>(Operands[3])->getToken() == ".f32")
4972      RegIdx = 4;
4973
4974    if (static_cast<ARMOperand *>(Operands[RegIdx])->isReg() &&
4975        (ARMMCRegisterClasses[ARM::DPRRegClassID]
4976             .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg()) ||
4977         ARMMCRegisterClasses[ARM::QPRRegClassID]
4978             .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg())))
4979      return true;
4980  }
4981  return false;
4982}
4983
4984static bool isDataTypeToken(StringRef Tok) {
4985  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4986    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4987    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4988    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4989    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4990    Tok == ".f" || Tok == ".d";
4991}
4992
4993// FIXME: This bit should probably be handled via an explicit match class
4994// in the .td files that matches the suffix instead of having it be
4995// a literal string token the way it is now.
4996static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4997  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4998}
4999static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
5000                                 unsigned VariantID);
5001/// Parse an ARM instruction mnemonic followed by its operands.
5002bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5003                                    SMLoc NameLoc,
5004                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5005  // Apply mnemonic aliases before doing anything else, as the destination
5006  // mnemonic may include suffixes and we want to handle them normally.
5007  // The generic tblgen'erated code does this later, at the start of
5008  // MatchInstructionImpl(), but that's too late for aliases that include
5009  // any sort of suffix.
5010  unsigned AvailableFeatures = getAvailableFeatures();
5011  unsigned AssemblerDialect = getParser().getAssemblerDialect();
5012  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5013
5014  // First check for the ARM-specific .req directive.
5015  if (Parser.getTok().is(AsmToken::Identifier) &&
5016      Parser.getTok().getIdentifier() == ".req") {
5017    parseDirectiveReq(Name, NameLoc);
5018    // We always return 'error' for this, as we're done with this
5019    // statement and don't need to match the instruction.
5020    return true;
5021  }
5022
5023  // Create the leading tokens for the mnemonic, split by '.' characters.
5024  size_t Start = 0, Next = Name.find('.');
5025  StringRef Mnemonic = Name.slice(Start, Next);
5026
5027  // Split out the predication code and carry setting flag from the mnemonic.
5028  unsigned PredicationCode;
5029  unsigned ProcessorIMod;
5030  bool CarrySetting;
5031  StringRef ITMask;
5032  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5033                           ProcessorIMod, ITMask);
5034
5035  // In Thumb1, only the branch (B) instruction can be predicated.
5036  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5037    Parser.eatToEndOfStatement();
5038    return Error(NameLoc, "conditional execution not supported in Thumb1");
5039  }
5040
5041  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5042
5043  // Handle the IT instruction ITMask. Convert it to a bitmask. This
5044  // is the mask as it will be for the IT encoding if the conditional
5045  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5046  // where the conditional bit0 is zero, the instruction post-processing
5047  // will adjust the mask accordingly.
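  // For example, "ittet" gives ITMask "tet" and, assuming bit0 of the
  // condition is 1, the loop below produces Mask == 0b1011.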
5048  if (Mnemonic == "it") {
5049    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5050    if (ITMask.size() > 3) {
5051      Parser.eatToEndOfStatement();
5052      return Error(Loc, "too many conditions on IT instruction");
5053    }
5054    unsigned Mask = 8;
5055    for (unsigned i = ITMask.size(); i != 0; --i) {
5056      char pos = ITMask[i - 1];
5057      if (pos != 't' && pos != 'e') {
5058        Parser.eatToEndOfStatement();
5059        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5060      }
5061      Mask >>= 1;
5062      if (ITMask[i - 1] == 't')
5063        Mask |= 8;
5064    }
5065    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5066  }
5067
5068  // FIXME: This is all a pretty gross hack. We should automatically handle
5069  // optional operands like this via tblgen.
5070
5071  // Next, add the CCOut and ConditionCode operands, if needed.
5072  //
5073  // For mnemonics which can ever incorporate a carry setting bit or predication
5074  // code, our matching model involves us always generating CCOut and
5075  // ConditionCode operands to match the mnemonic "as written" and then we let
5076  // the matcher deal with finding the right instruction or generating an
5077  // appropriate error.
5078  bool CanAcceptCarrySet, CanAcceptPredicationCode;
5079  getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
5080
5081  // If we had a carry-set on an instruction that can't do that, issue an
5082  // error.
5083  if (!CanAcceptCarrySet && CarrySetting) {
5084    Parser.eatToEndOfStatement();
5085    return Error(NameLoc, "instruction '" + Mnemonic +
5086                 "' can not set flags, but 's' suffix specified");
5087  }
5088  // If we had a predication code on an instruction that can't do that, issue an
5089  // error.
5090  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5091    Parser.eatToEndOfStatement();
5092    return Error(NameLoc, "instruction '" + Mnemonic +
5093                 "' is not predicable, but condition code specified");
5094  }
5095
5096  // Add the carry setting operand, if necessary.
5097  if (CanAcceptCarrySet) {
5098    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5099    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5100                                               Loc));
5101  }
5102
5103  // Add the predication code operand, if necessary.
5104  if (CanAcceptPredicationCode) {
5105    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5106                                      CarrySetting);
5107    Operands.push_back(ARMOperand::CreateCondCode(
5108                         ARMCC::CondCodes(PredicationCode), Loc));
5109  }
5110
5111  // Add the processor imod operand, if necessary.
5112  if (ProcessorIMod) {
5113    Operands.push_back(ARMOperand::CreateImm(
5114          MCConstantExpr::Create(ProcessorIMod, getContext()),
5115                                 NameLoc, NameLoc));
5116  }
5117
5118  // Add the remaining tokens in the mnemonic.
5119  while (Next != StringRef::npos) {
5120    Start = Next;
5121    Next = Name.find('.', Start + 1);
5122    StringRef ExtraToken = Name.slice(Start, Next);
5123
5124    // Some NEON instructions have an optional datatype suffix that is
5125    // completely ignored. Check for that.
5126    if (isDataTypeToken(ExtraToken) &&
5127        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5128      continue;
5129
5130    // In ARM mode, generate an error if the .n qualifier is used.
5131    if (ExtraToken == ".n" && !isThumb()) {
5132      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5133      return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
5134                   "arm mode");
5135    }
5136
5137    // The .n qualifier is always discarded as that is what the tables
5138    // and matcher expect.  In ARM mode the .w qualifier has no effect,
5139    // so discard it to avoid errors that can be caused by the matcher.
5140    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
5141      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5142      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5143    }
5144  }
5145
5146  // Read the remaining operands.
5147  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5148    // Read the first operand.
5149    if (parseOperand(Operands, Mnemonic)) {
5150      Parser.eatToEndOfStatement();
5151      return true;
5152    }
5153
5154    while (getLexer().is(AsmToken::Comma)) {
5155      Parser.Lex();  // Eat the comma.
5156
5157      // Parse and remember the operand.
5158      if (parseOperand(Operands, Mnemonic)) {
5159        Parser.eatToEndOfStatement();
5160        return true;
5161      }
5162    }
5163  }
5164
5165  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5166    SMLoc Loc = getLexer().getLoc();
5167    Parser.eatToEndOfStatement();
5168    return Error(Loc, "unexpected token in argument list");
5169  }
5170
5171  Parser.Lex(); // Consume the EndOfStatement
5172
5173  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5174  // do and don't have a cc_out optional-def operand. With some spot-checks
5175  // of the operand list, we can figure out which variant we're trying to
5176  // parse and adjust accordingly before actually matching. We shouldn't ever
5177  // try to remove a cc_out operand that was explicitly set on the
5178  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5179  // table driven matcher doesn't fit well with the ARM instruction set.
5180  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5181    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5182    Operands.erase(Operands.begin() + 1);
5183    delete Op;
5184  }
5185
5186  // Some instructions have the same mnemonic, but don't always
5187  // have a predicate. Distinguish them here and delete the
5188  // predicate if needed.
5189  if (shouldOmitPredicateOperand(Mnemonic, Operands)) {
5190    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5191    Operands.erase(Operands.begin() + 1);
5192    delete Op;
5193  }
5194
5195  // ARM mode 'blx' needs special handling, as the register operand version
5196  // is predicable, but the label operand version is not. So, we can't rely
5197  // on the Mnemonic based checking to correctly figure out when to put
5198  // a k_CondCode operand in the list. If we're trying to match the label
5199  // version, remove the k_CondCode operand here.
5200  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5201      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5202    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5203    Operands.erase(Operands.begin() + 1);
5204    delete Op;
5205  }
5206
5207  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5208  // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
5209  // a single GPRPair reg operand is used in the .td file to replace the two
5210  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5211  // expressed as a GPRPair, so we have to manually merge them.
5212  // FIXME: We would really like to be able to tablegen'erate this.
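  // For example, "ldrexd r0, r1, [r2]" has its r0/r1 operands replaced here by
  // the single GPRPair register R0_R1.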
5213  if (!isThumb() && Operands.size() > 4 &&
5214      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
5215       Mnemonic == "stlexd")) {
5216    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
5217    unsigned Idx = isLoad ? 2 : 3;
5218    ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
5219    ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
5220
5221    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5222    // Adjust only if Op1 and Op2 are GPRs.
5223    if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
5224        MRC.contains(Op2->getReg())) {
5225      unsigned Reg1 = Op1->getReg();
5226      unsigned Reg2 = Op2->getReg();
5227      unsigned Rt = MRI->getEncodingValue(Reg1);
5228      unsigned Rt2 = MRI->getEncodingValue(Reg2);
5229
5230      // Rt2 must be Rt + 1 and Rt must be even.
5231      if (Rt + 1 != Rt2 || (Rt & 1)) {
5232        Error(Op2->getStartLoc(), isLoad ?
5233            "destination operands must be sequential" :
5234            "source operands must be sequential");
5235        return true;
5236      }
5237      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5238          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5239      Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
5240      Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
5241            NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
5242      delete Op1;
5243      delete Op2;
5244    }
5245  }
5246
5247  // FIXME: As said above, this is all a pretty gross hack.  This instruction
5248  // does not fit with other "subs" and tblgen.
5249  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
5250  // so the Mnemonic is the original name "subs" and delete the predicate
5251  // operand so it will match the table entry.
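  // For example, "subs pc, lr, #4" (an exception return) is handled here.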
5252  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
5253      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5254      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::PC &&
5255      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5256      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::LR &&
5257      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5258    ARMOperand *Op0 = static_cast<ARMOperand*>(Operands[0]);
5259    Operands.erase(Operands.begin());
5260    delete Op0;
5261    Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc));
5262
5263    ARMOperand *Op1 = static_cast<ARMOperand*>(Operands[1]);
5264    Operands.erase(Operands.begin() + 1);
5265    delete Op1;
5266  }
5267  return false;
5268}
5269
5270// Validate context-sensitive operand constraints.
5271
5272// return 'true' if register list contains non-low GPR registers,
5273// 'false' otherwise. If Reg is in the register list or is HiReg, set
5274// 'containsReg' to true.
5275static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5276                                 unsigned HiReg, bool &containsReg) {
5277  containsReg = false;
5278  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5279    unsigned OpReg = Inst.getOperand(i).getReg();
5280    if (OpReg == Reg)
5281      containsReg = true;
5282    // Anything other than a low register isn't legal here.
5283    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5284      return true;
5285  }
5286  return false;
5287}
5288
5289// Check if the specified register is in the register list of the inst,
5290// starting at the indicated operand number.
5291static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5292  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5293    unsigned OpReg = Inst.getOperand(i).getReg();
5294    if (OpReg == Reg)
5295      return true;
5296  }
5297  return false;
5298}
5299
5300// Return true if instruction has the interesting property of being
5301// allowed in IT blocks, but not being predicable.
5302static bool instIsBreakpoint(const MCInst &Inst) {
5303  return Inst.getOpcode() == ARM::tBKPT ||
5304         Inst.getOpcode() == ARM::BKPT ||
5305         Inst.getOpcode() == ARM::tHLT ||
5306         Inst.getOpcode() == ARM::HLT;
5308}
5309
5310// FIXME: We would really like to be able to tablegen'erate this.
5311bool ARMAsmParser::
5312validateInstruction(MCInst &Inst,
5313                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5314  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5315  SMLoc Loc = Operands[0]->getStartLoc();
5316
5317  // Check the IT block state first.
5318  // NOTE: BKPT and HLT instructions have the interesting property of being
5319  // allowed in IT blocks, but not being predicable. They just always execute.
5320  if (inITBlock() && !instIsBreakpoint(Inst)) {
5321    unsigned Bit = 1;
5322    if (ITState.FirstCond)
5323      ITState.FirstCond = false;
5324    else
5325      Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5326    // The instruction must be predicable.
5327    if (!MCID.isPredicable())
5328      return Error(Loc, "instructions in IT block must be predicable");
5329    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5330    unsigned ITCond = Bit ? ITState.Cond :
5331      ARMCC::getOppositeCondition(ITState.Cond);
5332    if (Cond != ITCond) {
5333      // Find the condition code Operand to get its SMLoc information.
5334      SMLoc CondLoc;
5335      for (unsigned I = 1; I < Operands.size(); ++I)
5336        if (static_cast<ARMOperand*>(Operands[I])->isCondCode())
5337          CondLoc = Operands[I]->getStartLoc();
5338      return Error(CondLoc, "incorrect condition in IT block; got '" +
5339                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5340                   "', but expected '" +
5341                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5342    }
5343  // Check for non-'al' condition codes outside of the IT block.
5344  } else if (isThumbTwo() && MCID.isPredicable() &&
5345             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5346             ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
5347             Inst.getOpcode() != ARM::t2Bcc)
5348    return Error(Loc, "predicated instructions must be in IT block");
5349
5350  const unsigned Opcode = Inst.getOpcode();
5351  switch (Opcode) {
5352  case ARM::LDRD:
5353  case ARM::LDRD_PRE:
5354  case ARM::LDRD_POST: {
5355    const unsigned RtReg = Inst.getOperand(0).getReg();
5356
5357    // Rt can't be R14.
5358    if (RtReg == ARM::LR)
5359      return Error(Operands[3]->getStartLoc(),
5360                   "Rt can't be R14");
5361
5362    const unsigned Rt = MRI->getEncodingValue(RtReg);
5363    // Rt must be even-numbered.
5364    if ((Rt & 1) == 1)
5365      return Error(Operands[3]->getStartLoc(),
5366                   "Rt must be even-numbered");
5367
5368    // Rt2 must be Rt + 1.
5369    const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5370    if (Rt2 != Rt + 1)
5371      return Error(Operands[3]->getStartLoc(),
5372                   "destination operands must be sequential");
5373
5374    if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
5375      const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
5376      // For addressing modes with writeback, the base register needs to be
5377      // different from the destination registers.
5378      if (Rn == Rt || Rn == Rt2)
5379        return Error(Operands[3]->getStartLoc(),
5380                     "base register needs to be different from destination "
5381                     "registers");
5382    }
5383
5384    return false;
5385  }
5386  case ARM::t2LDRDi8:
5387  case ARM::t2LDRD_PRE:
5388  case ARM::t2LDRD_POST: {
5389    // Rt2 must be different from Rt.
5390    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5391    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5392    if (Rt2 == Rt)
5393      return Error(Operands[3]->getStartLoc(),
5394                   "destination operands can't be identical");
5395    return false;
5396  }
5397  case ARM::STRD: {
5398    // Rt2 must be Rt + 1.
5399    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5400    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5401    if (Rt2 != Rt + 1)
5402      return Error(Operands[3]->getStartLoc(),
5403                   "source operands must be sequential");
5404    return false;
5405  }
5406  case ARM::STRD_PRE:
5407  case ARM::STRD_POST: {
5408    // Rt2 must be Rt + 1.
5409    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5410    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5411    if (Rt2 != Rt + 1)
5412      return Error(Operands[3]->getStartLoc(),
5413                   "source operands must be sequential");
5414    return false;
5415  }
5416  case ARM::SBFX:
5417  case ARM::UBFX: {
5418    // Width must be in range [1, 32-lsb].
5419    unsigned LSB = Inst.getOperand(2).getImm();
5420    unsigned Widthm1 = Inst.getOperand(3).getImm();
5421    if (Widthm1 >= 32 - LSB)
5422      return Error(Operands[5]->getStartLoc(),
5423                   "bitfield width must be in range [1,32-lsb]");
5424    return false;
5425  }
5426  // Notionally handles ARM::tLDMIA_UPD too.
5427  case ARM::tLDMIA: {
5428    // If we're parsing Thumb2, the .w variant is available and handles
5429    // most cases that are normally illegal for a Thumb1 LDM instruction.
5430    // We'll make the transformation in processInstruction() if necessary.
5431    //
5432    // Thumb LDM instructions are writeback iff the base register is not
5433    // in the register list.
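    // For example, "ldm r0!, {r1, r2}" requires the '!' since r0 is not in
    // the list, while "ldm r0, {r0, r1}" must not have one.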
5434    unsigned Rn = Inst.getOperand(0).getReg();
5435    bool HasWritebackToken =
5436      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5437       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5438    bool ListContainsBase;
5439    if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
5440      return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
5441                   "registers must be in range r0-r7");
5442    // If we should have writeback, then there should be a '!' token.
5443    if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
5444      return Error(Operands[2]->getStartLoc(),
5445                   "writeback operator '!' expected");
5446    // If we should not have writeback, there must not be a '!'. This is
5447    // true even for the 32-bit wide encodings.
5448    if (ListContainsBase && HasWritebackToken)
5449      return Error(Operands[3]->getStartLoc(),
5450                   "writeback operator '!' not allowed when base register "
5451                   "in register list");
5452
5453    break;
5454  }
5455  case ARM::LDMIA_UPD:
5456  case ARM::LDMDB_UPD:
5457  case ARM::LDMIB_UPD:
5458  case ARM::LDMDA_UPD:
5459    // ARM variants loading and updating the same register are only officially
5460    // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
5461    if (!hasV7Ops())
5462      break;
5463    // Fallthrough
5464  case ARM::t2LDMIA_UPD:
5465  case ARM::t2LDMDB_UPD:
5466  case ARM::t2STMIA_UPD:
5467  case ARM::t2STMDB_UPD: {
5468    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5469      return Error(Operands.back()->getStartLoc(),
5470                   "writeback register not allowed in register list");
5471    break;
5472  }
5473  case ARM::tMUL: {
5474    // The second source operand must be the same register as the destination
5475    // operand.
5476    //
5477    // In this case, we must directly check the parsed operands because the
5478    // cvtThumbMultiply() function is written in such a way that it guarantees
5479    // this first statement is always true for the new Inst.  Essentially, the
5480    // destination is unconditionally copied into the second source operand
5481    // without checking to see if it matches what we actually parsed.
5482    if (Operands.size() == 6 &&
5483        (((ARMOperand*)Operands[3])->getReg() !=
5484         ((ARMOperand*)Operands[5])->getReg()) &&
5485        (((ARMOperand*)Operands[3])->getReg() !=
5486         ((ARMOperand*)Operands[4])->getReg())) {
5487      return Error(Operands[3]->getStartLoc(),
5488                   "destination register must match source register");
5489    }
5490    break;
5491  }
5492  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5493  // so only issue a diagnostic for thumb1. The instructions will be
5494  // switched to the t2 encodings in processInstruction() if necessary.
5495  case ARM::tPOP: {
5496    bool ListContainsBase;
5497    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
5498        !isThumbTwo())
5499      return Error(Operands[2]->getStartLoc(),
5500                   "registers must be in range r0-r7 or pc");
5501    break;
5502  }
5503  case ARM::tPUSH: {
5504    bool ListContainsBase;
5505    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
5506        !isThumbTwo())
5507      return Error(Operands[2]->getStartLoc(),
5508                   "registers must be in range r0-r7 or lr");
5509    break;
5510  }
5511  case ARM::tSTMIA_UPD: {
5512    bool ListContainsBase, InvalidLowList;
5513    InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
5514                                          0, ListContainsBase);
5515    if (InvalidLowList && !isThumbTwo())
5516      return Error(Operands[4]->getStartLoc(),
5517                   "registers must be in range r0-r7");
5518
5519    // This would be converted to a 32-bit stm, but that's not valid if the
5520    // writeback register is in the list.
5521    if (InvalidLowList && ListContainsBase)
5522      return Error(Operands[4]->getStartLoc(),
5523                   "writeback operator '!' not allowed when base register "
5524                   "in register list");
5525    break;
5526  }
5527  case ARM::tADDrSP: {
5528    // If the non-SP source operand and the destination operand are not the
5529    // same, we need thumb2 (for the wide encoding), or we have an error.
5530    if (!isThumbTwo() &&
5531        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5532      return Error(Operands[4]->getStartLoc(),
5533                   "source register must be the same as destination");
5534    }
5535    break;
5536  }
5537  // Final range checking for Thumb unconditional branch instructions.
5538  case ARM::tB:
5539    if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<11, 1>())
5540      return Error(Operands[2]->getStartLoc(), "branch target out of range");
5541    break;
5542  case ARM::t2B: {
5543    int op = (Operands[2]->isImm()) ? 2 : 3;
5544    if (!(static_cast<ARMOperand*>(Operands[op]))->isSignedOffset<24, 1>())
5545      return Error(Operands[op]->getStartLoc(), "branch target out of range");
5546    break;
5547  }
5548  // Final range checking for Thumb conditional branch instructions.
5549  case ARM::tBcc:
5550    if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<8, 1>())
5551      return Error(Operands[2]->getStartLoc(), "branch target out of range");
5552    break;
5553  case ARM::t2Bcc: {
5554    int Op = (Operands[2]->isImm()) ? 2 : 3;
5555    if (!(static_cast<ARMOperand*>(Operands[Op]))->isSignedOffset<20, 1>())
5556      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
5557    break;
5558  }
5559  }
5560
5561  return false;
5562}
5563
5564static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5565  switch(Opc) {
5566  default: llvm_unreachable("unexpected opcode!");
5567  // VST1LN
5568  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5569  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5570  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5571  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5572  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5573  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5574  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5575  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5576  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5577
5578  // VST2LN
5579  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5580  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5581  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5582  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5583  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5584
5585  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5586  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5587  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5588  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5589  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5590
5591  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5592  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5593  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5594  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5595  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5596
5597  // VST3LN
5598  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5599  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5600  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5601  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5602  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5603  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5604  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5605  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5606  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5607  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5608  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5609  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5610  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5611  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5612  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5613
5614  // VST3
5615  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5616  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5617  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5618  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5619  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5620  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5621  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5622  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5623  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5624  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5625  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5626  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5627  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5628  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5629  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5630  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5631  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5632  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5633
5634  // VST4LN
5635  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5636  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5637  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5638  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5639  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5640  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5641  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5642  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5643  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5644  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5645  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5646  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5647  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5648  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5649  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5650
5651  // VST4
5652  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5653  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5654  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5655  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5656  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5657  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5658  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5659  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5660  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5661  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5662  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5663  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5664  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5665  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5666  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5667  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5668  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5669  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5670  }
5671}
5672
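// Counterpart of getRealVSTOpcode for the NEON VLD complex aliases: translate
// the "Asm" pseudo-opcode to the real load opcode and report the register-list
// spacing in the same way.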
5673static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5674  switch(Opc) {
5675  default: llvm_unreachable("unexpected opcode!");
5676  // VLD1LN
5677  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5678  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5679  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5680  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5681  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5682  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5683  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5684  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5685  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5686
5687  // VLD2LN
5688  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5689  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5690  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5691  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5692  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5693  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5694  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5695  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5696  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5697  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5698  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5699  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5700  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5701  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5702  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5703
5704  // VLD3DUP
5705  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5706  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5707  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5708  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5709  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5710  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5711  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5712  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5713  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5714  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5715  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5716  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5717  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5718  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5719  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5720  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5721  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5722  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5723
5724  // VLD3LN
5725  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5726  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5727  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5728  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5729  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5730  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5731  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5732  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5733  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5734  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5735  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5736  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5737  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5738  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5739  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5740
5741  // VLD3
5742  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5743  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5744  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5745  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5746  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5747  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5748  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5749  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5750  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5751  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5752  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5753  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5754  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5755  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5756  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5757  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5758  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5759  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5760
5761  // VLD4LN
5762  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5763  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5764  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5765  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5766  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5767  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5768  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5769  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5770  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5771  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5772  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5773  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5774  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5775  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5776  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5777
5778  // VLD4DUP
5779  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5780  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5781  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5782  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5783  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5784  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5785  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5786  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5787  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5788  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5789  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5790  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5791  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5792  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5793  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5794  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5795  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5796  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5797
5798  // VLD4
5799  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5800  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5801  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5802  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5803  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5804  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5805  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5806  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5807  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5808  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5809  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5810  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5811  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5812  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5813  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5814  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5815  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5816  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5817  }
5818}
5819
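// Rewrite pseudo-instructions and convenience aliases accepted by the matcher
// (the ADR/LDR pc-relative forms, the NEON VLD/VST list pseudos, etc.) into
// the MCInst form the encoder expects. Returns true if Inst was rewritten
// (callers are expected to re-run it until it returns false), and false once
// the instruction needs no further rewriting.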
5820bool ARMAsmParser::
5821processInstruction(MCInst &Inst,
5822                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5823  switch (Inst.getOpcode()) {
5824  // Alias for alternate form of 'ADR Rd, #imm' instruction.
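  // 'add Rd, pc, #imm' with no flag-setting suffix is accepted as a synonym
  // for 'adr Rd, #imm', so when Rn is PC and the cc_out operand is empty the
  // ADDri is rewritten into the ADR form here.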
5825  case ARM::ADDri: {
5826    if (Inst.getOperand(1).getReg() != ARM::PC ||
5827        Inst.getOperand(5).getReg() != 0)
5828      return false;
5829    MCInst TmpInst;
5830    TmpInst.setOpcode(ARM::ADR);
5831    TmpInst.addOperand(Inst.getOperand(0));
5832    TmpInst.addOperand(Inst.getOperand(2));
5833    TmpInst.addOperand(Inst.getOperand(3));
5834    TmpInst.addOperand(Inst.getOperand(4));
5835    Inst = TmpInst;
5836    return true;
5837  }
5838  // Aliases for alternate PC+imm syntax of LDR instructions.
5839  case ARM::t2LDRpcrel:
5840    // Select the narrow version if the immediate will fit.
5841    if (Inst.getOperand(1).getImm() > 0 &&
5842        Inst.getOperand(1).getImm() <= 0xff &&
5843        !(static_cast<ARMOperand*>(Operands[2])->isToken() &&
5844         static_cast<ARMOperand*>(Operands[2])->getToken() == ".w"))
5845      Inst.setOpcode(ARM::tLDRpci);
5846    else
5847      Inst.setOpcode(ARM::t2LDRpci);
5848    return true;
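  // Only the word-sized load has a 16-bit literal-pool encoding (tLDRpci);
  // the byte, halfword and signed variants below always use the wide Thumb-2
  // encodings.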
5849  case ARM::t2LDRBpcrel:
5850    Inst.setOpcode(ARM::t2LDRBpci);
5851    return true;
5852  case ARM::t2LDRHpcrel:
5853    Inst.setOpcode(ARM::t2LDRHpci);
5854    return true;
5855  case ARM::t2LDRSBpcrel:
5856    Inst.setOpcode(ARM::t2LDRSBpci);
5857    return true;
5858  case ARM::t2LDRSHpcrel:
5859    Inst.setOpcode(ARM::t2LDRSHpci);
5860    return true;
5861  // Handle NEON VST complex aliases.
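  // The VST "Asm" pseudos carry their operands in source order: register-list
  // start (Vd), lane index, Rn, alignment, optional Rm, then the predicate
  // operands. The real instructions want the writeback result and address
  // operands first, followed by the complete register list and the lane, so
  // each case below rebuilds the operand list in that order, using the
  // Spacing value from getRealVSTOpcode to materialize the extra list
  // registers.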
5862  case ARM::VST1LNdWB_register_Asm_8:
5863  case ARM::VST1LNdWB_register_Asm_16:
5864  case ARM::VST1LNdWB_register_Asm_32: {
5865    MCInst TmpInst;
5866    // Shuffle the operands around so the lane index operand is in the
5867    // right place.
5868    unsigned Spacing;
5869    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5870    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5871    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5872    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5873    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5874    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5875    TmpInst.addOperand(Inst.getOperand(1)); // lane
5876    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5877    TmpInst.addOperand(Inst.getOperand(6));
5878    Inst = TmpInst;
5879    return true;
5880  }
5881
5882  case ARM::VST2LNdWB_register_Asm_8:
5883  case ARM::VST2LNdWB_register_Asm_16:
5884  case ARM::VST2LNdWB_register_Asm_32:
5885  case ARM::VST2LNqWB_register_Asm_16:
5886  case ARM::VST2LNqWB_register_Asm_32: {
5887    MCInst TmpInst;
5888    // Shuffle the operands around so the lane index operand is in the
5889    // right place.
5890    unsigned Spacing;
5891    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5892    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5893    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5894    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5895    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5896    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5897    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5898                                            Spacing));
5899    TmpInst.addOperand(Inst.getOperand(1)); // lane
5900    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5901    TmpInst.addOperand(Inst.getOperand(6));
5902    Inst = TmpInst;
5903    return true;
5904  }
5905
5906  case ARM::VST3LNdWB_register_Asm_8:
5907  case ARM::VST3LNdWB_register_Asm_16:
5908  case ARM::VST3LNdWB_register_Asm_32:
5909  case ARM::VST3LNqWB_register_Asm_16:
5910  case ARM::VST3LNqWB_register_Asm_32: {
5911    MCInst TmpInst;
5912    // Shuffle the operands around so the lane index operand is in the
5913    // right place.
5914    unsigned Spacing;
5915    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5916    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5917    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5918    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5919    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5920    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5921    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5922                                            Spacing));
5923    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5924                                            Spacing * 2));
5925    TmpInst.addOperand(Inst.getOperand(1)); // lane
5926    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5927    TmpInst.addOperand(Inst.getOperand(6));
5928    Inst = TmpInst;
5929    return true;
5930  }
5931
5932  case ARM::VST4LNdWB_register_Asm_8:
5933  case ARM::VST4LNdWB_register_Asm_16:
5934  case ARM::VST4LNdWB_register_Asm_32:
5935  case ARM::VST4LNqWB_register_Asm_16:
5936  case ARM::VST4LNqWB_register_Asm_32: {
5937    MCInst TmpInst;
5938    // Shuffle the operands around so the lane index operand is in the
5939    // right place.
5940    unsigned Spacing;
5941    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5942    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5943    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5944    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5945    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5946    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5947    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5948                                            Spacing));
5949    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5950                                            Spacing * 2));
5951    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5952                                            Spacing * 3));
5953    TmpInst.addOperand(Inst.getOperand(1)); // lane
5954    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5955    TmpInst.addOperand(Inst.getOperand(6));
5956    Inst = TmpInst;
5957    return true;
5958  }
5959
5960  case ARM::VST1LNdWB_fixed_Asm_8:
5961  case ARM::VST1LNdWB_fixed_Asm_16:
5962  case ARM::VST1LNdWB_fixed_Asm_32: {
5963    MCInst TmpInst;
5964    // Shuffle the operands around so the lane index operand is in the
5965    // right place.
5966    unsigned Spacing;
5967    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5968    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5969    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5970    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5971    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5972    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5973    TmpInst.addOperand(Inst.getOperand(1)); // lane
5974    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5975    TmpInst.addOperand(Inst.getOperand(5));
5976    Inst = TmpInst;
5977    return true;
5978  }
5979
5980  case ARM::VST2LNdWB_fixed_Asm_8:
5981  case ARM::VST2LNdWB_fixed_Asm_16:
5982  case ARM::VST2LNdWB_fixed_Asm_32:
5983  case ARM::VST2LNqWB_fixed_Asm_16:
5984  case ARM::VST2LNqWB_fixed_Asm_32: {
5985    MCInst TmpInst;
5986    // Shuffle the operands around so the lane index operand is in the
5987    // right place.
5988    unsigned Spacing;
5989    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5990    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5991    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5992    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5993    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5994    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5995    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5996                                            Spacing));
5997    TmpInst.addOperand(Inst.getOperand(1)); // lane
5998    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5999    TmpInst.addOperand(Inst.getOperand(5));
6000    Inst = TmpInst;
6001    return true;
6002  }
6003
6004  case ARM::VST3LNdWB_fixed_Asm_8:
6005  case ARM::VST3LNdWB_fixed_Asm_16:
6006  case ARM::VST3LNdWB_fixed_Asm_32:
6007  case ARM::VST3LNqWB_fixed_Asm_16:
6008  case ARM::VST3LNqWB_fixed_Asm_32: {
6009    MCInst TmpInst;
6010    // Shuffle the operands around so the lane index operand is in the
6011    // right place.
6012    unsigned Spacing;
6013    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6014    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6015    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6016    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6017    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6018    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6019    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6020                                            Spacing));
6021    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6022                                            Spacing * 2));
6023    TmpInst.addOperand(Inst.getOperand(1)); // lane
6024    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6025    TmpInst.addOperand(Inst.getOperand(5));
6026    Inst = TmpInst;
6027    return true;
6028  }
6029
6030  case ARM::VST4LNdWB_fixed_Asm_8:
6031  case ARM::VST4LNdWB_fixed_Asm_16:
6032  case ARM::VST4LNdWB_fixed_Asm_32:
6033  case ARM::VST4LNqWB_fixed_Asm_16:
6034  case ARM::VST4LNqWB_fixed_Asm_32: {
6035    MCInst TmpInst;
6036    // Shuffle the operands around so the lane index operand is in the
6037    // right place.
6038    unsigned Spacing;
6039    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6040    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6041    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6042    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6043    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6044    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6045    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6046                                            Spacing));
6047    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6048                                            Spacing * 2));
6049    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6050                                            Spacing * 3));
6051    TmpInst.addOperand(Inst.getOperand(1)); // lane
6052    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6053    TmpInst.addOperand(Inst.getOperand(5));
6054    Inst = TmpInst;
6055    return true;
6056  }
6057
6058  case ARM::VST1LNdAsm_8:
6059  case ARM::VST1LNdAsm_16:
6060  case ARM::VST1LNdAsm_32: {
6061    MCInst TmpInst;
6062    // Shuffle the operands around so the lane index operand is in the
6063    // right place.
6064    unsigned Spacing;
6065    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6066    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6067    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6068    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6069    TmpInst.addOperand(Inst.getOperand(1)); // lane
6070    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6071    TmpInst.addOperand(Inst.getOperand(5));
6072    Inst = TmpInst;
6073    return true;
6074  }
6075
6076  case ARM::VST2LNdAsm_8:
6077  case ARM::VST2LNdAsm_16:
6078  case ARM::VST2LNdAsm_32:
6079  case ARM::VST2LNqAsm_16:
6080  case ARM::VST2LNqAsm_32: {
6081    MCInst TmpInst;
6082    // Shuffle the operands around so the lane index operand is in the
6083    // right place.
6084    unsigned Spacing;
6085    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6086    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6087    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6088    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6089    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6090                                            Spacing));
6091    TmpInst.addOperand(Inst.getOperand(1)); // lane
6092    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6093    TmpInst.addOperand(Inst.getOperand(5));
6094    Inst = TmpInst;
6095    return true;
6096  }
6097
6098  case ARM::VST3LNdAsm_8:
6099  case ARM::VST3LNdAsm_16:
6100  case ARM::VST3LNdAsm_32:
6101  case ARM::VST3LNqAsm_16:
6102  case ARM::VST3LNqAsm_32: {
6103    MCInst TmpInst;
6104    // Shuffle the operands around so the lane index operand is in the
6105    // right place.
6106    unsigned Spacing;
6107    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6108    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6109    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6110    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6111    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6112                                            Spacing));
6113    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6114                                            Spacing * 2));
6115    TmpInst.addOperand(Inst.getOperand(1)); // lane
6116    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6117    TmpInst.addOperand(Inst.getOperand(5));
6118    Inst = TmpInst;
6119    return true;
6120  }
6121
6122  case ARM::VST4LNdAsm_8:
6123  case ARM::VST4LNdAsm_16:
6124  case ARM::VST4LNdAsm_32:
6125  case ARM::VST4LNqAsm_16:
6126  case ARM::VST4LNqAsm_32: {
6127    MCInst TmpInst;
6128    // Shuffle the operands around so the lane index operand is in the
6129    // right place.
6130    unsigned Spacing;
6131    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6132    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6133    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6134    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6135    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6136                                            Spacing));
6137    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6138                                            Spacing * 2));
6139    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6140                                            Spacing * 3));
6141    TmpInst.addOperand(Inst.getOperand(1)); // lane
6142    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6143    TmpInst.addOperand(Inst.getOperand(5));
6144    Inst = TmpInst;
6145    return true;
6146  }
6147
6148  // Handle NEON VLD complex aliases.
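  // Loads are reshuffled the same way, except that the real VLD instructions
  // list the destination registers first, and the single-lane loads below
  // also take the original register list again as tied source operands (the
  // lanes that are not loaded are preserved), so each destination register
  // appears twice in the rebuilt operand list.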
6149  case ARM::VLD1LNdWB_register_Asm_8:
6150  case ARM::VLD1LNdWB_register_Asm_16:
6151  case ARM::VLD1LNdWB_register_Asm_32: {
6152    MCInst TmpInst;
6153    // Shuffle the operands around so the lane index operand is in the
6154    // right place.
6155    unsigned Spacing;
6156    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6157    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6158    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6159    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6160    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6161    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6162    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6163    TmpInst.addOperand(Inst.getOperand(1)); // lane
6164    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6165    TmpInst.addOperand(Inst.getOperand(6));
6166    Inst = TmpInst;
6167    return true;
6168  }
6169
6170  case ARM::VLD2LNdWB_register_Asm_8:
6171  case ARM::VLD2LNdWB_register_Asm_16:
6172  case ARM::VLD2LNdWB_register_Asm_32:
6173  case ARM::VLD2LNqWB_register_Asm_16:
6174  case ARM::VLD2LNqWB_register_Asm_32: {
6175    MCInst TmpInst;
6176    // Shuffle the operands around so the lane index operand is in the
6177    // right place.
6178    unsigned Spacing;
6179    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6180    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6181    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6182                                            Spacing));
6183    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6184    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6185    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6186    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6187    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6188    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6189                                            Spacing));
6190    TmpInst.addOperand(Inst.getOperand(1)); // lane
6191    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6192    TmpInst.addOperand(Inst.getOperand(6));
6193    Inst = TmpInst;
6194    return true;
6195  }
6196
6197  case ARM::VLD3LNdWB_register_Asm_8:
6198  case ARM::VLD3LNdWB_register_Asm_16:
6199  case ARM::VLD3LNdWB_register_Asm_32:
6200  case ARM::VLD3LNqWB_register_Asm_16:
6201  case ARM::VLD3LNqWB_register_Asm_32: {
6202    MCInst TmpInst;
6203    // Shuffle the operands around so the lane index operand is in the
6204    // right place.
6205    unsigned Spacing;
6206    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6207    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6208    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6209                                            Spacing));
6210    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6211                                            Spacing * 2));
6212    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6213    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6214    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6215    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6216    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6217    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6218                                            Spacing));
6219    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6220                                            Spacing * 2));
6221    TmpInst.addOperand(Inst.getOperand(1)); // lane
6222    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6223    TmpInst.addOperand(Inst.getOperand(6));
6224    Inst = TmpInst;
6225    return true;
6226  }
6227
6228  case ARM::VLD4LNdWB_register_Asm_8:
6229  case ARM::VLD4LNdWB_register_Asm_16:
6230  case ARM::VLD4LNdWB_register_Asm_32:
6231  case ARM::VLD4LNqWB_register_Asm_16:
6232  case ARM::VLD4LNqWB_register_Asm_32: {
6233    MCInst TmpInst;
6234    // Shuffle the operands around so the lane index operand is in the
6235    // right place.
6236    unsigned Spacing;
6237    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6238    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6239    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6240                                            Spacing));
6241    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6242                                            Spacing * 2));
6243    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6244                                            Spacing * 3));
6245    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6246    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6247    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6248    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6249    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6250    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6251                                            Spacing));
6252    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6253                                            Spacing * 2));
6254    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6255                                            Spacing * 3));
6256    TmpInst.addOperand(Inst.getOperand(1)); // lane
6257    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6258    TmpInst.addOperand(Inst.getOperand(6));
6259    Inst = TmpInst;
6260    return true;
6261  }
6262
6263  case ARM::VLD1LNdWB_fixed_Asm_8:
6264  case ARM::VLD1LNdWB_fixed_Asm_16:
6265  case ARM::VLD1LNdWB_fixed_Asm_32: {
6266    MCInst TmpInst;
6267    // Shuffle the operands around so the lane index operand is in the
6268    // right place.
6269    unsigned Spacing;
6270    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6271    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6272    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6273    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6274    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6275    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6276    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6277    TmpInst.addOperand(Inst.getOperand(1)); // lane
6278    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6279    TmpInst.addOperand(Inst.getOperand(5));
6280    Inst = TmpInst;
6281    return true;
6282  }
6283
6284  case ARM::VLD2LNdWB_fixed_Asm_8:
6285  case ARM::VLD2LNdWB_fixed_Asm_16:
6286  case ARM::VLD2LNdWB_fixed_Asm_32:
6287  case ARM::VLD2LNqWB_fixed_Asm_16:
6288  case ARM::VLD2LNqWB_fixed_Asm_32: {
6289    MCInst TmpInst;
6290    // Shuffle the operands around so the lane index operand is in the
6291    // right place.
6292    unsigned Spacing;
6293    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6294    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6295    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6296                                            Spacing));
6297    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6298    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6299    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6300    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6301    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6302    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6303                                            Spacing));
6304    TmpInst.addOperand(Inst.getOperand(1)); // lane
6305    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6306    TmpInst.addOperand(Inst.getOperand(5));
6307    Inst = TmpInst;
6308    return true;
6309  }
6310
6311  case ARM::VLD3LNdWB_fixed_Asm_8:
6312  case ARM::VLD3LNdWB_fixed_Asm_16:
6313  case ARM::VLD3LNdWB_fixed_Asm_32:
6314  case ARM::VLD3LNqWB_fixed_Asm_16:
6315  case ARM::VLD3LNqWB_fixed_Asm_32: {
6316    MCInst TmpInst;
6317    // Shuffle the operands around so the lane index operand is in the
6318    // right place.
6319    unsigned Spacing;
6320    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6321    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6322    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6323                                            Spacing));
6324    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6325                                            Spacing * 2));
6326    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6327    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6328    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6329    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6330    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6331    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6332                                            Spacing));
6333    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6334                                            Spacing * 2));
6335    TmpInst.addOperand(Inst.getOperand(1)); // lane
6336    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6337    TmpInst.addOperand(Inst.getOperand(5));
6338    Inst = TmpInst;
6339    return true;
6340  }
6341
6342  case ARM::VLD4LNdWB_fixed_Asm_8:
6343  case ARM::VLD4LNdWB_fixed_Asm_16:
6344  case ARM::VLD4LNdWB_fixed_Asm_32:
6345  case ARM::VLD4LNqWB_fixed_Asm_16:
6346  case ARM::VLD4LNqWB_fixed_Asm_32: {
6347    MCInst TmpInst;
6348    // Shuffle the operands around so the lane index operand is in the
6349    // right place.
6350    unsigned Spacing;
6351    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6352    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6353    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6354                                            Spacing));
6355    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6356                                            Spacing * 2));
6357    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6358                                            Spacing * 3));
6359    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6360    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6361    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6362    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6363    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6364    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6365                                            Spacing));
6366    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6367                                            Spacing * 2));
6368    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6369                                            Spacing * 3));
6370    TmpInst.addOperand(Inst.getOperand(1)); // lane
6371    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6372    TmpInst.addOperand(Inst.getOperand(5));
6373    Inst = TmpInst;
6374    return true;
6375  }
6376
6377  case ARM::VLD1LNdAsm_8:
6378  case ARM::VLD1LNdAsm_16:
6379  case ARM::VLD1LNdAsm_32: {
6380    MCInst TmpInst;
6381    // Shuffle the operands around so the lane index operand is in the
6382    // right place.
6383    unsigned Spacing;
6384    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6385    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6386    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6387    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6388    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6389    TmpInst.addOperand(Inst.getOperand(1)); // lane
6390    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6391    TmpInst.addOperand(Inst.getOperand(5));
6392    Inst = TmpInst;
6393    return true;
6394  }
6395
6396  case ARM::VLD2LNdAsm_8:
6397  case ARM::VLD2LNdAsm_16:
6398  case ARM::VLD2LNdAsm_32:
6399  case ARM::VLD2LNqAsm_16:
6400  case ARM::VLD2LNqAsm_32: {
6401    MCInst TmpInst;
6402    // Shuffle the operands around so the lane index operand is in the
6403    // right place.
6404    unsigned Spacing;
6405    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6406    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6407    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6408                                            Spacing));
6409    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6410    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6411    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6412    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6413                                            Spacing));
6414    TmpInst.addOperand(Inst.getOperand(1)); // lane
6415    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6416    TmpInst.addOperand(Inst.getOperand(5));
6417    Inst = TmpInst;
6418    return true;
6419  }
6420
6421  case ARM::VLD3LNdAsm_8:
6422  case ARM::VLD3LNdAsm_16:
6423  case ARM::VLD3LNdAsm_32:
6424  case ARM::VLD3LNqAsm_16:
6425  case ARM::VLD3LNqAsm_32: {
6426    MCInst TmpInst;
6427    // Shuffle the operands around so the lane index operand is in the
6428    // right place.
6429    unsigned Spacing;
6430    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6431    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6432    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6433                                            Spacing));
6434    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6435                                            Spacing * 2));
6436    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6437    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6438    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6439    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6440                                            Spacing));
6441    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6442                                            Spacing * 2));
6443    TmpInst.addOperand(Inst.getOperand(1)); // lane
6444    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6445    TmpInst.addOperand(Inst.getOperand(5));
6446    Inst = TmpInst;
6447    return true;
6448  }
6449
6450  case ARM::VLD4LNdAsm_8:
6451  case ARM::VLD4LNdAsm_16:
6452  case ARM::VLD4LNdAsm_32:
6453  case ARM::VLD4LNqAsm_16:
6454  case ARM::VLD4LNqAsm_32: {
6455    MCInst TmpInst;
6456    // Shuffle the operands around so the lane index operand is in the
6457    // right place.
6458    unsigned Spacing;
6459    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6460    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6461    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6462                                            Spacing));
6463    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6464                                            Spacing * 2));
6465    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6466                                            Spacing * 3));
6467    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6468    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6469    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6470    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6471                                            Spacing));
6472    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6473                                            Spacing * 2));
6474    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6475                                            Spacing * 3));
6476    TmpInst.addOperand(Inst.getOperand(1)); // lane
6477    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6478    TmpInst.addOperand(Inst.getOperand(5));
6479    Inst = TmpInst;
6480    return true;
6481  }
6482
6483  // VLD3DUP single 3-element structure to all lanes instructions.
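  // e.g. 'vld3.8 {d0[], d1[], d2[]}, [r4]' (Spacing 1) or
  //      'vld3.16 {d0[], d2[], d4[]}, [r4]' (Spacing 2 for the q forms).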
6484  case ARM::VLD3DUPdAsm_8:
6485  case ARM::VLD3DUPdAsm_16:
6486  case ARM::VLD3DUPdAsm_32:
6487  case ARM::VLD3DUPqAsm_8:
6488  case ARM::VLD3DUPqAsm_16:
6489  case ARM::VLD3DUPqAsm_32: {
6490    MCInst TmpInst;
6491    unsigned Spacing;
6492    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6493    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6494    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6495                                            Spacing));
6496    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6497                                            Spacing * 2));
6498    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6499    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6500    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6501    TmpInst.addOperand(Inst.getOperand(4));
6502    Inst = TmpInst;
6503    return true;
6504  }
6505
6506  case ARM::VLD3DUPdWB_fixed_Asm_8:
6507  case ARM::VLD3DUPdWB_fixed_Asm_16:
6508  case ARM::VLD3DUPdWB_fixed_Asm_32:
6509  case ARM::VLD3DUPqWB_fixed_Asm_8:
6510  case ARM::VLD3DUPqWB_fixed_Asm_16:
6511  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6512    MCInst TmpInst;
6513    unsigned Spacing;
6514    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6515    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6516    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6517                                            Spacing));
6518    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6519                                            Spacing * 2));
6520    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6521    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6522    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6523    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6524    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6525    TmpInst.addOperand(Inst.getOperand(4));
6526    Inst = TmpInst;
6527    return true;
6528  }
6529
6530  case ARM::VLD3DUPdWB_register_Asm_8:
6531  case ARM::VLD3DUPdWB_register_Asm_16:
6532  case ARM::VLD3DUPdWB_register_Asm_32:
6533  case ARM::VLD3DUPqWB_register_Asm_8:
6534  case ARM::VLD3DUPqWB_register_Asm_16:
6535  case ARM::VLD3DUPqWB_register_Asm_32: {
6536    MCInst TmpInst;
6537    unsigned Spacing;
6538    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6539    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6540    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6541                                            Spacing));
6542    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6543                                            Spacing * 2));
6544    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6545    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6546    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6547    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6548    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6549    TmpInst.addOperand(Inst.getOperand(5));
6550    Inst = TmpInst;
6551    return true;
6552  }
6553
6554  // VLD3 multiple 3-element structure instructions.
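  // e.g. 'vld3.8 {d0, d1, d2}, [r4]' (Spacing 1) or
  //      'vld3.8 {d0, d2, d4}, [r4]' (Spacing 2 for the q forms).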
6555  case ARM::VLD3dAsm_8:
6556  case ARM::VLD3dAsm_16:
6557  case ARM::VLD3dAsm_32:
6558  case ARM::VLD3qAsm_8:
6559  case ARM::VLD3qAsm_16:
6560  case ARM::VLD3qAsm_32: {
6561    MCInst TmpInst;
6562    unsigned Spacing;
6563    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6564    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6565    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6566                                            Spacing));
6567    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6568                                            Spacing * 2));
6569    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6570    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6571    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6572    TmpInst.addOperand(Inst.getOperand(4));
6573    Inst = TmpInst;
6574    return true;
6575  }
6576
6577  case ARM::VLD3dWB_fixed_Asm_8:
6578  case ARM::VLD3dWB_fixed_Asm_16:
6579  case ARM::VLD3dWB_fixed_Asm_32:
6580  case ARM::VLD3qWB_fixed_Asm_8:
6581  case ARM::VLD3qWB_fixed_Asm_16:
6582  case ARM::VLD3qWB_fixed_Asm_32: {
6583    MCInst TmpInst;
6584    unsigned Spacing;
6585    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6586    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6587    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6588                                            Spacing));
6589    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6590                                            Spacing * 2));
6591    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6592    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6593    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6594    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6595    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6596    TmpInst.addOperand(Inst.getOperand(4));
6597    Inst = TmpInst;
6598    return true;
6599  }
6600
6601  case ARM::VLD3dWB_register_Asm_8:
6602  case ARM::VLD3dWB_register_Asm_16:
6603  case ARM::VLD3dWB_register_Asm_32:
6604  case ARM::VLD3qWB_register_Asm_8:
6605  case ARM::VLD3qWB_register_Asm_16:
6606  case ARM::VLD3qWB_register_Asm_32: {
6607    MCInst TmpInst;
6608    unsigned Spacing;
6609    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6610    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6611    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6612                                            Spacing));
6613    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6614                                            Spacing * 2));
6615    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6616    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6617    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6618    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6619    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6620    TmpInst.addOperand(Inst.getOperand(5));
6621    Inst = TmpInst;
6622    return true;
6623  }
6624
6625  // VLD4DUP single 4-element structure to all lanes instructions.
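  // e.g. 'vld4.8 {d0[], d1[], d2[], d3[]}, [r4]' or, double-spaced,
  //      'vld4.16 {d0[], d2[], d4[], d6[]}, [r4]'.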
6626  case ARM::VLD4DUPdAsm_8:
6627  case ARM::VLD4DUPdAsm_16:
6628  case ARM::VLD4DUPdAsm_32:
6629  case ARM::VLD4DUPqAsm_8:
6630  case ARM::VLD4DUPqAsm_16:
6631  case ARM::VLD4DUPqAsm_32: {
6632    MCInst TmpInst;
6633    unsigned Spacing;
6634    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6635    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6636    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6637                                            Spacing));
6638    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6639                                            Spacing * 2));
6640    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6641                                            Spacing * 3));
6642    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6643    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6644    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6645    TmpInst.addOperand(Inst.getOperand(4));
6646    Inst = TmpInst;
6647    return true;
6648  }
6649
6650  case ARM::VLD4DUPdWB_fixed_Asm_8:
6651  case ARM::VLD4DUPdWB_fixed_Asm_16:
6652  case ARM::VLD4DUPdWB_fixed_Asm_32:
6653  case ARM::VLD4DUPqWB_fixed_Asm_8:
6654  case ARM::VLD4DUPqWB_fixed_Asm_16:
6655  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6656    MCInst TmpInst;
6657    unsigned Spacing;
6658    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6659    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6660    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6661                                            Spacing));
6662    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6663                                            Spacing * 2));
6664    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6665                                            Spacing * 3));
6666    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6667    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6668    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6669    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6670    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6671    TmpInst.addOperand(Inst.getOperand(4));
6672    Inst = TmpInst;
6673    return true;
6674  }
6675
6676  case ARM::VLD4DUPdWB_register_Asm_8:
6677  case ARM::VLD4DUPdWB_register_Asm_16:
6678  case ARM::VLD4DUPdWB_register_Asm_32:
6679  case ARM::VLD4DUPqWB_register_Asm_8:
6680  case ARM::VLD4DUPqWB_register_Asm_16:
6681  case ARM::VLD4DUPqWB_register_Asm_32: {
6682    MCInst TmpInst;
6683    unsigned Spacing;
6684    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6685    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6686    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6687                                            Spacing));
6688    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6689                                            Spacing * 2));
6690    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6691                                            Spacing * 3));
6692    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6693    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6694    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6695    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6696    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6697    TmpInst.addOperand(Inst.getOperand(5));
6698    Inst = TmpInst;
6699    return true;
6700  }
6701
6702  // VLD4 multiple 4-element structure instructions.
6703  case ARM::VLD4dAsm_8:
6704  case ARM::VLD4dAsm_16:
6705  case ARM::VLD4dAsm_32:
6706  case ARM::VLD4qAsm_8:
6707  case ARM::VLD4qAsm_16:
6708  case ARM::VLD4qAsm_32: {
6709    MCInst TmpInst;
6710    unsigned Spacing;
6711    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6712    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6713    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6714                                            Spacing));
6715    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6716                                            Spacing * 2));
6717    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6718                                            Spacing * 3));
6719    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6720    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6721    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6722    TmpInst.addOperand(Inst.getOperand(4));
6723    Inst = TmpInst;
6724    return true;
6725  }
6726
6727  case ARM::VLD4dWB_fixed_Asm_8:
6728  case ARM::VLD4dWB_fixed_Asm_16:
6729  case ARM::VLD4dWB_fixed_Asm_32:
6730  case ARM::VLD4qWB_fixed_Asm_8:
6731  case ARM::VLD4qWB_fixed_Asm_16:
6732  case ARM::VLD4qWB_fixed_Asm_32: {
6733    MCInst TmpInst;
6734    unsigned Spacing;
6735    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6736    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6737    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6738                                            Spacing));
6739    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6740                                            Spacing * 2));
6741    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6742                                            Spacing * 3));
6743    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6744    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6745    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6746    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6747    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6748    TmpInst.addOperand(Inst.getOperand(4));
6749    Inst = TmpInst;
6750    return true;
6751  }
6752
6753  case ARM::VLD4dWB_register_Asm_8:
6754  case ARM::VLD4dWB_register_Asm_16:
6755  case ARM::VLD4dWB_register_Asm_32:
6756  case ARM::VLD4qWB_register_Asm_8:
6757  case ARM::VLD4qWB_register_Asm_16:
6758  case ARM::VLD4qWB_register_Asm_32: {
6759    MCInst TmpInst;
6760    unsigned Spacing;
6761    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6762    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6763    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6764                                            Spacing));
6765    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6766                                            Spacing * 2));
6767    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6768                                            Spacing * 3));
6769    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6770    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6771    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6772    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6773    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6774    TmpInst.addOperand(Inst.getOperand(5));
6775    Inst = TmpInst;
6776    return true;
6777  }
6778
6779  // VST3 multiple 3-element structure instructions.
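  // For example, 'vst3.8 {d0, d1, d2}, [r0]' selects the single-spaced form
  // and 'vst3.8 {d0, d2, d4}, [r0]' the double-spaced one.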
6780  case ARM::VST3dAsm_8:
6781  case ARM::VST3dAsm_16:
6782  case ARM::VST3dAsm_32:
6783  case ARM::VST3qAsm_8:
6784  case ARM::VST3qAsm_16:
6785  case ARM::VST3qAsm_32: {
6786    MCInst TmpInst;
6787    unsigned Spacing;
6788    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6789    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6790    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6791    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6792    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6793                                            Spacing));
6794    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6795                                            Spacing * 2));
6796    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6797    TmpInst.addOperand(Inst.getOperand(4));
6798    Inst = TmpInst;
6799    return true;
6800  }
6801
6802  case ARM::VST3dWB_fixed_Asm_8:
6803  case ARM::VST3dWB_fixed_Asm_16:
6804  case ARM::VST3dWB_fixed_Asm_32:
6805  case ARM::VST3qWB_fixed_Asm_8:
6806  case ARM::VST3qWB_fixed_Asm_16:
6807  case ARM::VST3qWB_fixed_Asm_32: {
6808    MCInst TmpInst;
6809    unsigned Spacing;
6810    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6811    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6812    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6813    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6814    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6815    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6816    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6817                                            Spacing));
6818    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6819                                            Spacing * 2));
6820    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6821    TmpInst.addOperand(Inst.getOperand(4));
6822    Inst = TmpInst;
6823    return true;
6824  }
6825
6826  case ARM::VST3dWB_register_Asm_8:
6827  case ARM::VST3dWB_register_Asm_16:
6828  case ARM::VST3dWB_register_Asm_32:
6829  case ARM::VST3qWB_register_Asm_8:
6830  case ARM::VST3qWB_register_Asm_16:
6831  case ARM::VST3qWB_register_Asm_32: {
6832    MCInst TmpInst;
6833    unsigned Spacing;
6834    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6835    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6836    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6837    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6838    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6839    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6840    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6841                                            Spacing));
6842    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6843                                            Spacing * 2));
6844    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6845    TmpInst.addOperand(Inst.getOperand(5));
6846    Inst = TmpInst;
6847    return true;
6848  }
6849
6850  // VST4 multiple 4-element structure instructions.
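  // For example, 'vst4.8 {d0, d1, d2, d3}, [r0]' (single-spaced) or
  // 'vst4.8 {d0, d2, d4, d6}, [r0]' (double-spaced).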
6851  case ARM::VST4dAsm_8:
6852  case ARM::VST4dAsm_16:
6853  case ARM::VST4dAsm_32:
6854  case ARM::VST4qAsm_8:
6855  case ARM::VST4qAsm_16:
6856  case ARM::VST4qAsm_32: {
6857    MCInst TmpInst;
6858    unsigned Spacing;
6859    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6860    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6861    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6862    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6863    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6864                                            Spacing));
6865    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6866                                            Spacing * 2));
6867    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6868                                            Spacing * 3));
6869    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6870    TmpInst.addOperand(Inst.getOperand(4));
6871    Inst = TmpInst;
6872    return true;
6873  }
6874
6875  case ARM::VST4dWB_fixed_Asm_8:
6876  case ARM::VST4dWB_fixed_Asm_16:
6877  case ARM::VST4dWB_fixed_Asm_32:
6878  case ARM::VST4qWB_fixed_Asm_8:
6879  case ARM::VST4qWB_fixed_Asm_16:
6880  case ARM::VST4qWB_fixed_Asm_32: {
6881    MCInst TmpInst;
6882    unsigned Spacing;
6883    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6884    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6885    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6886    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6887    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6888    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6889    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6890                                            Spacing));
6891    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6892                                            Spacing * 2));
6893    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6894                                            Spacing * 3));
6895    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6896    TmpInst.addOperand(Inst.getOperand(4));
6897    Inst = TmpInst;
6898    return true;
6899  }
6900
6901  case ARM::VST4dWB_register_Asm_8:
6902  case ARM::VST4dWB_register_Asm_16:
6903  case ARM::VST4dWB_register_Asm_32:
6904  case ARM::VST4qWB_register_Asm_8:
6905  case ARM::VST4qWB_register_Asm_16:
6906  case ARM::VST4qWB_register_Asm_32: {
6907    MCInst TmpInst;
6908    unsigned Spacing;
6909    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6910    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6911    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6912    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6913    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6914    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6915    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6916                                            Spacing));
6917    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6918                                            Spacing * 2));
6919    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6920                                            Spacing * 3));
6921    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6922    TmpInst.addOperand(Inst.getOperand(5));
6923    Inst = TmpInst;
6924    return true;
6925  }
6926
6927  // Handle encoding choice for the shift-immediate instructions.
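  // For example, 'lsls r2, r2, #3' outside an IT block can use the 16-bit
  // tLSLri encoding; an explicit '.w' suffix keeps the 32-bit form.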
6928  case ARM::t2LSLri:
6929  case ARM::t2LSRri:
6930  case ARM::t2ASRri: {
6931    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6932        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6933        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6934        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6935         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6936      unsigned NewOpc;
6937      switch (Inst.getOpcode()) {
6938      default: llvm_unreachable("unexpected opcode");
6939      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6940      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6941      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6942      }
6943      // The Thumb1 operands aren't in the same order. Awesome, eh?
6944      MCInst TmpInst;
6945      TmpInst.setOpcode(NewOpc);
6946      TmpInst.addOperand(Inst.getOperand(0));
6947      TmpInst.addOperand(Inst.getOperand(5));
6948      TmpInst.addOperand(Inst.getOperand(1));
6949      TmpInst.addOperand(Inst.getOperand(2));
6950      TmpInst.addOperand(Inst.getOperand(3));
6951      TmpInst.addOperand(Inst.getOperand(4));
6952      Inst = TmpInst;
6953      return true;
6954    }
6955    return false;
6956  }
6957
6958  // Handle the Thumb2 mode MOV complex aliases.
6959  case ARM::t2MOVsr:
6960  case ARM::t2MOVSsr: {
6961    // Which instruction to expand to depends on the CCOut operand and,
6962    // when the register operands are all low registers, on whether we're
6963    // in an IT block.
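    // For example, 'movs r0, r0, lsl r1' outside an IT block is emitted as
    // the 16-bit 'lsls r0, r1'.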
6964    bool isNarrow = false;
6965    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6966        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6967        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6968        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6969        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6970      isNarrow = true;
6971    MCInst TmpInst;
6972    unsigned newOpc;
6973    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6974    default: llvm_unreachable("unexpected opcode!");
6975    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6976    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6977    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6978    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6979    }
6980    TmpInst.setOpcode(newOpc);
6981    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6982    if (isNarrow)
6983      TmpInst.addOperand(MCOperand::CreateReg(
6984          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6985    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6986    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6987    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6988    TmpInst.addOperand(Inst.getOperand(5));
6989    if (!isNarrow)
6990      TmpInst.addOperand(MCOperand::CreateReg(
6991          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6992    Inst = TmpInst;
6993    return true;
6994  }
6995  case ARM::t2MOVsi:
6996  case ARM::t2MOVSsi: {
6997    // Which instruction to expand to depends on the CCOut operand and,
6998    // when the register operands are all low registers, on whether we're
6999    // in an IT block.
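    // For example, 'movs r0, r1, lsl #3' outside an IT block becomes the
    // 16-bit 'lsls r0, r1, #3'; an 'rrx' shift always selects t2RRX.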
7000    bool isNarrow = false;
7001    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7002        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7003        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
7004      isNarrow = true;
7005    MCInst TmpInst;
7006    unsigned newOpc;
7007    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
7008    default: llvm_unreachable("unexpected opcode!");
7009    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
7010    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
7011    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
7012    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
7013    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
7014    }
7015    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
7016    if (Amount == 32) Amount = 0;
7017    TmpInst.setOpcode(newOpc);
7018    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7019    if (isNarrow)
7020      TmpInst.addOperand(MCOperand::CreateReg(
7021          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7022    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7023    if (newOpc != ARM::t2RRX)
7024      TmpInst.addOperand(MCOperand::CreateImm(Amount));
7025    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7026    TmpInst.addOperand(Inst.getOperand(4));
7027    if (!isNarrow)
7028      TmpInst.addOperand(MCOperand::CreateReg(
7029          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
7030    Inst = TmpInst;
7031    return true;
7032  }
7033  // Handle the ARM mode MOV complex aliases.
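  // For example, 'asr r0, r1, r2' in ARM mode is really
  // 'mov r0, r1, asr r2' and is emitted as MOVsr.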
7034  case ARM::ASRr:
7035  case ARM::LSRr:
7036  case ARM::LSLr:
7037  case ARM::RORr: {
7038    ARM_AM::ShiftOpc ShiftTy;
7039    switch(Inst.getOpcode()) {
7040    default: llvm_unreachable("unexpected opcode!");
7041    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
7042    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
7043    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
7044    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
7045    }
7046    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
7047    MCInst TmpInst;
7048    TmpInst.setOpcode(ARM::MOVsr);
7049    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7050    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7051    TmpInst.addOperand(Inst.getOperand(2)); // Rm
7052    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7053    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7054    TmpInst.addOperand(Inst.getOperand(4));
7055    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7056    Inst = TmpInst;
7057    return true;
7058  }
7059  case ARM::ASRi:
7060  case ARM::LSRi:
7061  case ARM::LSLi:
7062  case ARM::RORi: {
7063    ARM_AM::ShiftOpc ShiftTy;
7064    switch(Inst.getOpcode()) {
7065    default: llvm_unreachable("unexpected opcode!");
7066    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
7067    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
7068    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
7069    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
7070    }
7071    // A shift by zero is a plain MOVr, not a MOVsi.
7072    unsigned Amt = Inst.getOperand(2).getImm();
7073    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
7074    // A shift by 32 should be encoded as 0 when permitted
7075    if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
7076      Amt = 0;
7077    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
7078    MCInst TmpInst;
7079    TmpInst.setOpcode(Opc);
7080    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7081    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7082    if (Opc == ARM::MOVsi)
7083      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7084    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7085    TmpInst.addOperand(Inst.getOperand(4));
7086    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7087    Inst = TmpInst;
7088    return true;
7089  }
7090  case ARM::RRXi: {
7091    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
7092    MCInst TmpInst;
7093    TmpInst.setOpcode(ARM::MOVsi);
7094    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7095    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7096    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7097    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7098    TmpInst.addOperand(Inst.getOperand(3));
7099    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
7100    Inst = TmpInst;
7101    return true;
7102  }
7103  case ARM::t2LDMIA_UPD: {
7104    // If this is a load of a single register, then we should use
7105    // a post-indexed LDR instruction instead, per the ARM ARM.
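    // For example, 'ldmia.w r0!, {r1}' is emitted as 'ldr.w r1, [r0], #4'.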
7106    if (Inst.getNumOperands() != 5)
7107      return false;
7108    MCInst TmpInst;
7109    TmpInst.setOpcode(ARM::t2LDR_POST);
7110    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7111    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7112    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7113    TmpInst.addOperand(MCOperand::CreateImm(4));
7114    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7115    TmpInst.addOperand(Inst.getOperand(3));
7116    Inst = TmpInst;
7117    return true;
7118  }
7119  case ARM::t2STMDB_UPD: {
7120    // If this is a store of a single register, then we should use
7121    // a pre-indexed STR instruction instead, per the ARM ARM.
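    // For example, 'stmdb.w r0!, {r1}' is emitted as 'str.w r1, [r0, #-4]!'.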
7122    if (Inst.getNumOperands() != 5)
7123      return false;
7124    MCInst TmpInst;
7125    TmpInst.setOpcode(ARM::t2STR_PRE);
7126    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7127    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7128    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7129    TmpInst.addOperand(MCOperand::CreateImm(-4));
7130    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7131    TmpInst.addOperand(Inst.getOperand(3));
7132    Inst = TmpInst;
7133    return true;
7134  }
7135  case ARM::LDMIA_UPD:
7136    // If this is a load of a single register via a 'pop', then we should use
7137    // a post-indexed LDR instruction instead, per the ARM ARM.
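    // For example, ARM-mode 'pop {r3}' is emitted as 'ldr r3, [sp], #4'.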
7138    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
7139        Inst.getNumOperands() == 5) {
7140      MCInst TmpInst;
7141      TmpInst.setOpcode(ARM::LDR_POST_IMM);
7142      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7143      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7144      TmpInst.addOperand(Inst.getOperand(1)); // Rn
7145      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
7146      TmpInst.addOperand(MCOperand::CreateImm(4));
7147      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7148      TmpInst.addOperand(Inst.getOperand(3));
7149      Inst = TmpInst;
7150      return true;
7151    }
7152    break;
7153  case ARM::STMDB_UPD:
7154    // If this is a store of a single register via a 'push', then we should use
7155    // a pre-indexed STR instruction instead, per the ARM ARM.
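    // For example, ARM-mode 'push {r3}' is emitted as 'str r3, [sp, #-4]!'.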
7156    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
7157        Inst.getNumOperands() == 5) {
7158      MCInst TmpInst;
7159      TmpInst.setOpcode(ARM::STR_PRE_IMM);
7160      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7161      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7162      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7163      TmpInst.addOperand(MCOperand::CreateImm(-4));
7164      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7165      TmpInst.addOperand(Inst.getOperand(3));
7166      Inst = TmpInst;
7167    }
7168    break;
7169  case ARM::t2ADDri12:
7170    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7171    // mnemonic was used (not "addw"), encoding T3 is preferred.
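    // For example, plain 'add r0, r1, #12' prefers t2ADDri, while
    // 'addw r0, r1, #12' keeps the 12-bit immediate encoding.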
7172    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
7173        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7174      break;
7175    Inst.setOpcode(ARM::t2ADDri);
7176    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7177    break;
7178  case ARM::t2SUBri12:
7179    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7180    // mnemonic was used (not "subw"), encoding T3 is preferred.
7181    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
7182        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7183      break;
7184    Inst.setOpcode(ARM::t2SUBri);
7185    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7186    break;
7187  case ARM::tADDi8:
7188    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7189    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7190    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7191    // to encoding T1 if <Rd> is omitted."
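    // For example, 'adds r1, r1, #3' (Rd given) selects tADDi3 (T1), while
    // 'adds r1, #3' stays tADDi8 (T2).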
7192    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7193      Inst.setOpcode(ARM::tADDi3);
7194      return true;
7195    }
7196    break;
7197  case ARM::tSUBi8:
7198    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
7199    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7200    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7201    // to encoding T1 if <Rd> is omitted."
7202    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7203      Inst.setOpcode(ARM::tSUBi3);
7204      return true;
7205    }
7206    break;
7207  case ARM::t2ADDri:
7208  case ARM::t2SUBri: {
7209    // If the destination and first source operand are the same, and
7210    // the flags are compatible with the current IT status, use encoding T2
7211    // instead of T3. For compatibility with the system 'as'. Make sure the
7212    // wide encoding wasn't explicit.
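    // For example, 'adds r0, r0, #200' outside an IT block narrows to
    // tADDi8; with an explicit '.w' the T3 encoding is kept.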
7213    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7214        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7215        (unsigned)Inst.getOperand(2).getImm() > 255 ||
7216        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7217        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7218        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7219         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7220      break;
7221    MCInst TmpInst;
7222    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7223                      ARM::tADDi8 : ARM::tSUBi8);
7224    TmpInst.addOperand(Inst.getOperand(0));
7225    TmpInst.addOperand(Inst.getOperand(5));
7226    TmpInst.addOperand(Inst.getOperand(0));
7227    TmpInst.addOperand(Inst.getOperand(2));
7228    TmpInst.addOperand(Inst.getOperand(3));
7229    TmpInst.addOperand(Inst.getOperand(4));
7230    Inst = TmpInst;
7231    return true;
7232  }
7233  case ARM::t2ADDrr: {
7234    // If the destination and first source operand are the same, and
7235    // there's no setting of the flags, use encoding T2 instead of T3.
7236    // Note that this is only for ADD, not SUB. This mirrors the system
7237    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
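    // For example, 'add r0, r0, r9' (no flag setting) uses the 16-bit
    // tADDhirr encoding.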
7238    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7239        Inst.getOperand(5).getReg() != 0 ||
7240        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7241         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7242      break;
7243    MCInst TmpInst;
7244    TmpInst.setOpcode(ARM::tADDhirr);
7245    TmpInst.addOperand(Inst.getOperand(0));
7246    TmpInst.addOperand(Inst.getOperand(0));
7247    TmpInst.addOperand(Inst.getOperand(2));
7248    TmpInst.addOperand(Inst.getOperand(3));
7249    TmpInst.addOperand(Inst.getOperand(4));
7250    Inst = TmpInst;
7251    return true;
7252  }
7253  case ARM::tADDrSP: {
7254    // If the non-SP source operand and the destination operand are not the
7255    // same, we need to use the 32-bit encoding if it's available.
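    // For example, 'add r1, sp, r2' needs the 32-bit t2ADDrr, while
    // 'add r1, sp, r1' can keep the 16-bit form.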
7256    if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7257      Inst.setOpcode(ARM::t2ADDrr);
7258      Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7259      return true;
7260    }
7261    break;
7262  }
7263  case ARM::tB:
7264    // A Thumb conditional branch outside of an IT block is a tBcc.
7265    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7266      Inst.setOpcode(ARM::tBcc);
7267      return true;
7268    }
7269    break;
7270  case ARM::t2B:
7271    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7272    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7273      Inst.setOpcode(ARM::t2Bcc);
7274      return true;
7275    }
7276    break;
7277  case ARM::t2Bcc:
7278    // If the conditional is AL or we're in an IT block, we really want t2B.
7279    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7280      Inst.setOpcode(ARM::t2B);
7281      return true;
7282    }
7283    break;
7284  case ARM::tBcc:
7285    // If the conditional is AL, we really want tB.
7286    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7287      Inst.setOpcode(ARM::tB);
7288      return true;
7289    }
7290    break;
7291  case ARM::tLDMIA: {
7292    // If the register list contains any high registers, or if the writeback
7293    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7294    // instead if we're in Thumb2. Otherwise, this should have generated
7295    // an error in validateInstruction().
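    // For example, 'ldmia r0, {r1, r2}' (base not in the list, no writeback)
    // and any list containing a high register require the 32-bit forms.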
7296    unsigned Rn = Inst.getOperand(0).getReg();
7297    bool hasWritebackToken =
7298      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7299       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7300    bool listContainsBase;
7301    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7302        (!listContainsBase && !hasWritebackToken) ||
7303        (listContainsBase && hasWritebackToken)) {
7304      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7305      assert (isThumbTwo());
7306      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7307      // If we're switching to the updating version, we need to insert
7308      // the writeback tied operand.
7309      if (hasWritebackToken)
7310        Inst.insert(Inst.begin(),
7311                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7312      return true;
7313    }
7314    break;
7315  }
7316  case ARM::tSTMIA_UPD: {
7317    // If the register list contains any high registers, we need to use
7318    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7319    // should have generated an error in validateInstruction().
7320    unsigned Rn = Inst.getOperand(0).getReg();
7321    bool listContainsBase;
7322    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7323      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7324      assert (isThumbTwo());
7325      Inst.setOpcode(ARM::t2STMIA_UPD);
7326      return true;
7327    }
7328    break;
7329  }
7330  case ARM::tPOP: {
7331    bool listContainsBase;
7332    // If the register list contains any high registers, we need to use
7333    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7334    // should have generated an error in validateInstruction().
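    // For example, 'pop {r0, r8}' becomes 'ldmia.w sp!, {r0, r8}'.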
7335    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7336      return false;
7337    assert (isThumbTwo());
7338    Inst.setOpcode(ARM::t2LDMIA_UPD);
7339    // Add the base register and writeback operands.
7340    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7341    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7342    return true;
7343  }
7344  case ARM::tPUSH: {
7345    bool listContainsBase;
7346    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7347      return false;
7348    assert (isThumbTwo());
7349    Inst.setOpcode(ARM::t2STMDB_UPD);
7350    // Add the base register and writeback operands.
7351    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7352    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7353    return true;
7354  }
7355  case ARM::t2MOVi: {
7356    // If we can use the 16-bit encoding and the user didn't explicitly
7357    // request the 32-bit variant, transform it here.
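    // For example, 'movs r0, #42' outside an IT block becomes the 16-bit
    // tMOVi8.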
7358    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7359        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7360        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7361         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7362        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7363        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7364         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7365      // The operands aren't in the same order for tMOVi8...
7366      MCInst TmpInst;
7367      TmpInst.setOpcode(ARM::tMOVi8);
7368      TmpInst.addOperand(Inst.getOperand(0));
7369      TmpInst.addOperand(Inst.getOperand(4));
7370      TmpInst.addOperand(Inst.getOperand(1));
7371      TmpInst.addOperand(Inst.getOperand(2));
7372      TmpInst.addOperand(Inst.getOperand(3));
7373      Inst = TmpInst;
7374      return true;
7375    }
7376    break;
7377  }
7378  case ARM::t2MOVr: {
7379    // If we can use the 16-bit encoding and the user didn't explicitly
7380    // request the 32-bit variant, transform it here.
7381    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7382        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7383        Inst.getOperand(2).getImm() == ARMCC::AL &&
7384        Inst.getOperand(4).getReg() == ARM::CPSR &&
7385        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7386         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7387      // The operands aren't the same for tMOV[S]r... (no cc_out)
7388      MCInst TmpInst;
7389      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7390      TmpInst.addOperand(Inst.getOperand(0));
7391      TmpInst.addOperand(Inst.getOperand(1));
7392      TmpInst.addOperand(Inst.getOperand(2));
7393      TmpInst.addOperand(Inst.getOperand(3));
7394      Inst = TmpInst;
7395      return true;
7396    }
7397    break;
7398  }
7399  case ARM::t2SXTH:
7400  case ARM::t2SXTB:
7401  case ARM::t2UXTH:
7402  case ARM::t2UXTB: {
7403    // If we can use the 16-bit encoding and the user didn't explicitly
7404    // request the 32-bit variant, transform it here.
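    // For example, 'uxtb r0, r1' (no rotation) becomes the 16-bit tUXTB,
    // while 'uxtb r0, r1, ror #8' stays 32-bit.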
7405    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7406        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7407        Inst.getOperand(2).getImm() == 0 &&
7408        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7409         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7410      unsigned NewOpc;
7411      switch (Inst.getOpcode()) {
7412      default: llvm_unreachable("Illegal opcode!");
7413      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7414      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7415      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7416      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7417      }
7418      // The operands aren't the same for thumb1 (no rotate operand).
7419      MCInst TmpInst;
7420      TmpInst.setOpcode(NewOpc);
7421      TmpInst.addOperand(Inst.getOperand(0));
7422      TmpInst.addOperand(Inst.getOperand(1));
7423      TmpInst.addOperand(Inst.getOperand(3));
7424      TmpInst.addOperand(Inst.getOperand(4));
7425      Inst = TmpInst;
7426      return true;
7427    }
7428    break;
7429  }
7430  case ARM::MOVsi: {
7431    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7432    // rrx shifts and asr/lsr of #32 is encoded as 0
7433    if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7434      return false;
7435    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7436      // Shifting by zero is accepted as a vanilla 'MOVr'
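      // For example, 'mov r0, r1, lsl #0' is emitted as a plain 'mov r0, r1'.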
7437      MCInst TmpInst;
7438      TmpInst.setOpcode(ARM::MOVr);
7439      TmpInst.addOperand(Inst.getOperand(0));
7440      TmpInst.addOperand(Inst.getOperand(1));
7441      TmpInst.addOperand(Inst.getOperand(3));
7442      TmpInst.addOperand(Inst.getOperand(4));
7443      TmpInst.addOperand(Inst.getOperand(5));
7444      Inst = TmpInst;
7445      return true;
7446    }
7447    return false;
7448  }
7449  case ARM::ANDrsi:
7450  case ARM::ORRrsi:
7451  case ARM::EORrsi:
7452  case ARM::BICrsi:
7453  case ARM::SUBrsi:
7454  case ARM::ADDrsi: {
7455    unsigned newOpc;
7456    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7457    if (SOpc == ARM_AM::rrx) return false;
7458    switch (Inst.getOpcode()) {
7459    default: llvm_unreachable("unexpected opcode!");
7460    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7461    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7462    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7463    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7464    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7465    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7466    }
7467    // If the shift is by zero, use the non-shifted instruction definition.
7468    // The exception is for right shifts, where 0 == 32
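    // For example, 'orr r0, r1, r2, lsl #0' is emitted as 'orr r0, r1, r2',
    // but for lsr/asr an offset of 0 means a shift of 32, so the shifted
    // form is kept.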
7469    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7470        !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7471      MCInst TmpInst;
7472      TmpInst.setOpcode(newOpc);
7473      TmpInst.addOperand(Inst.getOperand(0));
7474      TmpInst.addOperand(Inst.getOperand(1));
7475      TmpInst.addOperand(Inst.getOperand(2));
7476      TmpInst.addOperand(Inst.getOperand(4));
7477      TmpInst.addOperand(Inst.getOperand(5));
7478      TmpInst.addOperand(Inst.getOperand(6));
7479      Inst = TmpInst;
7480      return true;
7481    }
7482    return false;
7483  }
7484  case ARM::ITasm:
7485  case ARM::t2IT: {
7486    // In the hardware encoding, a mask bit for each condition after the
7487    // first means 't' when it equals the low bit of the condition code.
7488    // The parser always builds the mask with '1' meaning 't', so XOR-toggle
7489    // the bits if the low bit of the condition code is zero.
7490    MCOperand &MO = Inst.getOperand(1);
7491    unsigned Mask = MO.getImm();
7492    unsigned OrigMask = Mask;
7493    unsigned TZ = countTrailingZeros(Mask);
7494    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7495      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7496      Mask ^= (0xE << TZ) & 0xF;
7497    }
7498    MO.setImm(Mask);
7499
7500    // Set up the IT block state according to the IT instruction we just
7501    // matched.
7502    assert(!inITBlock() && "nested IT blocks?!");
7503    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7504    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7505    ITState.CurPosition = 0;
7506    ITState.FirstCond = true;
7507    break;
7508  }
7509  case ARM::t2LSLrr:
7510  case ARM::t2LSRrr:
7511  case ARM::t2ASRrr:
7512  case ARM::t2SBCrr:
7513  case ARM::t2RORrr:
7514  case ARM::t2BICrr:
7515  {
7516    // Assemblers should use the narrow encodings of these instructions when permissible.
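    // For example, 'lsls r0, r0, r1' outside an IT block narrows to the
    // 16-bit tLSLrr, printed as 'lsls r0, r1'.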
7517    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7518         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7519        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7520        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7521         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7522        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7523         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7524      unsigned NewOpc;
7525      switch (Inst.getOpcode()) {
7526        default: llvm_unreachable("unexpected opcode");
7527        case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7528        case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7529        case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7530        case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7531        case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7532        case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7533      }
7534      MCInst TmpInst;
7535      TmpInst.setOpcode(NewOpc);
7536      TmpInst.addOperand(Inst.getOperand(0));
7537      TmpInst.addOperand(Inst.getOperand(5));
7538      TmpInst.addOperand(Inst.getOperand(1));
7539      TmpInst.addOperand(Inst.getOperand(2));
7540      TmpInst.addOperand(Inst.getOperand(3));
7541      TmpInst.addOperand(Inst.getOperand(4));
7542      Inst = TmpInst;
7543      return true;
7544    }
7545    return false;
7546  }
7547  case ARM::t2ANDrr:
7548  case ARM::t2EORrr:
7549  case ARM::t2ADCrr:
7550  case ARM::t2ORRrr:
7551  {
7552    // Assemblers should use the narrow encodings of these instructions when permissible.
7553    // These instructions are special in that they are commutable, so shorter encodings
7554    // are available more often.
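    // For example, 'ands r0, r1, r0' outside an IT block narrows to the
    // 16-bit tAND with the source operands commuted ('ands r0, r1').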
7555    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7556         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7557        (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7558         Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7559        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7560         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7561        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7562         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7563      unsigned NewOpc;
7564      switch (Inst.getOpcode()) {
7565        default: llvm_unreachable("unexpected opcode");
7566        case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7567        case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7568        case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7569        case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7570      }
7571      MCInst TmpInst;
7572      TmpInst.setOpcode(NewOpc);
7573      TmpInst.addOperand(Inst.getOperand(0));
7574      TmpInst.addOperand(Inst.getOperand(5));
7575      if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7576        TmpInst.addOperand(Inst.getOperand(1));
7577        TmpInst.addOperand(Inst.getOperand(2));
7578      } else {
7579        TmpInst.addOperand(Inst.getOperand(2));
7580        TmpInst.addOperand(Inst.getOperand(1));
7581      }
7582      TmpInst.addOperand(Inst.getOperand(3));
7583      TmpInst.addOperand(Inst.getOperand(4));
7584      Inst = TmpInst;
7585      return true;
7586    }
7587    return false;
7588  }
7589  }
7590  return false;
7591}
7592
7593unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7594  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7595  // suffix depending on whether they're in an IT block or not.
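  // For example, the 16-bit 'adds r2, r2, #1' is only usable outside an IT
  // block; inside one the non-flag-setting 'addeq r2, r2, #1' form is needed.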
7596  unsigned Opc = Inst.getOpcode();
7597  const MCInstrDesc &MCID = MII.get(Opc);
7598  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7599    assert(MCID.hasOptionalDef() &&
7600           "optionally flag setting instruction missing optional def operand");
7601    assert(MCID.NumOperands == Inst.getNumOperands() &&
7602           "operand count mismatch!");
7603    // Find the optional-def operand (cc_out).
7604    unsigned OpNo;
7605    for (OpNo = 0;
7606         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
7607         ++OpNo)
7608      ;
7609    // If we're parsing Thumb1, reject it completely.
7610    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7611      return Match_MnemonicFail;
7612    // If we're parsing Thumb2, which form is legal depends on whether we're
7613    // in an IT block.
7614    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7615        !inITBlock())
7616      return Match_RequiresITBlock;
7617    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7618        inITBlock())
7619      return Match_RequiresNotITBlock;
7620  }
7621  // Some high-register supporting Thumb1 encodings only allow both registers
7622  // to be from r0-r7 when in Thumb2.
7623  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7624           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7625           isARMLowRegister(Inst.getOperand(2).getReg()))
7626    return Match_RequiresThumb2;
7627  // Others only require ARMv6 or later.
7628  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7629           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7630           isARMLowRegister(Inst.getOperand(1).getReg()))
7631    return Match_RequiresV6;
7632  return Match_Success;
7633}
7634
7635static const char *getSubtargetFeatureName(unsigned Val);
7636bool ARMAsmParser::
7637MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
7638                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7639                        MCStreamer &Out, unsigned &ErrorInfo,
7640                        bool MatchingInlineAsm) {
7641  MCInst Inst;
7642  unsigned MatchResult;
7643
7644  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
7645                                     MatchingInlineAsm);
7646  switch (MatchResult) {
7647  default: break;
7648  case Match_Success:
7649    // Context sensitive operand constraints aren't handled by the matcher,
7650    // so check them here.
7651    if (validateInstruction(Inst, Operands)) {
7652      // Still progress the IT block, otherwise one wrong condition causes
7653      // nasty cascading errors.
7654      forwardITPosition();
7655      return true;
7656    }
7657
7658    { // processInstruction() updates inITBlock state, we need to save it away
7659      bool wasInITBlock = inITBlock();
7660
7661      // Some instructions need post-processing to, for example, tweak which
7662      // encoding is selected. Loop on it while changes happen so the
7663      // individual transformations can chain off each other. E.g.,
7664      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
7665      while (processInstruction(Inst, Operands))
7666        ;
7667
7668      // Only after the instruction is fully processed, we can validate it
7669      if (wasInITBlock && hasV8Ops() && isThumb() &&
7670          !isV8EligibleForIT(&Inst, 2)) {
7671        Warning(IDLoc, "deprecated instruction in IT block");
7672      }
7673    }
7674
7675    // Only move forward at the very end so that everything in validate
7676    // and process gets a consistent answer about whether we're in an IT
7677    // block.
7678    forwardITPosition();
7679
7680    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7681    // doesn't actually encode.
7682    if (Inst.getOpcode() == ARM::ITasm)
7683      return false;
7684
7685    Inst.setLoc(IDLoc);
7686    Out.EmitInstruction(Inst);
7687    return false;
7688  case Match_MissingFeature: {
7689    assert(ErrorInfo && "Unknown missing feature!");
7690    // Special case the error message for the very common case where only
7691    // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7692    std::string Msg = "instruction requires:";
7693    unsigned Mask = 1;
7694    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7695      if (ErrorInfo & Mask) {
7696        Msg += " ";
7697        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7698      }
7699      Mask <<= 1;
7700    }
7701    return Error(IDLoc, Msg);
7702  }
7703  case Match_InvalidOperand: {
7704    SMLoc ErrorLoc = IDLoc;
7705    if (ErrorInfo != ~0U) {
7706      if (ErrorInfo >= Operands.size())
7707        return Error(IDLoc, "too few operands for instruction");
7708
7709      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7710      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7711    }
7712
7713    return Error(ErrorLoc, "invalid operand for instruction");
7714  }
7715  case Match_MnemonicFail:
7716    return Error(IDLoc, "invalid instruction",
7717                 ((ARMOperand*)Operands[0])->getLocRange());
7718  case Match_RequiresNotITBlock:
7719    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7720  case Match_RequiresITBlock:
7721    return Error(IDLoc, "instruction only valid inside IT block");
7722  case Match_RequiresV6:
7723    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7724  case Match_RequiresThumb2:
7725    return Error(IDLoc, "instruction variant requires Thumb2");
7726  case Match_ImmRange0_15: {
7727    SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7728    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7729    return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
7730  }
7731  case Match_ImmRange0_239: {
7732    SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7733    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7734    return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
7735  }
7736  }
7737
7738  llvm_unreachable("Implement any new match types added!");
7739}
7740
7741/// ParseDirective parses the ARM-specific directives
7742bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7743  StringRef IDVal = DirectiveID.getIdentifier();
7744  if (IDVal == ".word")
7745    return parseDirectiveWord(4, DirectiveID.getLoc());
7746  else if (IDVal == ".thumb")
7747    return parseDirectiveThumb(DirectiveID.getLoc());
7748  else if (IDVal == ".arm")
7749    return parseDirectiveARM(DirectiveID.getLoc());
7750  else if (IDVal == ".thumb_func")
7751    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7752  else if (IDVal == ".code")
7753    return parseDirectiveCode(DirectiveID.getLoc());
7754  else if (IDVal == ".syntax")
7755    return parseDirectiveSyntax(DirectiveID.getLoc());
7756  else if (IDVal == ".unreq")
7757    return parseDirectiveUnreq(DirectiveID.getLoc());
7758  else if (IDVal == ".arch")
7759    return parseDirectiveArch(DirectiveID.getLoc());
7760  else if (IDVal == ".eabi_attribute")
7761    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7762  else if (IDVal == ".fnstart")
7763    return parseDirectiveFnStart(DirectiveID.getLoc());
7764  else if (IDVal == ".fnend")
7765    return parseDirectiveFnEnd(DirectiveID.getLoc());
7766  else if (IDVal == ".cantunwind")
7767    return parseDirectiveCantUnwind(DirectiveID.getLoc());
7768  else if (IDVal == ".personality")
7769    return parseDirectivePersonality(DirectiveID.getLoc());
7770  else if (IDVal == ".handlerdata")
7771    return parseDirectiveHandlerData(DirectiveID.getLoc());
7772  else if (IDVal == ".setfp")
7773    return parseDirectiveSetFP(DirectiveID.getLoc());
7774  else if (IDVal == ".pad")
7775    return parseDirectivePad(DirectiveID.getLoc());
7776  else if (IDVal == ".save")
7777    return parseDirectiveRegSave(DirectiveID.getLoc(), false);
7778  else if (IDVal == ".vsave")
7779    return parseDirectiveRegSave(DirectiveID.getLoc(), true);
7780  return true;
7781}
7782
7783/// parseDirectiveWord
7784///  ::= .word [ expression (, expression)* ]
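///  e.g. '.word 0x11223344, sym+4' emits two 4-byte values.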
7785bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7786  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7787    for (;;) {
7788      const MCExpr *Value;
7789      if (getParser().parseExpression(Value))
7790        return true;
7791
7792      getParser().getStreamer().EmitValue(Value, Size);
7793
7794      if (getLexer().is(AsmToken::EndOfStatement))
7795        break;
7796
7797      // FIXME: Improve diagnostic.
7798      if (getLexer().isNot(AsmToken::Comma))
7799        return Error(L, "unexpected token in directive");
7800      Parser.Lex();
7801    }
7802  }
7803
7804  Parser.Lex();
7805  return false;
7806}
7807
7808/// parseDirectiveThumb
7809///  ::= .thumb
7810bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7811  if (getLexer().isNot(AsmToken::EndOfStatement))
7812    return Error(L, "unexpected token in directive");
7813  Parser.Lex();
7814
7815  if (!hasThumb())
7816    return Error(L, "target does not support Thumb mode");
7817
7818  if (!isThumb())
7819    SwitchMode();
7820  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7821  return false;
7822}
7823
7824/// parseDirectiveARM
7825///  ::= .arm
7826bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7827  if (getLexer().isNot(AsmToken::EndOfStatement))
7828    return Error(L, "unexpected token in directive");
7829  Parser.Lex();
7830
7831  if (!hasARM())
7832    return Error(L, "target does not support ARM mode");
7833
7834  if (isThumb())
7835    SwitchMode();
7836  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7837  return false;
7838}
7839
7840/// parseDirectiveThumbFunc
7841///  ::= .thumb_func symbol_name
7842bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7843  const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo();
7844  bool isMachO = MAI->hasSubsectionsViaSymbols();
7845  StringRef Name;
7846  bool needFuncName = true;
7847
7848  // Darwin asm has an (optional) function name after the .thumb_func directive;
7849  // ELF doesn't.
7850  if (isMachO) {
7851    const AsmToken &Tok = Parser.getTok();
7852    if (Tok.isNot(AsmToken::EndOfStatement)) {
7853      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7854        return Error(L, "unexpected token in .thumb_func directive");
7855      Name = Tok.getIdentifier();
7856      Parser.Lex(); // Consume the identifier token.
7857      needFuncName = false;
7858    }
7859  }
7860
7861  if (getLexer().isNot(AsmToken::EndOfStatement))
7862    return Error(L, "unexpected token in directive");
7863
7864  // Eat the end of statement and any blank lines that follow.
7865  while (getLexer().is(AsmToken::EndOfStatement))
7866    Parser.Lex();
7867
7868  // FIXME: assuming function name will be the line following .thumb_func
7869  // We really should be checking the next symbol definition even if there's
7870  // stuff in between.
7871  if (needFuncName) {
7872    Name = Parser.getTok().getIdentifier();
7873  }
7874
7875  // Mark symbol as a thumb symbol.
7876  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7877  getParser().getStreamer().EmitThumbFunc(Func);
7878  return false;
7879}
7880
7881/// parseDirectiveSyntax
7882///  ::= .syntax unified | divided
7883bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7884  const AsmToken &Tok = Parser.getTok();
7885  if (Tok.isNot(AsmToken::Identifier))
7886    return Error(L, "unexpected token in .syntax directive");
7887  StringRef Mode = Tok.getString();
7888  if (Mode == "unified" || Mode == "UNIFIED")
7889    Parser.Lex();
7890  else if (Mode == "divided" || Mode == "DIVIDED")
7891    return Error(L, "'.syntax divided' arm assembly not supported");
7892  else
7893    return Error(L, "unrecognized syntax mode in .syntax directive");
7894
7895  if (getLexer().isNot(AsmToken::EndOfStatement))
7896    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7897  Parser.Lex();
7898
7899  // TODO tell the MC streamer the mode
7900  // getParser().getStreamer().Emit???();
7901  return false;
7902}
7903
7904/// parseDirectiveCode
7905///  ::= .code 16 | 32
7906bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7907  const AsmToken &Tok = Parser.getTok();
7908  if (Tok.isNot(AsmToken::Integer))
7909    return Error(L, "unexpected token in .code directive");
7910  int64_t Val = Parser.getTok().getIntVal();
7911  if (Val == 16)
7912    Parser.Lex();
7913  else if (Val == 32)
7914    Parser.Lex();
7915  else
7916    return Error(L, "invalid operand to .code directive");
7917
7918  if (getLexer().isNot(AsmToken::EndOfStatement))
7919    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7920  Parser.Lex();
7921
7922  if (Val == 16) {
7923    if (!hasThumb())
7924      return Error(L, "target does not support Thumb mode");
7925
7926    if (!isThumb())
7927      SwitchMode();
7928    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7929  } else {
7930    if (!hasARM())
7931      return Error(L, "target does not support ARM mode");
7932
7933    if (isThumb())
7934      SwitchMode();
7935    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7936  }
7937
7938  return false;
7939}
7940
7941/// parseDirectiveReq
7942///  ::= name .req registername
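///  e.g. 'fp .req r11' lets 'fp' be written wherever r11 is expected.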
7943bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7944  Parser.Lex(); // Eat the '.req' token.
7945  unsigned Reg;
7946  SMLoc SRegLoc, ERegLoc;
7947  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7948    Parser.eatToEndOfStatement();
7949    return Error(SRegLoc, "register name expected");
7950  }
7951
7952  // Shouldn't be anything else.
7953  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7954    Parser.eatToEndOfStatement();
7955    return Error(Parser.getTok().getLoc(),
7956                 "unexpected input in .req directive.");
7957  }
7958
7959  Parser.Lex(); // Consume the EndOfStatement
7960
7961  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7962    return Error(SRegLoc, "redefinition of '" + Name +
7963                          "' does not match original.");
7964
7965  return false;
7966}
7967
7968/// parseDirectiveUnreq
7969///  ::= .unreq registername
7970bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7971  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7972    Parser.eatToEndOfStatement();
7973    return Error(L, "unexpected input in .unreq directive.");
7974  }
7975  RegisterReqs.erase(Parser.getTok().getIdentifier());
7976  Parser.Lex(); // Eat the identifier.
7977  return false;
7978}
7979
7980/// parseDirectiveArch
7981///  ::= .arch token
7982bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7983  return true;
7984}
7985
7986/// parseDirectiveEabiAttr
7987///  ::= .eabi_attribute int, int
7988bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7989  return true;
7990}
7991
7992/// parseDirectiveFnStart
7993///  ::= .fnstart
7994bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
7995  if (FnStartLoc.isValid()) {
7996    Error(L, ".fnstart starts before the end of previous one");
7997    Error(FnStartLoc, "previous .fnstart starts here");
7998    return true;
7999  }
8000
8001  FnStartLoc = L;
8002  getTargetStreamer().emitFnStart();
8003  return false;
8004}
8005
8006/// parseDirectiveFnEnd
8007///  ::= .fnend
8008bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
8009  // Check the ordering of unwind directives
8010  if (!FnStartLoc.isValid())
8011    return Error(L, ".fnstart must precede .fnend directive");
8012
8013  // Reset the unwind directives parser state
8014  resetUnwindDirectiveParserState();
8015  getTargetStreamer().emitFnEnd();
8016  return false;
8017}
8018
8019/// parseDirectiveCantUnwind
8020///  ::= .cantunwind
8021bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
8022  // Check the ordering of unwind directives
8023  CantUnwindLoc = L;
8024  if (!FnStartLoc.isValid())
8025    return Error(L, ".fnstart must precede .cantunwind directive");
8026  if (HandlerDataLoc.isValid()) {
8027    Error(L, ".cantunwind can't be used with .handlerdata directive");
8028    Error(HandlerDataLoc, ".handlerdata was specified here");
8029    return true;
8030  }
8031  if (PersonalityLoc.isValid()) {
8032    Error(L, ".cantunwind can't be used with .personality directive");
8033    Error(PersonalityLoc, ".personality was specified here");
8034    return true;
8035  }
8036
8037  getTargetStreamer().emitCantUnwind();
8038  return false;
8039}
8040
8041/// parseDirectivePersonality
8042///  ::= .personality name
8043bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
8044  // Check the ordering of unwind directives
8045  PersonalityLoc = L;
8046  if (!FnStartLoc.isValid())
8047    return Error(L, ".fnstart must precede .personality directive");
8048  if (CantUnwindLoc.isValid()) {
8049    Error(L, ".personality can't be used with .cantunwind directive");
8050    Error(CantUnwindLoc, ".cantunwind was specified here");
8051    return true;
8052  }
8053  if (HandlerDataLoc.isValid()) {
8054    Error(L, ".personality must precede .handlerdata directive");
8055    Error(HandlerDataLoc, ".handlerdata was specified here");
8056    return true;
8057  }
8058
8059  // Parse the name of the personality routine
8060  if (Parser.getTok().isNot(AsmToken::Identifier)) {
8061    Parser.eatToEndOfStatement();
8062    return Error(L, "unexpected input in .personality directive.");
8063  }
8064  StringRef Name(Parser.getTok().getIdentifier());
8065  Parser.Lex();
8066
8067  MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
8068  getTargetStreamer().emitPersonality(PR);
8069  return false;
8070}
8071
8072/// parseDirectiveHandlerData
8073///  ::= .handlerdata
8074bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
8075  // Check the ordering of unwind directives
8076  HandlerDataLoc = L;
8077  if (!FnStartLoc.isValid())
8078    return Error(L, ".fnstart must precede .handlerdata directive");
8079  if (CantUnwindLoc.isValid()) {
8080    Error(L, ".handlerdata can't be used with .cantunwind directive");
8081    Error(CantUnwindLoc, ".cantunwind was specified here");
8082    return true;
8083  }
8084
8085  getTargetStreamer().emitHandlerData();
8086  return false;
8087}
8088
8089/// parseDirectiveSetFP
8090///  ::= .setfp fpreg, spreg [, offset]
8091bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
8092  // Check the ordering of unwind directives
8093  if (!FnStartLoc.isValid())
8094    return Error(L, ".fnstart must precede .setfp directive");
8095  if (HandlerDataLoc.isValid())
8096    return Error(L, ".setfp must precede .handlerdata directive");
8097
8098  // Parse fpreg
8099  SMLoc NewFPRegLoc = Parser.getTok().getLoc();
8100  int NewFPReg = tryParseRegister();
8101  if (NewFPReg == -1)
8102    return Error(NewFPRegLoc, "frame pointer register expected");
8103
8104  // Consume comma
8105  if (!Parser.getTok().is(AsmToken::Comma))
8106    return Error(Parser.getTok().getLoc(), "comma expected");
8107  Parser.Lex(); // skip comma
8108
8109  // Parse spreg
8110  SMLoc NewSPRegLoc = Parser.getTok().getLoc();
8111  int NewSPReg = tryParseRegister();
8112  if (NewSPReg == -1)
8113    return Error(NewSPRegLoc, "stack pointer register expected");
8114
8115  if (NewSPReg != ARM::SP && NewSPReg != FPReg)
8116    return Error(NewSPRegLoc,
8117                 "register should be either $sp or the frame pointer set by a previous .setfp");
8118
8119  // Update the frame pointer register
8120  FPReg = NewFPReg;
8121
8122  // Parse offset
8123  int64_t Offset = 0;
8124  if (Parser.getTok().is(AsmToken::Comma)) {
8125    Parser.Lex(); // skip comma
8126
8127    if (Parser.getTok().isNot(AsmToken::Hash) &&
8128        Parser.getTok().isNot(AsmToken::Dollar)) {
8129      return Error(Parser.getTok().getLoc(), "'#' expected");
8130    }
8131    Parser.Lex(); // skip hash token.
8132
8133    const MCExpr *OffsetExpr;
8134    SMLoc ExLoc = Parser.getTok().getLoc();
8135    SMLoc EndLoc;
8136    if (getParser().parseExpression(OffsetExpr, EndLoc))
8137      return Error(ExLoc, "malformed setfp offset");
8138    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8139    if (!CE)
8140      return Error(ExLoc, "setfp offset must be an immediate");
8141
8142    Offset = CE->getValue();
8143  }
8144
8145  getTargetStreamer().emitSetFP(static_cast<unsigned>(NewFPReg),
8146                                static_cast<unsigned>(NewSPReg), Offset);
8147  return false;
8148}
8149
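// Illustrative sketch (register choice and offset are examples): both forms
// below are accepted. The offset defaults to 0 when omitted, and the second
// register must be sp or the frame pointer recorded by an earlier .setfp.
//
//   .setfp  fp, sp, #8      @ getTargetStreamer().emitSetFP(FP, SP, 8)
//   .setfp  fp, sp          @ getTargetStreamer().emitSetFP(FP, SP, 0)
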
8150/// parseDirectivePad
8151///  ::= .pad offset
8152bool ARMAsmParser::parseDirectivePad(SMLoc L) {
8153  // Check the ordering of unwind directives
8154  if (!FnStartLoc.isValid())
8155    return Error(L, ".fnstart must precede .pad directive");
8156  if (HandlerDataLoc.isValid())
8157    return Error(L, ".pad must precede .handlerdata directive");
8158
8159  // Parse the offset
8160  if (Parser.getTok().isNot(AsmToken::Hash) &&
8161      Parser.getTok().isNot(AsmToken::Dollar)) {
8162    return Error(Parser.getTok().getLoc(), "'#' expected");
8163  }
8164  Parser.Lex(); // skip hash token.
8165
8166  const MCExpr *OffsetExpr;
8167  SMLoc ExLoc = Parser.getTok().getLoc();
8168  SMLoc EndLoc;
8169  if (getParser().parseExpression(OffsetExpr, EndLoc))
8170    return Error(ExLoc, "malformed pad offset");
8171  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8172  if (!CE)
8173    return Error(ExLoc, "pad offset must be an immediate");
8174
8175  getTargetStreamer().emitPad(CE->getValue());
8176  return false;
8177}
8178
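// Illustrative sketch (the amount is an example): .pad records an additional
// stack adjustment; the operand must fold to a constant immediate.
//
//   .pad    #16             @ getTargetStreamer().emitPad(16)
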
8179/// parseDirectiveRegSave
8180///  ::= .save  { registers }
8181///  ::= .vsave { registers }
8182bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
8183  // Check the ordering of unwind directives
8184  if (!FnStartLoc.isValid())
8185    return Error(L, ".fnstart must precede .save or .vsave directives");
8186  if (HandlerDataLoc.isValid())
8187    return Error(L, ".save or .vsave must precede .handlerdata directive");
8188
8189  // RAII object to make sure parsed operands are deleted.
8190  struct CleanupObject {
8191    SmallVector<MCParsedAsmOperand *, 1> Operands;
8192    ~CleanupObject() {
8193      for (unsigned I = 0, E = Operands.size(); I != E; ++I)
8194        delete Operands[I];
8195    }
8196  } CO;
8197
8198  // Parse the register list
8199  if (parseRegisterList(CO.Operands))
8200    return true;
8201  ARMOperand *Op = static_cast<ARMOperand*>(CO.Operands[0]);
8202  if (!IsVector && !Op->isRegList())
8203    return Error(L, ".save expects GPR registers");
8204  if (IsVector && !Op->isDPRRegList())
8205    return Error(L, ".vsave expects DPR registers");
8206
8207  getTargetStreamer().emitRegSave(Op->getRegList(), IsVector);
8208  return false;
8209}
8210
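// Illustrative sketch (register lists are examples): .save takes a GPR
// register list and .vsave a double-precision (DPR) register list; any other
// operand kind is rejected by the isRegList()/isDPRRegList() checks above.
//
//   .save   {r4-r7, lr}     @ emitRegSave(..., /*IsVector=*/false)
//   .vsave  {d8-d15}        @ emitRegSave(..., /*IsVector=*/true)
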
8211/// Force static initialization.
8212extern "C" void LLVMInitializeARMAsmParser() {
8213  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
8214  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
8215}
8216
8217#define GET_REGISTER_MATCHER
8218#define GET_SUBTARGET_FEATURE_NAME
8219#define GET_MATCHER_IMPLEMENTATION
8220#include "ARMGenAsmMatcher.inc"
8221
8222// Define this matcher function after the auto-generated include so we
8223// have the match class enum definitions.
8224unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
8225                                                  unsigned Kind) {
8226  ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
8227  // If the kind is a token for a literal immediate, check if our asm
8228  // operand matches. This is for InstAliases which have a fixed-value
8229  // immediate in the syntax.
8230  if (Kind == MCK__35_0 && Op->isImm()) {
8231    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
8232    if (!CE)
8233      return Match_InvalidOperand;
8234    if (CE->getValue() == 0)
8235      return Match_Success;
8236  }
8237  return Match_InvalidOperand;
8238}
8239
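// Note (illustrative, not from the original source): MCK__35_0 is the
// auto-generated match class for the literal token "#0" ('#' is ASCII 35),
// which appears in InstAlias syntax strings that carry a fixed zero
// immediate. The hook above lets an operand the user wrote as an immediate
// expression match that token class, but only when it folds to the
// constant 0.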