ARMAsmParser.cpp revision b8768dc32df0bf3edfa2777cdef57bb066e54344
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registered via the .req directive.
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // first instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
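  // Illustrative note (annotation, not part of the original source): a three
  // instruction IT block has one trailing zero in its mask, so
  // 4 - trailingzeroes(mask) == 3, and forwardITPosition() marks the block
  // done once CurPosition has stepped past its last instruction.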
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  bool Warning(SMLoc L, const Twine &Msg,
86               ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
87    return Parser.Warning(L, Msg, Ranges);
88  }
89  bool Error(SMLoc L, const Twine &Msg,
90             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
91    return Parser.Error(L, Msg, Ranges);
92  }
93
94  int tryParseRegister();
95  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
96  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
97  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
98  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
99  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
100  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
101  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
102                              unsigned &ShiftAmount);
103  bool parseDirectiveWord(unsigned Size, SMLoc L);
104  bool parseDirectiveThumb(SMLoc L);
105  bool parseDirectiveARM(SMLoc L);
106  bool parseDirectiveThumbFunc(SMLoc L);
107  bool parseDirectiveCode(SMLoc L);
108  bool parseDirectiveSyntax(SMLoc L);
109  bool parseDirectiveReq(StringRef Name, SMLoc L);
110  bool parseDirectiveUnreq(SMLoc L);
111  bool parseDirectiveArch(SMLoc L);
112  bool parseDirectiveEabiAttr(SMLoc L);
113
114  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
115                          bool &CarrySetting, unsigned &ProcessorIMod,
116                          StringRef &ITMask);
117  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
118                             bool &CanAcceptPredicationCode);
119
120  bool isThumb() const {
121    // FIXME: Can tablegen auto-generate this?
122    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
123  }
124  bool isThumbOne() const {
125    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
126  }
127  bool isThumbTwo() const {
128    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
129  }
130  bool hasV6Ops() const {
131    return STI.getFeatureBits() & ARM::HasV6Ops;
132  }
133  bool hasV7Ops() const {
134    return STI.getFeatureBits() & ARM::HasV7Ops;
135  }
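  // SwitchMode() flips the ModeThumb feature bit and recomputes the
  // available-feature mask so subsequent matching uses the other ISA.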
136  void SwitchMode() {
137    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
138    setAvailableFeatures(FB);
139  }
140  bool isMClass() const {
141    return STI.getFeatureBits() & ARM::FeatureMClass;
142  }
143
144  /// @name Auto-generated Match Functions
145  /// {
146
147#define GET_ASSEMBLER_HEADER
148#include "ARMGenAsmMatcher.inc"
149
150  /// }
151
152  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseCoprocNumOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseCoprocRegOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseCoprocOptionOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parseMemBarrierOptOperand(
160    SmallVectorImpl<MCParsedAsmOperand*>&);
161  OperandMatchResultTy parseProcIFlagsOperand(
162    SmallVectorImpl<MCParsedAsmOperand*>&);
163  OperandMatchResultTy parseMSRMaskOperand(
164    SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
166                                   StringRef Op, int Low, int High);
167  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
168    return parsePKHImm(O, "lsl", 0, 31);
169  }
170  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
171    return parsePKHImm(O, "asr", 1, 32);
172  }
173  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
176  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
177  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
178  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
179  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
180  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
181  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
182
183  // Asm Match Converter Methods
184  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
185                    const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
187                    const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
197                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
199                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
201                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
205                             const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
207                             const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
209                             const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
211                  const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
213                  const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
215                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
217                        const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
219                     const SmallVectorImpl<MCParsedAsmOperand*> &);
220  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
221                        const SmallVectorImpl<MCParsedAsmOperand*> &);
222  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
223                     const SmallVectorImpl<MCParsedAsmOperand*> &);
224  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
225                        const SmallVectorImpl<MCParsedAsmOperand*> &);
226
227  bool validateInstruction(MCInst &Inst,
228                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
229  bool processInstruction(MCInst &Inst,
230                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
231  bool shouldOmitCCOutOperand(StringRef Mnemonic,
232                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
233
234public:
235  enum ARMMatchResultTy {
236    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
237    Match_RequiresNotITBlock,
238    Match_RequiresV6,
239    Match_RequiresThumb2
240  };
241
242  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
243    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
244    MCAsmParserExtension::Initialize(_Parser);
245
246    // Cache the MCRegisterInfo.
247    MRI = &getContext().getRegisterInfo();
248
249    // Initialize the set of available features.
250    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
251
252    // Not in an ITBlock to start with.
253    ITState.CurPosition = ~0U;
254  }
255
256  // Implementation of the MCTargetAsmParser interface:
257  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
258  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
259                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
260  bool ParseDirective(AsmToken DirectiveID);
261
262  unsigned checkTargetMatchPredicate(MCInst &Inst);
263
264  bool MatchAndEmitInstruction(SMLoc IDLoc,
265                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
266                               MCStreamer &Out);
267};
268} // end anonymous namespace
269
270namespace {
271
272/// ARMOperand - Instances of this class represent a parsed ARM machine
273/// instruction.
274class ARMOperand : public MCParsedAsmOperand {
275  enum KindTy {
276    k_CondCode,
277    k_CCOut,
278    k_ITCondMask,
279    k_CoprocNum,
280    k_CoprocReg,
281    k_CoprocOption,
282    k_Immediate,
283    k_MemBarrierOpt,
284    k_Memory,
285    k_PostIndexRegister,
286    k_MSRMask,
287    k_ProcIFlags,
288    k_VectorIndex,
289    k_Register,
290    k_RegisterList,
291    k_DPRRegisterList,
292    k_SPRRegisterList,
293    k_VectorList,
294    k_VectorListAllLanes,
295    k_VectorListIndexed,
296    k_ShiftedRegister,
297    k_ShiftedImmediate,
298    k_ShifterImmediate,
299    k_RotateImmediate,
300    k_BitfieldDescriptor,
301    k_Token
302  } Kind;
303
304  SMLoc StartLoc, EndLoc;
305  SmallVector<unsigned, 8> Registers;
306
307  union {
308    struct {
309      ARMCC::CondCodes Val;
310    } CC;
311
312    struct {
313      unsigned Val;
314    } Cop;
315
316    struct {
317      unsigned Val;
318    } CoprocOption;
319
320    struct {
321      unsigned Mask:4;
322    } ITMask;
323
324    struct {
325      ARM_MB::MemBOpt Val;
326    } MBOpt;
327
328    struct {
329      ARM_PROC::IFlags Val;
330    } IFlags;
331
332    struct {
333      unsigned Val;
334    } MMask;
335
336    struct {
337      const char *Data;
338      unsigned Length;
339    } Tok;
340
341    struct {
342      unsigned RegNum;
343    } Reg;
344
345    // A vector register list is a sequential list of 1 to 4 registers.
346    struct {
347      unsigned RegNum;
348      unsigned Count;
349      unsigned LaneIndex;
350      bool isDoubleSpaced;
351    } VectorList;
352
353    struct {
354      unsigned Val;
355    } VectorIndex;
356
357    struct {
358      const MCExpr *Val;
359    } Imm;
360
361    /// Combined record for all forms of ARM address expressions.
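    /// Illustrative mapping (annotation, not part of the original source):
    /// "[r0, #4]" gives BaseRegNum = R0 with OffsetImm = 4; "[r1, r2, lsl #2]"
    /// gives BaseRegNum = R1, OffsetRegNum = R2, ShiftType = lsl, ShiftImm = 2;
    /// a 128-bit alignment specifier is recorded as Alignment = 16 (bytes).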
362    struct {
363      unsigned BaseRegNum;
364      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
365      // was specified.
366      const MCConstantExpr *OffsetImm;  // Offset immediate value
367      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
368      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
369      unsigned ShiftImm;        // shift for OffsetReg.
370      unsigned Alignment;       // 0 = no alignment specified
371                                // n = alignment in bytes (2, 4, 8, 16, or 32)
372      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
373    } Memory;
374
375    struct {
376      unsigned RegNum;
377      bool isAdd;
378      ARM_AM::ShiftOpc ShiftTy;
379      unsigned ShiftImm;
380    } PostIdxReg;
381
382    struct {
383      bool isASR;
384      unsigned Imm;
385    } ShifterImm;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftReg;
390      unsigned ShiftImm;
391    } RegShiftedReg;
392    struct {
393      ARM_AM::ShiftOpc ShiftTy;
394      unsigned SrcReg;
395      unsigned ShiftImm;
396    } RegShiftedImm;
397    struct {
398      unsigned Imm;
399    } RotImm;
400    struct {
401      unsigned LSB;
402      unsigned Width;
403    } Bitfield;
404  };
405
406  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
407public:
408  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
409    Kind = o.Kind;
410    StartLoc = o.StartLoc;
411    EndLoc = o.EndLoc;
412    switch (Kind) {
413    case k_CondCode:
414      CC = o.CC;
415      break;
416    case k_ITCondMask:
417      ITMask = o.ITMask;
418      break;
419    case k_Token:
420      Tok = o.Tok;
421      break;
422    case k_CCOut:
423    case k_Register:
424      Reg = o.Reg;
425      break;
426    case k_RegisterList:
427    case k_DPRRegisterList:
428    case k_SPRRegisterList:
429      Registers = o.Registers;
430      break;
431    case k_VectorList:
432    case k_VectorListAllLanes:
433    case k_VectorListIndexed:
434      VectorList = o.VectorList;
435      break;
436    case k_CoprocNum:
437    case k_CoprocReg:
438      Cop = o.Cop;
439      break;
440    case k_CoprocOption:
441      CoprocOption = o.CoprocOption;
442      break;
443    case k_Immediate:
444      Imm = o.Imm;
445      break;
446    case k_MemBarrierOpt:
447      MBOpt = o.MBOpt;
448      break;
449    case k_Memory:
450      Memory = o.Memory;
451      break;
452    case k_PostIndexRegister:
453      PostIdxReg = o.PostIdxReg;
454      break;
455    case k_MSRMask:
456      MMask = o.MMask;
457      break;
458    case k_ProcIFlags:
459      IFlags = o.IFlags;
460      break;
461    case k_ShifterImmediate:
462      ShifterImm = o.ShifterImm;
463      break;
464    case k_ShiftedRegister:
465      RegShiftedReg = o.RegShiftedReg;
466      break;
467    case k_ShiftedImmediate:
468      RegShiftedImm = o.RegShiftedImm;
469      break;
470    case k_RotateImmediate:
471      RotImm = o.RotImm;
472      break;
473    case k_BitfieldDescriptor:
474      Bitfield = o.Bitfield;
475      break;
476    case k_VectorIndex:
477      VectorIndex = o.VectorIndex;
478      break;
479    }
480  }
481
482  /// getStartLoc - Get the location of the first token of this operand.
483  SMLoc getStartLoc() const { return StartLoc; }
484  /// getEndLoc - Get the location of the last token of this operand.
485  SMLoc getEndLoc() const { return EndLoc; }
486
487  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
488
489  ARMCC::CondCodes getCondCode() const {
490    assert(Kind == k_CondCode && "Invalid access!");
491    return CC.Val;
492  }
493
494  unsigned getCoproc() const {
495    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
496    return Cop.Val;
497  }
498
499  StringRef getToken() const {
500    assert(Kind == k_Token && "Invalid access!");
501    return StringRef(Tok.Data, Tok.Length);
502  }
503
504  unsigned getReg() const {
505    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
506    return Reg.RegNum;
507  }
508
509  const SmallVectorImpl<unsigned> &getRegList() const {
510    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
511            Kind == k_SPRRegisterList) && "Invalid access!");
512    return Registers;
513  }
514
515  const MCExpr *getImm() const {
516    assert(isImm() && "Invalid access!");
517    return Imm.Val;
518  }
519
520  unsigned getVectorIndex() const {
521    assert(Kind == k_VectorIndex && "Invalid access!");
522    return VectorIndex.Val;
523  }
524
525  ARM_MB::MemBOpt getMemBarrierOpt() const {
526    assert(Kind == k_MemBarrierOpt && "Invalid access!");
527    return MBOpt.Val;
528  }
529
530  ARM_PROC::IFlags getProcIFlags() const {
531    assert(Kind == k_ProcIFlags && "Invalid access!");
532    return IFlags.Val;
533  }
534
535  unsigned getMSRMask() const {
536    assert(Kind == k_MSRMask && "Invalid access!");
537    return MMask.Val;
538  }
539
540  bool isCoprocNum() const { return Kind == k_CoprocNum; }
541  bool isCoprocReg() const { return Kind == k_CoprocReg; }
542  bool isCoprocOption() const { return Kind == k_CoprocOption; }
543  bool isCondCode() const { return Kind == k_CondCode; }
544  bool isCCOut() const { return Kind == k_CCOut; }
545  bool isITMask() const { return Kind == k_ITCondMask; }
546  bool isITCondCode() const { return Kind == k_CondCode; }
547  bool isImm() const { return Kind == k_Immediate; }
548  bool isFPImm() const {
549    if (!isImm()) return false;
550    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
551    if (!CE) return false;
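    // (Annotation) The constant is treated as the raw IEEE-754 single-precision
    // bit pattern; getFP32Imm returns -1 when it has no VFP immediate encoding.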
552    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
553    return Val != -1;
554  }
555  bool isFBits16() const {
556    if (!isImm()) return false;
557    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
558    if (!CE) return false;
559    int64_t Value = CE->getValue();
560    return Value >= 0 && Value <= 16;
561  }
562  bool isFBits32() const {
563    if (!isImm()) return false;
564    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
565    if (!CE) return false;
566    int64_t Value = CE->getValue();
567    return Value >= 1 && Value <= 32;
568  }
569  bool isImm8s4() const {
570    if (!isImm()) return false;
571    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
572    if (!CE) return false;
573    int64_t Value = CE->getValue();
574    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
575  }
576  bool isImm0_1020s4() const {
577    if (!isImm()) return false;
578    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
579    if (!CE) return false;
580    int64_t Value = CE->getValue();
581    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
582  }
583  bool isImm0_508s4() const {
584    if (!isImm()) return false;
585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586    if (!CE) return false;
587    int64_t Value = CE->getValue();
588    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
589  }
590  bool isImm0_508s4Neg() const {
591    if (!isImm()) return false;
592    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
593    if (!CE) return false;
594    int64_t Value = -CE->getValue();
595    // Explicitly exclude zero. We want that to use the normal 0_508 version.
596    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
597  }
598  bool isImm0_255() const {
599    if (!isImm()) return false;
600    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
601    if (!CE) return false;
602    int64_t Value = CE->getValue();
603    return Value >= 0 && Value < 256;
604  }
605  bool isImm0_4095() const {
606    if (!isImm()) return false;
607    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
608    if (!CE) return false;
609    int64_t Value = CE->getValue();
610    return Value >= 0 && Value < 4096;
611  }
612  bool isImm0_4095Neg() const {
613    if (!isImm()) return false;
614    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
615    if (!CE) return false;
616    int64_t Value = -CE->getValue();
617    return Value > 0 && Value < 4096;
618  }
619  bool isImm0_1() const {
620    if (!isImm()) return false;
621    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
622    if (!CE) return false;
623    int64_t Value = CE->getValue();
624    return Value >= 0 && Value < 2;
625  }
626  bool isImm0_3() const {
627    if (!isImm()) return false;
628    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
629    if (!CE) return false;
630    int64_t Value = CE->getValue();
631    return Value >= 0 && Value < 4;
632  }
633  bool isImm0_7() const {
634    if (!isImm()) return false;
635    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
636    if (!CE) return false;
637    int64_t Value = CE->getValue();
638    return Value >= 0 && Value < 8;
639  }
640  bool isImm0_15() const {
641    if (!isImm()) return false;
642    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
643    if (!CE) return false;
644    int64_t Value = CE->getValue();
645    return Value >= 0 && Value < 16;
646  }
647  bool isImm0_31() const {
648    if (!isImm()) return false;
649    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650    if (!CE) return false;
651    int64_t Value = CE->getValue();
652    return Value >= 0 && Value < 32;
653  }
654  bool isImm0_63() const {
655    if (!isImm()) return false;
656    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
657    if (!CE) return false;
658    int64_t Value = CE->getValue();
659    return Value >= 0 && Value < 64;
660  }
661  bool isImm8() const {
662    if (!isImm()) return false;
663    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
664    if (!CE) return false;
665    int64_t Value = CE->getValue();
666    return Value == 8;
667  }
668  bool isImm16() const {
669    if (!isImm()) return false;
670    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
671    if (!CE) return false;
672    int64_t Value = CE->getValue();
673    return Value == 16;
674  }
675  bool isImm32() const {
676    if (!isImm()) return false;
677    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
678    if (!CE) return false;
679    int64_t Value = CE->getValue();
680    return Value == 32;
681  }
682  bool isShrImm8() const {
683    if (!isImm()) return false;
684    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
685    if (!CE) return false;
686    int64_t Value = CE->getValue();
687    return Value > 0 && Value <= 8;
688  }
689  bool isShrImm16() const {
690    if (!isImm()) return false;
691    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
692    if (!CE) return false;
693    int64_t Value = CE->getValue();
694    return Value > 0 && Value <= 16;
695  }
696  bool isShrImm32() const {
697    if (!isImm()) return false;
698    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
699    if (!CE) return false;
700    int64_t Value = CE->getValue();
701    return Value > 0 && Value <= 32;
702  }
703  bool isShrImm64() const {
704    if (!isImm()) return false;
705    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706    if (!CE) return false;
707    int64_t Value = CE->getValue();
708    return Value > 0 && Value <= 64;
709  }
710  bool isImm1_7() const {
711    if (!isImm()) return false;
712    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
713    if (!CE) return false;
714    int64_t Value = CE->getValue();
715    return Value > 0 && Value < 8;
716  }
717  bool isImm1_15() const {
718    if (!isImm()) return false;
719    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
720    if (!CE) return false;
721    int64_t Value = CE->getValue();
722    return Value > 0 && Value < 16;
723  }
724  bool isImm1_31() const {
725    if (!isImm()) return false;
726    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
727    if (!CE) return false;
728    int64_t Value = CE->getValue();
729    return Value > 0 && Value < 32;
730  }
731  bool isImm1_16() const {
732    if (!isImm()) return false;
733    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
734    if (!CE) return false;
735    int64_t Value = CE->getValue();
736    return Value > 0 && Value < 17;
737  }
738  bool isImm1_32() const {
739    if (!isImm()) return false;
740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741    if (!CE) return false;
742    int64_t Value = CE->getValue();
743    return Value > 0 && Value < 33;
744  }
745  bool isImm0_32() const {
746    if (!isImm()) return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return Value >= 0 && Value < 33;
751  }
752  bool isImm0_65535() const {
753    if (!isImm()) return false;
754    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755    if (!CE) return false;
756    int64_t Value = CE->getValue();
757    return Value >= 0 && Value < 65536;
758  }
759  bool isImm0_65535Expr() const {
760    if (!isImm()) return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    // If it's not a constant expression, it'll generate a fixup and be
763    // handled later.
764    if (!CE) return true;
765    int64_t Value = CE->getValue();
766    return Value >= 0 && Value < 65536;
767  }
768  bool isImm24bit() const {
769    if (!isImm()) return false;
770    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
771    if (!CE) return false;
772    int64_t Value = CE->getValue();
773    return Value >= 0 && Value <= 0xffffff;
774  }
775  bool isImmThumbSR() const {
776    if (!isImm()) return false;
777    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778    if (!CE) return false;
779    int64_t Value = CE->getValue();
780    return Value > 0 && Value < 33;
781  }
782  bool isPKHLSLImm() const {
783    if (!isImm()) return false;
784    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785    if (!CE) return false;
786    int64_t Value = CE->getValue();
787    return Value >= 0 && Value < 32;
788  }
789  bool isPKHASRImm() const {
790    if (!isImm()) return false;
791    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
792    if (!CE) return false;
793    int64_t Value = CE->getValue();
794    return Value > 0 && Value <= 32;
795  }
796  bool isARMSOImm() const {
797    if (!isImm()) return false;
798    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
799    if (!CE) return false;
800    int64_t Value = CE->getValue();
801    return ARM_AM::getSOImmVal(Value) != -1;
802  }
803  bool isARMSOImmNot() const {
804    if (!isImm()) return false;
805    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
806    if (!CE) return false;
807    int64_t Value = CE->getValue();
808    return ARM_AM::getSOImmVal(~Value) != -1;
809  }
810  bool isARMSOImmNeg() const {
811    if (!isImm()) return false;
812    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
813    if (!CE) return false;
814    int64_t Value = CE->getValue();
815    // Only use this when not representable as a plain so_imm.
816    return ARM_AM::getSOImmVal(Value) == -1 &&
817      ARM_AM::getSOImmVal(-Value) != -1;
818  }
819  bool isT2SOImm() const {
820    if (!isImm()) return false;
821    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
822    if (!CE) return false;
823    int64_t Value = CE->getValue();
824    return ARM_AM::getT2SOImmVal(Value) != -1;
825  }
826  bool isT2SOImmNot() const {
827    if (!isImm()) return false;
828    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
829    if (!CE) return false;
830    int64_t Value = CE->getValue();
831    return ARM_AM::getT2SOImmVal(~Value) != -1;
832  }
833  bool isT2SOImmNeg() const {
834    if (!isImm()) return false;
835    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836    if (!CE) return false;
837    int64_t Value = CE->getValue();
838    // Only use this when not representable as a plain so_imm.
839    return ARM_AM::getT2SOImmVal(Value) == -1 &&
840      ARM_AM::getT2SOImmVal(-Value) != -1;
841  }
842  bool isSetEndImm() const {
843    if (!isImm()) return false;
844    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845    if (!CE) return false;
846    int64_t Value = CE->getValue();
847    return Value == 1 || Value == 0;
848  }
849  bool isReg() const { return Kind == k_Register; }
850  bool isRegList() const { return Kind == k_RegisterList; }
851  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
852  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
853  bool isToken() const { return Kind == k_Token; }
854  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
855  bool isMemory() const { return Kind == k_Memory; }
856  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
857  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
858  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
859  bool isRotImm() const { return Kind == k_RotateImmediate; }
860  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
861  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
862  bool isPostIdxReg() const {
863    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
864  }
865  bool isMemNoOffset(bool alignOK = false) const {
866    if (!isMemory())
867      return false;
868    // No offset of any kind.
869    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
870     (alignOK || Memory.Alignment == 0);
871  }
872  bool isMemPCRelImm12() const {
873    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
874      return false;
875    // Base register must be PC.
876    if (Memory.BaseRegNum != ARM::PC)
877      return false;
878    // Immediate offset in range [-4095, 4095].
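    // (Annotation) INT32_MIN is the sentinel used for #-0; see isAM3Offset.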
879    if (!Memory.OffsetImm) return true;
880    int64_t Val = Memory.OffsetImm->getValue();
881    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
882  }
883  bool isAlignedMemory() const {
884    return isMemNoOffset(true);
885  }
886  bool isAddrMode2() const {
887    if (!isMemory() || Memory.Alignment != 0) return false;
888    // Check for register offset.
889    if (Memory.OffsetRegNum) return true;
890    // Immediate offset in range [-4095, 4095].
891    if (!Memory.OffsetImm) return true;
892    int64_t Val = Memory.OffsetImm->getValue();
893    return Val > -4096 && Val < 4096;
894  }
895  bool isAM2OffsetImm() const {
896    if (!isImm()) return false;
897    // Immediate offset in range [-4095, 4095].
898    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
899    if (!CE) return false;
900    int64_t Val = CE->getValue();
901    return Val > -4096 && Val < 4096;
902  }
903  bool isAddrMode3() const {
904    // If we have an immediate that's not a constant, treat it as a label
905    // reference needing a fixup. If it is a constant, it's something else
906    // and we reject it.
907    if (isImm() && !isa<MCConstantExpr>(getImm()))
908      return true;
909    if (!isMemory() || Memory.Alignment != 0) return false;
910    // No shifts are legal for AM3.
911    if (Memory.ShiftType != ARM_AM::no_shift) return false;
912    // Check for register offset.
913    if (Memory.OffsetRegNum) return true;
914    // Immediate offset in range [-255, 255].
915    if (!Memory.OffsetImm) return true;
916    int64_t Val = Memory.OffsetImm->getValue();
917    return Val > -256 && Val < 256;
918  }
919  bool isAM3Offset() const {
920    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
921      return false;
922    if (Kind == k_PostIndexRegister)
923      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
924    // Immediate offset in range [-255, 255].
925    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926    if (!CE) return false;
927    int64_t Val = CE->getValue();
928    // Special case, #-0 is INT32_MIN.
929    return (Val > -256 && Val < 256) || Val == INT32_MIN;
930  }
931  bool isAddrMode5() const {
932    // If we have an immediate that's not a constant, treat it as a label
933    // reference needing a fixup. If it is a constant, it's something else
934    // and we reject it.
935    if (isImm() && !isa<MCConstantExpr>(getImm()))
936      return true;
937    if (!isMemory() || Memory.Alignment != 0) return false;
938    // Check for register offset.
939    if (Memory.OffsetRegNum) return false;
940    // Immediate offset in range [-1020, 1020] and a multiple of 4.
941    if (!Memory.OffsetImm) return true;
942    int64_t Val = Memory.OffsetImm->getValue();
943    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
944      Val == INT32_MIN;
945  }
946  bool isMemTBB() const {
947    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
948        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
949      return false;
950    return true;
951  }
952  bool isMemTBH() const {
953    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
954        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
955        Memory.Alignment != 0)
956      return false;
957    return true;
958  }
959  bool isMemRegOffset() const {
960    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
961      return false;
962    return true;
963  }
964  bool isT2MemRegOffset() const {
965    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
966        Memory.Alignment != 0)
967      return false;
968    // Only lsl #{0, 1, 2, 3} allowed.
969    if (Memory.ShiftType == ARM_AM::no_shift)
970      return true;
971    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
972      return false;
973    return true;
974  }
975  bool isMemThumbRR() const {
976    // Thumb reg+reg addressing is simple. Just two registers, a base and
977    // an offset. No shifts, negations or any other complicating factors.
978    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
979        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
980      return false;
981    return isARMLowRegister(Memory.BaseRegNum) &&
982      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
983  }
984  bool isMemThumbRIs4() const {
985    if (!isMemory() || Memory.OffsetRegNum != 0 ||
986        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
987      return false;
988    // Immediate offset, multiple of 4 in range [0, 124].
989    if (!Memory.OffsetImm) return true;
990    int64_t Val = Memory.OffsetImm->getValue();
991    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
992  }
993  bool isMemThumbRIs2() const {
994    if (!isMemory() || Memory.OffsetRegNum != 0 ||
995        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
996      return false;
997    // Immediate offset, multiple of 2 in range [0, 62].
998    if (!Memory.OffsetImm) return true;
999    int64_t Val = Memory.OffsetImm->getValue();
1000    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1001  }
1002  bool isMemThumbRIs1() const {
1003    if (!isMemory() || Memory.OffsetRegNum != 0 ||
1004        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1005      return false;
1006    // Immediate offset in range [0, 31].
1007    if (!Memory.OffsetImm) return true;
1008    int64_t Val = Memory.OffsetImm->getValue();
1009    return Val >= 0 && Val <= 31;
1010  }
1011  bool isMemThumbSPI() const {
1012    if (!isMemory() || Memory.OffsetRegNum != 0 ||
1013        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1014      return false;
1015    // Immediate offset, multiple of 4 in range [0, 1020].
1016    if (!Memory.OffsetImm) return true;
1017    int64_t Val = Memory.OffsetImm->getValue();
1018    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1019  }
1020  bool isMemImm8s4Offset() const {
1021    // If we have an immediate that's not a constant, treat it as a label
1022    // reference needing a fixup. If it is a constant, it's something else
1023    // and we reject it.
1024    if (isImm() && !isa<MCConstantExpr>(getImm()))
1025      return true;
1026    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1027      return false;
1028    // Immediate offset a multiple of 4 in range [-1020, 1020].
1029    if (!Memory.OffsetImm) return true;
1030    int64_t Val = Memory.OffsetImm->getValue();
1031    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1032  }
1033  bool isMemImm0_1020s4Offset() const {
1034    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1035      return false;
1036    // Immediate offset a multiple of 4 in range [0, 1020].
1037    if (!Memory.OffsetImm) return true;
1038    int64_t Val = Memory.OffsetImm->getValue();
1039    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1040  }
1041  bool isMemImm8Offset() const {
1042    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1043      return false;
1044    // Base reg of PC isn't allowed for these encodings.
1045    if (Memory.BaseRegNum == ARM::PC) return false;
1046    // Immediate offset in range [-255, 255].
1047    if (!Memory.OffsetImm) return true;
1048    int64_t Val = Memory.OffsetImm->getValue();
1049    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1050  }
1051  bool isMemPosImm8Offset() const {
1052    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1053      return false;
1054    // Immediate offset in range [0, 255].
1055    if (!Memory.OffsetImm) return true;
1056    int64_t Val = Memory.OffsetImm->getValue();
1057    return Val >= 0 && Val < 256;
1058  }
1059  bool isMemNegImm8Offset() const {
1060    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1061      return false;
1062    // Base reg of PC isn't allowed for these encodings.
1063    if (Memory.BaseRegNum == ARM::PC) return false;
1064    // Immediate offset in range [-255, -1].
1065    if (!Memory.OffsetImm) return false;
1066    int64_t Val = Memory.OffsetImm->getValue();
1067    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1068  }
1069  bool isMemUImm12Offset() const {
1070    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1071      return false;
1072    // Immediate offset in range [0, 4095].
1073    if (!Memory.OffsetImm) return true;
1074    int64_t Val = Memory.OffsetImm->getValue();
1075    return (Val >= 0 && Val < 4096);
1076  }
1077  bool isMemImm12Offset() const {
1078    // If we have an immediate that's not a constant, treat it as a label
1079    // reference needing a fixup. If it is a constant, it's something else
1080    // and we reject it.
1081    if (isImm() && !isa<MCConstantExpr>(getImm()))
1082      return true;
1083
1084    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1085      return false;
1086    // Immediate offset in range [-4095, 4095].
1087    if (!Memory.OffsetImm) return true;
1088    int64_t Val = Memory.OffsetImm->getValue();
1089    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1090  }
1091  bool isPostIdxImm8() const {
1092    if (!isImm()) return false;
1093    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1094    if (!CE) return false;
1095    int64_t Val = CE->getValue();
1096    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1097  }
1098  bool isPostIdxImm8s4() const {
1099    if (!isImm()) return false;
1100    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1101    if (!CE) return false;
1102    int64_t Val = CE->getValue();
1103    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1104      (Val == INT32_MIN);
1105  }
1106
1107  bool isMSRMask() const { return Kind == k_MSRMask; }
1108  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1109
1110  // NEON operands.
1111  bool isSingleSpacedVectorList() const {
1112    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1113  }
1114  bool isDoubleSpacedVectorList() const {
1115    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1116  }
1117  bool isVecListOneD() const {
1118    if (!isSingleSpacedVectorList()) return false;
1119    return VectorList.Count == 1;
1120  }
1121
1122  bool isVecListDPair() const {
1123    if (!isSingleSpacedVectorList()) return false;
1124    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1125              .contains(VectorList.RegNum));
1126  }
1127
1128  bool isVecListThreeD() const {
1129    if (!isSingleSpacedVectorList()) return false;
1130    return VectorList.Count == 3;
1131  }
1132
1133  bool isVecListFourD() const {
1134    if (!isSingleSpacedVectorList()) return false;
1135    return VectorList.Count == 4;
1136  }
1137
1138  bool isVecListDPairSpaced() const {
1139    if (isSingleSpacedVectorList()) return false;
1140    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1141              .contains(VectorList.RegNum));
1142  }
1143
1144  bool isVecListThreeQ() const {
1145    if (!isDoubleSpacedVectorList()) return false;
1146    return VectorList.Count == 3;
1147  }
1148
1149  bool isVecListFourQ() const {
1150    if (!isDoubleSpacedVectorList()) return false;
1151    return VectorList.Count == 4;
1152  }
1153
1154  bool isSingleSpacedVectorAllLanes() const {
1155    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1156  }
1157  bool isDoubleSpacedVectorAllLanes() const {
1158    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1159  }
1160  bool isVecListOneDAllLanes() const {
1161    if (!isSingleSpacedVectorAllLanes()) return false;
1162    return VectorList.Count == 1;
1163  }
1164
1165  bool isVecListDPairAllLanes() const {
1166    if (!isSingleSpacedVectorAllLanes()) return false;
1167    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1168              .contains(VectorList.RegNum));
1169  }
1170
1171  bool isVecListDPairSpacedAllLanes() const {
1172    if (!isDoubleSpacedVectorAllLanes()) return false;
1173    return VectorList.Count == 2;
1174  }
1175
1176  bool isVecListThreeDAllLanes() const {
1177    if (!isSingleSpacedVectorAllLanes()) return false;
1178    return VectorList.Count == 3;
1179  }
1180
1181  bool isVecListThreeQAllLanes() const {
1182    if (!isDoubleSpacedVectorAllLanes()) return false;
1183    return VectorList.Count == 3;
1184  }
1185
1186  bool isVecListFourDAllLanes() const {
1187    if (!isSingleSpacedVectorAllLanes()) return false;
1188    return VectorList.Count == 4;
1189  }
1190
1191  bool isVecListFourQAllLanes() const {
1192    if (!isDoubleSpacedVectorAllLanes()) return false;
1193    return VectorList.Count == 4;
1194  }
1195
1196  bool isSingleSpacedVectorIndexed() const {
1197    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1198  }
1199  bool isDoubleSpacedVectorIndexed() const {
1200    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1201  }
1202  bool isVecListOneDByteIndexed() const {
1203    if (!isSingleSpacedVectorIndexed()) return false;
1204    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1205  }
1206
1207  bool isVecListOneDHWordIndexed() const {
1208    if (!isSingleSpacedVectorIndexed()) return false;
1209    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1210  }
1211
1212  bool isVecListOneDWordIndexed() const {
1213    if (!isSingleSpacedVectorIndexed()) return false;
1214    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1215  }
1216
1217  bool isVecListTwoDByteIndexed() const {
1218    if (!isSingleSpacedVectorIndexed()) return false;
1219    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1220  }
1221
1222  bool isVecListTwoDHWordIndexed() const {
1223    if (!isSingleSpacedVectorIndexed()) return false;
1224    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1225  }
1226
1227  bool isVecListTwoQWordIndexed() const {
1228    if (!isDoubleSpacedVectorIndexed()) return false;
1229    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1230  }
1231
1232  bool isVecListTwoQHWordIndexed() const {
1233    if (!isDoubleSpacedVectorIndexed()) return false;
1234    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1235  }
1236
1237  bool isVecListTwoDWordIndexed() const {
1238    if (!isSingleSpacedVectorIndexed()) return false;
1239    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1240  }
1241
1242  bool isVecListThreeDByteIndexed() const {
1243    if (!isSingleSpacedVectorIndexed()) return false;
1244    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1245  }
1246
1247  bool isVecListThreeDHWordIndexed() const {
1248    if (!isSingleSpacedVectorIndexed()) return false;
1249    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1250  }
1251
1252  bool isVecListThreeQWordIndexed() const {
1253    if (!isDoubleSpacedVectorIndexed()) return false;
1254    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1255  }
1256
1257  bool isVecListThreeQHWordIndexed() const {
1258    if (!isDoubleSpacedVectorIndexed()) return false;
1259    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1260  }
1261
1262  bool isVecListThreeDWordIndexed() const {
1263    if (!isSingleSpacedVectorIndexed()) return false;
1264    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1265  }
1266
1267  bool isVecListFourDByteIndexed() const {
1268    if (!isSingleSpacedVectorIndexed()) return false;
1269    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1270  }
1271
1272  bool isVecListFourDHWordIndexed() const {
1273    if (!isSingleSpacedVectorIndexed()) return false;
1274    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1275  }
1276
1277  bool isVecListFourQWordIndexed() const {
1278    if (!isDoubleSpacedVectorIndexed()) return false;
1279    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1280  }
1281
1282  bool isVecListFourQHWordIndexed() const {
1283    if (!isDoubleSpacedVectorIndexed()) return false;
1284    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1285  }
1286
1287  bool isVecListFourDWordIndexed() const {
1288    if (!isSingleSpacedVectorIndexed()) return false;
1289    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1290  }
1291
1292  bool isVectorIndex8() const {
1293    if (Kind != k_VectorIndex) return false;
1294    return VectorIndex.Val < 8;
1295  }
1296  bool isVectorIndex16() const {
1297    if (Kind != k_VectorIndex) return false;
1298    return VectorIndex.Val < 4;
1299  }
1300  bool isVectorIndex32() const {
1301    if (Kind != k_VectorIndex) return false;
1302    return VectorIndex.Val < 2;
1303  }
1304
1305  bool isNEONi8splat() const {
1306    if (!isImm()) return false;
1307    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1308    // Must be a constant.
1309    if (!CE) return false;
1310    int64_t Value = CE->getValue();
1311    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1312    // value.
1313    return Value >= 0 && Value < 256;
1314  }
1315
1316  bool isNEONi16splat() const {
1317    if (!isImm()) return false;
1318    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1319    // Must be a constant.
1320    if (!CE) return false;
1321    int64_t Value = CE->getValue();
1322    // i16 value in the range [0,255] or [0x0100, 0xff00]
1323    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1324  }
1325
1326  bool isNEONi32splat() const {
1327    if (!isImm()) return false;
1328    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1329    // Must be a constant.
1330    if (!CE) return false;
1331    int64_t Value = CE->getValue();
1332    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
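    // e.g. 0xab, 0xab00, 0xab0000 and 0xab000000 each fall in one of the
    // accepted ranges below (annotation only).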
1333    return (Value >= 0 && Value < 256) ||
1334      (Value >= 0x0100 && Value <= 0xff00) ||
1335      (Value >= 0x010000 && Value <= 0xff0000) ||
1336      (Value >= 0x01000000 && Value <= 0xff000000);
1337  }
1338
1339  bool isNEONi32vmov() const {
1340    if (!isImm()) return false;
1341    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1342    // Must be a constant.
1343    if (!CE) return false;
1344    int64_t Value = CE->getValue();
1345    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1346    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1347    return (Value >= 0 && Value < 256) ||
1348      (Value >= 0x0100 && Value <= 0xff00) ||
1349      (Value >= 0x010000 && Value <= 0xff0000) ||
1350      (Value >= 0x01000000 && Value <= 0xff000000) ||
1351      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1352      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1353  }
1354  bool isNEONi32vmovNeg() const {
1355    if (!isImm()) return false;
1356    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1357    // Must be a constant.
1358    if (!CE) return false;
1359    int64_t Value = ~CE->getValue();
1360    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1361    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1362    return (Value >= 0 && Value < 256) ||
1363      (Value >= 0x0100 && Value <= 0xff00) ||
1364      (Value >= 0x010000 && Value <= 0xff0000) ||
1365      (Value >= 0x01000000 && Value <= 0xff000000) ||
1366      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1367      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1368  }
1369
1370  bool isNEONi64splat() const {
1371    if (!isImm()) return false;
1372    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1373    // Must be a constant.
1374    if (!CE) return false;
1375    uint64_t Value = CE->getValue();
1376    // i64 value with each byte being either 0 or 0xff.
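    // e.g. 0x00ff00ff00ff00ff and 0xffffffff00000000 qualify (annotation only).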
1377    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1378      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1379    return true;
1380  }
1381
1382  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1383    // Add as immediates when possible.  Null MCExpr = 0.
1384    if (Expr == 0)
1385      Inst.addOperand(MCOperand::CreateImm(0));
1386    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1387      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1388    else
1389      Inst.addOperand(MCOperand::CreateExpr(Expr));
1390  }
1391
1392  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1393    assert(N == 2 && "Invalid number of operands!");
1394    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
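    // (Annotation) The predicate also carries a CPSR register use; AL needs no
    // register, hence 0 below.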
1395    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1396    Inst.addOperand(MCOperand::CreateReg(RegNum));
1397  }
1398
1399  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1400    assert(N == 1 && "Invalid number of operands!");
1401    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1402  }
1403
1404  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1405    assert(N == 1 && "Invalid number of operands!");
1406    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1407  }
1408
1409  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1410    assert(N == 1 && "Invalid number of operands!");
1411    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1412  }
1413
1414  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1415    assert(N == 1 && "Invalid number of operands!");
1416    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1417  }
1418
1419  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1420    assert(N == 1 && "Invalid number of operands!");
1421    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1422  }
1423
1424  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1425    assert(N == 1 && "Invalid number of operands!");
1426    Inst.addOperand(MCOperand::CreateReg(getReg()));
1427  }
1428
1429  void addRegOperands(MCInst &Inst, unsigned N) const {
1430    assert(N == 1 && "Invalid number of operands!");
1431    Inst.addOperand(MCOperand::CreateReg(getReg()));
1432  }
1433
1434  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1435    assert(N == 3 && "Invalid number of operands!");
1436    assert(isRegShiftedReg() &&
1437           "addRegShiftedRegOperands() on non RegShiftedReg!");
1438    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1439    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1440    Inst.addOperand(MCOperand::CreateImm(
1441      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1442  }
1443
1444  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1445    assert(N == 2 && "Invalid number of operands!");
1446    assert(isRegShiftedImm() &&
1447           "addRegShiftedImmOperands() on non RegShiftedImm!");
1448    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1449    Inst.addOperand(MCOperand::CreateImm(
1450      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1451  }
1452
1453  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1454    assert(N == 1 && "Invalid number of operands!");
1455    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1456                                         ShifterImm.Imm));
1457  }
1458
1459  void addRegListOperands(MCInst &Inst, unsigned N) const {
1460    assert(N == 1 && "Invalid number of operands!");
1461    const SmallVectorImpl<unsigned> &RegList = getRegList();
1462    for (SmallVectorImpl<unsigned>::const_iterator
1463           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1464      Inst.addOperand(MCOperand::CreateReg(*I));
1465  }
1466
1467  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1468    addRegListOperands(Inst, N);
1469  }
1470
1471  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1472    addRegListOperands(Inst, N);
1473  }
1474
1475  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1476    assert(N == 1 && "Invalid number of operands!");
1477    // Encoded as val>>3. The printer handles display as 8, 16, 24.
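    // e.g. a rotation of 24 is encoded as 24 >> 3 == 3 (annotation only).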
1478    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1479  }
1480
1481  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1482    assert(N == 1 && "Invalid number of operands!");
1483    // Munge the lsb/width into a bitfield mask.
1484    unsigned lsb = Bitfield.LSB;
1485    unsigned width = Bitfield.Width;
1486    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
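    // For example, lsb=8 and width=8 produce the mask 0xffff00ff.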
1487    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1488                      (32 - (lsb + width)));
1489    Inst.addOperand(MCOperand::CreateImm(Mask));
1490  }
1491
1492  void addImmOperands(MCInst &Inst, unsigned N) const {
1493    assert(N == 1 && "Invalid number of operands!");
1494    addExpr(Inst, getImm());
1495  }
1496
1497  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1498    assert(N == 1 && "Invalid number of operands!");
1499    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1500    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1501  }
1502
1503  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1504    assert(N == 1 && "Invalid number of operands!");
1505    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1506    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1507  }
1508
1509  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1510    assert(N == 1 && "Invalid number of operands!");
1511    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1512    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1513    Inst.addOperand(MCOperand::CreateImm(Val));
1514  }
1515
1516  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1517    assert(N == 1 && "Invalid number of operands!");
1518    // FIXME: We really want to scale the value here, but the LDRD/STRD
1519    // instructions don't encode operands that way yet.
1520    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1521    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1522  }
1523
1524  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1525    assert(N == 1 && "Invalid number of operands!");
1526    // The immediate is scaled by four in the encoding and is stored
1527    // in the MCInst as such. Lop off the low two bits here.
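    // For example, #1020 is stored as 255.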
1528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1529    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1530  }
1531
1532  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1533    assert(N == 1 && "Invalid number of operands!");
1534    // The immediate is scaled by four in the encoding and is stored
1535    // in the MCInst as such. Lop off the low two bits here.
1536    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1537    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1538  }
1539
1540  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1541    assert(N == 1 && "Invalid number of operands!");
1542    // The immediate is scaled by four in the encoding and is stored
1543    // in the MCInst as such. Lop off the low two bits here.
1544    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1545    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1546  }
1547
1548  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1549    assert(N == 1 && "Invalid number of operands!");
1550    // The constant encodes as the immediate-1, and we store in the instruction
1551    // the bits as encoded, so subtract off one here.
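    // For example, #16 is stored as 15.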
1552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1553    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1554  }
1555
1556  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1557    assert(N == 1 && "Invalid number of operands!");
1558    // The constant encodes as the immediate-1, and we store in the instruction
1559    // the bits as encoded, so subtract off one here.
1560    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1561    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1562  }
1563
1564  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1565    assert(N == 1 && "Invalid number of operands!");
1566    // The constant encodes as the immediate, except for 32, which encodes as
1567    // zero.
1568    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1569    unsigned Imm = CE->getValue();
1570    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1571  }
1572
1573  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1574    assert(N == 1 && "Invalid number of operands!");
1575    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1576    // the instruction as well.
1577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1578    int Val = CE->getValue();
1579    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1580  }
1581
1582  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1583    assert(N == 1 && "Invalid number of operands!");
1584    // The operand is actually a t2_so_imm, but we have its bitwise
1585    // negation in the assembly source, so twiddle it here.
1586    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1587    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1588  }
1589
1590  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1591    assert(N == 1 && "Invalid number of operands!");
1592    // The operand is actually a t2_so_imm, but we have its
1593    // negation in the assembly source, so twiddle it here.
1594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1595    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1596  }
1597
1598  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1599    assert(N == 1 && "Invalid number of operands!");
1600    // The operand is actually an imm0_4095, but we have its
1601    // negation in the assembly source, so twiddle it here.
1602    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1603    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1604  }
1605
1606  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1607    assert(N == 1 && "Invalid number of operands!");
1608    // The operand is actually a so_imm, but we have its bitwise
1609    // negation in the assembly source, so twiddle it here.
1610    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1611    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1612  }
1613
1614  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1615    assert(N == 1 && "Invalid number of operands!");
1616    // The operand is actually a so_imm, but we have its
1617    // negation in the assembly source, so twiddle it here.
1618    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1619    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1620  }
1621
1622  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1623    assert(N == 1 && "Invalid number of operands!");
1624    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1625  }
1626
1627  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1628    assert(N == 1 && "Invalid number of operands!");
1629    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1630  }
1631
1632  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1633    assert(N == 1 && "Invalid number of operands!");
1634    int32_t Imm = Memory.OffsetImm->getValue();
1635    // FIXME: Handle #-0
1636    if (Imm == INT32_MIN) Imm = 0;
1637    Inst.addOperand(MCOperand::CreateImm(Imm));
1638  }
1639
1640  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1641    assert(N == 2 && "Invalid number of operands!");
1642    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1643    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1644  }
1645
1646  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1647    assert(N == 3 && "Invalid number of operands!");
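    // The offset is passed as a single AM2 immediate packing the add/sub flag,
    // the offset magnitude (or shift amount), and the shift type.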
1648    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1649    if (!Memory.OffsetRegNum) {
1650      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1651      // Special case for #-0
1652      if (Val == INT32_MIN) Val = 0;
1653      if (Val < 0) Val = -Val;
1654      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1655    } else {
1656      // For register offset, we encode the shift type and negation flag
1657      // here.
1658      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1659                              Memory.ShiftImm, Memory.ShiftType);
1660    }
1661    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1662    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1663    Inst.addOperand(MCOperand::CreateImm(Val));
1664  }
1665
1666  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1667    assert(N == 2 && "Invalid number of operands!");
1668    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1669    assert(CE && "non-constant AM2OffsetImm operand!");
1670    int32_t Val = CE->getValue();
1671    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1672    // Special case for #-0
1673    if (Val == INT32_MIN) Val = 0;
1674    if (Val < 0) Val = -Val;
1675    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1676    Inst.addOperand(MCOperand::CreateReg(0));
1677    Inst.addOperand(MCOperand::CreateImm(Val));
1678  }
1679
1680  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1681    assert(N == 3 && "Invalid number of operands!");
1682    // If we have an immediate that's not a constant, treat it as a label
1683    // reference needing a fixup. If it is a constant, it's something else
1684    // and we reject it.
1685    if (isImm()) {
1686      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1687      Inst.addOperand(MCOperand::CreateReg(0));
1688      Inst.addOperand(MCOperand::CreateImm(0));
1689      return;
1690    }
1691
1692    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1693    if (!Memory.OffsetRegNum) {
1694      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1695      // Special case for #-0
1696      if (Val == INT32_MIN) Val = 0;
1697      if (Val < 0) Val = -Val;
1698      Val = ARM_AM::getAM3Opc(AddSub, Val);
1699    } else {
1700      // For register offset, we encode the shift type and negation flag
1701      // here.
1702      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1703    }
1704    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1705    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1706    Inst.addOperand(MCOperand::CreateImm(Val));
1707  }
1708
1709  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1710    assert(N == 2 && "Invalid number of operands!");
1711    if (Kind == k_PostIndexRegister) {
1712      int32_t Val =
1713        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1714      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1715      Inst.addOperand(MCOperand::CreateImm(Val));
1716      return;
1717    }
1718
1719    // Constant offset.
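    // getAM3Opc packs the add/sub flag together with the 8-bit offset magnitude.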
1720    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1721    int32_t Val = CE->getValue();
1722    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1723    // Special case for #-0
1724    if (Val == INT32_MIN) Val = 0;
1725    if (Val < 0) Val = -Val;
1726    Val = ARM_AM::getAM3Opc(AddSub, Val);
1727    Inst.addOperand(MCOperand::CreateReg(0));
1728    Inst.addOperand(MCOperand::CreateImm(Val));
1729  }
1730
1731  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1732    assert(N == 2 && "Invalid number of operands!");
1733    // If we have an immediate that's not a constant, treat it as a label
1734    // reference needing a fixup. If it is a constant, it's something else
1735    // and we reject it.
1736    if (isImm()) {
1737      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1738      Inst.addOperand(MCOperand::CreateImm(0));
1739      return;
1740    }
1741
1742    // The lower two bits are always zero and as such are not encoded.
1743    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1744    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1745    // Special case for #-0
1746    if (Val == INT32_MIN) Val = 0;
1747    if (Val < 0) Val = -Val;
1748    Val = ARM_AM::getAM5Opc(AddSub, Val);
1749    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1750    Inst.addOperand(MCOperand::CreateImm(Val));
1751  }
1752
1753  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1754    assert(N == 2 && "Invalid number of operands!");
1755    // If we have an immediate that's not a constant, treat it as a label
1756    // reference needing a fixup. If it is a constant, it's something else
1757    // and we reject it.
1758    if (isImm()) {
1759      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1760      Inst.addOperand(MCOperand::CreateImm(0));
1761      return;
1762    }
1763
1764    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1765    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1766    Inst.addOperand(MCOperand::CreateImm(Val));
1767  }
1768
1769  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1770    assert(N == 2 && "Invalid number of operands!");
1771    // The lower two bits are always zero and as such are not encoded.
1772    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1773    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1774    Inst.addOperand(MCOperand::CreateImm(Val));
1775  }
1776
1777  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1778    assert(N == 2 && "Invalid number of operands!");
1779    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1780    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1781    Inst.addOperand(MCOperand::CreateImm(Val));
1782  }
1783
1784  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1785    addMemImm8OffsetOperands(Inst, N);
1786  }
1787
1788  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1789    addMemImm8OffsetOperands(Inst, N);
1790  }
1791
1792  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1793    assert(N == 2 && "Invalid number of operands!");
1794    // If this is an immediate, it's a label reference.
1795    if (isImm()) {
1796      addExpr(Inst, getImm());
1797      Inst.addOperand(MCOperand::CreateImm(0));
1798      return;
1799    }
1800
1801    // Otherwise, it's a normal memory reg+offset.
1802    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1803    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1804    Inst.addOperand(MCOperand::CreateImm(Val));
1805  }
1806
1807  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1808    assert(N == 2 && "Invalid number of operands!");
1809    // If this is an immediate, it's a label reference.
1810    if (isImm()) {
1811      addExpr(Inst, getImm());
1812      Inst.addOperand(MCOperand::CreateImm(0));
1813      return;
1814    }
1815
1816    // Otherwise, it's a normal memory reg+offset.
1817    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1818    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1819    Inst.addOperand(MCOperand::CreateImm(Val));
1820  }
1821
1822  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1823    assert(N == 2 && "Invalid number of operands!");
1824    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1825    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1826  }
1827
1828  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1829    assert(N == 2 && "Invalid number of operands!");
1830    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1831    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1832  }
1833
1834  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1835    assert(N == 3 && "Invalid number of operands!");
1836    unsigned Val =
1837      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1838                        Memory.ShiftImm, Memory.ShiftType);
1839    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1840    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1841    Inst.addOperand(MCOperand::CreateImm(Val));
1842  }
1843
1844  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1845    assert(N == 3 && "Invalid number of operands!");
1846    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1847    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1848    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1849  }
1850
1851  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1852    assert(N == 2 && "Invalid number of operands!");
1853    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1854    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1855  }
1856
1857  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1858    assert(N == 2 && "Invalid number of operands!");
1859    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1860    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1861    Inst.addOperand(MCOperand::CreateImm(Val));
1862  }
1863
1864  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1865    assert(N == 2 && "Invalid number of operands!");
1866    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1867    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1868    Inst.addOperand(MCOperand::CreateImm(Val));
1869  }
1870
1871  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1872    assert(N == 2 && "Invalid number of operands!");
1873    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1874    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1875    Inst.addOperand(MCOperand::CreateImm(Val));
1876  }
1877
1878  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1879    assert(N == 2 && "Invalid number of operands!");
1880    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1881    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1882    Inst.addOperand(MCOperand::CreateImm(Val));
1883  }
1884
1885  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1886    assert(N == 1 && "Invalid number of operands!");
1887    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1888    assert(CE && "non-constant post-idx-imm8 operand!");
1889    int Imm = CE->getValue();
1890    bool isAdd = Imm >= 0;
1891    if (Imm == INT32_MIN) Imm = 0;
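    // The magnitude goes in the low bits, with the add/sub flag in bit 8.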
1892    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1893    Inst.addOperand(MCOperand::CreateImm(Imm));
1894  }
1895
1896  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1897    assert(N == 1 && "Invalid number of operands!");
1898    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1899    assert(CE && "non-constant post-idx-imm8s4 operand!");
1900    int Imm = CE->getValue();
1901    bool isAdd = Imm >= 0;
1902    if (Imm == INT32_MIN) Imm = 0;
1903    // Immediate is scaled by 4.
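    // As above, bit 8 holds the add/sub flag; e.g., #-32 becomes 8 with bit 8 clear.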
1904    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1905    Inst.addOperand(MCOperand::CreateImm(Imm));
1906  }
1907
1908  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1909    assert(N == 2 && "Invalid number of operands!");
1910    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1911    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1912  }
1913
1914  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1915    assert(N == 2 && "Invalid number of operands!");
1916    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1917    // The sign, shift type, and shift amount are encoded in a single operand
1918    // using the AM2 encoding helpers.
1919    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1920    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1921                                     PostIdxReg.ShiftTy);
1922    Inst.addOperand(MCOperand::CreateImm(Imm));
1923  }
1924
1925  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1926    assert(N == 1 && "Invalid number of operands!");
1927    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1928  }
1929
1930  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1931    assert(N == 1 && "Invalid number of operands!");
1932    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1933  }
1934
1935  void addVecListOperands(MCInst &Inst, unsigned N) const {
1936    assert(N == 1 && "Invalid number of operands!");
1937    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1938  }
1939
1940  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1941    assert(N == 2 && "Invalid number of operands!");
1942    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1943    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1944  }
1945
1946  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1947    assert(N == 1 && "Invalid number of operands!");
1948    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1949  }
1950
1951  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1952    assert(N == 1 && "Invalid number of operands!");
1953    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1954  }
1955
1956  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1957    assert(N == 1 && "Invalid number of operands!");
1958    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1959  }
1960
1961  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1962    assert(N == 1 && "Invalid number of operands!");
1963    // The immediate encodes the type of constant as well as the value.
1964    // Mask in that this is an i8 splat.
1965    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1966    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1967  }
1968
1969  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1970    assert(N == 1 && "Invalid number of operands!");
1971    // The immediate encodes the type of constant as well as the value.
1972    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1973    unsigned Value = CE->getValue();
1974    if (Value >= 256)
1975      Value = (Value >> 8) | 0xa00;
1976    else
1977      Value |= 0x800;
1978    Inst.addOperand(MCOperand::CreateImm(Value));
1979  }
1980
1981  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1982    assert(N == 1 && "Invalid number of operands!");
1983    // The immediate encodes the type of constant as well as the value.
1984    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1985    unsigned Value = CE->getValue();
1986    if (Value >= 256 && Value <= 0xff00)
1987      Value = (Value >> 8) | 0x200;
1988    else if (Value > 0xffff && Value <= 0xff0000)
1989      Value = (Value >> 16) | 0x400;
1990    else if (Value > 0xffffff)
1991      Value = (Value >> 24) | 0x600;
1992    Inst.addOperand(MCOperand::CreateImm(Value));
1993  }
1994
1995  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1996    assert(N == 1 && "Invalid number of operands!");
1997    // The immediate encodes the type of constant as well as the value.
1998    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1999    unsigned Value = CE->getValue();
2000    if (Value >= 256 && Value <= 0xffff)
2001      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2002    else if (Value > 0xffff && Value <= 0xffffff)
2003      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2004    else if (Value > 0xffffff)
2005      Value = (Value >> 24) | 0x600;
2006    Inst.addOperand(MCOperand::CreateImm(Value));
2007  }
2008
2009  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2010    assert(N == 1 && "Invalid number of operands!");
2011    // The immediate encodes the type of constant as well as the value.
2012    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2013    unsigned Value = ~CE->getValue();
2014    if (Value >= 256 && Value <= 0xffff)
2015      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2016    else if (Value > 0xffff && Value <= 0xffffff)
2017      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2018    else if (Value > 0xffffff)
2019      Value = (Value >> 24) | 0x600;
2020    Inst.addOperand(MCOperand::CreateImm(Value));
2021  }
2022
2023  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2024    assert(N == 1 && "Invalid number of operands!");
2025    // The immediate encodes the type of constant as well as the value.
2026    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2027    uint64_t Value = CE->getValue();
2028    unsigned Imm = 0;
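    // Each byte of a valid i64 splat constant is 0x00 or 0xff, so collapse it
    // to one bit per byte; e.g., 0x00ff00ff00ff00ff becomes 0x55.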
2029    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2030      Imm |= (Value & 1) << i;
2031    }
2032    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2033  }
2034
2035  virtual void print(raw_ostream &OS) const;
2036
2037  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2038    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2039    Op->ITMask.Mask = Mask;
2040    Op->StartLoc = S;
2041    Op->EndLoc = S;
2042    return Op;
2043  }
2044
2045  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2046    ARMOperand *Op = new ARMOperand(k_CondCode);
2047    Op->CC.Val = CC;
2048    Op->StartLoc = S;
2049    Op->EndLoc = S;
2050    return Op;
2051  }
2052
2053  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2054    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2055    Op->Cop.Val = CopVal;
2056    Op->StartLoc = S;
2057    Op->EndLoc = S;
2058    return Op;
2059  }
2060
2061  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2062    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2063    Op->Cop.Val = CopVal;
2064    Op->StartLoc = S;
2065    Op->EndLoc = S;
2066    return Op;
2067  }
2068
2069  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2070    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2071    Op->Cop.Val = Val;
2072    Op->StartLoc = S;
2073    Op->EndLoc = E;
2074    return Op;
2075  }
2076
2077  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2078    ARMOperand *Op = new ARMOperand(k_CCOut);
2079    Op->Reg.RegNum = RegNum;
2080    Op->StartLoc = S;
2081    Op->EndLoc = S;
2082    return Op;
2083  }
2084
2085  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2086    ARMOperand *Op = new ARMOperand(k_Token);
2087    Op->Tok.Data = Str.data();
2088    Op->Tok.Length = Str.size();
2089    Op->StartLoc = S;
2090    Op->EndLoc = S;
2091    return Op;
2092  }
2093
2094  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2095    ARMOperand *Op = new ARMOperand(k_Register);
2096    Op->Reg.RegNum = RegNum;
2097    Op->StartLoc = S;
2098    Op->EndLoc = E;
2099    return Op;
2100  }
2101
2102  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2103                                           unsigned SrcReg,
2104                                           unsigned ShiftReg,
2105                                           unsigned ShiftImm,
2106                                           SMLoc S, SMLoc E) {
2107    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2108    Op->RegShiftedReg.ShiftTy = ShTy;
2109    Op->RegShiftedReg.SrcReg = SrcReg;
2110    Op->RegShiftedReg.ShiftReg = ShiftReg;
2111    Op->RegShiftedReg.ShiftImm = ShiftImm;
2112    Op->StartLoc = S;
2113    Op->EndLoc = E;
2114    return Op;
2115  }
2116
2117  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2118                                            unsigned SrcReg,
2119                                            unsigned ShiftImm,
2120                                            SMLoc S, SMLoc E) {
2121    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2122    Op->RegShiftedImm.ShiftTy = ShTy;
2123    Op->RegShiftedImm.SrcReg = SrcReg;
2124    Op->RegShiftedImm.ShiftImm = ShiftImm;
2125    Op->StartLoc = S;
2126    Op->EndLoc = E;
2127    return Op;
2128  }
2129
2130  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2131                                   SMLoc S, SMLoc E) {
2132    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2133    Op->ShifterImm.isASR = isASR;
2134    Op->ShifterImm.Imm = Imm;
2135    Op->StartLoc = S;
2136    Op->EndLoc = E;
2137    return Op;
2138  }
2139
2140  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2141    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2142    Op->RotImm.Imm = Imm;
2143    Op->StartLoc = S;
2144    Op->EndLoc = E;
2145    return Op;
2146  }
2147
2148  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2149                                    SMLoc S, SMLoc E) {
2150    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2151    Op->Bitfield.LSB = LSB;
2152    Op->Bitfield.Width = Width;
2153    Op->StartLoc = S;
2154    Op->EndLoc = E;
2155    return Op;
2156  }
2157
2158  static ARMOperand *
2159  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2160                SMLoc StartLoc, SMLoc EndLoc) {
2161    KindTy Kind = k_RegisterList;
2162
2163    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2164      Kind = k_DPRRegisterList;
2165    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2166             contains(Regs.front().first))
2167      Kind = k_SPRRegisterList;
2168
2169    ARMOperand *Op = new ARMOperand(Kind);
2170    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2171           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2172      Op->Registers.push_back(I->first);
2173    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2174    Op->StartLoc = StartLoc;
2175    Op->EndLoc = EndLoc;
2176    return Op;
2177  }
2178
2179  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2180                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2181    ARMOperand *Op = new ARMOperand(k_VectorList);
2182    Op->VectorList.RegNum = RegNum;
2183    Op->VectorList.Count = Count;
2184    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2185    Op->StartLoc = S;
2186    Op->EndLoc = E;
2187    return Op;
2188  }
2189
2190  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2191                                              bool isDoubleSpaced,
2192                                              SMLoc S, SMLoc E) {
2193    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2194    Op->VectorList.RegNum = RegNum;
2195    Op->VectorList.Count = Count;
2196    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2197    Op->StartLoc = S;
2198    Op->EndLoc = E;
2199    return Op;
2200  }
2201
2202  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2203                                             unsigned Index,
2204                                             bool isDoubleSpaced,
2205                                             SMLoc S, SMLoc E) {
2206    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2207    Op->VectorList.RegNum = RegNum;
2208    Op->VectorList.Count = Count;
2209    Op->VectorList.LaneIndex = Index;
2210    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2211    Op->StartLoc = S;
2212    Op->EndLoc = E;
2213    return Op;
2214  }
2215
2216  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2217                                       MCContext &Ctx) {
2218    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2219    Op->VectorIndex.Val = Idx;
2220    Op->StartLoc = S;
2221    Op->EndLoc = E;
2222    return Op;
2223  }
2224
2225  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2226    ARMOperand *Op = new ARMOperand(k_Immediate);
2227    Op->Imm.Val = Val;
2228    Op->StartLoc = S;
2229    Op->EndLoc = E;
2230    return Op;
2231  }
2232
2233  static ARMOperand *CreateMem(unsigned BaseRegNum,
2234                               const MCConstantExpr *OffsetImm,
2235                               unsigned OffsetRegNum,
2236                               ARM_AM::ShiftOpc ShiftType,
2237                               unsigned ShiftImm,
2238                               unsigned Alignment,
2239                               bool isNegative,
2240                               SMLoc S, SMLoc E) {
2241    ARMOperand *Op = new ARMOperand(k_Memory);
2242    Op->Memory.BaseRegNum = BaseRegNum;
2243    Op->Memory.OffsetImm = OffsetImm;
2244    Op->Memory.OffsetRegNum = OffsetRegNum;
2245    Op->Memory.ShiftType = ShiftType;
2246    Op->Memory.ShiftImm = ShiftImm;
2247    Op->Memory.Alignment = Alignment;
2248    Op->Memory.isNegative = isNegative;
2249    Op->StartLoc = S;
2250    Op->EndLoc = E;
2251    return Op;
2252  }
2253
2254  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2255                                      ARM_AM::ShiftOpc ShiftTy,
2256                                      unsigned ShiftImm,
2257                                      SMLoc S, SMLoc E) {
2258    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2259    Op->PostIdxReg.RegNum = RegNum;
2260    Op->PostIdxReg.isAdd = isAdd;
2261    Op->PostIdxReg.ShiftTy = ShiftTy;
2262    Op->PostIdxReg.ShiftImm = ShiftImm;
2263    Op->StartLoc = S;
2264    Op->EndLoc = E;
2265    return Op;
2266  }
2267
2268  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2269    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2270    Op->MBOpt.Val = Opt;
2271    Op->StartLoc = S;
2272    Op->EndLoc = S;
2273    return Op;
2274  }
2275
2276  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2277    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2278    Op->IFlags.Val = IFlags;
2279    Op->StartLoc = S;
2280    Op->EndLoc = S;
2281    return Op;
2282  }
2283
2284  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2285    ARMOperand *Op = new ARMOperand(k_MSRMask);
2286    Op->MMask.Val = MMask;
2287    Op->StartLoc = S;
2288    Op->EndLoc = S;
2289    return Op;
2290  }
2291};
2292
2293} // end anonymous namespace.
2294
2295void ARMOperand::print(raw_ostream &OS) const {
2296  switch (Kind) {
2297  case k_CondCode:
2298    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2299    break;
2300  case k_CCOut:
2301    OS << "<ccout " << getReg() << ">";
2302    break;
2303  case k_ITCondMask: {
2304    static const char *MaskStr[] = {
2305      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2306      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2307    };
2308    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2309    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2310    break;
2311  }
2312  case k_CoprocNum:
2313    OS << "<coprocessor number: " << getCoproc() << ">";
2314    break;
2315  case k_CoprocReg:
2316    OS << "<coprocessor register: " << getCoproc() << ">";
2317    break;
2318  case k_CoprocOption:
2319    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2320    break;
2321  case k_MSRMask:
2322    OS << "<mask: " << getMSRMask() << ">";
2323    break;
2324  case k_Immediate:
2325    getImm()->print(OS);
2326    break;
2327  case k_MemBarrierOpt:
2328    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2329    break;
2330  case k_Memory:
2331    OS << "<memory "
2332       << " base:" << Memory.BaseRegNum;
2333    OS << ">";
2334    break;
2335  case k_PostIndexRegister:
2336    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2337       << PostIdxReg.RegNum;
2338    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2339      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2340         << PostIdxReg.ShiftImm;
2341    OS << ">";
2342    break;
2343  case k_ProcIFlags: {
2344    OS << "<ARM_PROC::";
2345    unsigned IFlags = getProcIFlags();
2346    for (int i=2; i >= 0; --i)
2347      if (IFlags & (1 << i))
2348        OS << ARM_PROC::IFlagsToString(1 << i);
2349    OS << ">";
2350    break;
2351  }
2352  case k_Register:
2353    OS << "<register " << getReg() << ">";
2354    break;
2355  case k_ShifterImmediate:
2356    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2357       << " #" << ShifterImm.Imm << ">";
2358    break;
2359  case k_ShiftedRegister:
2360    OS << "<so_reg_reg "
2361       << RegShiftedReg.SrcReg << " "
2362       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2363       << " " << RegShiftedReg.ShiftReg << ">";
2364    break;
2365  case k_ShiftedImmediate:
2366    OS << "<so_reg_imm "
2367       << RegShiftedImm.SrcReg << " "
2368       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2369       << " #" << RegShiftedImm.ShiftImm << ">";
2370    break;
2371  case k_RotateImmediate:
2372    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2373    break;
2374  case k_BitfieldDescriptor:
2375    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2376       << ", width: " << Bitfield.Width << ">";
2377    break;
2378  case k_RegisterList:
2379  case k_DPRRegisterList:
2380  case k_SPRRegisterList: {
2381    OS << "<register_list ";
2382
2383    const SmallVectorImpl<unsigned> &RegList = getRegList();
2384    for (SmallVectorImpl<unsigned>::const_iterator
2385           I = RegList.begin(), E = RegList.end(); I != E; ) {
2386      OS << *I;
2387      if (++I < E) OS << ", ";
2388    }
2389
2390    OS << ">";
2391    break;
2392  }
2393  case k_VectorList:
2394    OS << "<vector_list " << VectorList.Count << " * "
2395       << VectorList.RegNum << ">";
2396    break;
2397  case k_VectorListAllLanes:
2398    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2399       << VectorList.RegNum << ">";
2400    break;
2401  case k_VectorListIndexed:
2402    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2403       << VectorList.Count << " * " << VectorList.RegNum << ">";
2404    break;
2405  case k_Token:
2406    OS << "'" << getToken() << "'";
2407    break;
2408  case k_VectorIndex:
2409    OS << "<vectorindex " << getVectorIndex() << ">";
2410    break;
2411  }
2412}
2413
2414/// @name Auto-generated Match Functions
2415/// {
2416
2417static unsigned MatchRegisterName(StringRef Name);
2418
2419/// }
2420
2421bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2422                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2423  StartLoc = Parser.getTok().getLoc();
2424  RegNo = tryParseRegister();
2425  EndLoc = Parser.getTok().getLoc();
2426
2427  return (RegNo == (unsigned)-1);
2428}
2429
2430/// Try to parse a register name.  The token must be an Identifier when called,
2431/// and if it is a register name the token is eaten and the register number is
2432/// returned.  Otherwise return -1.
2433///
2434int ARMAsmParser::tryParseRegister() {
2435  const AsmToken &Tok = Parser.getTok();
2436  if (Tok.isNot(AsmToken::Identifier)) return -1;
2437
2438  std::string lowerCase = Tok.getString().lower();
2439  unsigned RegNum = MatchRegisterName(lowerCase);
2440  if (!RegNum) {
2441    RegNum = StringSwitch<unsigned>(lowerCase)
2442      .Case("r13", ARM::SP)
2443      .Case("r14", ARM::LR)
2444      .Case("r15", ARM::PC)
2445      .Case("ip", ARM::R12)
2446      // Additional register name aliases for 'gas' compatibility.
2447      .Case("a1", ARM::R0)
2448      .Case("a2", ARM::R1)
2449      .Case("a3", ARM::R2)
2450      .Case("a4", ARM::R3)
2451      .Case("v1", ARM::R4)
2452      .Case("v2", ARM::R5)
2453      .Case("v3", ARM::R6)
2454      .Case("v4", ARM::R7)
2455      .Case("v5", ARM::R8)
2456      .Case("v6", ARM::R9)
2457      .Case("v7", ARM::R10)
2458      .Case("v8", ARM::R11)
2459      .Case("sb", ARM::R9)
2460      .Case("sl", ARM::R10)
2461      .Case("fp", ARM::R11)
2462      .Default(0);
2463  }
2464  if (!RegNum) {
2465    // Check for aliases registered via .req. Canonicalize to lower case.
2466    // That's more consistent since register names are case insensitive, and
2467    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2468    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2469    // If no match, return failure.
2470    if (Entry == RegisterReqs.end())
2471      return -1;
2472    Parser.Lex(); // Eat identifier token.
2473    return Entry->getValue();
2474  }
2475
2476  Parser.Lex(); // Eat identifier token.
2477
2478  return RegNum;
2479}
2480
2481// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2482// If a recoverable error occurs, return 1. If an irrecoverable error
2483// occurs, return -1. An irrecoverable error is one where tokens have been
2484// consumed in the process of trying to parse the shifter (i.e., when it is
2485// indeed a shifter operand, but malformed).
2486int ARMAsmParser::tryParseShiftRegister(
2487                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2488  SMLoc S = Parser.getTok().getLoc();
2489  const AsmToken &Tok = Parser.getTok();
2490  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2491
2492  std::string lowerCase = Tok.getString().lower();
2493  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2494      .Case("asl", ARM_AM::lsl)
2495      .Case("lsl", ARM_AM::lsl)
2496      .Case("lsr", ARM_AM::lsr)
2497      .Case("asr", ARM_AM::asr)
2498      .Case("ror", ARM_AM::ror)
2499      .Case("rrx", ARM_AM::rrx)
2500      .Default(ARM_AM::no_shift);
2501
2502  if (ShiftTy == ARM_AM::no_shift)
2503    return 1;
2504
2505  Parser.Lex(); // Eat the operator.
2506
2507  // The source register for the shift has already been added to the
2508  // operand list, so we need to pop it off and combine it into the shifted
2509  // register operand instead.
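  // For example, in "mov r0, r1, lsl #2" the r1 operand already in the list is
  // popped and re-added as a single register-plus-shift operand.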
2510  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2511  if (!PrevOp->isReg())
2512    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2513  int SrcReg = PrevOp->getReg();
2514  int64_t Imm = 0;
2515  int ShiftReg = 0;
2516  if (ShiftTy == ARM_AM::rrx) {
2517    // RRX doesn't have an explicit shift amount. The encoder expects
2518    // the shift register to be the same as the source register. Seems odd,
2519    // but OK.
2520    ShiftReg = SrcReg;
2521  } else {
2522    // Figure out if this is shifted by a constant or a register (for non-RRX).
2523    if (Parser.getTok().is(AsmToken::Hash) ||
2524        Parser.getTok().is(AsmToken::Dollar)) {
2525      Parser.Lex(); // Eat hash.
2526      SMLoc ImmLoc = Parser.getTok().getLoc();
2527      const MCExpr *ShiftExpr = 0;
2528      if (getParser().ParseExpression(ShiftExpr)) {
2529        Error(ImmLoc, "invalid immediate shift value");
2530        return -1;
2531      }
2532      // The expression must be evaluatable as an immediate.
2533      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2534      if (!CE) {
2535        Error(ImmLoc, "invalid immediate shift value");
2536        return -1;
2537      }
2538      // Range check the immediate.
2539      // lsl, ror: 0 <= imm <= 31
2540      // lsr, asr: 0 <= imm <= 32
2541      Imm = CE->getValue();
2542      if (Imm < 0 ||
2543          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2544          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2545        Error(ImmLoc, "immediate shift value out of range");
2546        return -1;
2547      }
2548      // Shift by zero is a nop. Always send it through as lsl.
2549      // ('as' compatibility)
2550      if (Imm == 0)
2551        ShiftTy = ARM_AM::lsl;
2552    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2553      ShiftReg = tryParseRegister();
2554      SMLoc L = Parser.getTok().getLoc();
2555      if (ShiftReg == -1) {
2556        Error (L, "expected immediate or register in shift operand");
2557        return -1;
2558      }
2559    } else {
2560      Error (Parser.getTok().getLoc(),
2561                    "expected immediate or register in shift operand");
2562      return -1;
2563    }
2564  }
2565
2566  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2567    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2568                                                         ShiftReg, Imm,
2569                                               S, Parser.getTok().getLoc()));
2570  else
2571    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2572                                               S, Parser.getTok().getLoc()));
2573
2574  return 0;
2575}
2576
2577
2578/// Try to parse a register name.  The token must be an Identifier when called.
2579/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2580/// if there is a "writeback". 'true' if it's not a register.
2581///
2582/// TODO this is likely to change to allow different register types and or to
2583/// parse for a specific register type.
2584bool ARMAsmParser::
2585tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2586  SMLoc S = Parser.getTok().getLoc();
2587  int RegNo = tryParseRegister();
2588  if (RegNo == -1)
2589    return true;
2590
2591  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2592
2593  const AsmToken &ExclaimTok = Parser.getTok();
2594  if (ExclaimTok.is(AsmToken::Exclaim)) {
2595    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2596                                               ExclaimTok.getLoc()));
2597    Parser.Lex(); // Eat exclaim token
2598    return false;
2599  }
2600
2601  // Also check for an index operand. This is only legal for vector registers,
2602  // but that'll get caught OK in operand matching, so we don't need to
2603  // explicitly filter everything else out here.
2604  if (Parser.getTok().is(AsmToken::LBrac)) {
2605    SMLoc SIdx = Parser.getTok().getLoc();
2606    Parser.Lex(); // Eat left bracket token.
2607
2608    const MCExpr *ImmVal;
2609    if (getParser().ParseExpression(ImmVal))
2610      return true;
2611    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2612    if (!MCE)
2613      return TokError("immediate value expected for vector index");
2614
2615    SMLoc E = Parser.getTok().getLoc();
2616    if (Parser.getTok().isNot(AsmToken::RBrac))
2617      return Error(E, "']' expected");
2618
2619    Parser.Lex(); // Eat right bracket token.
2620
2621    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2622                                                     SIdx, E,
2623                                                     getContext()));
2624  }
2625
2626  return false;
2627}
2628
2629/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2630/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2631/// "c5", ...
2632static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2633  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2634  // but efficient.
2635  switch (Name.size()) {
2636  default: return -1;
2637  case 2:
2638    if (Name[0] != CoprocOp)
2639      return -1;
2640    switch (Name[1]) {
2641    default:  return -1;
2642    case '0': return 0;
2643    case '1': return 1;
2644    case '2': return 2;
2645    case '3': return 3;
2646    case '4': return 4;
2647    case '5': return 5;
2648    case '6': return 6;
2649    case '7': return 7;
2650    case '8': return 8;
2651    case '9': return 9;
2652    }
2653  case 3:
2654    if (Name[0] != CoprocOp || Name[1] != '1')
2655      return -1;
2656    switch (Name[2]) {
2657    default:  return -1;
2658    case '0': return 10;
2659    case '1': return 11;
2660    case '2': return 12;
2661    case '3': return 13;
2662    case '4': return 14;
2663    case '5': return 15;
2664    }
2665  }
2666}
2667
2668/// parseITCondCode - Try to parse a condition code for an IT instruction.
2669ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2670parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2671  SMLoc S = Parser.getTok().getLoc();
2672  const AsmToken &Tok = Parser.getTok();
2673  if (!Tok.is(AsmToken::Identifier))
2674    return MatchOperand_NoMatch;
2675  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2676    .Case("eq", ARMCC::EQ)
2677    .Case("ne", ARMCC::NE)
2678    .Case("hs", ARMCC::HS)
2679    .Case("cs", ARMCC::HS)
2680    .Case("lo", ARMCC::LO)
2681    .Case("cc", ARMCC::LO)
2682    .Case("mi", ARMCC::MI)
2683    .Case("pl", ARMCC::PL)
2684    .Case("vs", ARMCC::VS)
2685    .Case("vc", ARMCC::VC)
2686    .Case("hi", ARMCC::HI)
2687    .Case("ls", ARMCC::LS)
2688    .Case("ge", ARMCC::GE)
2689    .Case("lt", ARMCC::LT)
2690    .Case("gt", ARMCC::GT)
2691    .Case("le", ARMCC::LE)
2692    .Case("al", ARMCC::AL)
2693    .Default(~0U);
2694  if (CC == ~0U)
2695    return MatchOperand_NoMatch;
2696  Parser.Lex(); // Eat the token.
2697
2698  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2699
2700  return MatchOperand_Success;
2701}
2702
2703/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2704/// token must be an Identifier when called, and if it is a coprocessor
2705/// number, the token is eaten and the operand is added to the operand list.
2706ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2707parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2708  SMLoc S = Parser.getTok().getLoc();
2709  const AsmToken &Tok = Parser.getTok();
2710  if (Tok.isNot(AsmToken::Identifier))
2711    return MatchOperand_NoMatch;
2712
2713  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2714  if (Num == -1)
2715    return MatchOperand_NoMatch;
2716
2717  Parser.Lex(); // Eat identifier token.
2718  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2719  return MatchOperand_Success;
2720}
2721
2722/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2723/// token must be an Identifier when called, and if it is a coprocessor
2724/// register, the token is eaten and the operand is added to the operand list.
2725ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2726parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2727  SMLoc S = Parser.getTok().getLoc();
2728  const AsmToken &Tok = Parser.getTok();
2729  if (Tok.isNot(AsmToken::Identifier))
2730    return MatchOperand_NoMatch;
2731
2732  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2733  if (Reg == -1)
2734    return MatchOperand_NoMatch;
2735
2736  Parser.Lex(); // Eat identifier token.
2737  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2738  return MatchOperand_Success;
2739}
2740
2741/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2742/// coproc_option : '{' imm0_255 '}'
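/// For example, the trailing "{4}" in an unindexed LDC such as
/// "ldc p12, c4, [r0], {4}".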
2743ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2744parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2745  SMLoc S = Parser.getTok().getLoc();
2746
2747  // If this isn't a '{', this isn't a coprocessor immediate operand.
2748  if (Parser.getTok().isNot(AsmToken::LCurly))
2749    return MatchOperand_NoMatch;
2750  Parser.Lex(); // Eat the '{'
2751
2752  const MCExpr *Expr;
2753  SMLoc Loc = Parser.getTok().getLoc();
2754  if (getParser().ParseExpression(Expr)) {
2755    Error(Loc, "illegal expression");
2756    return MatchOperand_ParseFail;
2757  }
2758  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2759  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2760    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2761    return MatchOperand_ParseFail;
2762  }
2763  int Val = CE->getValue();
2764
2765  // Check for and consume the closing '}'
2766  if (Parser.getTok().isNot(AsmToken::RCurly))
2767    return MatchOperand_ParseFail;
2768  SMLoc E = Parser.getTok().getLoc();
2769  Parser.Lex(); // Eat the '}'
2770
2771  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2772  return MatchOperand_Success;
2773}
2774
2775// For register list parsing, we need to map from raw GPR register numbering
2776// to the enumeration values. The enumeration values aren't sorted by
2777// register number due to our using "sp", "lr" and "pc" as canonical names.
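// For example, getNextRegister(ARM::R12) returns ARM::SP rather than relying
// on enumeration order.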
2778static unsigned getNextRegister(unsigned Reg) {
2779  // If this is a GPR, we need to do it manually, otherwise we can rely
2780  // on the sort ordering of the enumeration since the other reg-classes
2781  // are sane.
2782  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2783    return Reg + 1;
2784  switch(Reg) {
2785  default: llvm_unreachable("Invalid GPR number!");
2786  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2787  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2788  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2789  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2790  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2791  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2792  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2793  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2794  }
2795}
2796
2797// Return the low-subreg of a given Q register.
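// For example, Q1 covers D2/D3, so getDRegFromQReg(ARM::Q1) returns ARM::D2.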
2798static unsigned getDRegFromQReg(unsigned QReg) {
2799  switch (QReg) {
2800  default: llvm_unreachable("expected a Q register!");
2801  case ARM::Q0:  return ARM::D0;
2802  case ARM::Q1:  return ARM::D2;
2803  case ARM::Q2:  return ARM::D4;
2804  case ARM::Q3:  return ARM::D6;
2805  case ARM::Q4:  return ARM::D8;
2806  case ARM::Q5:  return ARM::D10;
2807  case ARM::Q6:  return ARM::D12;
2808  case ARM::Q7:  return ARM::D14;
2809  case ARM::Q8:  return ARM::D16;
2810  case ARM::Q9:  return ARM::D18;
2811  case ARM::Q10: return ARM::D20;
2812  case ARM::Q11: return ARM::D22;
2813  case ARM::Q12: return ARM::D24;
2814  case ARM::Q13: return ARM::D26;
2815  case ARM::Q14: return ARM::D28;
2816  case ARM::Q15: return ARM::D30;
2817  }
2818}
2819
2820/// Parse a register list.
2821bool ARMAsmParser::
2822parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2823  assert(Parser.getTok().is(AsmToken::LCurly) &&
2824         "Token is not a Left Curly Brace");
2825  SMLoc S = Parser.getTok().getLoc();
2826  Parser.Lex(); // Eat '{' token.
2827  SMLoc RegLoc = Parser.getTok().getLoc();
2828
2829  // Check the first register in the list to see what register class
2830  // this is a list of.
2831  int Reg = tryParseRegister();
2832  if (Reg == -1)
2833    return Error(RegLoc, "register expected");
2834
2835  // The reglist instructions have at most 16 registers, so reserve
2836  // space for that many.
2837  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2838
2839  // Allow Q regs and just interpret them as the two D sub-registers.
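  // For example, a list written as {q0} is handled as {d0, d1}.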
2840  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2841    Reg = getDRegFromQReg(Reg);
2842    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2843    ++Reg;
2844  }
2845  const MCRegisterClass *RC;
2846  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2847    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2848  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2849    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2850  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2851    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2852  else
2853    return Error(RegLoc, "invalid register in register list");
2854
2855  // Store the register.
2856  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2857
2858  // This starts immediately after the first register token in the list,
2859  // so we can see either a comma or a minus (range separator) as a legal
2860  // next token.
2861  while (Parser.getTok().is(AsmToken::Comma) ||
2862         Parser.getTok().is(AsmToken::Minus)) {
2863    if (Parser.getTok().is(AsmToken::Minus)) {
2864      Parser.Lex(); // Eat the minus.
2865      SMLoc EndLoc = Parser.getTok().getLoc();
2866      int EndReg = tryParseRegister();
2867      if (EndReg == -1)
2868        return Error(EndLoc, "register expected");
2869      // Allow Q regs and just interpret them as the two D sub-registers.
2870      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2871        EndReg = getDRegFromQReg(EndReg) + 1;
2872      // If the register is the same as the start reg, there's nothing
2873      // more to do.
2874      if (Reg == EndReg)
2875        continue;
2876      // The register must be in the same register class as the first.
2877      if (!RC->contains(EndReg))
2878        return Error(EndLoc, "invalid register in register list");
2879      // Ranges must go from low to high.
2880      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2881        return Error(EndLoc, "bad range in register list");
2882
2883      // Add all the registers in the range to the register list.
2884      while (Reg != EndReg) {
2885        Reg = getNextRegister(Reg);
2886        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2887      }
2888      continue;
2889    }
2890    Parser.Lex(); // Eat the comma.
2891    RegLoc = Parser.getTok().getLoc();
2892    int OldReg = Reg;
2893    const AsmToken RegTok = Parser.getTok();
2894    Reg = tryParseRegister();
2895    if (Reg == -1)
2896      return Error(RegLoc, "register expected");
2897    // Allow Q regs and just interpret them as the two D sub-registers.
2898    bool isQReg = false;
2899    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2900      Reg = getDRegFromQReg(Reg);
2901      isQReg = true;
2902    }
2903    // The register must be in the same register class as the first.
2904    if (!RC->contains(Reg))
2905      return Error(RegLoc, "invalid register in register list");
2906    // List must be monotonically increasing.
2907    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2908      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2909        Warning(RegLoc, "register list not in ascending order");
2910      else
2911        return Error(RegLoc, "register list not in ascending order");
2912    }
2913    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2914      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2915              ") in register list");
2916      continue;
2917    }
2918    // VFP register lists must also be contiguous.
2919    // It's OK to use the enumeration values directly here, as the
2920    // VFP register classes have the enum sorted properly.
2921    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2922        Reg != OldReg + 1)
2923      return Error(RegLoc, "non-contiguous register range");
2924    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2925    if (isQReg)
2926      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2927  }
2928
2929  SMLoc E = Parser.getTok().getLoc();
2930  if (Parser.getTok().isNot(AsmToken::RCurly))
2931    return Error(E, "'}' expected");
2932  Parser.Lex(); // Eat '}' token.
2933
2934  // Push the register list operand.
2935  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2936
2937  // The ARM system instruction variants for LDM/STM have a '^' token here.
2938  if (Parser.getTok().is(AsmToken::Caret)) {
2939    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2940    Parser.Lex(); // Eat '^' token.
2941  }
2942
2943  return false;
2944}
2945
2946// Helper function to parse the lane index for vector lists.
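// e.g. the "[2]" in "d3[2]", or "[]" for the all-lanes form "d3[]".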
2947ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2948parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2949  Index = 0; // Always return a defined index value.
2950  if (Parser.getTok().is(AsmToken::LBrac)) {
2951    Parser.Lex(); // Eat the '['.
2952    if (Parser.getTok().is(AsmToken::RBrac)) {
2953      // "Dn[]" is the 'all lanes' syntax.
2954      LaneKind = AllLanes;
2955      Parser.Lex(); // Eat the ']'.
2956      return MatchOperand_Success;
2957    }
2958
2959    // There's an optional '#' token here. Normally there wouldn't be, but
2960    // inline assembly puts one in, and it's friendly to accept that.
2961    if (Parser.getTok().is(AsmToken::Hash))
2962      Parser.Lex(); // Eat the '#'
2963
2964    const MCExpr *LaneIndex;
2965    SMLoc Loc = Parser.getTok().getLoc();
2966    if (getParser().ParseExpression(LaneIndex)) {
2967      Error(Loc, "illegal expression");
2968      return MatchOperand_ParseFail;
2969    }
2970    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2971    if (!CE) {
2972      Error(Loc, "lane index must be empty or an integer");
2973      return MatchOperand_ParseFail;
2974    }
2975    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2976      Error(Parser.getTok().getLoc(), "']' expected");
2977      return MatchOperand_ParseFail;
2978    }
2979    Parser.Lex(); // Eat the ']'.
2980    int64_t Val = CE->getValue();
2981
2982    // FIXME: Make this range check context sensitive for .8, .16, .32.
2983    if (Val < 0 || Val > 7) {
2984      Error(Parser.getTok().getLoc(), "lane index out of range");
2985      return MatchOperand_ParseFail;
2986    }
2987    Index = Val;
2988    LaneKind = IndexedLane;
2989    return MatchOperand_Success;
2990  }
2991  LaneKind = NoLanes;
2992  return MatchOperand_Success;
2993}
2994
2995// Parse a vector register list.
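// e.g. "{d0, d1, d2, d3}", "{d0[], d1[]}", "{d0[1], d2[1]}" (double-spaced),
// or, as a GNU as extension, a bare "d0" or "q0".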
2996ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2997parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2998  VectorLaneTy LaneKind;
2999  unsigned LaneIndex;
3000  SMLoc S = Parser.getTok().getLoc();
3001  // As an extension (to match gas), support a plain D register or Q register
3002  // (without enclosing curly braces) as a single or double entry list,
3003  // respectively.
3004  if (Parser.getTok().is(AsmToken::Identifier)) {
3005    int Reg = tryParseRegister();
3006    if (Reg == -1)
3007      return MatchOperand_NoMatch;
3008    SMLoc E = Parser.getTok().getLoc();
3009    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3010      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3011      if (Res != MatchOperand_Success)
3012        return Res;
3013      switch (LaneKind) {
3014      case NoLanes:
3015        E = Parser.getTok().getLoc();
3016        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3017        break;
3018      case AllLanes:
3019        E = Parser.getTok().getLoc();
3020        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3021                                                                S, E));
3022        break;
3023      case IndexedLane:
3024        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3025                                                               LaneIndex,
3026                                                               false, S, E));
3027        break;
3028      }
3029      return MatchOperand_Success;
3030    }
3031    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3032      Reg = getDRegFromQReg(Reg);
3033      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3034      if (Res != MatchOperand_Success)
3035        return Res;
3036      switch (LaneKind) {
3037      case NoLanes:
3038        E = Parser.getTok().getLoc();
3039        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3040                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3041        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3042        break;
3043      case AllLanes:
3044        E = Parser.getTok().getLoc();
3045        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3046                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3047        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3048                                                                S, E));
3049        break;
3050      case IndexedLane:
3051        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3052                                                               LaneIndex,
3053                                                               false, S, E));
3054        break;
3055      }
3056      return MatchOperand_Success;
3057    }
3058    Error(S, "vector register expected");
3059    return MatchOperand_ParseFail;
3060  }
3061
3062  if (Parser.getTok().isNot(AsmToken::LCurly))
3063    return MatchOperand_NoMatch;
3064
3065  Parser.Lex(); // Eat '{' token.
3066  SMLoc RegLoc = Parser.getTok().getLoc();
3067
3068  int Reg = tryParseRegister();
3069  if (Reg == -1) {
3070    Error(RegLoc, "register expected");
3071    return MatchOperand_ParseFail;
3072  }
3073  unsigned Count = 1;
3074  int Spacing = 0;
3075  unsigned FirstReg = Reg;
3076  // The list is of D registers, but we also allow Q regs and just interpret
3077  // them as the two D sub-registers.
3078  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3079    FirstReg = Reg = getDRegFromQReg(Reg);
3080    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3081                 // it's ambiguous with four-register single spaced.
3082    ++Reg;
3083    ++Count;
3084  }
3085  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3086    return MatchOperand_ParseFail;
3087
3088  while (Parser.getTok().is(AsmToken::Comma) ||
3089         Parser.getTok().is(AsmToken::Minus)) {
3090    if (Parser.getTok().is(AsmToken::Minus)) {
3091      if (!Spacing)
3092        Spacing = 1; // Register range implies a single spaced list.
3093      else if (Spacing == 2) {
3094        Error(Parser.getTok().getLoc(),
3095              "sequential registers in double spaced list");
3096        return MatchOperand_ParseFail;
3097      }
3098      Parser.Lex(); // Eat the minus.
3099      SMLoc EndLoc = Parser.getTok().getLoc();
3100      int EndReg = tryParseRegister();
3101      if (EndReg == -1) {
3102        Error(EndLoc, "register expected");
3103        return MatchOperand_ParseFail;
3104      }
3105      // Allow Q regs and just interpret them as the two D sub-registers.
3106      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3107        EndReg = getDRegFromQReg(EndReg) + 1;
3108      // If the register is the same as the start reg, there's nothing
3109      // more to do.
3110      if (Reg == EndReg)
3111        continue;
3112      // The register must be in the same register class as the first.
3113      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3114        Error(EndLoc, "invalid register in register list");
3115        return MatchOperand_ParseFail;
3116      }
3117      // Ranges must go from low to high.
3118      if (Reg > EndReg) {
3119        Error(EndLoc, "bad range in register list");
3120        return MatchOperand_ParseFail;
3121      }
3122      // Parse the lane specifier if present.
3123      VectorLaneTy NextLaneKind;
3124      unsigned NextLaneIndex;
3125      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3126        return MatchOperand_ParseFail;
3127      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3128        Error(EndLoc, "mismatched lane index in register list");
3129        return MatchOperand_ParseFail;
3130      }
3131      EndLoc = Parser.getTok().getLoc();
3132
3133      // Add all the registers in the range to the register list.
3134      Count += EndReg - Reg;
3135      Reg = EndReg;
3136      continue;
3137    }
3138    Parser.Lex(); // Eat the comma.
3139    RegLoc = Parser.getTok().getLoc();
3140    int OldReg = Reg;
3141    Reg = tryParseRegister();
3142    if (Reg == -1) {
3143      Error(RegLoc, "register expected");
3144      return MatchOperand_ParseFail;
3145    }
3146    // Vector register lists must be contiguous.
3147    // It's OK to use the enumeration values directly here, as the
3148    // VFP register classes have the enum sorted properly.
3149    //
3150    // The list is of D registers, but we also allow Q regs and just interpret
3151    // them as the two D sub-registers.
3152    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3153      if (!Spacing)
3154        Spacing = 1; // Register range implies a single spaced list.
3155      else if (Spacing == 2) {
3156        Error(RegLoc,
3157              "invalid register in double-spaced list (must be 'D' register)");
3158        return MatchOperand_ParseFail;
3159      }
3160      Reg = getDRegFromQReg(Reg);
3161      if (Reg != OldReg + 1) {
3162        Error(RegLoc, "non-contiguous register range");
3163        return MatchOperand_ParseFail;
3164      }
3165      ++Reg;
3166      Count += 2;
3167      // Parse the lane specifier if present.
3168      VectorLaneTy NextLaneKind;
3169      unsigned NextLaneIndex;
3170      SMLoc EndLoc = Parser.getTok().getLoc();
3171      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3172        return MatchOperand_ParseFail;
3173      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3174        Error(EndLoc, "mismatched lane index in register list");
3175        return MatchOperand_ParseFail;
3176      }
3177      continue;
3178    }
3179    // Normal D register.
3180    // Figure out the register spacing (single or double) of the list if
3181    // we don't know it already.
3182    if (!Spacing)
3183      Spacing = 1 + (Reg == OldReg + 2);
3184
3185    // Just check that it's contiguous and keep going.
3186    if (Reg != OldReg + Spacing) {
3187      Error(RegLoc, "non-contiguous register range");
3188      return MatchOperand_ParseFail;
3189    }
3190    ++Count;
3191    // Parse the lane specifier if present.
3192    VectorLaneTy NextLaneKind;
3193    unsigned NextLaneIndex;
3194    SMLoc EndLoc = Parser.getTok().getLoc();
3195    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3196      return MatchOperand_ParseFail;
3197    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3198      Error(EndLoc, "mismatched lane index in register list");
3199      return MatchOperand_ParseFail;
3200    }
3201  }
3202
3203  SMLoc E = Parser.getTok().getLoc();
3204  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3205    Error(E, "'}' expected");
3206    return MatchOperand_ParseFail;
3207  }
3208  Parser.Lex(); // Eat '}' token.
3209
3210  switch (LaneKind) {
3211  case NoLanes:
3212    // Two-register operands have been converted to the
3213    // composite register classes.
3214    if (Count == 2) {
3215      const MCRegisterClass *RC = (Spacing == 1) ?
3216        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3217        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3218      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3219    }
3220
3221    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3222                                                    (Spacing == 2), S, E));
3223    break;
3224  case AllLanes:
3225    // Two-register operands have been converted to the
3226    // composite register classes.
3227    if (Count == 2) {
3228      const MCRegisterClass *RC = (Spacing == 1) ?
3229        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3230        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3231      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3232    }
3233    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3234                                                            (Spacing == 2),
3235                                                            S, E));
3236    break;
3237  case IndexedLane:
3238    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3239                                                           LaneIndex,
3240                                                           (Spacing == 2),
3241                                                           S, E));
3242    break;
3243  }
3244  return MatchOperand_Success;
3245}
3246
3247/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
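/// e.g. the "ish" in "dmb ish" or the "sy" in "dsb sy".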
3248ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3249parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3250  SMLoc S = Parser.getTok().getLoc();
3251  const AsmToken &Tok = Parser.getTok();
3252  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3253  StringRef OptStr = Tok.getString();
3254
3255  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3256    .Case("sy",    ARM_MB::SY)
3257    .Case("st",    ARM_MB::ST)
3258    .Case("sh",    ARM_MB::ISH)
3259    .Case("ish",   ARM_MB::ISH)
3260    .Case("shst",  ARM_MB::ISHST)
3261    .Case("ishst", ARM_MB::ISHST)
3262    .Case("nsh",   ARM_MB::NSH)
3263    .Case("un",    ARM_MB::NSH)
3264    .Case("nshst", ARM_MB::NSHST)
3265    .Case("unst",  ARM_MB::NSHST)
3266    .Case("osh",   ARM_MB::OSH)
3267    .Case("oshst", ARM_MB::OSHST)
3268    .Default(~0U);
3269
3270  if (Opt == ~0U)
3271    return MatchOperand_NoMatch;
3272
3273  Parser.Lex(); // Eat identifier token.
3274  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3275  return MatchOperand_Success;
3276}
3277
3278/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
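/// e.g. the "if" in "cpsie if" or the "aif" in "cpsid aif".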
3279ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3280parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3281  SMLoc S = Parser.getTok().getLoc();
3282  const AsmToken &Tok = Parser.getTok();
3283  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3284  StringRef IFlagsStr = Tok.getString();
3285
3286  // An iflags string of "none" is interpreted to mean that none of the AIF
3287  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3288  unsigned IFlags = 0;
3289  if (IFlagsStr != "none") {
3290    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3291      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3292        .Case("a", ARM_PROC::A)
3293        .Case("i", ARM_PROC::I)
3294        .Case("f", ARM_PROC::F)
3295        .Default(~0U);
3296
3297      // If some specific iflag is already set, it means that some letter is
3298      // present more than once, which is not acceptable.
3299      if (Flag == ~0U || (IFlags & Flag))
3300        return MatchOperand_NoMatch;
3301
3302      IFlags |= Flag;
3303    }
3304  }
3305
3306  Parser.Lex(); // Eat identifier token.
3307  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3308  return MatchOperand_Success;
3309}
3310
3311/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
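/// e.g. the "cpsr_fc" in "msr cpsr_fc, r0", the "apsr_nzcvq" in
/// "msr apsr_nzcvq, r0", or an M-class system register such as "primask".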
3312ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3313parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3314  SMLoc S = Parser.getTok().getLoc();
3315  const AsmToken &Tok = Parser.getTok();
3316  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3317  StringRef Mask = Tok.getString();
3318
3319  if (isMClass()) {
3320    // See ARMv6-M 10.1.1
3321    std::string Name = Mask.lower();
3322    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3323      .Case("apsr", 0)
3324      .Case("iapsr", 1)
3325      .Case("eapsr", 2)
3326      .Case("xpsr", 3)
3327      .Case("ipsr", 5)
3328      .Case("epsr", 6)
3329      .Case("iepsr", 7)
3330      .Case("msp", 8)
3331      .Case("psp", 9)
3332      .Case("primask", 16)
3333      .Case("basepri", 17)
3334      .Case("basepri_max", 18)
3335      .Case("faultmask", 19)
3336      .Case("control", 20)
3337      .Default(~0U);
3338
3339    if (FlagsVal == ~0U)
3340      return MatchOperand_NoMatch;
3341
3342    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3343      // basepri, basepri_max and faultmask only valid for V7m.
3344      return MatchOperand_NoMatch;
3345
3346    Parser.Lex(); // Eat identifier token.
3347    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3348    return MatchOperand_Success;
3349  }
3350
3351  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3352  size_t Start = 0, Next = Mask.find('_');
3353  StringRef Flags = "";
3354  std::string SpecReg = Mask.slice(Start, Next).lower();
3355  if (Next != StringRef::npos)
3356    Flags = Mask.slice(Next+1, Mask.size());
3357
3358  // FlagsVal contains the complete mask:
3359  // 3-0: Mask
3360  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3361  unsigned FlagsVal = 0;
3362
3363  if (SpecReg == "apsr") {
3364    FlagsVal = StringSwitch<unsigned>(Flags)
3365    .Case("nzcvq",  0x8) // same as CPSR_f
3366    .Case("g",      0x4) // same as CPSR_s
3367    .Case("nzcvqg", 0xc) // same as CPSR_fs
3368    .Default(~0U);
3369
3370    if (FlagsVal == ~0U) {
3371      if (!Flags.empty())
3372        return MatchOperand_NoMatch;
3373      else
3374        FlagsVal = 8; // No flag
3375    }
3376  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3377    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3378    if (Flags == "all" || Flags == "")
3379      Flags = "fc";
3380    for (int i = 0, e = Flags.size(); i != e; ++i) {
3381      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3382      .Case("c", 1)
3383      .Case("x", 2)
3384      .Case("s", 4)
3385      .Case("f", 8)
3386      .Default(~0U);
3387
3388      // If some specific flag is already set, it means that some letter is
3389      // present more than once, which is not acceptable.
3390      if (Flag == ~0U || (FlagsVal & Flag))
3391        return MatchOperand_NoMatch;
3392      FlagsVal |= Flag;
3393    }
3394  } else // No match for special register.
3395    return MatchOperand_NoMatch;
3396
3397  // Special register without flags is NOT equivalent to "fc" flags.
3398  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3399  // two lines would enable gas compatibility at the expense of breaking
3400  // round-tripping.
3401  //
3402  // if (!FlagsVal)
3403  //  FlagsVal = 0x9;
3404
3405  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3406  if (SpecReg == "spsr")
3407    FlagsVal |= 16;
3408
3409  Parser.Lex(); // Eat identifier token.
3410  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3411  return MatchOperand_Success;
3412}
3413
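/// parsePKHImm - Parse the shift operand of a PKH instruction, e.g. the
/// "lsl #8" in "pkhbt r0, r1, r2, lsl #8" or the "asr #16" in
/// "pkhtb r0, r1, r2, asr #16".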
3414ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3415parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3416            int Low, int High) {
3417  const AsmToken &Tok = Parser.getTok();
3418  if (Tok.isNot(AsmToken::Identifier)) {
3419    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3420    return MatchOperand_ParseFail;
3421  }
3422  StringRef ShiftName = Tok.getString();
3423  std::string LowerOp = Op.lower();
3424  std::string UpperOp = Op.upper();
3425  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3426    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3427    return MatchOperand_ParseFail;
3428  }
3429  Parser.Lex(); // Eat shift type token.
3430
3431  // There must be a '#' and a shift amount.
3432  if (Parser.getTok().isNot(AsmToken::Hash) &&
3433      Parser.getTok().isNot(AsmToken::Dollar)) {
3434    Error(Parser.getTok().getLoc(), "'#' expected");
3435    return MatchOperand_ParseFail;
3436  }
3437  Parser.Lex(); // Eat hash token.
3438
3439  const MCExpr *ShiftAmount;
3440  SMLoc Loc = Parser.getTok().getLoc();
3441  if (getParser().ParseExpression(ShiftAmount)) {
3442    Error(Loc, "illegal expression");
3443    return MatchOperand_ParseFail;
3444  }
3445  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3446  if (!CE) {
3447    Error(Loc, "constant expression expected");
3448    return MatchOperand_ParseFail;
3449  }
3450  int Val = CE->getValue();
3451  if (Val < Low || Val > High) {
3452    Error(Loc, "immediate value out of range");
3453    return MatchOperand_ParseFail;
3454  }
3455
3456  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3457
3458  return MatchOperand_Success;
3459}
3460
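/// parseSetEndImm - Parse the endianness operand of a SETEND instruction,
/// i.e. "be" or "le" as in "setend be".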
3461ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3462parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3463  const AsmToken &Tok = Parser.getTok();
3464  SMLoc S = Tok.getLoc();
3465  if (Tok.isNot(AsmToken::Identifier)) {
3466    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3467    return MatchOperand_ParseFail;
3468  }
3469  int Val = StringSwitch<int>(Tok.getString())
3470    .Case("be", 1)
3471    .Case("le", 0)
3472    .Default(-1);
3473  Parser.Lex(); // Eat the token.
3474
3475  if (Val == -1) {
3476    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3477    return MatchOperand_ParseFail;
3478  }
3479  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3480                                                                  getContext()),
3481                                           S, Parser.getTok().getLoc()));
3482  return MatchOperand_Success;
3483}
3484
3485/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3486/// instructions. Legal values are:
3487///     lsl #n  'n' in [0,31]
3488///     asr #n  'n' in [1,32]
3489///             n == 32 encoded as n == 0.
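/// e.g. the "lsl #4" in "ssat r0, #8, r1, lsl #4".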
3490ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3491parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3492  const AsmToken &Tok = Parser.getTok();
3493  SMLoc S = Tok.getLoc();
3494  if (Tok.isNot(AsmToken::Identifier)) {
3495    Error(S, "shift operator 'asr' or 'lsl' expected");
3496    return MatchOperand_ParseFail;
3497  }
3498  StringRef ShiftName = Tok.getString();
3499  bool isASR;
3500  if (ShiftName == "lsl" || ShiftName == "LSL")
3501    isASR = false;
3502  else if (ShiftName == "asr" || ShiftName == "ASR")
3503    isASR = true;
3504  else {
3505    Error(S, "shift operator 'asr' or 'lsl' expected");
3506    return MatchOperand_ParseFail;
3507  }
3508  Parser.Lex(); // Eat the operator.
3509
3510  // A '#' and a shift amount.
3511  if (Parser.getTok().isNot(AsmToken::Hash) &&
3512      Parser.getTok().isNot(AsmToken::Dollar)) {
3513    Error(Parser.getTok().getLoc(), "'#' expected");
3514    return MatchOperand_ParseFail;
3515  }
3516  Parser.Lex(); // Eat hash token.
3517
3518  const MCExpr *ShiftAmount;
3519  SMLoc E = Parser.getTok().getLoc();
3520  if (getParser().ParseExpression(ShiftAmount)) {
3521    Error(E, "malformed shift expression");
3522    return MatchOperand_ParseFail;
3523  }
3524  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3525  if (!CE) {
3526    Error(E, "shift amount must be an immediate");
3527    return MatchOperand_ParseFail;
3528  }
3529
3530  int64_t Val = CE->getValue();
3531  if (isASR) {
3532    // Shift amount must be in [1,32]
3533    if (Val < 1 || Val > 32) {
3534      Error(E, "'asr' shift amount must be in range [1,32]");
3535      return MatchOperand_ParseFail;
3536    }
3537    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3538    if (isThumb() && Val == 32) {
3539      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3540      return MatchOperand_ParseFail;
3541    }
3542    if (Val == 32) Val = 0;
3543  } else {
3544    // Shift amount must be in [0,31]
3545    if (Val < 0 || Val > 31) {
3546      Error(E, "'lsl' shift amount must be in range [0,31]");
3547      return MatchOperand_ParseFail;
3548    }
3549  }
3550
3551  E = Parser.getTok().getLoc();
3552  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3553
3554  return MatchOperand_Success;
3555}
3556
3557/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3558/// of instructions. Legal values are:
3559///     ror #n  'n' in {0, 8, 16, 24}
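/// e.g. the "ror #8" in "sxtb r0, r1, ror #8".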
3560ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3561parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3562  const AsmToken &Tok = Parser.getTok();
3563  SMLoc S = Tok.getLoc();
3564  if (Tok.isNot(AsmToken::Identifier))
3565    return MatchOperand_NoMatch;
3566  StringRef ShiftName = Tok.getString();
3567  if (ShiftName != "ror" && ShiftName != "ROR")
3568    return MatchOperand_NoMatch;
3569  Parser.Lex(); // Eat the operator.
3570
3571  // A '#' and a rotate amount.
3572  if (Parser.getTok().isNot(AsmToken::Hash) &&
3573      Parser.getTok().isNot(AsmToken::Dollar)) {
3574    Error(Parser.getTok().getLoc(), "'#' expected");
3575    return MatchOperand_ParseFail;
3576  }
3577  Parser.Lex(); // Eat hash token.
3578
3579  const MCExpr *ShiftAmount;
3580  SMLoc E = Parser.getTok().getLoc();
3581  if (getParser().ParseExpression(ShiftAmount)) {
3582    Error(E, "malformed rotate expression");
3583    return MatchOperand_ParseFail;
3584  }
3585  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3586  if (!CE) {
3587    Error(E, "rotate amount must be an immediate");
3588    return MatchOperand_ParseFail;
3589  }
3590
3591  int64_t Val = CE->getValue();
3592  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3593  // normally, zero is represented in asm by omitting the rotate operand
3594  // entirely.
3595  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3596    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3597    return MatchOperand_ParseFail;
3598  }
3599
3600  E = Parser.getTok().getLoc();
3601  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3602
3603  return MatchOperand_Success;
3604}
3605
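/// parseBitfield - Parse the lsb/width operand pair of a BFC/BFI instruction,
/// e.g. the "#8, #4" in "bfi r0, r1, #8, #4".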
3606ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3607parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3608  SMLoc S = Parser.getTok().getLoc();
3609  // The bitfield descriptor is really two operands, the LSB and the width.
3610  if (Parser.getTok().isNot(AsmToken::Hash) &&
3611      Parser.getTok().isNot(AsmToken::Dollar)) {
3612    Error(Parser.getTok().getLoc(), "'#' expected");
3613    return MatchOperand_ParseFail;
3614  }
3615  Parser.Lex(); // Eat hash token.
3616
3617  const MCExpr *LSBExpr;
3618  SMLoc E = Parser.getTok().getLoc();
3619  if (getParser().ParseExpression(LSBExpr)) {
3620    Error(E, "malformed immediate expression");
3621    return MatchOperand_ParseFail;
3622  }
3623  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3624  if (!CE) {
3625    Error(E, "'lsb' operand must be an immediate");
3626    return MatchOperand_ParseFail;
3627  }
3628
3629  int64_t LSB = CE->getValue();
3630  // The LSB must be in the range [0,31]
3631  if (LSB < 0 || LSB > 31) {
3632    Error(E, "'lsb' operand must be in the range [0,31]");
3633    return MatchOperand_ParseFail;
3634  }
3635  E = Parser.getTok().getLoc();
3636
3637  // Expect another immediate operand.
3638  if (Parser.getTok().isNot(AsmToken::Comma)) {
3639    Error(Parser.getTok().getLoc(), "too few operands");
3640    return MatchOperand_ParseFail;
3641  }
3642  Parser.Lex(); // Eat comma token.
3643  if (Parser.getTok().isNot(AsmToken::Hash) &&
3644      Parser.getTok().isNot(AsmToken::Dollar)) {
3645    Error(Parser.getTok().getLoc(), "'#' expected");
3646    return MatchOperand_ParseFail;
3647  }
3648  Parser.Lex(); // Eat hash token.
3649
3650  const MCExpr *WidthExpr;
3651  if (getParser().ParseExpression(WidthExpr)) {
3652    Error(E, "malformed immediate expression");
3653    return MatchOperand_ParseFail;
3654  }
3655  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3656  if (!CE) {
3657    Error(E, "'width' operand must be an immediate");
3658    return MatchOperand_ParseFail;
3659  }
3660
3661  int64_t Width = CE->getValue();
3662  // The width must be in the range [1,32-lsb]
3663  if (Width < 1 || Width > 32 - LSB) {
3664    Error(E, "'width' operand must be in the range [1,32-lsb]");
3665    return MatchOperand_ParseFail;
3666  }
3667  E = Parser.getTok().getLoc();
3668
3669  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3670
3671  return MatchOperand_Success;
3672}
3673
3674ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3675parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3676  // Check for a post-index addressing register operand. Specifically:
3677  // postidx_reg := '+' register {, shift}
3678  //              | '-' register {, shift}
3679  //              | register {, shift}
3680
3681  // This method must return MatchOperand_NoMatch without consuming any tokens
3682  // in the case where there is no match, as the other alternatives are tried
3683  // by other parse methods.
3684  AsmToken Tok = Parser.getTok();
3685  SMLoc S = Tok.getLoc();
3686  bool haveEaten = false;
3687  bool isAdd = true;
3688  int Reg = -1;
3689  if (Tok.is(AsmToken::Plus)) {
3690    Parser.Lex(); // Eat the '+' token.
3691    haveEaten = true;
3692  } else if (Tok.is(AsmToken::Minus)) {
3693    Parser.Lex(); // Eat the '-' token.
3694    isAdd = false;
3695    haveEaten = true;
3696  }
3697  if (Parser.getTok().is(AsmToken::Identifier))
3698    Reg = tryParseRegister();
3699  if (Reg == -1) {
3700    if (!haveEaten)
3701      return MatchOperand_NoMatch;
3702    Error(Parser.getTok().getLoc(), "register expected");
3703    return MatchOperand_ParseFail;
3704  }
3705  SMLoc E = Parser.getTok().getLoc();
3706
3707  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3708  unsigned ShiftImm = 0;
3709  if (Parser.getTok().is(AsmToken::Comma)) {
3710    Parser.Lex(); // Eat the ','.
3711    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3712      return MatchOperand_ParseFail;
3713  }
3714
3715  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3716                                                  ShiftImm, S, E));
3717
3718  return MatchOperand_Success;
3719}
3720
3721ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3722parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3723  // Check for a post-index addressing register operand. Specifically:
3724  // am3offset := '+' register
3725  //              | '-' register
3726  //              | register
3727  //              | # imm
3728  //              | # + imm
3729  //              | # - imm
3730
3731  // This method must return MatchOperand_NoMatch without consuming any tokens
3732  // in the case where there is no match, as the other alternatives are tried
3733  // by other parse methods.
3734  AsmToken Tok = Parser.getTok();
3735  SMLoc S = Tok.getLoc();
3736
3737  // Do immediates first, as we always parse those if we have a '#'.
3738  if (Parser.getTok().is(AsmToken::Hash) ||
3739      Parser.getTok().is(AsmToken::Dollar)) {
3740    Parser.Lex(); // Eat the '#'.
3741    // Explicitly look for a '-', as we need to encode negative zero
3742    // differently.
3743    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3744    const MCExpr *Offset;
3745    if (getParser().ParseExpression(Offset))
3746      return MatchOperand_ParseFail;
3747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3748    if (!CE) {
3749      Error(S, "constant expression expected");
3750      return MatchOperand_ParseFail;
3751    }
3752    SMLoc E = Tok.getLoc();
3753    // Negative zero is encoded as the flag value INT32_MIN.
3754    int32_t Val = CE->getValue();
3755    if (isNegative && Val == 0)
3756      Val = INT32_MIN;
3757
3758    Operands.push_back(
3759      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3760
3761    return MatchOperand_Success;
3762  }
3763
3764
3765  bool haveEaten = false;
3766  bool isAdd = true;
3767  int Reg = -1;
3768  if (Tok.is(AsmToken::Plus)) {
3769    Parser.Lex(); // Eat the '+' token.
3770    haveEaten = true;
3771  } else if (Tok.is(AsmToken::Minus)) {
3772    Parser.Lex(); // Eat the '-' token.
3773    isAdd = false;
3774    haveEaten = true;
3775  }
3776  if (Parser.getTok().is(AsmToken::Identifier))
3777    Reg = tryParseRegister();
3778  if (Reg == -1) {
3779    if (!haveEaten)
3780      return MatchOperand_NoMatch;
3781    Error(Parser.getTok().getLoc(), "register expected");
3782    return MatchOperand_ParseFail;
3783  }
3784  SMLoc E = Parser.getTok().getLoc();
3785
3786  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3787                                                  0, S, E));
3788
3789  return MatchOperand_Success;
3790}
3791
3792/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3793/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3794/// when they refer multiple MIOperands inside a single one.
3795bool ARMAsmParser::
3796cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3797             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3798  // Rt, Rt2
3799  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3800  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3801  // Create a writeback register dummy placeholder.
3802  Inst.addOperand(MCOperand::CreateReg(0));
3803  // addr
3804  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3805  // pred
3806  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3807  return true;
3808}
3809
3810/// cvtT2StrdPre - Convert parsed operands to MCInst.
3811/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3812/// when they refer multiple MIOperands inside a single one.
3813bool ARMAsmParser::
3814cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3815             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3816  // Create a writeback register dummy placeholder.
3817  Inst.addOperand(MCOperand::CreateReg(0));
3818  // Rt, Rt2
3819  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3820  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3821  // addr
3822  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3823  // pred
3824  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3825  return true;
3826}
3827
3828/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3829/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3830/// when they refer multiple MIOperands inside a single one.
3831bool ARMAsmParser::
3832cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3833                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3834  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3835
3836  // Create a writeback register dummy placeholder.
3837  Inst.addOperand(MCOperand::CreateImm(0));
3838
3839  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3840  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3841  return true;
3842}
3843
3844/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3845/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3846/// when they refer multiple MIOperands inside a single one.
3847bool ARMAsmParser::
3848cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3849                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3850  // Create a writeback register dummy placeholder.
3851  Inst.addOperand(MCOperand::CreateImm(0));
3852  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3853  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3854  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3855  return true;
3856}
3857
3858/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3859/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3860/// when they refer multiple MIOperands inside a single one.
3861bool ARMAsmParser::
3862cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3863                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3864  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3865
3866  // Create a writeback register dummy placeholder.
3867  Inst.addOperand(MCOperand::CreateImm(0));
3868
3869  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3870  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3871  return true;
3872}
3873
3874/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3875/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3876/// when they refer multiple MIOperands inside a single one.
3877bool ARMAsmParser::
3878cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3879                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3880  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3881
3882  // Create a writeback register dummy placeholder.
3883  Inst.addOperand(MCOperand::CreateImm(0));
3884
3885  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3886  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3887  return true;
3888}
3889
3890
3891/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3892/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3893/// when they refer multiple MIOperands inside a single one.
3894bool ARMAsmParser::
3895cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3896                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3897  // Create a writeback register dummy placeholder.
3898  Inst.addOperand(MCOperand::CreateImm(0));
3899  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3900  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3901  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3902  return true;
3903}
3904
3905/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3906/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3907/// when they refer multiple MIOperands inside a single one.
3908bool ARMAsmParser::
3909cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3910                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3911  // Create a writeback register dummy placeholder.
3912  Inst.addOperand(MCOperand::CreateImm(0));
3913  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3914  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3915  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3916  return true;
3917}
3918
3919/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3920/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3921/// when they refer multiple MIOperands inside a single one.
3922bool ARMAsmParser::
3923cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3924                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3925  // Create a writeback register dummy placeholder.
3926  Inst.addOperand(MCOperand::CreateImm(0));
3927  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3928  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3929  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3930  return true;
3931}
3932
3933/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3934/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3935/// when they refer multiple MIOperands inside a single one.
3936bool ARMAsmParser::
3937cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3938                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3939  // Rt
3940  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3941  // Create a writeback register dummy placeholder.
3942  Inst.addOperand(MCOperand::CreateImm(0));
3943  // addr
3944  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3945  // offset
3946  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3947  // pred
3948  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3949  return true;
3950}
3951
3952/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3953/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3954/// when they refer multiple MIOperands inside a single one.
3955bool ARMAsmParser::
3956cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3957                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3958  // Rt
3959  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3960  // Create a writeback register dummy placeholder.
3961  Inst.addOperand(MCOperand::CreateImm(0));
3962  // addr
3963  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3964  // offset
3965  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3966  // pred
3967  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3968  return true;
3969}
3970
3971/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3972/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3973/// when they refer multiple MIOperands inside a single one.
3974bool ARMAsmParser::
3975cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3976                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3977  // Create a writeback register dummy placeholder.
3978  Inst.addOperand(MCOperand::CreateImm(0));
3979  // Rt
3980  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3981  // addr
3982  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3983  // offset
3984  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3985  // pred
3986  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3987  return true;
3988}
3989
3990/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3991/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3992/// when they refer multiple MIOperands inside a single one.
3993bool ARMAsmParser::
3994cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3995                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3996  // Create a writeback register dummy placeholder.
3997  Inst.addOperand(MCOperand::CreateImm(0));
3998  // Rt
3999  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4000  // addr
4001  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4002  // offset
4003  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4004  // pred
4005  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4006  return true;
4007}
4008
4009/// cvtLdrdPre - Convert parsed operands to MCInst.
4010/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4011/// when they refer multiple MIOperands inside a single one.
4012bool ARMAsmParser::
4013cvtLdrdPre(MCInst &Inst, unsigned Opcode,
4014           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4015  // Rt, Rt2
4016  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4017  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4018  // Create a writeback register dummy placeholder.
4019  Inst.addOperand(MCOperand::CreateImm(0));
4020  // addr
4021  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4022  // pred
4023  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4024  return true;
4025}
4026
4027/// cvtStrdPre - Convert parsed operands to MCInst.
4028/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4029/// when they refer multiple MIOperands inside a single one.
4030bool ARMAsmParser::
4031cvtStrdPre(MCInst &Inst, unsigned Opcode,
4032           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4033  // Create a writeback register dummy placeholder.
4034  Inst.addOperand(MCOperand::CreateImm(0));
4035  // Rt, Rt2
4036  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4037  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4038  // addr
4039  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4040  // pred
4041  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4042  return true;
4043}
4044
4045/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4046/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4047/// when they refer multiple MIOperands inside a single one.
4048bool ARMAsmParser::
4049cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4050                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4051  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4052  // Create a writeback register dummy placeholder.
4053  Inst.addOperand(MCOperand::CreateImm(0));
4054  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4055  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4056  return true;
4057}
4058
4059/// cvtThumbMultiply - Convert parsed operands to MCInst.
4060/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4061/// when they refer multiple MIOperands inside a single one.
4062bool ARMAsmParser::
4063cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4064           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4065  // The second source operand must be the same register as the destination
4066  // operand.
4067  if (Operands.size() == 6 &&
4068      (((ARMOperand*)Operands[3])->getReg() !=
4069       ((ARMOperand*)Operands[5])->getReg()) &&
4070      (((ARMOperand*)Operands[3])->getReg() !=
4071       ((ARMOperand*)Operands[4])->getReg())) {
4072    Error(Operands[3]->getStartLoc(),
4073          "destination register must match source register");
4074    return false;
4075  }
4076  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4077  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4078  // If we have a three-operand form, make sure to set Rn to be the operand
4079  // that isn't the same as Rd.
4080  unsigned RegOp = 4;
4081  if (Operands.size() == 6 &&
4082      ((ARMOperand*)Operands[4])->getReg() ==
4083        ((ARMOperand*)Operands[3])->getReg())
4084    RegOp = 5;
4085  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4086  Inst.addOperand(Inst.getOperand(0));
4087  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4088
4089  return true;
4090}
4091
4092bool ARMAsmParser::
4093cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4094              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4095  // Vd
4096  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4097  // Create a writeback register dummy placeholder.
4098  Inst.addOperand(MCOperand::CreateImm(0));
4099  // Vn
4100  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4101  // pred
4102  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4103  return true;
4104}
4105
4106bool ARMAsmParser::
4107cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4108                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4109  // Vd
4110  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4111  // Create a writeback register dummy placeholder.
4112  Inst.addOperand(MCOperand::CreateImm(0));
4113  // Vn
4114  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4115  // Vm
4116  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4117  // pred
4118  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4119  return true;
4120}
4121
4122bool ARMAsmParser::
4123cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4124              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4125  // Create a writeback register dummy placeholder.
4126  Inst.addOperand(MCOperand::CreateImm(0));
4127  // Vn
4128  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4129  // Vt
4130  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4131  // pred
4132  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4133  return true;
4134}
4135
4136bool ARMAsmParser::
4137cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4138                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4139  // Create a writeback register dummy placeholder.
4140  Inst.addOperand(MCOperand::CreateImm(0));
4141  // Vn
4142  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4143  // Vm
4144  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4145  // Vt
4146  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4147  // pred
4148  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4149  return true;
4150}
4151
4152/// Parse an ARM memory expression. Return false if successful, true on
4153/// error.  The first token must be a '[' when called.
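/// Accepted forms include "[r0]", "[r0, #-4]", "[r0, r1, lsl #2]", and
/// alignment-specifier forms such as "[r0, :128]".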
4154bool ARMAsmParser::
4155parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4156  SMLoc S, E;
4157  assert(Parser.getTok().is(AsmToken::LBrac) &&
4158         "Token is not a Left Bracket");
4159  S = Parser.getTok().getLoc();
4160  Parser.Lex(); // Eat left bracket token.
4161
4162  const AsmToken &BaseRegTok = Parser.getTok();
4163  int BaseRegNum = tryParseRegister();
4164  if (BaseRegNum == -1)
4165    return Error(BaseRegTok.getLoc(), "register expected");
4166
4167  // The next token must either be a comma or a closing bracket.
4168  const AsmToken &Tok = Parser.getTok();
4169  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4170    return Error(Tok.getLoc(), "malformed memory operand");
4171
4172  if (Tok.is(AsmToken::RBrac)) {
4173    E = Tok.getLoc();
4174    Parser.Lex(); // Eat right bracket token.
4175
4176    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4177                                             0, 0, false, S, E));
4178
4179    // If there's a pre-indexing writeback marker, '!', just add it as a token
4180    // operand. It's rather odd, but syntactically valid.
4181    if (Parser.getTok().is(AsmToken::Exclaim)) {
4182      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4183      Parser.Lex(); // Eat the '!'.
4184    }
4185
4186    return false;
4187  }
4188
4189  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4190  Parser.Lex(); // Eat the comma.
4191
4192  // If we have a ':', it's an alignment specifier.
4193  if (Parser.getTok().is(AsmToken::Colon)) {
4194    Parser.Lex(); // Eat the ':'.
4195    E = Parser.getTok().getLoc();
4196
4197    const MCExpr *Expr;
4198    if (getParser().ParseExpression(Expr))
4199      return true;
4200
4201    // The expression has to be a constant. Memory references with relocations
4202    // don't come through here, as they use the <label> forms of the relevant
4203    // instructions.
4204    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4205    if (!CE)
4206      return Error (E, "constant expression expected");
4207
4208    unsigned Align = 0;
4209    switch (CE->getValue()) {
4210    default:
4211      return Error(E,
4212                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4213    case 16:  Align = 2; break;
4214    case 32:  Align = 4; break;
4215    case 64:  Align = 8; break;
4216    case 128: Align = 16; break;
4217    case 256: Align = 32; break;
4218    }
4219
4220    // Now we should have the closing ']'
4221    E = Parser.getTok().getLoc();
4222    if (Parser.getTok().isNot(AsmToken::RBrac))
4223      return Error(E, "']' expected");
4224    Parser.Lex(); // Eat right bracket token.
4225
4226    // Don't worry about range checking the value here. That's handled by
4227    // the is*() predicates.
4228    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4229                                             ARM_AM::no_shift, 0, Align,
4230                                             false, S, E));
4231
4232    // If there's a pre-indexing writeback marker, '!', just add it as a token
4233    // operand.
4234    if (Parser.getTok().is(AsmToken::Exclaim)) {
4235      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4236      Parser.Lex(); // Eat the '!'.
4237    }
4238
4239    return false;
4240  }
4241
4242  // If we have a '#', it's an immediate offset, else assume it's a register
4243  // offset. Be friendly and also accept a plain integer (without a leading
4244  // hash) for gas compatibility.
4245  if (Parser.getTok().is(AsmToken::Hash) ||
4246      Parser.getTok().is(AsmToken::Dollar) ||
4247      Parser.getTok().is(AsmToken::Integer)) {
4248    if (Parser.getTok().isNot(AsmToken::Integer))
4249      Parser.Lex(); // Eat the '#'.
4250    E = Parser.getTok().getLoc();
4251
4252    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4253    const MCExpr *Offset;
4254    if (getParser().ParseExpression(Offset))
4255      return true;
4256
4257    // The expression has to be a constant. Memory references with relocations
4258    // don't come through here, as they use the <label> forms of the relevant
4259    // instructions.
4260    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4261    if (!CE)
4262      return Error (E, "constant expression expected");
4263
4264    // If the constant was #-0, represent it as INT32_MIN.
4265    int32_t Val = CE->getValue();
4266    if (isNegative && Val == 0)
4267      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4268
4269    // Now we should have the closing ']'
4270    E = Parser.getTok().getLoc();
4271    if (Parser.getTok().isNot(AsmToken::RBrac))
4272      return Error(E, "']' expected");
4273    Parser.Lex(); // Eat right bracket token.
4274
4275    // Don't worry about range checking the value here. That's handled by
4276    // the is*() predicates.
4277    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4278                                             ARM_AM::no_shift, 0, 0,
4279                                             false, S, E));
4280
4281    // If there's a pre-indexing writeback marker, '!', just add it as a token
4282    // operand.
4283    if (Parser.getTok().is(AsmToken::Exclaim)) {
4284      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4285      Parser.Lex(); // Eat the '!'.
4286    }
4287
4288    return false;
4289  }
4290
4291  // The register offset is optionally preceded by a '+' or '-'
4292  bool isNegative = false;
4293  if (Parser.getTok().is(AsmToken::Minus)) {
4294    isNegative = true;
4295    Parser.Lex(); // Eat the '-'.
4296  } else if (Parser.getTok().is(AsmToken::Plus)) {
4297    // Nothing to do.
4298    Parser.Lex(); // Eat the '+'.
4299  }
4300
4301  E = Parser.getTok().getLoc();
4302  int OffsetRegNum = tryParseRegister();
4303  if (OffsetRegNum == -1)
4304    return Error(E, "register expected");
4305
4306  // If there's a shift operator, handle it.
4307  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4308  unsigned ShiftImm = 0;
4309  if (Parser.getTok().is(AsmToken::Comma)) {
4310    Parser.Lex(); // Eat the ','.
4311    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4312      return true;
4313  }
4314
4315  // Now we should have the closing ']'
4316  E = Parser.getTok().getLoc();
4317  if (Parser.getTok().isNot(AsmToken::RBrac))
4318    return Error(E, "']' expected");
4319  Parser.Lex(); // Eat right bracket token.
4320
4321  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4322                                           ShiftType, ShiftImm, 0, isNegative,
4323                                           S, E));
4324
4325  // If there's a pre-indexing writeback marker, '!', just add it as a token
4326  // operand.
4327  if (Parser.getTok().is(AsmToken::Exclaim)) {
4328    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4329    Parser.Lex(); // Eat the '!'.
4330  }
4331
4332  return false;
4333}
4334
4335/// parseMemRegOffsetShift - parse one of these two forms:
4336///   ( lsl | lsr | asr | ror ) # shift_amount
4337///   rrx
4338/// Returns false if a shift was parsed successfully, true on error.
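/// For illustration, this handles the shift portion of operands such as
/// "[r0, r1, lsl #2]" or "[r0, r1, rrx]".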
4339bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4340                                          unsigned &Amount) {
4341  SMLoc Loc = Parser.getTok().getLoc();
4342  const AsmToken &Tok = Parser.getTok();
4343  if (Tok.isNot(AsmToken::Identifier))
4344    return true;
4345  StringRef ShiftName = Tok.getString();
4346  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4347      ShiftName == "asl" || ShiftName == "ASL")
4348    St = ARM_AM::lsl;
4349  else if (ShiftName == "lsr" || ShiftName == "LSR")
4350    St = ARM_AM::lsr;
4351  else if (ShiftName == "asr" || ShiftName == "ASR")
4352    St = ARM_AM::asr;
4353  else if (ShiftName == "ror" || ShiftName == "ROR")
4354    St = ARM_AM::ror;
4355  else if (ShiftName == "rrx" || ShiftName == "RRX")
4356    St = ARM_AM::rrx;
4357  else
4358    return Error(Loc, "illegal shift operator");
4359  Parser.Lex(); // Eat shift type token.
4360
4361  // rrx stands alone.
4362  Amount = 0;
4363  if (St != ARM_AM::rrx) {
4364    Loc = Parser.getTok().getLoc();
4365    // A '#' and a shift amount.
4366    const AsmToken &HashTok = Parser.getTok();
4367    if (HashTok.isNot(AsmToken::Hash) &&
4368        HashTok.isNot(AsmToken::Dollar))
4369      return Error(HashTok.getLoc(), "'#' expected");
4370    Parser.Lex(); // Eat hash token.
4371
4372    const MCExpr *Expr;
4373    if (getParser().ParseExpression(Expr))
4374      return true;
4375    // Range check the immediate.
4376    // lsl, ror: 0 <= imm <= 31
4377    // lsr, asr: 0 <= imm <= 32
4378    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4379    if (!CE)
4380      return Error(Loc, "shift amount must be an immediate");
4381    int64_t Imm = CE->getValue();
4382    if (Imm < 0 ||
4383        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4384        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4385      return Error(Loc, "immediate shift value out of range");
4386    Amount = Imm;
4387  }
4388
4389  return false;
4390}
4391
4392/// parseFPImm - A floating point immediate expression operand.
4393ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4394parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4395  // Anything that can accept a floating point constant as an operand
4396  // needs to go through here, as the regular ParseExpression is
4397  // integer only.
4398  //
4399  // This routine still creates a generic Immediate operand, containing
4400  // a bitcast of the 64-bit floating point value. The various operands
4401  // that accept floats can check whether the value is valid for them
4402  // via the standard is*() predicates.
4403
4404  SMLoc S = Parser.getTok().getLoc();
4405
4406  if (Parser.getTok().isNot(AsmToken::Hash) &&
4407      Parser.getTok().isNot(AsmToken::Dollar))
4408    return MatchOperand_NoMatch;
4409
4410  // Disambiguate the VMOV forms that can accept an FP immediate.
4411  // vmov.f32 <sreg>, #imm
4412  // vmov.f64 <dreg>, #imm
4413  // vmov.f32 <dreg>, #imm  @ vector f32x2
4414  // vmov.f32 <qreg>, #imm  @ vector f32x4
4415  //
4416  // There are also the NEON VMOV instructions which expect an
4417  // integer constant. Make sure we don't try to parse an FPImm
4418  // for these:
4419  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4420  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4421  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4422                           TyOp->getToken() != ".f64"))
4423    return MatchOperand_NoMatch;
4424
4425  Parser.Lex(); // Eat the '#'.
4426
4427  // Handle negation, as that still comes through as a separate token.
4428  bool isNegative = false;
4429  if (Parser.getTok().is(AsmToken::Minus)) {
4430    isNegative = true;
4431    Parser.Lex();
4432  }
4433  const AsmToken &Tok = Parser.getTok();
4434  SMLoc Loc = Tok.getLoc();
4435  if (Tok.is(AsmToken::Real)) {
4436    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4437    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4438    // If we had a '-' in front, toggle the sign bit.
4439    IntVal ^= (uint64_t)isNegative << 31;
4440    Parser.Lex(); // Eat the token.
4441    Operands.push_back(ARMOperand::CreateImm(
4442          MCConstantExpr::Create(IntVal, getContext()),
4443          S, Parser.getTok().getLoc()));
4444    return MatchOperand_Success;
4445  }
4446  // Also handle plain integers. Instructions which allow floating point
4447  // immediates also allow a raw encoded 8-bit value.
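  // For example, "#112" (0x70) is the raw 8-bit encoding of 1.0.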
4448  if (Tok.is(AsmToken::Integer)) {
4449    int64_t Val = Tok.getIntVal();
4450    Parser.Lex(); // Eat the token.
4451    if (Val > 255 || Val < 0) {
4452      Error(Loc, "encoded floating point value out of range");
4453      return MatchOperand_ParseFail;
4454    }
4455    double RealVal = ARM_AM::getFPImmFloat(Val);
4456    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4457    Operands.push_back(ARMOperand::CreateImm(
4458        MCConstantExpr::Create(Val, getContext()), S,
4459        Parser.getTok().getLoc()));
4460    return MatchOperand_Success;
4461  }
4462
4463  Error(Loc, "invalid floating point immediate");
4464  return MatchOperand_ParseFail;
4465}
4466
4467/// Parse an ARM instruction operand. For now this parses the operand regardless
4468/// of the mnemonic.
4469bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4470                                StringRef Mnemonic) {
4471  SMLoc S, E;
4472
4473  // Check if the current operand has a custom associated parser, if so, try to
4474  // custom parse the operand, or fallback to the general approach.
4475  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4476  if (ResTy == MatchOperand_Success)
4477    return false;
4478  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4479  // there was a match, but an error occurred, in which case, just return that
4480  // the operand parsing failed.
4481  if (ResTy == MatchOperand_ParseFail)
4482    return true;
4483
4484  switch (getLexer().getKind()) {
4485  default:
4486    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4487    return true;
4488  case AsmToken::Identifier: {
4489    if (!tryParseRegisterWithWriteBack(Operands))
4490      return false;
4491    int Res = tryParseShiftRegister(Operands);
4492    if (Res == 0) // success
4493      return false;
4494    else if (Res == -1) // irrecoverable error
4495      return true;
4496    // If this is VMRS, check for the apsr_nzcv operand.
4497    if (Mnemonic == "vmrs" &&
4498        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4499      S = Parser.getTok().getLoc();
4500      Parser.Lex();
4501      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4502      return false;
4503    }
4504
4505    // Fall through for the Identifier case that is not a register or a
4506    // special name.
4507  }
4508  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4509  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4510  case AsmToken::String:  // quoted label names.
4511  case AsmToken::Dot: {   // . as a branch target
4512    // This was not a register so parse other operands that start with an
4513    // identifier (like labels) as expressions and create them as immediates.
4514    const MCExpr *IdVal;
4515    S = Parser.getTok().getLoc();
4516    if (getParser().ParseExpression(IdVal))
4517      return true;
4518    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4519    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4520    return false;
4521  }
4522  case AsmToken::LBrac:
4523    return parseMemory(Operands);
4524  case AsmToken::LCurly:
4525    return parseRegisterList(Operands);
4526  case AsmToken::Dollar:
4527  case AsmToken::Hash: {
4528    // #42 -> immediate.
4529    S = Parser.getTok().getLoc();
4530    Parser.Lex();
4531
4532    if (Parser.getTok().isNot(AsmToken::Colon)) {
4533      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4534      const MCExpr *ImmVal;
4535      if (getParser().ParseExpression(ImmVal))
4536        return true;
4537      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4538      if (CE) {
4539        int32_t Val = CE->getValue();
4540        if (isNegative && Val == 0)
4541          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4542      }
4543      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4544      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4545      return false;
4546    }
4547    // w/ a ':' after the '#', it's just like a plain ':'.
4548    // FALLTHROUGH
4549  }
4550  case AsmToken::Colon: {
4551    // ":lower16:" and ":upper16:" expression prefixes
4552    // FIXME: Check it's an expression prefix,
4553    // e.g. (FOO - :lower16:BAR) isn't legal.
4554    ARMMCExpr::VariantKind RefKind;
4555    if (parsePrefix(RefKind))
4556      return true;
4557
4558    const MCExpr *SubExprVal;
4559    if (getParser().ParseExpression(SubExprVal))
4560      return true;
4561
4562    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4563                                                   getContext());
4564    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4565    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4566    return false;
4567  }
4568  }
4569}
4570
4571// parsePrefix - Parse the ARM 16-bit relocation expression prefixes, i.e.
4572//  :lower16: and :upper16:.
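// For example:
//   movw r0, :lower16:foo
//   movt r0, :upper16:foo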
4573bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4574  RefKind = ARMMCExpr::VK_ARM_None;
4575
4576  // :lower16: and :upper16: modifiers
4577  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4578  Parser.Lex(); // Eat ':'
4579
4580  if (getLexer().isNot(AsmToken::Identifier)) {
4581    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4582    return true;
4583  }
4584
4585  StringRef IDVal = Parser.getTok().getIdentifier();
4586  if (IDVal == "lower16") {
4587    RefKind = ARMMCExpr::VK_ARM_LO16;
4588  } else if (IDVal == "upper16") {
4589    RefKind = ARMMCExpr::VK_ARM_HI16;
4590  } else {
4591    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4592    return true;
4593  }
4594  Parser.Lex();
4595
4596  if (getLexer().isNot(AsmToken::Colon)) {
4597    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4598    return true;
4599  }
4600  Parser.Lex(); // Eat the last ':'
4601  return false;
4602}
4603
4604/// \brief Given a mnemonic, split out possible predication code and carry
4605/// setting letters to form a canonical mnemonic and flags.
4606//
4607// FIXME: Would be nice to autogen this.
4608// FIXME: This is a bit of a maze of special cases.
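// For example, "addseq" splits into the canonical mnemonic "add" with
// CarrySetting = true and PredicationCode = ARMCC::EQ, and "cpsie" splits
// into "cps" with ProcessorIMod = ARM_PROC::IE.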
4609StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4610                                      unsigned &PredicationCode,
4611                                      bool &CarrySetting,
4612                                      unsigned &ProcessorIMod,
4613                                      StringRef &ITMask) {
4614  PredicationCode = ARMCC::AL;
4615  CarrySetting = false;
4616  ProcessorIMod = 0;
4617
4618  // Ignore some mnemonics we know aren't predicated forms.
4619  //
4620  // FIXME: Would be nice to autogen this.
4621  if ((Mnemonic == "movs" && isThumb()) ||
4622      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4623      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4624      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4625      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4626      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4627      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4628      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4629      Mnemonic == "fmuls")
4630    return Mnemonic;
4631
4632  // First, split out any predication code. Ignore mnemonics we know aren't
4633  // predicated but do have a carry-set and so weren't caught above.
4634  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4635      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4636      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4637      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4638    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4639      .Case("eq", ARMCC::EQ)
4640      .Case("ne", ARMCC::NE)
4641      .Case("hs", ARMCC::HS)
4642      .Case("cs", ARMCC::HS)
4643      .Case("lo", ARMCC::LO)
4644      .Case("cc", ARMCC::LO)
4645      .Case("mi", ARMCC::MI)
4646      .Case("pl", ARMCC::PL)
4647      .Case("vs", ARMCC::VS)
4648      .Case("vc", ARMCC::VC)
4649      .Case("hi", ARMCC::HI)
4650      .Case("ls", ARMCC::LS)
4651      .Case("ge", ARMCC::GE)
4652      .Case("lt", ARMCC::LT)
4653      .Case("gt", ARMCC::GT)
4654      .Case("le", ARMCC::LE)
4655      .Case("al", ARMCC::AL)
4656      .Default(~0U);
4657    if (CC != ~0U) {
4658      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4659      PredicationCode = CC;
4660    }
4661  }
4662
4663  // Next, determine if we have a carry setting bit. We explicitly ignore all
4664  // the instructions we know end in 's'.
4665  if (Mnemonic.endswith("s") &&
4666      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4667        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4668        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4669        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4670        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4671        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4672        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4673        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4674        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4675        (Mnemonic == "movs" && isThumb()))) {
4676    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4677    CarrySetting = true;
4678  }
4679
4680  // The "cps" instruction can have a interrupt mode operand which is glued into
4681  // the mnemonic. Check if this is the case, split it and parse the imod op
4682  if (Mnemonic.startswith("cps")) {
4683    // Split out any imod code.
4684    unsigned IMod =
4685      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4686      .Case("ie", ARM_PROC::IE)
4687      .Case("id", ARM_PROC::ID)
4688      .Default(~0U);
4689    if (IMod != ~0U) {
4690      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4691      ProcessorIMod = IMod;
4692    }
4693  }
4694
4695  // The "it" instruction has the condition mask on the end of the mnemonic.
4696  if (Mnemonic.startswith("it")) {
4697    ITMask = Mnemonic.slice(2, Mnemonic.size());
4698    Mnemonic = Mnemonic.slice(0, 2);
4699  }
4700
4701  return Mnemonic;
4702}
4703
4704/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4705/// inclusion of carry set or predication code operands.
4706//
4707// FIXME: It would be nice to autogen this.
4708void ARMAsmParser::
4709getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4710                      bool &CanAcceptPredicationCode) {
4711  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4712      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4713      Mnemonic == "add" || Mnemonic == "adc" ||
4714      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4715      Mnemonic == "orr" || Mnemonic == "mvn" ||
4716      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4717      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4718      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4719      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4720                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4721                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4722    CanAcceptCarrySet = true;
4723  } else
4724    CanAcceptCarrySet = false;
4725
4726  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4727      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4728      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4729      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4730      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4731      (Mnemonic == "clrex" && !isThumb()) ||
4732      (Mnemonic == "nop" && isThumbOne()) ||
4733      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4734        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4735        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4736      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4737       !isThumb()) ||
4738      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4739    CanAcceptPredicationCode = false;
4740  } else
4741    CanAcceptPredicationCode = true;
4742
4743  if (isThumb()) {
4744    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4745        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4746      CanAcceptPredicationCode = false;
4747  }
4748}
4749
4750bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4751                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4752  // FIXME: This is all horribly hacky. We really need a better way to deal
4753  // with optional operands like this in the matcher table.
4754
4755  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4756  // another does not. Specifically, the MOVW instruction does not. So we
4757  // special case it here and remove the defaulted (non-setting) cc_out
4758  // operand if that's the instruction we're trying to match.
4759  //
4760  // We do this as post-processing of the explicit operands rather than just
4761  // conditionally adding the cc_out in the first place because we need
4762  // to check the type of the parsed immediate operand.
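  // For example, ARM-mode "mov r0, #0xbeef" can only be MOVW (the immediate
  // is not a valid so_imm), so the defaulted cc_out operand is dropped.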
4763  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4764      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4765      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4766      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4767    return true;
4768
4769  // Register-register 'add' for thumb does not have a cc_out operand
4770  // when there are only two register operands.
4771  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4772      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4773      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4774      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4775    return true;
4776  // Register-register 'add' for thumb does not have a cc_out operand
4777  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4778  // have to check the immediate range here since Thumb2 has a variant
4779  // that can handle a different range and has a cc_out operand.
4780  if (((isThumb() && Mnemonic == "add") ||
4781       (isThumbTwo() && Mnemonic == "sub")) &&
4782      Operands.size() == 6 &&
4783      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4784      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4785      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4786      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4787      ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4788       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4789    return true;
4790  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4791  // imm0_4095 variant. That's the least-preferred variant when
4792  // selecting via the generic "add" mnemonic, so to know that we
4793  // should remove the cc_out operand, we have to explicitly check that
4794  // it's not one of the other variants. Ugh.
4795  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4796      Operands.size() == 6 &&
4797      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4798      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4799      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4800    // Nest conditions rather than one big 'if' statement for readability.
4801    //
4802    // If either register is a high reg, it's either one of the SP
4803    // variants (handled above) or a 32-bit encoding, so we just
4804    // check against T3. If the second register is the PC, this is an
4805    // alternate form of ADR, which uses encoding T4, so check for that too.
4806    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4807         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4808        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4809        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4810      return false;
4811    // If both registers are low, we're in an IT block, and the immediate is
4812    // in range, we should use encoding T1 instead, which has a cc_out.
4813    if (inITBlock() &&
4814        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4815        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4816        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4817      return false;
4818
4819    // Otherwise, we use encoding T4, which does not have a cc_out
4820    // operand.
4821    return true;
4822  }
4823
4824  // The thumb2 multiply instruction doesn't have a CCOut register, so
4825  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4826  // use the 16-bit encoding or not.
4827  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4828      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4829      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4830      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4831      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4832      // If the registers aren't low regs, the destination reg isn't the
4833      // same as one of the source regs, or the cc_out operand is zero
4834      // outside of an IT block, we have to use the 32-bit encoding, so
4835      // remove the cc_out operand.
4836      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4837       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4838       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4839       !inITBlock() ||
4840       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4841        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4842        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4843        static_cast<ARMOperand*>(Operands[4])->getReg())))
4844    return true;
4845
4846  // Also check the 'mul' syntax variant that doesn't specify an explicit
4847  // destination register.
4848  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4849      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4850      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4851      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4852      // If the registers aren't low regs or the cc_out operand is zero
4853      // outside of an IT block, we have to use the 32-bit encoding, so
4854      // remove the cc_out operand.
4855      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4856       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4857       !inITBlock()))
4858    return true;
4859
4860
4861
4862  // Thumb 'add/sub' with SP does not have a cc_out operand
4863  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4864  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4865  // right, this will result in better diagnostics (which operand is off)
4866  // anyway.
4867  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4868      (Operands.size() == 5 || Operands.size() == 6) &&
4869      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4870      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4871      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4872      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4873       (Operands.size() == 6 &&
4874        static_cast<ARMOperand*>(Operands[5])->isImm())))
4875    return true;
4876
4877  return false;
4878}
4879
4880static bool isDataTypeToken(StringRef Tok) {
4881  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4882    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4883    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4884    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4885    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4886    Tok == ".f" || Tok == ".d";
4887}
4888
4889// FIXME: This bit should probably be handled via an explicit match class
4890// in the .td files that matches the suffix instead of having it be
4891// a literal string token the way it is now.
4892static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4893  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4894}
4895
4896static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4897/// Parse an ARM instruction mnemonic followed by its operands.
4898bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4899                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4900  // Apply mnemonic aliases before doing anything else, as the destination
4901  // mnemonic may include suffixes and we want to handle them normally.
4902  // The generic tblgen'erated code does this later, at the start of
4903  // MatchInstructionImpl(), but that's too late for aliases that include
4904  // any sort of suffix.
4905  unsigned AvailableFeatures = getAvailableFeatures();
4906  applyMnemonicAliases(Name, AvailableFeatures);
4907
4908  // First check for the ARM-specific .req directive.
4909  if (Parser.getTok().is(AsmToken::Identifier) &&
4910      Parser.getTok().getIdentifier() == ".req") {
4911    parseDirectiveReq(Name, NameLoc);
4912    // We always return 'error' for this, as we're done with this
4913    // statement and don't need to match the instruction.
4914    return true;
4915  }
4916
4917  // Create the leading tokens for the mnemonic, split by '.' characters.
4918  size_t Start = 0, Next = Name.find('.');
4919  StringRef Mnemonic = Name.slice(Start, Next);
4920
4921  // Split out the predication code and carry setting flag from the mnemonic.
4922  unsigned PredicationCode;
4923  unsigned ProcessorIMod;
4924  bool CarrySetting;
4925  StringRef ITMask;
4926  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4927                           ProcessorIMod, ITMask);
4928
4929  // In Thumb1, only the branch (B) instruction can be predicated.
4930  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4931    Parser.EatToEndOfStatement();
4932    return Error(NameLoc, "conditional execution not supported in Thumb1");
4933  }
4934
4935  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4936
4937  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4938  // is the mask as it will be for the IT encoding if the conditional
4939  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
4940  // where the conditional bit0 is zero, the instruction post-processing
4941  // will adjust the mask accordingly.
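  // For illustration, "ittet" carries the ITMask "tet", which the loop below
  // turns into the 4-bit mask 0b1011 before any later adjustment.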
4942  if (Mnemonic == "it") {
4943    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4944    if (ITMask.size() > 3) {
4945      Parser.EatToEndOfStatement();
4946      return Error(Loc, "too many conditions on IT instruction");
4947    }
4948    unsigned Mask = 8;
4949    for (unsigned i = ITMask.size(); i != 0; --i) {
4950      char pos = ITMask[i - 1];
4951      if (pos != 't' && pos != 'e') {
4952        Parser.EatToEndOfStatement();
4953        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4954      }
4955      Mask >>= 1;
4956      if (ITMask[i - 1] == 't')
4957        Mask |= 8;
4958    }
4959    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4960  }
4961
4962  // FIXME: This is all a pretty gross hack. We should automatically handle
4963  // optional operands like this via tblgen.
4964
4965  // Next, add the CCOut and ConditionCode operands, if needed.
4966  //
4967  // For mnemonics which can ever incorporate a carry setting bit or predication
4968  // code, our matching model involves us always generating CCOut and
4969  // ConditionCode operands to match the mnemonic "as written" and then we let
4970  // the matcher deal with finding the right instruction or generating an
4971  // appropriate error.
4972  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4973  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4974
4975  // If we had a carry-set on an instruction that can't do that, issue an
4976  // error.
4977  if (!CanAcceptCarrySet && CarrySetting) {
4978    Parser.EatToEndOfStatement();
4979    return Error(NameLoc, "instruction '" + Mnemonic +
4980                 "' can not set flags, but 's' suffix specified");
4981  }
4982  // If we had a predication code on an instruction that can't do that, issue an
4983  // error.
4984  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4985    Parser.EatToEndOfStatement();
4986    return Error(NameLoc, "instruction '" + Mnemonic +
4987                 "' is not predicable, but condition code specified");
4988  }
4989
4990  // Add the carry setting operand, if necessary.
4991  if (CanAcceptCarrySet) {
4992    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4993    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4994                                               Loc));
4995  }
4996
4997  // Add the predication code operand, if necessary.
4998  if (CanAcceptPredicationCode) {
4999    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5000                                      CarrySetting);
5001    Operands.push_back(ARMOperand::CreateCondCode(
5002                         ARMCC::CondCodes(PredicationCode), Loc));
5003  }
5004
5005  // Add the processor imod operand, if necessary.
5006  if (ProcessorIMod) {
5007    Operands.push_back(ARMOperand::CreateImm(
5008          MCConstantExpr::Create(ProcessorIMod, getContext()),
5009                                 NameLoc, NameLoc));
5010  }
5011
5012  // Add the remaining tokens in the mnemonic.
5013  while (Next != StringRef::npos) {
5014    Start = Next;
5015    Next = Name.find('.', Start + 1);
5016    StringRef ExtraToken = Name.slice(Start, Next);
5017
5018    // Some NEON instructions have an optional datatype suffix that is
5019    // completely ignored. Check for that.
5020    if (isDataTypeToken(ExtraToken) &&
5021        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5022      continue;
5023
5024    if (ExtraToken != ".n") {
5025      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5026      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5027    }
5028  }
5029
5030  // Read the remaining operands.
5031  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5032    // Read the first operand.
5033    if (parseOperand(Operands, Mnemonic)) {
5034      Parser.EatToEndOfStatement();
5035      return true;
5036    }
5037
5038    while (getLexer().is(AsmToken::Comma)) {
5039      Parser.Lex();  // Eat the comma.
5040
5041      // Parse and remember the operand.
5042      if (parseOperand(Operands, Mnemonic)) {
5043        Parser.EatToEndOfStatement();
5044        return true;
5045      }
5046    }
5047  }
5048
5049  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5050    SMLoc Loc = getLexer().getLoc();
5051    Parser.EatToEndOfStatement();
5052    return Error(Loc, "unexpected token in argument list");
5053  }
5054
5055  Parser.Lex(); // Consume the EndOfStatement
5056
5057  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5058  // do and don't have a cc_out optional-def operand. With some spot-checks
5059  // of the operand list, we can figure out which variant we're trying to
5060  // parse and adjust accordingly before actually matching. We shouldn't ever
5061  // try to remove a cc_out operand that was explicitly set on the
5062  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5063  // table-driven matcher doesn't fit well with the ARM instruction set.
5064  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5065    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5066    Operands.erase(Operands.begin() + 1);
5067    delete Op;
5068  }
5069
5070  // ARM mode 'blx' needs special handling, as the register operand version
5071  // is predicable, but the label operand version is not. So, we can't rely
5072  // on the Mnemonic based checking to correctly figure out when to put
5073  // a k_CondCode operand in the list. If we're trying to match the label
5074  // version, remove the k_CondCode operand here.
5075  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5076      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5077    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5078    Operands.erase(Operands.begin() + 1);
5079    delete Op;
5080  }
5081
5082  // The vector-compare-to-zero instructions have a literal token "#0" at
5083  // the end that comes to here as an immediate operand. Convert it to a
5084  // token to play nicely with the matcher.
5085  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5086      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5087      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5088    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5089    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5090    if (CE && CE->getValue() == 0) {
5091      Operands.erase(Operands.begin() + 5);
5092      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5093      delete Op;
5094    }
5095  }
5096  // VCMP{E} does the same thing, but with a different operand count.
5097  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5098      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5099    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5100    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5101    if (CE && CE->getValue() == 0) {
5102      Operands.erase(Operands.begin() + 4);
5103      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5104      delete Op;
5105    }
5106  }
5107  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5108  // end. Convert it to a token here. Take care not to convert those
5109  // that should hit the Thumb2 encoding.
5110  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5111      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5112      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5113      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5114    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5115    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5116    if (CE && CE->getValue() == 0 &&
5117        (isThumbOne() ||
5118         // The cc_out operand matches the IT block.
5119         ((inITBlock() != CarrySetting) &&
5120         // Neither register operand is a high register.
5121         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5122          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5123      Operands.erase(Operands.begin() + 5);
5124      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5125      delete Op;
5126    }
5127  }
5128
5129  return false;
5130}
5131
5132// Validate context-sensitive operand constraints.
5133
5134// return 'true' if the register list contains non-low GPR registers
5135// (other than the allowed HiReg), 'false' otherwise. If Reg is in the
5136// register list, set 'containsReg' to true.
5137static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5138                                 unsigned HiReg, bool &containsReg) {
5139  containsReg = false;
5140  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5141    unsigned OpReg = Inst.getOperand(i).getReg();
5142    if (OpReg == Reg)
5143      containsReg = true;
5144    // Anything other than a low register isn't legal here.
5145    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5146      return true;
5147  }
5148  return false;
5149}
5150
5151// Check if the specified register is in the register list of the inst,
5152// starting at the indicated operand number.
5153static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5154  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5155    unsigned OpReg = Inst.getOperand(i).getReg();
5156    if (OpReg == Reg)
5157      return true;
5158  }
5159  return false;
5160}
5161
5162// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5163// the ARMInsts array) instead. Getting that here requires awkward
5164// API changes, though. Better way?
5165namespace llvm {
5166extern const MCInstrDesc ARMInsts[];
5167}
5168static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5169  return ARMInsts[Opcode];
5170}
5171
5172// FIXME: We would really like to be able to tablegen'erate this.
5173bool ARMAsmParser::
5174validateInstruction(MCInst &Inst,
5175                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5176  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5177  SMLoc Loc = Operands[0]->getStartLoc();
5178  // Check the IT block state first.
5179  // NOTE: BKPT instruction has the interesting property of being
5180  // allowed in IT blocks, but not being predicable.  It just always
5181  // executes.
5182  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5183      Inst.getOpcode() != ARM::BKPT) {
5184    unsigned bit = 1;
5185    if (ITState.FirstCond)
5186      ITState.FirstCond = false;
5187    else
5188      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5189    // The instruction must be predicable.
5190    if (!MCID.isPredicable())
5191      return Error(Loc, "instructions in IT block must be predicable");
5192    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5193    unsigned ITCond = bit ? ITState.Cond :
5194      ARMCC::getOppositeCondition(ITState.Cond);
5195    if (Cond != ITCond) {
5196      // Find the condition code Operand to get its SMLoc information.
5197      SMLoc CondLoc;
5198      for (unsigned i = 1; i < Operands.size(); ++i)
5199        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5200          CondLoc = Operands[i]->getStartLoc();
5201      return Error(CondLoc, "incorrect condition in IT block; got '" +
5202                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5203                   "', but expected '" +
5204                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5205    }
5206  // Check for non-'al' condition codes outside of the IT block.
5207  } else if (isThumbTwo() && MCID.isPredicable() &&
5208             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5209             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5210             Inst.getOpcode() != ARM::t2B)
5211    return Error(Loc, "predicated instructions must be in IT block");
5212
5213  switch (Inst.getOpcode()) {
5214  case ARM::LDRD:
5215  case ARM::LDRD_PRE:
5216  case ARM::LDRD_POST:
5217  case ARM::LDREXD: {
5218    // Rt2 must be Rt + 1.
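    // For example, "ldrd r0, r1, [r2]" is accepted, while "ldrd r0, r2, [r3]"
    // is rejected here.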
5219    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5220    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5221    if (Rt2 != Rt + 1)
5222      return Error(Operands[3]->getStartLoc(),
5223                   "destination operands must be sequential");
5224    return false;
5225  }
5226  case ARM::STRD: {
5227    // Rt2 must be Rt + 1.
5228    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5229    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5230    if (Rt2 != Rt + 1)
5231      return Error(Operands[3]->getStartLoc(),
5232                   "source operands must be sequential");
5233    return false;
5234  }
5235  case ARM::STRD_PRE:
5236  case ARM::STRD_POST:
5237  case ARM::STREXD: {
5238    // Rt2 must be Rt + 1.
5239    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5240    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5241    if (Rt2 != Rt + 1)
5242      return Error(Operands[3]->getStartLoc(),
5243                   "source operands must be sequential");
5244    return false;
5245  }
5246  case ARM::SBFX:
5247  case ARM::UBFX: {
5248    // width must be in range [1, 32-lsb]
5249    unsigned lsb = Inst.getOperand(2).getImm();
5250    unsigned widthm1 = Inst.getOperand(3).getImm();
5251    if (widthm1 >= 32 - lsb)
5252      return Error(Operands[5]->getStartLoc(),
5253                   "bitfield width must be in range [1,32-lsb]");
5254    return false;
5255  }
5256  case ARM::tLDMIA: {
5257    // If we're parsing Thumb2, the .w variant is available and handles
5258    // most cases that are normally illegal for a Thumb1 LDM
5259    // instruction. We'll make the transformation in processInstruction()
5260    // if necessary.
5261    //
5262    // Thumb LDM instructions are writeback iff the base register is not
5263    // in the register list.
5264    unsigned Rn = Inst.getOperand(0).getReg();
5265    bool hasWritebackToken =
5266      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5267       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5268    bool listContainsBase;
5269    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5270      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5271                   "registers must be in range r0-r7");
5272    // If we should have writeback, then there should be a '!' token.
5273    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5274      return Error(Operands[2]->getStartLoc(),
5275                   "writeback operator '!' expected");
5276    // If we should not have writeback, there must not be a '!'. This is
5277    // true even for the 32-bit wide encodings.
5278    if (listContainsBase && hasWritebackToken)
5279      return Error(Operands[3]->getStartLoc(),
5280                   "writeback operator '!' not allowed when base register "
5281                   "in register list");
5282
5283    break;
5284  }
5285  case ARM::t2LDMIA_UPD: {
5286    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5287      return Error(Operands[4]->getStartLoc(),
5288                   "writeback operator '!' not allowed when base register "
5289                   "in register list");
5290    break;
5291  }
5292  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5293  // so only issue a diagnostic for thumb1. The instructions will be
5294  // switched to the t2 encodings in processInstruction() if necessary.
5295  case ARM::tPOP: {
5296    bool listContainsBase;
5297    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5298        !isThumbTwo())
5299      return Error(Operands[2]->getStartLoc(),
5300                   "registers must be in range r0-r7 or pc");
5301    break;
5302  }
5303  case ARM::tPUSH: {
5304    bool listContainsBase;
5305    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5306        !isThumbTwo())
5307      return Error(Operands[2]->getStartLoc(),
5308                   "registers must be in range r0-r7 or lr");
5309    break;
5310  }
5311  case ARM::tSTMIA_UPD: {
5312    bool listContainsBase;
5313    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5314      return Error(Operands[4]->getStartLoc(),
5315                   "registers must be in range r0-r7");
5316    break;
5317  }
5318  }
5319
5320  return false;
5321}
5322
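// Map a VSTn pseudo ("..._Asm_...") opcode to the real instruction opcode and
// report the register-list spacing: 1 for consecutive D registers, 2 for
// every-other-D as used by the Q-register forms.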
5323static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5324  switch(Opc) {
5325  default: llvm_unreachable("unexpected opcode!");
5326  // VST1LN
5327  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5328  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5329  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5330  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5331  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5332  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5333  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5334  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5335  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5336
5337  // VST2LN
5338  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5339  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5340  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5341  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5342  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5343
5344  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5345  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5346  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5347  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5348  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5349
5350  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5351  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5352  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5353  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5354  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5355
5356  // VST3LN
5357  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5358  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5359  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5360  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5361  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5362  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5363  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5364  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5365  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5366  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5367  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5368  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5369  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5370  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5371  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5372
5373  // VST3
5374  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5375  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5376  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5377  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5378  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5379  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5380  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5381  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5382  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5383  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5384  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5385  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5386  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5387  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5388  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5389  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5390  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5391  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5392
5393  // VST4LN
5394  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5395  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5396  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5397  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5398  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5399  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5400  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5401  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5402  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5403  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5404  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5405  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5406  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5407  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5408  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5409
5410  // VST4
5411  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5412  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5413  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5414  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5415  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5416  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5417  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5418  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5419  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5420  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5421  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5422  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5423  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5424  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5425  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5426  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5427  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5428  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5429  }
5430}
5431
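// As above, but for the VLDn pseudo opcodes: map to the real instruction
// opcode and report the register-list spacing.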
5432static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5433  switch(Opc) {
5434  default: llvm_unreachable("unexpected opcode!");
5435  // VLD1LN
5436  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5437  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5438  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5439  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5440  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5441  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5442  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5443  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5444  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5445
5446  // VLD2LN
5447  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5448  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5449  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5450  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5451  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5452  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5453  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5454  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5455  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5456  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5457  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5458  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5459  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5460  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5461  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5462
5463  // VLD3DUP
5464  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5465  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5466  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5467  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
5468  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
5469  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5470  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5471  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5472  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5473  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5474  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5475  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5476  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5477  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5478  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5479  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5480  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5481  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5482
5483  // VLD3LN
5484  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5485  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5486  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5487  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5488  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5489  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5490  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5491  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5492  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5493  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5494  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5495  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5496  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5497  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5498  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5499
5500  // VLD3
5501  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5502  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5503  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5504  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5505  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5506  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5507  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5508  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5509  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5510  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5511  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5512  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5513  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5514  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5515  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5516  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5517  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5518  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5519
5520  // VLD4LN
5521  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5522  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5523  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5524  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5525  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5526  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5527  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5528  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5529  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5530  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5531  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5532  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5533  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5534  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5535  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5536
5537  // VLD4DUP
5538  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5539  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5540  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5541  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5542  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5543  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5544  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5545  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5546  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5547  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5548  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5549  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5550  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5551  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5552  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5553  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5554  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5555  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5556
5557  // VLD4
5558  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5559  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5560  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5561  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5562  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5563  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5564  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5565  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5566  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5567  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5568  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5569  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5570  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5571  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5572  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5573  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5574  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5575  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5576  }
5577}
5578
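// Post-process a matched MCInst: expand the complex assembler aliases
// (pseudo opcodes) handled below into the real MC instructions and apply
// the narrow-vs-wide Thumb encoding choices. Returning true signals that
// Inst was rewritten (each alias case does so after copying TmpInst back
// into Inst); returning false leaves the instruction untouched.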
5579bool ARMAsmParser::
5580processInstruction(MCInst &Inst,
5581                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5582  switch (Inst.getOpcode()) {
5583  // Aliases for alternate PC+imm syntax of LDR instructions.
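  // These pcrel pseudos exist so the matcher can accept the explicit
  // PC-plus-immediate spelling of the literal loads; their operands already
  // line up with the corresponding *pci instructions, so retargeting the
  // opcode is the whole expansion.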
5584  case ARM::t2LDRpcrel:
5585    Inst.setOpcode(ARM::t2LDRpci);
5586    return true;
5587  case ARM::t2LDRBpcrel:
5588    Inst.setOpcode(ARM::t2LDRBpci);
5589    return true;
5590  case ARM::t2LDRHpcrel:
5591    Inst.setOpcode(ARM::t2LDRHpci);
5592    return true;
5593  case ARM::t2LDRSBpcrel:
5594    Inst.setOpcode(ARM::t2LDRSBpci);
5595    return true;
5596  case ARM::t2LDRSHpcrel:
5597    Inst.setOpcode(ARM::t2LDRSHpci);
5598    return true;
5599  // Handle NEON VST complex aliases.
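  // Each "...Asm..." lane-store pseudo below is rebuilt so its operands sit
  // in the order the real instruction expects: the address operands first,
  // then the D-register list fanned out from Vd using Spacing, and finally
  // the lane index and the predicate. As a rough illustration, a source
  // line such as "vst2.16 {d0[1], d1[1]}, [r4]!" comes in as
  // VST2LNdWB_fixed_Asm_16 and leaves as VST2LNd16_UPD.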
5600  case ARM::VST1LNdWB_register_Asm_8:
5601  case ARM::VST1LNdWB_register_Asm_16:
5602  case ARM::VST1LNdWB_register_Asm_32: {
5603    MCInst TmpInst;
5604    // Shuffle the operands around so the lane index operand is in the
5605    // right place.
5606    unsigned Spacing;
5607    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5608    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5609    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5610    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5611    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5612    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5613    TmpInst.addOperand(Inst.getOperand(1)); // lane
5614    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5615    TmpInst.addOperand(Inst.getOperand(6));
5616    Inst = TmpInst;
5617    return true;
5618  }
5619
5620  case ARM::VST2LNdWB_register_Asm_8:
5621  case ARM::VST2LNdWB_register_Asm_16:
5622  case ARM::VST2LNdWB_register_Asm_32:
5623  case ARM::VST2LNqWB_register_Asm_16:
5624  case ARM::VST2LNqWB_register_Asm_32: {
5625    MCInst TmpInst;
5626    // Shuffle the operands around so the lane index operand is in the
5627    // right place.
5628    unsigned Spacing;
5629    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5630    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5631    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5632    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5633    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5634    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5635    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5636                                            Spacing));
5637    TmpInst.addOperand(Inst.getOperand(1)); // lane
5638    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5639    TmpInst.addOperand(Inst.getOperand(6));
5640    Inst = TmpInst;
5641    return true;
5642  }
5643
5644  case ARM::VST3LNdWB_register_Asm_8:
5645  case ARM::VST3LNdWB_register_Asm_16:
5646  case ARM::VST3LNdWB_register_Asm_32:
5647  case ARM::VST3LNqWB_register_Asm_16:
5648  case ARM::VST3LNqWB_register_Asm_32: {
5649    MCInst TmpInst;
5650    // Shuffle the operands around so the lane index operand is in the
5651    // right place.
5652    unsigned Spacing;
5653    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5654    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5655    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5656    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5657    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5658    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5659    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5660                                            Spacing));
5661    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5662                                            Spacing * 2));
5663    TmpInst.addOperand(Inst.getOperand(1)); // lane
5664    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5665    TmpInst.addOperand(Inst.getOperand(6));
5666    Inst = TmpInst;
5667    return true;
5668  }
5669
5670  case ARM::VST4LNdWB_register_Asm_8:
5671  case ARM::VST4LNdWB_register_Asm_16:
5672  case ARM::VST4LNdWB_register_Asm_32:
5673  case ARM::VST4LNqWB_register_Asm_16:
5674  case ARM::VST4LNqWB_register_Asm_32: {
5675    MCInst TmpInst;
5676    // Shuffle the operands around so the lane index operand is in the
5677    // right place.
5678    unsigned Spacing;
5679    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5680    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5681    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5682    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5683    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5684    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5685    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5686                                            Spacing));
5687    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5688                                            Spacing * 2));
5689    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5690                                            Spacing * 3));
5691    TmpInst.addOperand(Inst.getOperand(1)); // lane
5692    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5693    TmpInst.addOperand(Inst.getOperand(6));
5694    Inst = TmpInst;
5695    return true;
5696  }
5697
5698  case ARM::VST1LNdWB_fixed_Asm_8:
5699  case ARM::VST1LNdWB_fixed_Asm_16:
5700  case ARM::VST1LNdWB_fixed_Asm_32: {
5701    MCInst TmpInst;
5702    // Shuffle the operands around so the lane index operand is in the
5703    // right place.
5704    unsigned Spacing;
5705    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5706    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5707    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5708    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5709    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5710    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5711    TmpInst.addOperand(Inst.getOperand(1)); // lane
5712    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5713    TmpInst.addOperand(Inst.getOperand(5));
5714    Inst = TmpInst;
5715    return true;
5716  }
5717
5718  case ARM::VST2LNdWB_fixed_Asm_8:
5719  case ARM::VST2LNdWB_fixed_Asm_16:
5720  case ARM::VST2LNdWB_fixed_Asm_32:
5721  case ARM::VST2LNqWB_fixed_Asm_16:
5722  case ARM::VST2LNqWB_fixed_Asm_32: {
5723    MCInst TmpInst;
5724    // Shuffle the operands around so the lane index operand is in the
5725    // right place.
5726    unsigned Spacing;
5727    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5728    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5729    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5730    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5731    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5732    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5733    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5734                                            Spacing));
5735    TmpInst.addOperand(Inst.getOperand(1)); // lane
5736    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5737    TmpInst.addOperand(Inst.getOperand(5));
5738    Inst = TmpInst;
5739    return true;
5740  }
5741
5742  case ARM::VST3LNdWB_fixed_Asm_8:
5743  case ARM::VST3LNdWB_fixed_Asm_16:
5744  case ARM::VST3LNdWB_fixed_Asm_32:
5745  case ARM::VST3LNqWB_fixed_Asm_16:
5746  case ARM::VST3LNqWB_fixed_Asm_32: {
5747    MCInst TmpInst;
5748    // Shuffle the operands around so the lane index operand is in the
5749    // right place.
5750    unsigned Spacing;
5751    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5752    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5753    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5754    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5755    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5756    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5757    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5758                                            Spacing));
5759    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5760                                            Spacing * 2));
5761    TmpInst.addOperand(Inst.getOperand(1)); // lane
5762    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5763    TmpInst.addOperand(Inst.getOperand(5));
5764    Inst = TmpInst;
5765    return true;
5766  }
5767
5768  case ARM::VST4LNdWB_fixed_Asm_8:
5769  case ARM::VST4LNdWB_fixed_Asm_16:
5770  case ARM::VST4LNdWB_fixed_Asm_32:
5771  case ARM::VST4LNqWB_fixed_Asm_16:
5772  case ARM::VST4LNqWB_fixed_Asm_32: {
5773    MCInst TmpInst;
5774    // Shuffle the operands around so the lane index operand is in the
5775    // right place.
5776    unsigned Spacing;
5777    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5778    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5779    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5780    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5781    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5782    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5783    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5784                                            Spacing));
5785    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5786                                            Spacing * 2));
5787    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5788                                            Spacing * 3));
5789    TmpInst.addOperand(Inst.getOperand(1)); // lane
5790    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5791    TmpInst.addOperand(Inst.getOperand(5));
5792    Inst = TmpInst;
5793    return true;
5794  }
5795
5796  case ARM::VST1LNdAsm_8:
5797  case ARM::VST1LNdAsm_16:
5798  case ARM::VST1LNdAsm_32: {
5799    MCInst TmpInst;
5800    // Shuffle the operands around so the lane index operand is in the
5801    // right place.
5802    unsigned Spacing;
5803    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5804    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5805    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5806    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5807    TmpInst.addOperand(Inst.getOperand(1)); // lane
5808    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5809    TmpInst.addOperand(Inst.getOperand(5));
5810    Inst = TmpInst;
5811    return true;
5812  }
5813
5814  case ARM::VST2LNdAsm_8:
5815  case ARM::VST2LNdAsm_16:
5816  case ARM::VST2LNdAsm_32:
5817  case ARM::VST2LNqAsm_16:
5818  case ARM::VST2LNqAsm_32: {
5819    MCInst TmpInst;
5820    // Shuffle the operands around so the lane index operand is in the
5821    // right place.
5822    unsigned Spacing;
5823    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5824    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5825    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5826    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5827    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5828                                            Spacing));
5829    TmpInst.addOperand(Inst.getOperand(1)); // lane
5830    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5831    TmpInst.addOperand(Inst.getOperand(5));
5832    Inst = TmpInst;
5833    return true;
5834  }
5835
5836  case ARM::VST3LNdAsm_8:
5837  case ARM::VST3LNdAsm_16:
5838  case ARM::VST3LNdAsm_32:
5839  case ARM::VST3LNqAsm_16:
5840  case ARM::VST3LNqAsm_32: {
5841    MCInst TmpInst;
5842    // Shuffle the operands around so the lane index operand is in the
5843    // right place.
5844    unsigned Spacing;
5845    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5846    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5847    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5848    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5849    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5850                                            Spacing));
5851    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5852                                            Spacing * 2));
5853    TmpInst.addOperand(Inst.getOperand(1)); // lane
5854    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5855    TmpInst.addOperand(Inst.getOperand(5));
5856    Inst = TmpInst;
5857    return true;
5858  }
5859
5860  case ARM::VST4LNdAsm_8:
5861  case ARM::VST4LNdAsm_16:
5862  case ARM::VST4LNdAsm_32:
5863  case ARM::VST4LNqAsm_16:
5864  case ARM::VST4LNqAsm_32: {
5865    MCInst TmpInst;
5866    // Shuffle the operands around so the lane index operand is in the
5867    // right place.
5868    unsigned Spacing;
5869    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5870    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5871    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5872    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5873    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5874                                            Spacing));
5875    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5876                                            Spacing * 2));
5877    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5878                                            Spacing * 3));
5879    TmpInst.addOperand(Inst.getOperand(1)); // lane
5880    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5881    TmpInst.addOperand(Inst.getOperand(5));
5882    Inst = TmpInst;
5883    return true;
5884  }
5885
5886  // Handle NEON VLD complex aliases.
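  // Same idea as the VST aliases above, but for the loads the destination
  // D-register list (Vd, Vd + Spacing, ...) comes first, and the Vd list is
  // repeated near the end as the tied-source operands the real instruction
  // requires. Purely for illustration, "vld2.32 {d0[1], d1[1]}, [r3]" comes
  // in as VLD2LNdAsm_32 and leaves as VLD2LNd32 with d0/d1 present both as
  // defs and as tied uses.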
5887  case ARM::VLD1LNdWB_register_Asm_8:
5888  case ARM::VLD1LNdWB_register_Asm_16:
5889  case ARM::VLD1LNdWB_register_Asm_32: {
5890    MCInst TmpInst;
5891    // Shuffle the operands around so the lane index operand is in the
5892    // right place.
5893    unsigned Spacing;
5894    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5895    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5896    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5897    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5898    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5899    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5900    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5901    TmpInst.addOperand(Inst.getOperand(1)); // lane
5902    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5903    TmpInst.addOperand(Inst.getOperand(6));
5904    Inst = TmpInst;
5905    return true;
5906  }
5907
5908  case ARM::VLD2LNdWB_register_Asm_8:
5909  case ARM::VLD2LNdWB_register_Asm_16:
5910  case ARM::VLD2LNdWB_register_Asm_32:
5911  case ARM::VLD2LNqWB_register_Asm_16:
5912  case ARM::VLD2LNqWB_register_Asm_32: {
5913    MCInst TmpInst;
5914    // Shuffle the operands around so the lane index operand is in the
5915    // right place.
5916    unsigned Spacing;
5917    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5918    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5919    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5920                                            Spacing));
5921    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5922    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5923    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5924    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5925    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5926    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5927                                            Spacing));
5928    TmpInst.addOperand(Inst.getOperand(1)); // lane
5929    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5930    TmpInst.addOperand(Inst.getOperand(6));
5931    Inst = TmpInst;
5932    return true;
5933  }
5934
5935  case ARM::VLD3LNdWB_register_Asm_8:
5936  case ARM::VLD3LNdWB_register_Asm_16:
5937  case ARM::VLD3LNdWB_register_Asm_32:
5938  case ARM::VLD3LNqWB_register_Asm_16:
5939  case ARM::VLD3LNqWB_register_Asm_32: {
5940    MCInst TmpInst;
5941    // Shuffle the operands around so the lane index operand is in the
5942    // right place.
5943    unsigned Spacing;
5944    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5945    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5946    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5947                                            Spacing));
5948    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5949                                            Spacing * 2));
5950    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5951    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5952    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5953    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5954    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5955    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5956                                            Spacing));
5957    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5958                                            Spacing * 2));
5959    TmpInst.addOperand(Inst.getOperand(1)); // lane
5960    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5961    TmpInst.addOperand(Inst.getOperand(6));
5962    Inst = TmpInst;
5963    return true;
5964  }
5965
5966  case ARM::VLD4LNdWB_register_Asm_8:
5967  case ARM::VLD4LNdWB_register_Asm_16:
5968  case ARM::VLD4LNdWB_register_Asm_32:
5969  case ARM::VLD4LNqWB_register_Asm_16:
5970  case ARM::VLD4LNqWB_register_Asm_32: {
5971    MCInst TmpInst;
5972    // Shuffle the operands around so the lane index operand is in the
5973    // right place.
5974    unsigned Spacing;
5975    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5976    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5977    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5978                                            Spacing));
5979    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5980                                            Spacing * 2));
5981    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5982                                            Spacing * 3));
5983    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5984    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5985    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5986    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5987    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5988    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5989                                            Spacing));
5990    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5991                                            Spacing * 2));
5992    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5993                                            Spacing * 3));
5994    TmpInst.addOperand(Inst.getOperand(1)); // lane
5995    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5996    TmpInst.addOperand(Inst.getOperand(6));
5997    Inst = TmpInst;
5998    return true;
5999  }
6000
6001  case ARM::VLD1LNdWB_fixed_Asm_8:
6002  case ARM::VLD1LNdWB_fixed_Asm_16:
6003  case ARM::VLD1LNdWB_fixed_Asm_32: {
6004    MCInst TmpInst;
6005    // Shuffle the operands around so the lane index operand is in the
6006    // right place.
6007    unsigned Spacing;
6008    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6009    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6010    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6011    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6012    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6013    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6014    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6015    TmpInst.addOperand(Inst.getOperand(1)); // lane
6016    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6017    TmpInst.addOperand(Inst.getOperand(5));
6018    Inst = TmpInst;
6019    return true;
6020  }
6021
6022  case ARM::VLD2LNdWB_fixed_Asm_8:
6023  case ARM::VLD2LNdWB_fixed_Asm_16:
6024  case ARM::VLD2LNdWB_fixed_Asm_32:
6025  case ARM::VLD2LNqWB_fixed_Asm_16:
6026  case ARM::VLD2LNqWB_fixed_Asm_32: {
6027    MCInst TmpInst;
6028    // Shuffle the operands around so the lane index operand is in the
6029    // right place.
6030    unsigned Spacing;
6031    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6032    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6033    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6034                                            Spacing));
6035    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6036    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6037    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6038    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6039    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6040    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6041                                            Spacing));
6042    TmpInst.addOperand(Inst.getOperand(1)); // lane
6043    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6044    TmpInst.addOperand(Inst.getOperand(5));
6045    Inst = TmpInst;
6046    return true;
6047  }
6048
6049  case ARM::VLD3LNdWB_fixed_Asm_8:
6050  case ARM::VLD3LNdWB_fixed_Asm_16:
6051  case ARM::VLD3LNdWB_fixed_Asm_32:
6052  case ARM::VLD3LNqWB_fixed_Asm_16:
6053  case ARM::VLD3LNqWB_fixed_Asm_32: {
6054    MCInst TmpInst;
6055    // Shuffle the operands around so the lane index operand is in the
6056    // right place.
6057    unsigned Spacing;
6058    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6059    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6060    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6061                                            Spacing));
6062    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6063                                            Spacing * 2));
6064    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6065    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6066    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6067    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6068    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6069    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6070                                            Spacing));
6071    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6072                                            Spacing * 2));
6073    TmpInst.addOperand(Inst.getOperand(1)); // lane
6074    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6075    TmpInst.addOperand(Inst.getOperand(5));
6076    Inst = TmpInst;
6077    return true;
6078  }
6079
6080  case ARM::VLD4LNdWB_fixed_Asm_8:
6081  case ARM::VLD4LNdWB_fixed_Asm_16:
6082  case ARM::VLD4LNdWB_fixed_Asm_32:
6083  case ARM::VLD4LNqWB_fixed_Asm_16:
6084  case ARM::VLD4LNqWB_fixed_Asm_32: {
6085    MCInst TmpInst;
6086    // Shuffle the operands around so the lane index operand is in the
6087    // right place.
6088    unsigned Spacing;
6089    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6090    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6091    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6092                                            Spacing));
6093    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6094                                            Spacing * 2));
6095    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6096                                            Spacing * 3));
6097    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6098    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6099    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6100    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6101    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6102    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6103                                            Spacing));
6104    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6105                                            Spacing * 2));
6106    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6107                                            Spacing * 3));
6108    TmpInst.addOperand(Inst.getOperand(1)); // lane
6109    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6110    TmpInst.addOperand(Inst.getOperand(5));
6111    Inst = TmpInst;
6112    return true;
6113  }
6114
6115  case ARM::VLD1LNdAsm_8:
6116  case ARM::VLD1LNdAsm_16:
6117  case ARM::VLD1LNdAsm_32: {
6118    MCInst TmpInst;
6119    // Shuffle the operands around so the lane index operand is in the
6120    // right place.
6121    unsigned Spacing;
6122    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6123    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6124    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6125    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6126    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6127    TmpInst.addOperand(Inst.getOperand(1)); // lane
6128    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6129    TmpInst.addOperand(Inst.getOperand(5));
6130    Inst = TmpInst;
6131    return true;
6132  }
6133
6134  case ARM::VLD2LNdAsm_8:
6135  case ARM::VLD2LNdAsm_16:
6136  case ARM::VLD2LNdAsm_32:
6137  case ARM::VLD2LNqAsm_16:
6138  case ARM::VLD2LNqAsm_32: {
6139    MCInst TmpInst;
6140    // Shuffle the operands around so the lane index operand is in the
6141    // right place.
6142    unsigned Spacing;
6143    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6144    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6145    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6146                                            Spacing));
6147    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6148    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6149    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6150    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6151                                            Spacing));
6152    TmpInst.addOperand(Inst.getOperand(1)); // lane
6153    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6154    TmpInst.addOperand(Inst.getOperand(5));
6155    Inst = TmpInst;
6156    return true;
6157  }
6158
6159  case ARM::VLD3LNdAsm_8:
6160  case ARM::VLD3LNdAsm_16:
6161  case ARM::VLD3LNdAsm_32:
6162  case ARM::VLD3LNqAsm_16:
6163  case ARM::VLD3LNqAsm_32: {
6164    MCInst TmpInst;
6165    // Shuffle the operands around so the lane index operand is in the
6166    // right place.
6167    unsigned Spacing;
6168    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6169    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6170    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6171                                            Spacing));
6172    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6173                                            Spacing * 2));
6174    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6175    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6176    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6177    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6178                                            Spacing));
6179    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6180                                            Spacing * 2));
6181    TmpInst.addOperand(Inst.getOperand(1)); // lane
6182    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6183    TmpInst.addOperand(Inst.getOperand(5));
6184    Inst = TmpInst;
6185    return true;
6186  }
6187
6188  case ARM::VLD4LNdAsm_8:
6189  case ARM::VLD4LNdAsm_16:
6190  case ARM::VLD4LNdAsm_32:
6191  case ARM::VLD4LNqAsm_16:
6192  case ARM::VLD4LNqAsm_32: {
6193    MCInst TmpInst;
6194    // Shuffle the operands around so the lane index operand is in the
6195    // right place.
6196    unsigned Spacing;
6197    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6198    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6199    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6200                                            Spacing));
6201    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6202                                            Spacing * 2));
6203    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6204                                            Spacing * 3));
6205    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6206    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6207    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6208    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6209                                            Spacing));
6210    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6211                                            Spacing * 2));
6212    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6213                                            Spacing * 3));
6214    TmpInst.addOperand(Inst.getOperand(1)); // lane
6215    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6216    TmpInst.addOperand(Inst.getOperand(5));
6217    Inst = TmpInst;
6218    return true;
6219  }
6220
6221  // VLD3DUP single 3-element structure to all lanes instructions.
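  // The all-lanes ("dup") forms carry no lane operand; only the register
  // list, the address operands, and the predicate are reordered. For
  // illustration, "vld3.8 {d0[], d1[], d2[]}, [r1]" takes the VLD3DUPdAsm_8
  // path (Spacing == 1), while the double-spaced "{d0[], d2[], d4[]}" list
  // takes the q path with Spacing == 2.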
6222  case ARM::VLD3DUPdAsm_8:
6223  case ARM::VLD3DUPdAsm_16:
6224  case ARM::VLD3DUPdAsm_32:
6225  case ARM::VLD3DUPqAsm_8:
6226  case ARM::VLD3DUPqAsm_16:
6227  case ARM::VLD3DUPqAsm_32: {
6228    MCInst TmpInst;
6229    unsigned Spacing;
6230    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6231    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6232    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6233                                            Spacing));
6234    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6235                                            Spacing * 2));
6236    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6237    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6238    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6239    TmpInst.addOperand(Inst.getOperand(4));
6240    Inst = TmpInst;
6241    return true;
6242  }
6243
6244  case ARM::VLD3DUPdWB_fixed_Asm_8:
6245  case ARM::VLD3DUPdWB_fixed_Asm_16:
6246  case ARM::VLD3DUPdWB_fixed_Asm_32:
6247  case ARM::VLD3DUPqWB_fixed_Asm_8:
6248  case ARM::VLD3DUPqWB_fixed_Asm_16:
6249  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6250    MCInst TmpInst;
6251    unsigned Spacing;
6252    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6253    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6254    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6255                                            Spacing));
6256    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6257                                            Spacing * 2));
6258    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6259    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6260    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6261    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6262    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6263    TmpInst.addOperand(Inst.getOperand(4));
6264    Inst = TmpInst;
6265    return true;
6266  }
6267
6268  case ARM::VLD3DUPdWB_register_Asm_8:
6269  case ARM::VLD3DUPdWB_register_Asm_16:
6270  case ARM::VLD3DUPdWB_register_Asm_32:
6271  case ARM::VLD3DUPqWB_register_Asm_8:
6272  case ARM::VLD3DUPqWB_register_Asm_16:
6273  case ARM::VLD3DUPqWB_register_Asm_32: {
6274    MCInst TmpInst;
6275    unsigned Spacing;
6276    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6277    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6278    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6279                                            Spacing));
6280    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6281                                            Spacing * 2));
6282    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6283    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6284    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6285    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6286    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6287    TmpInst.addOperand(Inst.getOperand(5));
6288    Inst = TmpInst;
6289    return true;
6290  }
6291
6292  // VLD3 multiple 3-element structure instructions.
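  // For the multi-structure loads there is no lane or tied-source operand;
  // the pseudo's single Vd is expanded into the full three-register list and
  // the remaining operands pass straight through. Illustratively,
  // "vld3.16 {d1, d2, d3}, [r0]" maps to VLD3d16, and the spaced
  // "{d1, d3, d5}" form maps to VLD3q16 with Spacing == 2.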
6293  case ARM::VLD3dAsm_8:
6294  case ARM::VLD3dAsm_16:
6295  case ARM::VLD3dAsm_32:
6296  case ARM::VLD3qAsm_8:
6297  case ARM::VLD3qAsm_16:
6298  case ARM::VLD3qAsm_32: {
6299    MCInst TmpInst;
6300    unsigned Spacing;
6301    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6302    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6303    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6304                                            Spacing));
6305    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6306                                            Spacing * 2));
6307    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6308    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6309    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6310    TmpInst.addOperand(Inst.getOperand(4));
6311    Inst = TmpInst;
6312    return true;
6313  }
6314
6315  case ARM::VLD3dWB_fixed_Asm_8:
6316  case ARM::VLD3dWB_fixed_Asm_16:
6317  case ARM::VLD3dWB_fixed_Asm_32:
6318  case ARM::VLD3qWB_fixed_Asm_8:
6319  case ARM::VLD3qWB_fixed_Asm_16:
6320  case ARM::VLD3qWB_fixed_Asm_32: {
6321    MCInst TmpInst;
6322    unsigned Spacing;
6323    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6324    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6325    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6326                                            Spacing));
6327    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6328                                            Spacing * 2));
6329    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6330    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6331    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6332    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6333    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6334    TmpInst.addOperand(Inst.getOperand(4));
6335    Inst = TmpInst;
6336    return true;
6337  }
6338
6339  case ARM::VLD3dWB_register_Asm_8:
6340  case ARM::VLD3dWB_register_Asm_16:
6341  case ARM::VLD3dWB_register_Asm_32:
6342  case ARM::VLD3qWB_register_Asm_8:
6343  case ARM::VLD3qWB_register_Asm_16:
6344  case ARM::VLD3qWB_register_Asm_32: {
6345    MCInst TmpInst;
6346    unsigned Spacing;
6347    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6348    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6349    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6350                                            Spacing));
6351    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6352                                            Spacing * 2));
6353    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6354    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6355    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6356    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6357    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6358    TmpInst.addOperand(Inst.getOperand(5));
6359    Inst = TmpInst;
6360    return true;
6361  }
6362
6363  // VLD4DUP single 4-element structure to all lanes instructions.
6364  case ARM::VLD4DUPdAsm_8:
6365  case ARM::VLD4DUPdAsm_16:
6366  case ARM::VLD4DUPdAsm_32:
6367  case ARM::VLD4DUPqAsm_8:
6368  case ARM::VLD4DUPqAsm_16:
6369  case ARM::VLD4DUPqAsm_32: {
6370    MCInst TmpInst;
6371    unsigned Spacing;
6372    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6373    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6374    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6375                                            Spacing));
6376    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6377                                            Spacing * 2));
6378    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6379                                            Spacing * 3));
6380    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6381    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6382    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6383    TmpInst.addOperand(Inst.getOperand(4));
6384    Inst = TmpInst;
6385    return true;
6386  }
6387
6388  case ARM::VLD4DUPdWB_fixed_Asm_8:
6389  case ARM::VLD4DUPdWB_fixed_Asm_16:
6390  case ARM::VLD4DUPdWB_fixed_Asm_32:
6391  case ARM::VLD4DUPqWB_fixed_Asm_8:
6392  case ARM::VLD4DUPqWB_fixed_Asm_16:
6393  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6394    MCInst TmpInst;
6395    unsigned Spacing;
6396    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6397    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6398    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6399                                            Spacing));
6400    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6401                                            Spacing * 2));
6402    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6403                                            Spacing * 3));
6404    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6405    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6406    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6407    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6408    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6409    TmpInst.addOperand(Inst.getOperand(4));
6410    Inst = TmpInst;
6411    return true;
6412  }
6413
6414  case ARM::VLD4DUPdWB_register_Asm_8:
6415  case ARM::VLD4DUPdWB_register_Asm_16:
6416  case ARM::VLD4DUPdWB_register_Asm_32:
6417  case ARM::VLD4DUPqWB_register_Asm_8:
6418  case ARM::VLD4DUPqWB_register_Asm_16:
6419  case ARM::VLD4DUPqWB_register_Asm_32: {
6420    MCInst TmpInst;
6421    unsigned Spacing;
6422    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6423    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6424    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6425                                            Spacing));
6426    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6427                                            Spacing * 2));
6428    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6429                                            Spacing * 3));
6430    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6431    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6432    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6433    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6434    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6435    TmpInst.addOperand(Inst.getOperand(5));
6436    Inst = TmpInst;
6437    return true;
6438  }
6439
6440  // VLD4 multiple 4-element structure instructions.
6441  case ARM::VLD4dAsm_8:
6442  case ARM::VLD4dAsm_16:
6443  case ARM::VLD4dAsm_32:
6444  case ARM::VLD4qAsm_8:
6445  case ARM::VLD4qAsm_16:
6446  case ARM::VLD4qAsm_32: {
6447    MCInst TmpInst;
6448    unsigned Spacing;
6449    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6450    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6451    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6452                                            Spacing));
6453    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6454                                            Spacing * 2));
6455    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6456                                            Spacing * 3));
6457    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6458    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6459    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6460    TmpInst.addOperand(Inst.getOperand(4));
6461    Inst = TmpInst;
6462    return true;
6463  }
6464
6465  case ARM::VLD4dWB_fixed_Asm_8:
6466  case ARM::VLD4dWB_fixed_Asm_16:
6467  case ARM::VLD4dWB_fixed_Asm_32:
6468  case ARM::VLD4qWB_fixed_Asm_8:
6469  case ARM::VLD4qWB_fixed_Asm_16:
6470  case ARM::VLD4qWB_fixed_Asm_32: {
6471    MCInst TmpInst;
6472    unsigned Spacing;
6473    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6474    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6475    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6476                                            Spacing));
6477    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6478                                            Spacing * 2));
6479    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6480                                            Spacing * 3));
6481    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6482    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6483    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6484    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6485    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6486    TmpInst.addOperand(Inst.getOperand(4));
6487    Inst = TmpInst;
6488    return true;
6489  }
6490
6491  case ARM::VLD4dWB_register_Asm_8:
6492  case ARM::VLD4dWB_register_Asm_16:
6493  case ARM::VLD4dWB_register_Asm_32:
6494  case ARM::VLD4qWB_register_Asm_8:
6495  case ARM::VLD4qWB_register_Asm_16:
6496  case ARM::VLD4qWB_register_Asm_32: {
6497    MCInst TmpInst;
6498    unsigned Spacing;
6499    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6500    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6501    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6502                                            Spacing));
6503    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6504                                            Spacing * 2));
6505    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6506                                            Spacing * 3));
6507    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6508    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6509    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6510    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6511    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6512    TmpInst.addOperand(Inst.getOperand(5));
6513    Inst = TmpInst;
6514    return true;
6515  }
6516
6517  // VST3 multiple 3-element structure instructions.
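  // For the multi-structure stores the address operands come first and the
  // register list is fanned out from Vd afterwards; there is no lane or
  // tied-source operand. Illustratively, "vst3.8 {d0, d1, d2}, [r4]" maps to
  // VST3d8, while the writeback forms below additionally carry the tied
  // Rn_wb operand plus either a zero register (fixed increment) or Rm
  // (register update).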
6518  case ARM::VST3dAsm_8:
6519  case ARM::VST3dAsm_16:
6520  case ARM::VST3dAsm_32:
6521  case ARM::VST3qAsm_8:
6522  case ARM::VST3qAsm_16:
6523  case ARM::VST3qAsm_32: {
6524    MCInst TmpInst;
6525    unsigned Spacing;
6526    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6527    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6528    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6529    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6530    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6531                                            Spacing));
6532    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6533                                            Spacing * 2));
6534    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6535    TmpInst.addOperand(Inst.getOperand(4));
6536    Inst = TmpInst;
6537    return true;
6538  }
6539
6540  case ARM::VST3dWB_fixed_Asm_8:
6541  case ARM::VST3dWB_fixed_Asm_16:
6542  case ARM::VST3dWB_fixed_Asm_32:
6543  case ARM::VST3qWB_fixed_Asm_8:
6544  case ARM::VST3qWB_fixed_Asm_16:
6545  case ARM::VST3qWB_fixed_Asm_32: {
6546    MCInst TmpInst;
6547    unsigned Spacing;
6548    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6549    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6550    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6551    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6552    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6553    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6554    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6555                                            Spacing));
6556    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6557                                            Spacing * 2));
6558    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6559    TmpInst.addOperand(Inst.getOperand(4));
6560    Inst = TmpInst;
6561    return true;
6562  }
6563
6564  case ARM::VST3dWB_register_Asm_8:
6565  case ARM::VST3dWB_register_Asm_16:
6566  case ARM::VST3dWB_register_Asm_32:
6567  case ARM::VST3qWB_register_Asm_8:
6568  case ARM::VST3qWB_register_Asm_16:
6569  case ARM::VST3qWB_register_Asm_32: {
6570    MCInst TmpInst;
6571    unsigned Spacing;
6572    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6573    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6574    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6575    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6576    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6577    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6578    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6579                                            Spacing));
6580    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6581                                            Spacing * 2));
6582    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6583    TmpInst.addOperand(Inst.getOperand(5));
6584    Inst = TmpInst;
6585    return true;
6586  }
6587
6588  // VST4 multiple 4-element structure instructions.
6589  case ARM::VST4dAsm_8:
6590  case ARM::VST4dAsm_16:
6591  case ARM::VST4dAsm_32:
6592  case ARM::VST4qAsm_8:
6593  case ARM::VST4qAsm_16:
6594  case ARM::VST4qAsm_32: {
6595    MCInst TmpInst;
6596    unsigned Spacing;
6597    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6598    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6599    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6600    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6601    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6602                                            Spacing));
6603    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6604                                            Spacing * 2));
6605    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6606                                            Spacing * 3));
6607    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6608    TmpInst.addOperand(Inst.getOperand(4));
6609    Inst = TmpInst;
6610    return true;
6611  }
6612
6613  case ARM::VST4dWB_fixed_Asm_8:
6614  case ARM::VST4dWB_fixed_Asm_16:
6615  case ARM::VST4dWB_fixed_Asm_32:
6616  case ARM::VST4qWB_fixed_Asm_8:
6617  case ARM::VST4qWB_fixed_Asm_16:
6618  case ARM::VST4qWB_fixed_Asm_32: {
6619    MCInst TmpInst;
6620    unsigned Spacing;
6621    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6622    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6623    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6624    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6625    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6626    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6627    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6628                                            Spacing));
6629    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6630                                            Spacing * 2));
6631    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6632                                            Spacing * 3));
6633    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6634    TmpInst.addOperand(Inst.getOperand(4));
6635    Inst = TmpInst;
6636    return true;
6637  }
6638
6639  case ARM::VST4dWB_register_Asm_8:
6640  case ARM::VST4dWB_register_Asm_16:
6641  case ARM::VST4dWB_register_Asm_32:
6642  case ARM::VST4qWB_register_Asm_8:
6643  case ARM::VST4qWB_register_Asm_16:
6644  case ARM::VST4qWB_register_Asm_32: {
6645    MCInst TmpInst;
6646    unsigned Spacing;
6647    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6648    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6649    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6650    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6651    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6652    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6653    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6654                                            Spacing));
6655    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6656                                            Spacing * 2));
6657    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6658                                            Spacing * 3));
6659    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6660    TmpInst.addOperand(Inst.getOperand(5));
6661    Inst = TmpInst;
6662    return true;
6663  }
6664
6665  // Handle encoding choice for the shift-immediate instructions.
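  // Prefer the 16-bit Thumb1 shift when the source and destination are the
  // same low register, the flag-setting behaviour matches the current
  // IT-block state (CPSR outside an IT block, no CPSR inside one), and the
  // user did not force the wide encoding with a ".w" suffix. Illustrative
  // example: "lsls r2, r2, #3" outside an IT block can be emitted as tLSLri
  // rather than t2LSLri.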
6666  case ARM::t2LSLri:
6667  case ARM::t2LSRri:
6668  case ARM::t2ASRri: {
6669    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6670        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6671        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6672        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6673         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6674      unsigned NewOpc;
6675      switch (Inst.getOpcode()) {
6676      default: llvm_unreachable("unexpected opcode");
6677      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6678      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6679      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6680      }
6681      // The Thumb1 operands aren't in the same order. Awesome, eh?
6682      MCInst TmpInst;
6683      TmpInst.setOpcode(NewOpc);
6684      TmpInst.addOperand(Inst.getOperand(0));
6685      TmpInst.addOperand(Inst.getOperand(5));
6686      TmpInst.addOperand(Inst.getOperand(1));
6687      TmpInst.addOperand(Inst.getOperand(2));
6688      TmpInst.addOperand(Inst.getOperand(3));
6689      TmpInst.addOperand(Inst.getOperand(4));
6690      Inst = TmpInst;
6691      return true;
6692    }
6693    return false;
6694  }
6695
6696  // Handle the Thumb2 mode MOV complex aliases.
6697  case ARM::t2MOVsr:
6698  case ARM::t2MOVSsr: {
6699    // Which instruction to expand to depends on the CCOut operand and on
6700    // whether we're in an IT block, provided the register operands are
6701    // low registers.
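    // e.g. (illustrative): 'movs r3, r3, lsr r2' outside an IT block can use
    // the 16-bit tLSRrr; with high registers it stays 32-bit.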
6702    bool isNarrow = false;
6703    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6704        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6705        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6706        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6707        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6708      isNarrow = true;
6709    MCInst TmpInst;
6710    unsigned newOpc;
6711    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6712    default: llvm_unreachable("unexpected opcode!");
6713    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6714    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6715    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6716    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6717    }
6718    TmpInst.setOpcode(newOpc);
6719    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6720    if (isNarrow)
6721      TmpInst.addOperand(MCOperand::CreateReg(
6722          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6723    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6724    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6725    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6726    TmpInst.addOperand(Inst.getOperand(5));
6727    if (!isNarrow)
6728      TmpInst.addOperand(MCOperand::CreateReg(
6729          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6730    Inst = TmpInst;
6731    return true;
6732  }
6733  case ARM::t2MOVsi:
6734  case ARM::t2MOVSsi: {
6735    // Which instruction to expand to depends on the CCOut operand and on
6736    // whether we're in an IT block, provided the register operands are
6737    // low registers.
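    // e.g. (illustrative): 'movs r0, r1, lsl #2' outside an IT block becomes
    // the 16-bit tLSLri; ror and rrx shifts always use the 32-bit encoding.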
6738    bool isNarrow = false;
6739    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6740        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6741        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6742      isNarrow = true;
6743    MCInst TmpInst;
6744    unsigned newOpc;
6745    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6746    default: llvm_unreachable("unexpected opcode!");
6747    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6748    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6749    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6750    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6751    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6752    }
6753    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6754    if (Amount == 32) Amount = 0;
6755    TmpInst.setOpcode(newOpc);
6756    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6757    if (isNarrow)
6758      TmpInst.addOperand(MCOperand::CreateReg(
6759          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6760    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6761    if (newOpc != ARM::t2RRX)
6762      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6763    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6764    TmpInst.addOperand(Inst.getOperand(4));
6765    if (!isNarrow)
6766      TmpInst.addOperand(MCOperand::CreateReg(
6767          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6768    Inst = TmpInst;
6769    return true;
6770  }
6771  // Handle the ARM mode MOV complex aliases.
6772  case ARM::ASRr:
6773  case ARM::LSRr:
6774  case ARM::LSLr:
6775  case ARM::RORr: {
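    // These are aliases for MOV with a register-controlled shift, e.g.
    // (illustrative): 'asr r0, r1, r2' is emitted as 'mov r0, r1, asr r2'.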
6776    ARM_AM::ShiftOpc ShiftTy;
6777    switch(Inst.getOpcode()) {
6778    default: llvm_unreachable("unexpected opcode!");
6779    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6780    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6781    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6782    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6783    }
6784    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6785    MCInst TmpInst;
6786    TmpInst.setOpcode(ARM::MOVsr);
6787    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6788    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6789    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6790    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6791    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6792    TmpInst.addOperand(Inst.getOperand(4));
6793    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6794    Inst = TmpInst;
6795    return true;
6796  }
6797  case ARM::ASRi:
6798  case ARM::LSRi:
6799  case ARM::LSLi:
6800  case ARM::RORi: {
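    // These are aliases for MOV with an immediate shift, e.g. (illustrative):
    // 'lsl r0, r1, #4' is emitted as 'mov r0, r1, lsl #4'.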
6801    ARM_AM::ShiftOpc ShiftTy;
6802    switch(Inst.getOpcode()) {
6803    default: llvm_unreachable("unexpected opcode!");
6804    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6805    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6806    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6807    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6808    }
6809    // A shift by zero is a plain MOVr, not a MOVsi.
6810    unsigned Amt = Inst.getOperand(2).getImm();
6811    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6812    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6813    MCInst TmpInst;
6814    TmpInst.setOpcode(Opc);
6815    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6816    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6817    if (Opc == ARM::MOVsi)
6818      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6819    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6820    TmpInst.addOperand(Inst.getOperand(4));
6821    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6822    Inst = TmpInst;
6823    return true;
6824  }
6825  case ARM::RRXi: {
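    // Alias (illustrative): 'rrx r0, r1' is emitted as 'mov r0, r1, rrx'.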
6826    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6827    MCInst TmpInst;
6828    TmpInst.setOpcode(ARM::MOVsi);
6829    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6830    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6831    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6832    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6833    TmpInst.addOperand(Inst.getOperand(3));
6834    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6835    Inst = TmpInst;
6836    return true;
6837  }
6838  case ARM::t2LDMIA_UPD: {
6839    // If this is a load of a single register, then we should use
6840    // a post-indexed LDR instruction instead, per the ARM ARM.
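    // e.g. (illustrative): 'ldmia.w r0!, {r1}' is emitted as a post-indexed
    // LDR: 'ldr r1, [r0], #4'.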
6841    if (Inst.getNumOperands() != 5)
6842      return false;
6843    MCInst TmpInst;
6844    TmpInst.setOpcode(ARM::t2LDR_POST);
6845    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6846    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6847    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6848    TmpInst.addOperand(MCOperand::CreateImm(4));
6849    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6850    TmpInst.addOperand(Inst.getOperand(3));
6851    Inst = TmpInst;
6852    return true;
6853  }
6854  case ARM::t2STMDB_UPD: {
6855    // If this is a store of a single register, then we should use
6856    // a pre-indexed STR instruction instead, per the ARM ARM.
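    // e.g. (illustrative): 'stmdb.w r0!, {r1}' is emitted as a pre-indexed
    // STR: 'str r1, [r0, #-4]!'.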
6857    if (Inst.getNumOperands() != 5)
6858      return false;
6859    MCInst TmpInst;
6860    TmpInst.setOpcode(ARM::t2STR_PRE);
6861    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6862    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6863    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6864    TmpInst.addOperand(MCOperand::CreateImm(-4));
6865    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6866    TmpInst.addOperand(Inst.getOperand(3));
6867    Inst = TmpInst;
6868    return true;
6869  }
6870  case ARM::LDMIA_UPD:
6871    // If this is a load of a single register via a 'pop', then we should use
6872    // a post-indexed LDR instruction instead, per the ARM ARM.
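    // e.g. (illustrative): 'pop {r3}' in ARM mode is emitted as a
    // post-indexed LDR: 'ldr r3, [sp], #4'.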
6873    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6874        Inst.getNumOperands() == 5) {
6875      MCInst TmpInst;
6876      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6877      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6878      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6879      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6880      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6881      TmpInst.addOperand(MCOperand::CreateImm(4));
6882      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6883      TmpInst.addOperand(Inst.getOperand(3));
6884      Inst = TmpInst;
6885      return true;
6886    }
6887    break;
6888  case ARM::STMDB_UPD:
6889    // If this is a store of a single register via a 'push', then we should use
6890    // a pre-indexed STR instruction instead, per the ARM ARM.
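    // e.g. (illustrative): 'push {r3}' in ARM mode is emitted as a
    // pre-indexed STR: 'str r3, [sp, #-4]!'.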
6891    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6892        Inst.getNumOperands() == 5) {
6893      MCInst TmpInst;
6894      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6895      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6896      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6897      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6898      TmpInst.addOperand(MCOperand::CreateImm(-4));
6899      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6900      TmpInst.addOperand(Inst.getOperand(3));
6901      Inst = TmpInst;
6902    }
6903    break;
6904  case ARM::t2ADDri12:
6905    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6906    // mnemonic was used (not "addw"), encoding T3 is preferred.
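    // e.g. (illustrative): 'add r0, r1, #16' selects t2ADDri, while an
    // explicit 'addw r0, r1, #16' keeps the 12-bit encoding.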
6907    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6908        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6909      break;
6910    Inst.setOpcode(ARM::t2ADDri);
6911    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6912    break;
6913  case ARM::t2SUBri12:
6914    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6915    // mnemonic was used (not "subw"), encoding T3 is preferred.
6916    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6917        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6918      break;
6919    Inst.setOpcode(ARM::t2SUBri);
6920    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6921    break;
6922  case ARM::tADDi8:
6923    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6924    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6925    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6926    // to encoding T1 if <Rd> is omitted."
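    // e.g. (illustrative): 'adds r1, r1, #4' (Rd written explicitly) uses
    // tADDi3, while 'adds r1, #4' stays with tADDi8.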
6927    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6928      Inst.setOpcode(ARM::tADDi3);
6929      return true;
6930    }
6931    break;
6932  case ARM::tSUBi8:
6933    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6934    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6935    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6936    // to encoding T1 if <Rd> is omitted."
6937    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6938      Inst.setOpcode(ARM::tSUBi3);
6939      return true;
6940    }
6941    break;
6942  case ARM::t2ADDri:
6943  case ARM::t2SUBri: {
6944    // If the destination and first source operand are the same, and
6945    // the flags are compatible with the current IT status, use encoding T2
6946    // instead of T3. For compatibility with the system 'as'. Make sure the
6947    // wide encoding wasn't explicit.
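    // e.g. (illustrative): 'adds r1, r1, #200' outside an IT block narrows
    // to tADDi8; an explicit '.w' suffix keeps the 32-bit encoding.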
6948    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6949        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
6950        (unsigned)Inst.getOperand(2).getImm() > 255 ||
6951        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
6952        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
6953        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6954         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6955      break;
6956    MCInst TmpInst;
6957    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
6958                      ARM::tADDi8 : ARM::tSUBi8);
6959    TmpInst.addOperand(Inst.getOperand(0));
6960    TmpInst.addOperand(Inst.getOperand(5));
6961    TmpInst.addOperand(Inst.getOperand(0));
6962    TmpInst.addOperand(Inst.getOperand(2));
6963    TmpInst.addOperand(Inst.getOperand(3));
6964    TmpInst.addOperand(Inst.getOperand(4));
6965    Inst = TmpInst;
6966    return true;
6967  }
6968  case ARM::t2ADDrr: {
6969    // If the destination and first source operand are the same, and
6970    // there's no setting of the flags, use encoding T2 instead of T3.
6971    // Note that this is only for ADD, not SUB. This mirrors the system
6972    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
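    // e.g. (illustrative): 'add r0, r0, r8' narrows to tADDhirr, while
    // 'adds r0, r0, r8' stays with the 32-bit t2ADDrr.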
6973    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6974        Inst.getOperand(5).getReg() != 0 ||
6975        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6976         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6977      break;
6978    MCInst TmpInst;
6979    TmpInst.setOpcode(ARM::tADDhirr);
6980    TmpInst.addOperand(Inst.getOperand(0));
6981    TmpInst.addOperand(Inst.getOperand(0));
6982    TmpInst.addOperand(Inst.getOperand(2));
6983    TmpInst.addOperand(Inst.getOperand(3));
6984    TmpInst.addOperand(Inst.getOperand(4));
6985    Inst = TmpInst;
6986    return true;
6987  }
6988  case ARM::tB:
6989    // A Thumb conditional branch outside of an IT block is a tBcc.
6990    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6991      Inst.setOpcode(ARM::tBcc);
6992      return true;
6993    }
6994    break;
6995  case ARM::t2B:
6996    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6997    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6998      Inst.setOpcode(ARM::t2Bcc);
6999      return true;
7000    }
7001    break;
7002  case ARM::t2Bcc:
7003    // If the conditional is AL or we're in an IT block, we really want t2B.
7004    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7005      Inst.setOpcode(ARM::t2B);
7006      return true;
7007    }
7008    break;
7009  case ARM::tBcc:
7010    // If the conditional is AL, we really want tB.
7011    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7012      Inst.setOpcode(ARM::tB);
7013      return true;
7014    }
7015    break;
7016  case ARM::tLDMIA: {
7017    // If the register list contains any high registers, or if the writeback
7018    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7019    // instead if we're in Thumb2. Otherwise, this should have generated
7020    // an error in validateInstruction().
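    // e.g. (illustrative): 'ldmia r0, {r1, r8}' needs the 32-bit t2LDMIA
    // because r8 cannot be encoded in the 16-bit register list.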
7021    unsigned Rn = Inst.getOperand(0).getReg();
7022    bool hasWritebackToken =
7023      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7024       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7025    bool listContainsBase;
7026    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7027        (!listContainsBase && !hasWritebackToken) ||
7028        (listContainsBase && hasWritebackToken)) {
7029      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7030      assert (isThumbTwo());
7031      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7032      // If we're switching to the updating version, we need to insert
7033      // the writeback tied operand.
7034      if (hasWritebackToken)
7035        Inst.insert(Inst.begin(),
7036                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7037      return true;
7038    }
7039    break;
7040  }
7041  case ARM::tSTMIA_UPD: {
7042    // If the register list contains any high registers, we need to use
7043    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7044    // should have generated an error in validateInstruction().
7045    unsigned Rn = Inst.getOperand(0).getReg();
7046    bool listContainsBase;
7047    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7048      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7049      assert (isThumbTwo());
7050      Inst.setOpcode(ARM::t2STMIA_UPD);
7051      return true;
7052    }
7053    break;
7054  }
7055  case ARM::tPOP: {
7056    bool listContainsBase;
7057    // If the register list contains any high registers, we need to use
7058    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7059    // should have generated an error in validateInstruction().
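    // e.g. (illustrative): 'pop {r0, r8}' becomes 'ldmia.w sp!, {r0, r8}'
    // (t2LDMIA_UPD).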
7060    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7061      return false;
7062    assert (isThumbTwo());
7063    Inst.setOpcode(ARM::t2LDMIA_UPD);
7064    // Add the base register and writeback operands.
7065    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7066    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7067    return true;
7068  }
7069  case ARM::tPUSH: {
7070    bool listContainsBase;
7071    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7072      return false;
7073    assert (isThumbTwo());
7074    Inst.setOpcode(ARM::t2STMDB_UPD);
7075    // Add the base register and writeback operands.
7076    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7077    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7078    return true;
7079  }
7080  case ARM::t2MOVi: {
7081    // If we can use the 16-bit encoding and the user didn't explicitly
7082    // request the 32-bit variant, transform it here.
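    // e.g. (illustrative): 'movs r5, #42' outside an IT block (or
    // 'mov r5, #42' inside one) narrows to tMOVi8; 'mov.w r5, #42' stays wide.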
7083    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7084        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7085        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7086         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7087        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7088        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7089         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7090      // The operands aren't in the same order for tMOVi8...
7091      MCInst TmpInst;
7092      TmpInst.setOpcode(ARM::tMOVi8);
7093      TmpInst.addOperand(Inst.getOperand(0));
7094      TmpInst.addOperand(Inst.getOperand(4));
7095      TmpInst.addOperand(Inst.getOperand(1));
7096      TmpInst.addOperand(Inst.getOperand(2));
7097      TmpInst.addOperand(Inst.getOperand(3));
7098      Inst = TmpInst;
7099      return true;
7100    }
7101    break;
7102  }
7103  case ARM::t2MOVr: {
7104    // If we can use the 16-bit encoding and the user didn't explicitly
7105    // request the 32-bit variant, transform it here.
7106    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7107        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7108        Inst.getOperand(2).getImm() == ARMCC::AL &&
7109        Inst.getOperand(4).getReg() == ARM::CPSR &&
7110        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7111         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7112      // The operands aren't the same for tMOV[S]r... (no cc_out)
7113      MCInst TmpInst;
7114      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7115      TmpInst.addOperand(Inst.getOperand(0));
7116      TmpInst.addOperand(Inst.getOperand(1));
7117      TmpInst.addOperand(Inst.getOperand(2));
7118      TmpInst.addOperand(Inst.getOperand(3));
7119      Inst = TmpInst;
7120      return true;
7121    }
7122    break;
7123  }
7124  case ARM::t2SXTH:
7125  case ARM::t2SXTB:
7126  case ARM::t2UXTH:
7127  case ARM::t2UXTB: {
7128    // If we can use the 16-bit encoding and the user didn't explicitly
7129    // request the 32-bit variant, transform it here.
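    // e.g. (illustrative): 'sxtb r2, r3' narrows to tSXTB, but
    // 'sxtb r2, r3, ror #8' must stay 32-bit.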
7130    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7131        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7132        Inst.getOperand(2).getImm() == 0 &&
7133        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7134         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7135      unsigned NewOpc;
7136      switch (Inst.getOpcode()) {
7137      default: llvm_unreachable("Illegal opcode!");
7138      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7139      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7140      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7141      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7142      }
7143      // The operands aren't the same for thumb1 (no rotate operand).
7144      MCInst TmpInst;
7145      TmpInst.setOpcode(NewOpc);
7146      TmpInst.addOperand(Inst.getOperand(0));
7147      TmpInst.addOperand(Inst.getOperand(1));
7148      TmpInst.addOperand(Inst.getOperand(3));
7149      TmpInst.addOperand(Inst.getOperand(4));
7150      Inst = TmpInst;
7151      return true;
7152    }
7153    break;
7154  }
7155  case ARM::MOVsi: {
7156    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7157    if (SOpc == ARM_AM::rrx) return false;
7158    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7159      // Shifting by zero is accepted as a vanilla 'MOVr'
7160      MCInst TmpInst;
7161      TmpInst.setOpcode(ARM::MOVr);
7162      TmpInst.addOperand(Inst.getOperand(0));
7163      TmpInst.addOperand(Inst.getOperand(1));
7164      TmpInst.addOperand(Inst.getOperand(3));
7165      TmpInst.addOperand(Inst.getOperand(4));
7166      TmpInst.addOperand(Inst.getOperand(5));
7167      Inst = TmpInst;
7168      return true;
7169    }
7170    return false;
7171  }
7172  case ARM::ANDrsi:
7173  case ARM::ORRrsi:
7174  case ARM::EORrsi:
7175  case ARM::BICrsi:
7176  case ARM::SUBrsi:
7177  case ARM::ADDrsi: {
7178    unsigned newOpc;
7179    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7180    if (SOpc == ARM_AM::rrx) return false;
7181    switch (Inst.getOpcode()) {
7182    default: llvm_unreachable("unexpected opcode!");
7183    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7184    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7185    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7186    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7187    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7188    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7189    }
7190    // If the shift is by zero, use the non-shifted instruction definition.
7191    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7192      MCInst TmpInst;
7193      TmpInst.setOpcode(newOpc);
7194      TmpInst.addOperand(Inst.getOperand(0));
7195      TmpInst.addOperand(Inst.getOperand(1));
7196      TmpInst.addOperand(Inst.getOperand(2));
7197      TmpInst.addOperand(Inst.getOperand(4));
7198      TmpInst.addOperand(Inst.getOperand(5));
7199      TmpInst.addOperand(Inst.getOperand(6));
7200      Inst = TmpInst;
7201      return true;
7202    }
7203    return false;
7204  }
7205  case ARM::ITasm:
7206  case ARM::t2IT: {
7207    // In the binary encoding, the mask bits for all but the first condition
7208    // use the low bit of the condition code value to mean 't'. The parsed
7209    // mask always uses 1 to mean 't', so XOR-toggle the bits if the low bit
7210    // of the condition code is zero. The encoding also expects the low bit
7211    // of the condition to be encoded as bit 4 of the mask operand, so mask
7212    // that in if needed.
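    // Worked example (illustrative, assuming the 1 == 't' convention above):
    // for 'itet eq' the parsed mask is 0b0110 with the stop bit at bit 1.
    // Since eq has a zero low bit, bits 3 and 2 are toggled, giving 0b1010.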
7213    MCOperand &MO = Inst.getOperand(1);
7214    unsigned Mask = MO.getImm();
7215    unsigned OrigMask = Mask;
7216    unsigned TZ = CountTrailingZeros_32(Mask);
7217    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7218      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7219      for (unsigned i = 3; i != TZ; --i)
7220        Mask ^= 1 << i;
7221    } else
7222      Mask |= 0x10;
7223    MO.setImm(Mask);
7224
7225    // Set up the IT block state according to the IT instruction we just
7226    // matched.
7227    assert(!inITBlock() && "nested IT blocks?!");
7228    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7229    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7230    ITState.CurPosition = 0;
7231    ITState.FirstCond = true;
7232    break;
7233  }
7234  }
7235  return false;
7236}
7237
7238unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7239  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7240  // suffix depending on whether they're in an IT block or not.
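  // For example (illustrative): in Thumb2, the 16-bit 'add r0, r0, #1' form
  // is only valid inside an IT block, while 'adds r0, r0, #1' is only valid
  // outside one.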
7241  unsigned Opc = Inst.getOpcode();
7242  const MCInstrDesc &MCID = getInstDesc(Opc);
7243  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7244    assert(MCID.hasOptionalDef() &&
7245           "optionally flag setting instruction missing optional def operand");
7246    assert(MCID.NumOperands == Inst.getNumOperands() &&
7247           "operand count mismatch!");
7248    // Find the optional-def operand (cc_out).
7249    unsigned OpNo;
7250    for (OpNo = 0;
7251         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7252         ++OpNo)
7253      ;
7254    // If we're parsing Thumb1, reject it completely.
7255    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7256      return Match_MnemonicFail;
7257    // If we're parsing Thumb2, which form is legal depends on whether we're
7258    // in an IT block.
7259    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7260        !inITBlock())
7261      return Match_RequiresITBlock;
7262    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7263        inITBlock())
7264      return Match_RequiresNotITBlock;
7265  }
7266  // Some high-register supporting Thumb1 encodings only allow both registers
7267  // to be from r0-r7 when in Thumb2.
7268  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7269           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7270           isARMLowRegister(Inst.getOperand(2).getReg()))
7271    return Match_RequiresThumb2;
7272  // Others only require ARMv6 or later.
7273  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7274           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7275           isARMLowRegister(Inst.getOperand(1).getReg()))
7276    return Match_RequiresV6;
7277  return Match_Success;
7278}
7279
7280bool ARMAsmParser::
7281MatchAndEmitInstruction(SMLoc IDLoc,
7282                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7283                        MCStreamer &Out) {
7284  MCInst Inst;
7285  unsigned ErrorInfo;
7286  unsigned MatchResult;
7287  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7288  switch (MatchResult) {
7289  default: break;
7290  case Match_Success:
7291    // Context sensitive operand constraints aren't handled by the matcher,
7292    // so check them here.
7293    if (validateInstruction(Inst, Operands)) {
7294      // Still progress the IT block, otherwise one wrong condition causes
7295      // nasty cascading errors.
7296      forwardITPosition();
7297      return true;
7298    }
7299
7300    // Some instructions need post-processing to, for example, tweak which
7301    // encoding is selected. Loop on it while changes happen so the
7302    // individual transformations can chain off each other. E.g.,
7303    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7304    while (processInstruction(Inst, Operands))
7305      ;
7306
7307    // Only move forward at the very end so that everything in validate
7308    // and process gets a consistent answer about whether we're in an IT
7309    // block.
7310    forwardITPosition();
7311
7312    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7313    // doesn't actually encode.
7314    if (Inst.getOpcode() == ARM::ITasm)
7315      return false;
7316
7317    Inst.setLoc(IDLoc);
7318    Out.EmitInstruction(Inst);
7319    return false;
7320  case Match_MissingFeature:
7321    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7322    return true;
7323  case Match_InvalidOperand: {
7324    SMLoc ErrorLoc = IDLoc;
7325    if (ErrorInfo != ~0U) {
7326      if (ErrorInfo >= Operands.size())
7327        return Error(IDLoc, "too few operands for instruction");
7328
7329      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7330      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7331    }
7332
7333    return Error(ErrorLoc, "invalid operand for instruction");
7334  }
7335  case Match_MnemonicFail:
7336    return Error(IDLoc, "invalid instruction",
7337                 ((ARMOperand*)Operands[0])->getLocRange());
7338  case Match_ConversionFail:
7339    // The converter function will have already emitted a diagnostic.
7340    return true;
7341  case Match_RequiresNotITBlock:
7342    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7343  case Match_RequiresITBlock:
7344    return Error(IDLoc, "instruction only valid inside IT block");
7345  case Match_RequiresV6:
7346    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7347  case Match_RequiresThumb2:
7348    return Error(IDLoc, "instruction variant requires Thumb2");
7349  }
7350
7351  llvm_unreachable("Implement any new match types added!");
7352}
7353
7354/// parseDirective parses the ARM-specific directives
7355bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7356  StringRef IDVal = DirectiveID.getIdentifier();
7357  if (IDVal == ".word")
7358    return parseDirectiveWord(4, DirectiveID.getLoc());
7359  else if (IDVal == ".thumb")
7360    return parseDirectiveThumb(DirectiveID.getLoc());
7361  else if (IDVal == ".arm")
7362    return parseDirectiveARM(DirectiveID.getLoc());
7363  else if (IDVal == ".thumb_func")
7364    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7365  else if (IDVal == ".code")
7366    return parseDirectiveCode(DirectiveID.getLoc());
7367  else if (IDVal == ".syntax")
7368    return parseDirectiveSyntax(DirectiveID.getLoc());
7369  else if (IDVal == ".unreq")
7370    return parseDirectiveUnreq(DirectiveID.getLoc());
7371  else if (IDVal == ".arch")
7372    return parseDirectiveArch(DirectiveID.getLoc());
7373  else if (IDVal == ".eabi_attribute")
7374    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7375  return true;
7376}
7377
7378/// parseDirectiveWord
7379///  ::= .word [ expression (, expression)* ]
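/// e.g. (illustrative): '.word 0x11223344, sym' emits each expression as a
/// 4-byte value.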
7380bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7381  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7382    for (;;) {
7383      const MCExpr *Value;
7384      if (getParser().ParseExpression(Value))
7385        return true;
7386
7387      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7388
7389      if (getLexer().is(AsmToken::EndOfStatement))
7390        break;
7391
7392      // FIXME: Improve diagnostic.
7393      if (getLexer().isNot(AsmToken::Comma))
7394        return Error(L, "unexpected token in directive");
7395      Parser.Lex();
7396    }
7397  }
7398
7399  Parser.Lex();
7400  return false;
7401}
7402
7403/// parseDirectiveThumb
7404///  ::= .thumb
7405bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7406  if (getLexer().isNot(AsmToken::EndOfStatement))
7407    return Error(L, "unexpected token in directive");
7408  Parser.Lex();
7409
7410  if (!isThumb())
7411    SwitchMode();
7412  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7413  return false;
7414}
7415
7416/// parseDirectiveARM
7417///  ::= .arm
7418bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7419  if (getLexer().isNot(AsmToken::EndOfStatement))
7420    return Error(L, "unexpected token in directive");
7421  Parser.Lex();
7422
7423  if (isThumb())
7424    SwitchMode();
7425  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7426  return false;
7427}
7428
7429/// parseDirectiveThumbFunc
7430///  ::= .thumb_func symbol_name
7431bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7432  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7433  bool isMachO = MAI.hasSubsectionsViaSymbols();
7434  StringRef Name;
7435  bool needFuncName = true;
7436
7437  // Darwin asm has an (optional) function name after the .thumb_func
7438  // directive; ELF doesn't.
7439  if (isMachO) {
7440    const AsmToken &Tok = Parser.getTok();
7441    if (Tok.isNot(AsmToken::EndOfStatement)) {
7442      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7443        return Error(L, "unexpected token in .thumb_func directive");
7444      Name = Tok.getIdentifier();
7445      Parser.Lex(); // Consume the identifier token.
7446      needFuncName = false;
7447    }
7448  }
7449
7450  if (getLexer().isNot(AsmToken::EndOfStatement))
7451    return Error(L, "unexpected token in directive");
7452
7453  // Eat the end of statement and any blank lines that follow.
7454  while (getLexer().is(AsmToken::EndOfStatement))
7455    Parser.Lex();
7456
7457  // FIXME: assuming function name will be the line following .thumb_func
7458  // We really should be checking the next symbol definition even if there's
7459  // stuff in between.
7460  if (needFuncName) {
7461    Name = Parser.getTok().getIdentifier();
7462  }
7463
7464  // Mark symbol as a thumb symbol.
7465  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7466  getParser().getStreamer().EmitThumbFunc(Func);
7467  return false;
7468}
7469
7470/// parseDirectiveSyntax
7471///  ::= .syntax unified | divided
7472bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7473  const AsmToken &Tok = Parser.getTok();
7474  if (Tok.isNot(AsmToken::Identifier))
7475    return Error(L, "unexpected token in .syntax directive");
7476  StringRef Mode = Tok.getString();
7477  if (Mode == "unified" || Mode == "UNIFIED")
7478    Parser.Lex();
7479  else if (Mode == "divided" || Mode == "DIVIDED")
7480    return Error(L, "'.syntax divided' arm assembly not supported");
7481  else
7482    return Error(L, "unrecognized syntax mode in .syntax directive");
7483
7484  if (getLexer().isNot(AsmToken::EndOfStatement))
7485    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7486  Parser.Lex();
7487
7488  // TODO tell the MC streamer the mode
7489  // getParser().getStreamer().Emit???();
7490  return false;
7491}
7492
7493/// parseDirectiveCode
7494///  ::= .code 16 | 32
7495bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7496  const AsmToken &Tok = Parser.getTok();
7497  if (Tok.isNot(AsmToken::Integer))
7498    return Error(L, "unexpected token in .code directive");
7499  int64_t Val = Parser.getTok().getIntVal();
7500  if (Val == 16)
7501    Parser.Lex();
7502  else if (Val == 32)
7503    Parser.Lex();
7504  else
7505    return Error(L, "invalid operand to .code directive");
7506
7507  if (getLexer().isNot(AsmToken::EndOfStatement))
7508    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7509  Parser.Lex();
7510
7511  if (Val == 16) {
7512    if (!isThumb())
7513      SwitchMode();
7514    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7515  } else {
7516    if (isThumb())
7517      SwitchMode();
7518    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7519  }
7520
7521  return false;
7522}
7523
7524/// parseDirectiveReq
7525///  ::= name .req registername
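/// e.g. (illustrative): 'fp .req r11' lets 'fp' be written wherever r11 is
/// accepted.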
7526bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7527  Parser.Lex(); // Eat the '.req' token.
7528  unsigned Reg;
7529  SMLoc SRegLoc, ERegLoc;
7530  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7531    Parser.EatToEndOfStatement();
7532    return Error(SRegLoc, "register name expected");
7533  }
7534
7535  // Shouldn't be anything else.
7536  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7537    Parser.EatToEndOfStatement();
7538    return Error(Parser.getTok().getLoc(),
7539                 "unexpected input in .req directive.");
7540  }
7541
7542  Parser.Lex(); // Consume the EndOfStatement
7543
7544  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7545    return Error(SRegLoc, "redefinition of '" + Name +
7546                          "' does not match original.");
7547
7548  return false;
7549}
7550
7551/// parseDirectiveUnreq
7552///  ::= .unreq registername
7553bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7554  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7555    Parser.EatToEndOfStatement();
7556    return Error(L, "unexpected input in .unreq directive.");
7557  }
7558  RegisterReqs.erase(Parser.getTok().getIdentifier());
7559  Parser.Lex(); // Eat the identifier.
7560  return false;
7561}
7562
7563/// parseDirectiveArch
7564///  ::= .arch token
7565bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7566  return true;
7567}
7568
7569/// parseDirectiveEabiAttr
7570///  ::= .eabi_attribute int, int
7571bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7572  return true;
7573}
7574
7575extern "C" void LLVMInitializeARMAsmLexer();
7576
7577/// Force static initialization.
7578extern "C" void LLVMInitializeARMAsmParser() {
7579  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7580  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7581  LLVMInitializeARMAsmLexer();
7582}
7583
7584#define GET_REGISTER_MATCHER
7585#define GET_MATCHER_IMPLEMENTATION
7586#include "ARMGenAsmMatcher.inc"
7587