ARMAsmParser.cpp revision b56e4115ed33dae56108ed4ce88ee3a0e0392bfc
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registered via the .req directive.
50  StringMap<unsigned> RegisterReqs;
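  // For example, a "foo .req r5" directive is expected to add a "foo" -> R5
  // entry here, and ".unreq foo" (see parseDirectiveUnreq) to remove it again.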
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // first instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
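  // Illustration of the mask arithmetic above: a mask of 0b1000 has three
  // trailing zeros, so the IT block holds 4 - 3 = 1 conditional instruction,
  // while a mask of 0b0110 (one trailing zero) describes a block of
  // 4 - 1 = 3.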
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  bool Warning(SMLoc L, const Twine &Msg,
86               ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
87    return Parser.Warning(L, Msg, Ranges);
88  }
89  bool Error(SMLoc L, const Twine &Msg,
90             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
91    return Parser.Error(L, Msg, Ranges);
92  }
93
94  int tryParseRegister();
95  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
96  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
97  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
98  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
99  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
100  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
101  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
102                              unsigned &ShiftAmount);
103  bool parseDirectiveWord(unsigned Size, SMLoc L);
104  bool parseDirectiveThumb(SMLoc L);
105  bool parseDirectiveARM(SMLoc L);
106  bool parseDirectiveThumbFunc(SMLoc L);
107  bool parseDirectiveCode(SMLoc L);
108  bool parseDirectiveSyntax(SMLoc L);
109  bool parseDirectiveReq(StringRef Name, SMLoc L);
110  bool parseDirectiveUnreq(SMLoc L);
111  bool parseDirectiveArch(SMLoc L);
112  bool parseDirectiveEabiAttr(SMLoc L);
113
114  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
115                          bool &CarrySetting, unsigned &ProcessorIMod,
116                          StringRef &ITMask);
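  // As an illustration, splitMnemonic is expected to take a full UAL mnemonic
  // such as "addseq" and split it into the base mnemonic "add", CarrySetting
  // set for the 's' suffix, and PredicationCode ARMCC::EQ for the "eq" suffix.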
117  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
118                             bool &CanAcceptPredicationCode);
119
120  bool isThumb() const {
121    // FIXME: Can tablegen auto-generate this?
122    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
123  }
124  bool isThumbOne() const {
125    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
126  }
127  bool isThumbTwo() const {
128    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
129  }
130  bool hasV6Ops() const {
131    return STI.getFeatureBits() & ARM::HasV6Ops;
132  }
133  bool hasV7Ops() const {
134    return STI.getFeatureBits() & ARM::HasV7Ops;
135  }
136  void SwitchMode() {
137    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
138    setAvailableFeatures(FB);
139  }
140  bool isMClass() const {
141    return STI.getFeatureBits() & ARM::FeatureMClass;
142  }
143
144  /// @name Auto-generated Match Functions
145  /// {
146
147#define GET_ASSEMBLER_HEADER
148#include "ARMGenAsmMatcher.inc"
149
150  /// }
151
152  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseCoprocNumOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseCoprocRegOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseCoprocOptionOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parseMemBarrierOptOperand(
160    SmallVectorImpl<MCParsedAsmOperand*>&);
161  OperandMatchResultTy parseProcIFlagsOperand(
162    SmallVectorImpl<MCParsedAsmOperand*>&);
163  OperandMatchResultTy parseMSRMaskOperand(
164    SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
166                                   StringRef Op, int Low, int High);
167  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
168    return parsePKHImm(O, "lsl", 0, 31);
169  }
170  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
171    return parsePKHImm(O, "asr", 1, 32);
172  }
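  // These ranges mirror the UAL syntax: "pkhbt r0, r1, r2, lsl #4" accepts a
  // shift of 0-31, while "pkhtb r0, r1, r2, asr #16" requires 1-32.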
173  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
176  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
177  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
178  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
179  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
180  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
181  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
182
183  // Asm Match Converter Methods
184  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
185                    const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
187                    const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
197                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
199                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
201                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
205                             const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
207                             const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
209                             const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
211                  const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
213                  const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
215                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
217                        const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
219                     const SmallVectorImpl<MCParsedAsmOperand*> &);
220  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
221                        const SmallVectorImpl<MCParsedAsmOperand*> &);
222  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
223                     const SmallVectorImpl<MCParsedAsmOperand*> &);
224  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
225                        const SmallVectorImpl<MCParsedAsmOperand*> &);
226
227  bool validateInstruction(MCInst &Inst,
228                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
229  bool processInstruction(MCInst &Inst,
230                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
231  bool shouldOmitCCOutOperand(StringRef Mnemonic,
232                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
233
234public:
235  enum ARMMatchResultTy {
236    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
237    Match_RequiresNotITBlock,
238    Match_RequiresV6,
239    Match_RequiresThumb2
240  };
241
242  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
243    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
244    MCAsmParserExtension::Initialize(_Parser);
245
246    // Cache the MCRegisterInfo.
247    MRI = &getContext().getRegisterInfo();
248
249    // Initialize the set of available features.
250    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
251
252    // Not in an ITBlock to start with.
253    ITState.CurPosition = ~0U;
254  }
255
256  // Implementation of the MCTargetAsmParser interface:
257  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
258  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
259                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
260  bool ParseDirective(AsmToken DirectiveID);
261
262  unsigned checkTargetMatchPredicate(MCInst &Inst);
263
264  bool MatchAndEmitInstruction(SMLoc IDLoc,
265                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
266                               MCStreamer &Out);
267};
268} // end anonymous namespace
269
270namespace {
271
272/// ARMOperand - Instances of this class represent a parsed ARM machine
273/// instruction.
274class ARMOperand : public MCParsedAsmOperand {
275  enum KindTy {
276    k_CondCode,
277    k_CCOut,
278    k_ITCondMask,
279    k_CoprocNum,
280    k_CoprocReg,
281    k_CoprocOption,
282    k_Immediate,
283    k_MemBarrierOpt,
284    k_Memory,
285    k_PostIndexRegister,
286    k_MSRMask,
287    k_ProcIFlags,
288    k_VectorIndex,
289    k_Register,
290    k_RegisterList,
291    k_DPRRegisterList,
292    k_SPRRegisterList,
293    k_VectorList,
294    k_VectorListAllLanes,
295    k_VectorListIndexed,
296    k_ShiftedRegister,
297    k_ShiftedImmediate,
298    k_ShifterImmediate,
299    k_RotateImmediate,
300    k_BitfieldDescriptor,
301    k_Token
302  } Kind;
303
304  SMLoc StartLoc, EndLoc;
305  SmallVector<unsigned, 8> Registers;
306
307  union {
308    struct {
309      ARMCC::CondCodes Val;
310    } CC;
311
312    struct {
313      unsigned Val;
314    } Cop;
315
316    struct {
317      unsigned Val;
318    } CoprocOption;
319
320    struct {
321      unsigned Mask:4;
322    } ITMask;
323
324    struct {
325      ARM_MB::MemBOpt Val;
326    } MBOpt;
327
328    struct {
329      ARM_PROC::IFlags Val;
330    } IFlags;
331
332    struct {
333      unsigned Val;
334    } MMask;
335
336    struct {
337      const char *Data;
338      unsigned Length;
339    } Tok;
340
341    struct {
342      unsigned RegNum;
343    } Reg;
344
345    // A vector register list is a sequential list of 1 to 4 registers.
346    struct {
347      unsigned RegNum;
348      unsigned Count;
349      unsigned LaneIndex;
350      bool isDoubleSpaced;
351    } VectorList;
352
353    struct {
354      unsigned Val;
355    } VectorIndex;
356
357    struct {
358      const MCExpr *Val;
359    } Imm;
360
361    /// Combined record for all forms of ARM address expressions.
362    struct {
363      unsigned BaseRegNum;
364      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
365      // was specified.
366      const MCConstantExpr *OffsetImm;  // Offset immediate value
367      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
368      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
369      unsigned ShiftImm;        // shift for OffsetReg.
370      unsigned Alignment;       // 0 = no alignment specified
371                                // n = alignment in bytes (2, 4, 8, 16, or 32)
372      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
373    } Memory;
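    // As a rough illustration, an operand written "[r0, r1, lsl #2]" is
    // represented here with BaseRegNum = R0, OffsetRegNum = R1, a null
    // OffsetImm, ShiftType = ARM_AM::lsl and ShiftImm = 2.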
374
375    struct {
376      unsigned RegNum;
377      bool isAdd;
378      ARM_AM::ShiftOpc ShiftTy;
379      unsigned ShiftImm;
380    } PostIdxReg;
381
382    struct {
383      bool isASR;
384      unsigned Imm;
385    } ShifterImm;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftReg;
390      unsigned ShiftImm;
391    } RegShiftedReg;
392    struct {
393      ARM_AM::ShiftOpc ShiftTy;
394      unsigned SrcReg;
395      unsigned ShiftImm;
396    } RegShiftedImm;
397    struct {
398      unsigned Imm;
399    } RotImm;
400    struct {
401      unsigned LSB;
402      unsigned Width;
403    } Bitfield;
404  };
405
406  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
407public:
408  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
409    Kind = o.Kind;
410    StartLoc = o.StartLoc;
411    EndLoc = o.EndLoc;
412    switch (Kind) {
413    case k_CondCode:
414      CC = o.CC;
415      break;
416    case k_ITCondMask:
417      ITMask = o.ITMask;
418      break;
419    case k_Token:
420      Tok = o.Tok;
421      break;
422    case k_CCOut:
423    case k_Register:
424      Reg = o.Reg;
425      break;
426    case k_RegisterList:
427    case k_DPRRegisterList:
428    case k_SPRRegisterList:
429      Registers = o.Registers;
430      break;
431    case k_VectorList:
432    case k_VectorListAllLanes:
433    case k_VectorListIndexed:
434      VectorList = o.VectorList;
435      break;
436    case k_CoprocNum:
437    case k_CoprocReg:
438      Cop = o.Cop;
439      break;
440    case k_CoprocOption:
441      CoprocOption = o.CoprocOption;
442      break;
443    case k_Immediate:
444      Imm = o.Imm;
445      break;
446    case k_MemBarrierOpt:
447      MBOpt = o.MBOpt;
448      break;
449    case k_Memory:
450      Memory = o.Memory;
451      break;
452    case k_PostIndexRegister:
453      PostIdxReg = o.PostIdxReg;
454      break;
455    case k_MSRMask:
456      MMask = o.MMask;
457      break;
458    case k_ProcIFlags:
459      IFlags = o.IFlags;
460      break;
461    case k_ShifterImmediate:
462      ShifterImm = o.ShifterImm;
463      break;
464    case k_ShiftedRegister:
465      RegShiftedReg = o.RegShiftedReg;
466      break;
467    case k_ShiftedImmediate:
468      RegShiftedImm = o.RegShiftedImm;
469      break;
470    case k_RotateImmediate:
471      RotImm = o.RotImm;
472      break;
473    case k_BitfieldDescriptor:
474      Bitfield = o.Bitfield;
475      break;
476    case k_VectorIndex:
477      VectorIndex = o.VectorIndex;
478      break;
479    }
480  }
481
482  /// getStartLoc - Get the location of the first token of this operand.
483  SMLoc getStartLoc() const { return StartLoc; }
484  /// getEndLoc - Get the location of the last token of this operand.
485  SMLoc getEndLoc() const { return EndLoc; }
486
487  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
488
489  ARMCC::CondCodes getCondCode() const {
490    assert(Kind == k_CondCode && "Invalid access!");
491    return CC.Val;
492  }
493
494  unsigned getCoproc() const {
495    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
496    return Cop.Val;
497  }
498
499  StringRef getToken() const {
500    assert(Kind == k_Token && "Invalid access!");
501    return StringRef(Tok.Data, Tok.Length);
502  }
503
504  unsigned getReg() const {
505    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
506    return Reg.RegNum;
507  }
508
509  const SmallVectorImpl<unsigned> &getRegList() const {
510    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
511            Kind == k_SPRRegisterList) && "Invalid access!");
512    return Registers;
513  }
514
515  const MCExpr *getImm() const {
516    assert(isImm() && "Invalid access!");
517    return Imm.Val;
518  }
519
520  unsigned getVectorIndex() const {
521    assert(Kind == k_VectorIndex && "Invalid access!");
522    return VectorIndex.Val;
523  }
524
525  ARM_MB::MemBOpt getMemBarrierOpt() const {
526    assert(Kind == k_MemBarrierOpt && "Invalid access!");
527    return MBOpt.Val;
528  }
529
530  ARM_PROC::IFlags getProcIFlags() const {
531    assert(Kind == k_ProcIFlags && "Invalid access!");
532    return IFlags.Val;
533  }
534
535  unsigned getMSRMask() const {
536    assert(Kind == k_MSRMask && "Invalid access!");
537    return MMask.Val;
538  }
539
540  bool isCoprocNum() const { return Kind == k_CoprocNum; }
541  bool isCoprocReg() const { return Kind == k_CoprocReg; }
542  bool isCoprocOption() const { return Kind == k_CoprocOption; }
543  bool isCondCode() const { return Kind == k_CondCode; }
544  bool isCCOut() const { return Kind == k_CCOut; }
545  bool isITMask() const { return Kind == k_ITCondMask; }
546  bool isITCondCode() const { return Kind == k_CondCode; }
547  bool isImm() const { return Kind == k_Immediate; }
548  bool isFPImm() const {
549    if (!isImm()) return false;
550    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
551    if (!CE) return false;
552    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
553    return Val != -1;
554  }
555  bool isFBits16() const {
556    if (!isImm()) return false;
557    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
558    if (!CE) return false;
559    int64_t Value = CE->getValue();
560    return Value >= 0 && Value <= 16;
561  }
562  bool isFBits32() const {
563    if (!isImm()) return false;
564    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
565    if (!CE) return false;
566    int64_t Value = CE->getValue();
567    return Value >= 1 && Value <= 32;
568  }
569  bool isImm8s4() const {
570    if (!isImm()) return false;
571    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
572    if (!CE) return false;
573    int64_t Value = CE->getValue();
574    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
575  }
576  bool isImm0_1020s4() const {
577    if (!isImm()) return false;
578    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
579    if (!CE) return false;
580    int64_t Value = CE->getValue();
581    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
582  }
583  bool isImm0_508s4() const {
584    if (!isImm()) return false;
585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586    if (!CE) return false;
587    int64_t Value = CE->getValue();
588    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
589  }
590  bool isImm0_508s4Neg() const {
591    if (!isImm()) return false;
592    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
593    if (!CE) return false;
594    int64_t Value = -CE->getValue();
595    // Explicitly exclude zero; we want that to use the normal 0_508 version.
596    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
597  }
598  bool isImm0_255() const {
599    if (!isImm()) return false;
600    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
601    if (!CE) return false;
602    int64_t Value = CE->getValue();
603    return Value >= 0 && Value < 256;
604  }
605  bool isImm0_4095() const {
606    if (!isImm()) return false;
607    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
608    if (!CE) return false;
609    int64_t Value = CE->getValue();
610    return Value >= 0 && Value < 4096;
611  }
612  bool isImm0_4095Neg() const {
613    if (!isImm()) return false;
614    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
615    if (!CE) return false;
616    int64_t Value = -CE->getValue();
617    return Value > 0 && Value < 4096;
618  }
619  bool isImm0_1() const {
620    if (!isImm()) return false;
621    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
622    if (!CE) return false;
623    int64_t Value = CE->getValue();
624    return Value >= 0 && Value < 2;
625  }
626  bool isImm0_3() const {
627    if (!isImm()) return false;
628    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
629    if (!CE) return false;
630    int64_t Value = CE->getValue();
631    return Value >= 0 && Value < 4;
632  }
633  bool isImm0_7() const {
634    if (!isImm()) return false;
635    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
636    if (!CE) return false;
637    int64_t Value = CE->getValue();
638    return Value >= 0 && Value < 8;
639  }
640  bool isImm0_15() const {
641    if (!isImm()) return false;
642    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
643    if (!CE) return false;
644    int64_t Value = CE->getValue();
645    return Value >= 0 && Value < 16;
646  }
647  bool isImm0_31() const {
648    if (!isImm()) return false;
649    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650    if (!CE) return false;
651    int64_t Value = CE->getValue();
652    return Value >= 0 && Value < 32;
653  }
654  bool isImm0_63() const {
655    if (!isImm()) return false;
656    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
657    if (!CE) return false;
658    int64_t Value = CE->getValue();
659    return Value >= 0 && Value < 64;
660  }
661  bool isImm8() const {
662    if (!isImm()) return false;
663    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
664    if (!CE) return false;
665    int64_t Value = CE->getValue();
666    return Value == 8;
667  }
668  bool isImm16() const {
669    if (!isImm()) return false;
670    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
671    if (!CE) return false;
672    int64_t Value = CE->getValue();
673    return Value == 16;
674  }
675  bool isImm32() const {
676    if (!isImm()) return false;
677    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
678    if (!CE) return false;
679    int64_t Value = CE->getValue();
680    return Value == 32;
681  }
682  bool isShrImm8() const {
683    if (!isImm()) return false;
684    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
685    if (!CE) return false;
686    int64_t Value = CE->getValue();
687    return Value > 0 && Value <= 8;
688  }
689  bool isShrImm16() const {
690    if (!isImm()) return false;
691    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
692    if (!CE) return false;
693    int64_t Value = CE->getValue();
694    return Value > 0 && Value <= 16;
695  }
696  bool isShrImm32() const {
697    if (!isImm()) return false;
698    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
699    if (!CE) return false;
700    int64_t Value = CE->getValue();
701    return Value > 0 && Value <= 32;
702  }
703  bool isShrImm64() const {
704    if (!isImm()) return false;
705    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706    if (!CE) return false;
707    int64_t Value = CE->getValue();
708    return Value > 0 && Value <= 64;
709  }
710  bool isImm1_7() const {
711    if (!isImm()) return false;
712    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
713    if (!CE) return false;
714    int64_t Value = CE->getValue();
715    return Value > 0 && Value < 8;
716  }
717  bool isImm1_15() const {
718    if (!isImm()) return false;
719    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
720    if (!CE) return false;
721    int64_t Value = CE->getValue();
722    return Value > 0 && Value < 16;
723  }
724  bool isImm1_31() const {
725    if (!isImm()) return false;
726    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
727    if (!CE) return false;
728    int64_t Value = CE->getValue();
729    return Value > 0 && Value < 32;
730  }
731  bool isImm1_16() const {
732    if (!isImm()) return false;
733    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
734    if (!CE) return false;
735    int64_t Value = CE->getValue();
736    return Value > 0 && Value < 17;
737  }
738  bool isImm1_32() const {
739    if (!isImm()) return false;
740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741    if (!CE) return false;
742    int64_t Value = CE->getValue();
743    return Value > 0 && Value < 33;
744  }
745  bool isImm0_32() const {
746    if (!isImm()) return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return Value >= 0 && Value < 33;
751  }
752  bool isImm0_65535() const {
753    if (!isImm()) return false;
754    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755    if (!CE) return false;
756    int64_t Value = CE->getValue();
757    return Value >= 0 && Value < 65536;
758  }
759  bool isImm0_65535Expr() const {
760    if (!isImm()) return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    // If it's not a constant expression, it'll generate a fixup and be
763    // handled later.
764    if (!CE) return true;
765    int64_t Value = CE->getValue();
766    return Value >= 0 && Value < 65536;
767  }
768  bool isImm24bit() const {
769    if (!isImm()) return false;
770    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
771    if (!CE) return false;
772    int64_t Value = CE->getValue();
773    return Value >= 0 && Value <= 0xffffff;
774  }
775  bool isImmThumbSR() const {
776    if (!isImm()) return false;
777    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778    if (!CE) return false;
779    int64_t Value = CE->getValue();
780    return Value > 0 && Value < 33;
781  }
782  bool isPKHLSLImm() const {
783    if (!isImm()) return false;
784    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785    if (!CE) return false;
786    int64_t Value = CE->getValue();
787    return Value >= 0 && Value < 32;
788  }
789  bool isPKHASRImm() const {
790    if (!isImm()) return false;
791    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
792    if (!CE) return false;
793    int64_t Value = CE->getValue();
794    return Value > 0 && Value <= 32;
795  }
796  bool isARMSOImm() const {
797    if (!isImm()) return false;
798    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
799    if (!CE) return false;
800    int64_t Value = CE->getValue();
801    return ARM_AM::getSOImmVal(Value) != -1;
802  }
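  // Note: an ARM so_imm (modified immediate) is an 8-bit value rotated right
  // by an even amount, so e.g. 0xff000000 (0xff rotated right by 8) passes
  // this check while 0x00000101 does not.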
803  bool isARMSOImmNot() const {
804    if (!isImm()) return false;
805    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
806    if (!CE) return false;
807    int64_t Value = CE->getValue();
808    return ARM_AM::getSOImmVal(~Value) != -1;
809  }
810  bool isARMSOImmNeg() const {
811    if (!isImm()) return false;
812    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
813    if (!CE) return false;
814    int64_t Value = CE->getValue();
815    // Only use this when not representable as a plain so_imm.
816    return ARM_AM::getSOImmVal(Value) == -1 &&
817      ARM_AM::getSOImmVal(-Value) != -1;
818  }
819  bool isT2SOImm() const {
820    if (!isImm()) return false;
821    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
822    if (!CE) return false;
823    int64_t Value = CE->getValue();
824    return ARM_AM::getT2SOImmVal(Value) != -1;
825  }
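  // Note: the Thumb-2 modified immediate also covers the replicated byte
  // patterns 0x00XY00XY, 0xXY00XY00 and 0xXYXYXYXY, so e.g. 0x00ab00ab
  // passes this check even though it is not a valid ARM so_imm.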
826  bool isT2SOImmNot() const {
827    if (!isImm()) return false;
828    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
829    if (!CE) return false;
830    int64_t Value = CE->getValue();
831    return ARM_AM::getT2SOImmVal(~Value) != -1;
832  }
833  bool isT2SOImmNeg() const {
834    if (!isImm()) return false;
835    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836    if (!CE) return false;
837    int64_t Value = CE->getValue();
838    // Only use this when not representable as a plain so_imm.
839    return ARM_AM::getT2SOImmVal(Value) == -1 &&
840      ARM_AM::getT2SOImmVal(-Value) != -1;
841  }
842  bool isSetEndImm() const {
843    if (!isImm()) return false;
844    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845    if (!CE) return false;
846    int64_t Value = CE->getValue();
847    return Value == 1 || Value == 0;
848  }
849  bool isReg() const { return Kind == k_Register; }
850  bool isRegList() const { return Kind == k_RegisterList; }
851  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
852  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
853  bool isToken() const { return Kind == k_Token; }
854  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
855  bool isMemory() const { return Kind == k_Memory; }
856  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
857  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
858  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
859  bool isRotImm() const { return Kind == k_RotateImmediate; }
860  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
861  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
862  bool isPostIdxReg() const {
863    return Kind == k_PostIndexRegister &&
           PostIdxReg.ShiftTy == ARM_AM::no_shift;
864  }
865  bool isMemNoOffset(bool alignOK = false) const {
866    if (!isMemory())
867      return false;
868    // No offset of any kind.
869    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
870     (alignOK || Memory.Alignment == 0);
871  }
872  bool isMemPCRelImm12() const {
873    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
874      return false;
875    // Base register must be PC.
876    if (Memory.BaseRegNum != ARM::PC)
877      return false;
878    // Immediate offset in range [-4095, 4095].
879    if (!Memory.OffsetImm) return true;
880    int64_t Val = Memory.OffsetImm->getValue();
881    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
882  }
883  bool isAlignedMemory() const {
884    return isMemNoOffset(true);
885  }
886  bool isAddrMode2() const {
887    if (!isMemory() || Memory.Alignment != 0) return false;
888    // Check for register offset.
889    if (Memory.OffsetRegNum) return true;
890    // Immediate offset in range [-4095, 4095].
891    if (!Memory.OffsetImm) return true;
892    int64_t Val = Memory.OffsetImm->getValue();
893    return Val > -4096 && Val < 4096;
894  }
895  bool isAM2OffsetImm() const {
896    if (!isImm()) return false;
897    // Immediate offset in range [-4095, 4095].
898    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
899    if (!CE) return false;
900    int64_t Val = CE->getValue();
901    return Val > -4096 && Val < 4096;
902  }
903  bool isAddrMode3() const {
904    // If we have an immediate that's not a constant, treat it as a label
905    // reference needing a fixup. If it is a constant, it's something else
906    // and we reject it.
907    if (isImm() && !isa<MCConstantExpr>(getImm()))
908      return true;
909    if (!isMemory() || Memory.Alignment != 0) return false;
910    // No shifts are legal for AM3.
911    if (Memory.ShiftType != ARM_AM::no_shift) return false;
912    // Check for register offset.
913    if (Memory.OffsetRegNum) return true;
914    // Immediate offset in range [-255, 255].
915    if (!Memory.OffsetImm) return true;
916    int64_t Val = Memory.OffsetImm->getValue();
917    return Val > -256 && Val < 256;
918  }
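  // For reference, addrmode3 is the LDRH/STRH/LDRD-style form: "[r0, r1]" and
  // "[r0, #-200]" both satisfy this, whereas a shifted register offset such
  // as "[r0, r1, lsl #2]" does not.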
919  bool isAM3Offset() const {
920    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
921      return false;
922    if (Kind == k_PostIndexRegister)
923      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
924    // Immediate offset in range [-255, 255].
925    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926    if (!CE) return false;
927    int64_t Val = CE->getValue();
928    // Special case, #-0 is INT32_MIN.
929    return (Val > -256 && Val < 256) || Val == INT32_MIN;
930  }
931  bool isAddrMode5() const {
932    // If we have an immediate that's not a constant, treat it as a label
933    // reference needing a fixup. If it is a constant, it's something else
934    // and we reject it.
935    if (isImm() && !isa<MCConstantExpr>(getImm()))
936      return true;
937    if (!isMemory() || Memory.Alignment != 0) return false;
938    // Check for register offset.
939    if (Memory.OffsetRegNum) return false;
940    // Immediate offset in range [-1020, 1020] and a multiple of 4.
941    if (!Memory.OffsetImm) return true;
942    int64_t Val = Memory.OffsetImm->getValue();
943    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
944      Val == INT32_MIN;
945  }
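  // For reference, addrmode5 is the VLDR/VSTR form: "[r2, #1020]" and
  // "[r2, #-1020]" are acceptable, while a register offset or an immediate
  // that is not a multiple of four is rejected.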
946  bool isMemTBB() const {
947    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
948        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
949      return false;
950    return true;
951  }
952  bool isMemTBH() const {
953    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
954        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
955        Memory.Alignment != 0 )
956      return false;
957    return true;
958  }
959  bool isMemRegOffset() const {
960    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
961      return false;
962    return true;
963  }
964  bool isT2MemRegOffset() const {
965    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
966        Memory.Alignment != 0)
967      return false;
968    // Only lsl #{0, 1, 2, 3} allowed.
969    if (Memory.ShiftType == ARM_AM::no_shift)
970      return true;
971    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
972      return false;
973    return true;
974  }
975  bool isMemThumbRR() const {
976    // Thumb reg+reg addressing is simple. Just two registers, a base and
977    // an offset. No shifts, negations or any other complicating factors.
978    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
979        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
980      return false;
981    return isARMLowRegister(Memory.BaseRegNum) &&
982      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
983  }
984  bool isMemThumbRIs4() const {
985    if (!isMemory() || Memory.OffsetRegNum != 0 ||
986        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
987      return false;
988    // Immediate offset, multiple of 4 in range [0, 124].
989    if (!Memory.OffsetImm) return true;
990    int64_t Val = Memory.OffsetImm->getValue();
991    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
992  }
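  // e.g. "[r3]" and "[r3, #124]" satisfy this, but "[r3, #2]" does not since
  // the offset must be a multiple of four.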
993  bool isMemThumbRIs2() const {
994    if (!isMemory() || Memory.OffsetRegNum != 0 ||
995        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
996      return false;
997    // Immediate offset, multiple of 2 in range [0, 62].
998    if (!Memory.OffsetImm) return true;
999    int64_t Val = Memory.OffsetImm->getValue();
1000    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1001  }
1002  bool isMemThumbRIs1() const {
1003    if (!isMemory() || Memory.OffsetRegNum != 0 ||
1004        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1005      return false;
1006    // Immediate offset in range [0, 31].
1007    if (!Memory.OffsetImm) return true;
1008    int64_t Val = Memory.OffsetImm->getValue();
1009    return Val >= 0 && Val <= 31;
1010  }
1011  bool isMemThumbSPI() const {
1012    if (!isMemory() || Memory.OffsetRegNum != 0 ||
1013        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1014      return false;
1015    // Immediate offset, multiple of 4 in range [0, 1020].
1016    if (!Memory.OffsetImm) return true;
1017    int64_t Val = Memory.OffsetImm->getValue();
1018    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1019  }
1020  bool isMemImm8s4Offset() const {
1021    // If we have an immediate that's not a constant, treat it as a label
1022    // reference needing a fixup. If it is a constant, it's something else
1023    // and we reject it.
1024    if (isImm() && !isa<MCConstantExpr>(getImm()))
1025      return true;
1026    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1027      return false;
1028    // Immediate offset, a multiple of 4, in range [-1020, 1020].
1029    if (!Memory.OffsetImm) return true;
1030    int64_t Val = Memory.OffsetImm->getValue();
1031    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1032  }
1033  bool isMemImm0_1020s4Offset() const {
1034    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1035      return false;
1036    // Immediate offset, a multiple of 4, in range [0, 1020].
1037    if (!Memory.OffsetImm) return true;
1038    int64_t Val = Memory.OffsetImm->getValue();
1039    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1040  }
1041  bool isMemImm8Offset() const {
1042    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1043      return false;
1044    // Base reg of PC isn't allowed for these encodings.
1045    if (Memory.BaseRegNum == ARM::PC) return false;
1046    // Immediate offset in range [-255, 255].
1047    if (!Memory.OffsetImm) return true;
1048    int64_t Val = Memory.OffsetImm->getValue();
1049    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1050  }
1051  bool isMemPosImm8Offset() const {
1052    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1053      return false;
1054    // Immediate offset in range [0, 255].
1055    if (!Memory.OffsetImm) return true;
1056    int64_t Val = Memory.OffsetImm->getValue();
1057    return Val >= 0 && Val < 256;
1058  }
1059  bool isMemNegImm8Offset() const {
1060    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1061      return false;
1062    // Base reg of PC isn't allowed for these encodings.
1063    if (Memory.BaseRegNum == ARM::PC) return false;
1064    // Immediate offset in range [-255, -1].
1065    if (!Memory.OffsetImm) return false;
1066    int64_t Val = Memory.OffsetImm->getValue();
1067    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1068  }
1069  bool isMemUImm12Offset() const {
1070    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1071      return false;
1072    // Immediate offset in range [0, 4095].
1073    if (!Memory.OffsetImm) return true;
1074    int64_t Val = Memory.OffsetImm->getValue();
1075    return (Val >= 0 && Val < 4096);
1076  }
1077  bool isMemImm12Offset() const {
1078    // If we have an immediate that's not a constant, treat it as a label
1079    // reference needing a fixup. If it is a constant, it's something else
1080    // and we reject it.
1081    if (isImm() && !isa<MCConstantExpr>(getImm()))
1082      return true;
1083
1084    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1085      return false;
1086    // Immediate offset in range [-4095, 4095].
1087    if (!Memory.OffsetImm) return true;
1088    int64_t Val = Memory.OffsetImm->getValue();
1089    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1090  }
1091  bool isPostIdxImm8() const {
1092    if (!isImm()) return false;
1093    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1094    if (!CE) return false;
1095    int64_t Val = CE->getValue();
1096    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1097  }
1098  bool isPostIdxImm8s4() const {
1099    if (!isImm()) return false;
1100    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1101    if (!CE) return false;
1102    int64_t Val = CE->getValue();
1103    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1104      (Val == INT32_MIN);
1105  }
1106
1107  bool isMSRMask() const { return Kind == k_MSRMask; }
1108  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1109
1110  // NEON operands.
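  // Syntax recap for the list predicates that follow: "{d0, d1}" parses as a
  // plain vector list, "{d0[], d1[]}" as an all-lanes list, and
  // "{d0[1], d1[1]}" as an indexed list; "double spaced" refers to
  // every-other-register lists such as "{d0, d2}".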
1111  bool isSingleSpacedVectorList() const {
1112    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1113  }
1114  bool isDoubleSpacedVectorList() const {
1115    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1116  }
1117  bool isVecListOneD() const {
1118    if (!isSingleSpacedVectorList()) return false;
1119    return VectorList.Count == 1;
1120  }
1121
1122  bool isVecListDPair() const {
1123    if (!isSingleSpacedVectorList()) return false;
1124    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1125              .contains(VectorList.RegNum));
1126  }
1127
1128  bool isVecListThreeD() const {
1129    if (!isSingleSpacedVectorList()) return false;
1130    return VectorList.Count == 3;
1131  }
1132
1133  bool isVecListFourD() const {
1134    if (!isSingleSpacedVectorList()) return false;
1135    return VectorList.Count == 4;
1136  }
1137
1138  bool isVecListDPairSpaced() const {
1139    if (isSingleSpacedVectorList()) return false;
1140    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1141              .contains(VectorList.RegNum));
1142  }
1143
1144  bool isVecListThreeQ() const {
1145    if (!isDoubleSpacedVectorList()) return false;
1146    return VectorList.Count == 3;
1147  }
1148
1149  bool isVecListFourQ() const {
1150    if (!isDoubleSpacedVectorList()) return false;
1151    return VectorList.Count == 4;
1152  }
1153
1154  bool isSingleSpacedVectorAllLanes() const {
1155    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1156  }
1157  bool isDoubleSpacedVectorAllLanes() const {
1158    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1159  }
1160  bool isVecListOneDAllLanes() const {
1161    if (!isSingleSpacedVectorAllLanes()) return false;
1162    return VectorList.Count == 1;
1163  }
1164
1165  bool isVecListDPairAllLanes() const {
1166    if (!isSingleSpacedVectorAllLanes()) return false;
1167    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1168              .contains(VectorList.RegNum));
1169  }
1170
1171  bool isVecListDPairSpacedAllLanes() const {
1172    if (!isDoubleSpacedVectorAllLanes()) return false;
1173    return VectorList.Count == 2;
1174  }
1175
1176  bool isVecListThreeDAllLanes() const {
1177    if (!isSingleSpacedVectorAllLanes()) return false;
1178    return VectorList.Count == 3;
1179  }
1180
1181  bool isVecListThreeQAllLanes() const {
1182    if (!isDoubleSpacedVectorAllLanes()) return false;
1183    return VectorList.Count == 3;
1184  }
1185
1186  bool isVecListFourDAllLanes() const {
1187    if (!isSingleSpacedVectorAllLanes()) return false;
1188    return VectorList.Count == 4;
1189  }
1190
1191  bool isVecListFourQAllLanes() const {
1192    if (!isDoubleSpacedVectorAllLanes()) return false;
1193    return VectorList.Count == 4;
1194  }
1195
1196  bool isSingleSpacedVectorIndexed() const {
1197    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1198  }
1199  bool isDoubleSpacedVectorIndexed() const {
1200    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1201  }
1202  bool isVecListOneDByteIndexed() const {
1203    if (!isSingleSpacedVectorIndexed()) return false;
1204    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1205  }
1206
1207  bool isVecListOneDHWordIndexed() const {
1208    if (!isSingleSpacedVectorIndexed()) return false;
1209    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1210  }
1211
1212  bool isVecListOneDWordIndexed() const {
1213    if (!isSingleSpacedVectorIndexed()) return false;
1214    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1215  }
1216
1217  bool isVecListTwoDByteIndexed() const {
1218    if (!isSingleSpacedVectorIndexed()) return false;
1219    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1220  }
1221
1222  bool isVecListTwoDHWordIndexed() const {
1223    if (!isSingleSpacedVectorIndexed()) return false;
1224    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1225  }
1226
1227  bool isVecListTwoQWordIndexed() const {
1228    if (!isDoubleSpacedVectorIndexed()) return false;
1229    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1230  }
1231
1232  bool isVecListTwoQHWordIndexed() const {
1233    if (!isDoubleSpacedVectorIndexed()) return false;
1234    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1235  }
1236
1237  bool isVecListTwoDWordIndexed() const {
1238    if (!isSingleSpacedVectorIndexed()) return false;
1239    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1240  }
1241
1242  bool isVecListThreeDByteIndexed() const {
1243    if (!isSingleSpacedVectorIndexed()) return false;
1244    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1245  }
1246
1247  bool isVecListThreeDHWordIndexed() const {
1248    if (!isSingleSpacedVectorIndexed()) return false;
1249    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1250  }
1251
1252  bool isVecListThreeQWordIndexed() const {
1253    if (!isDoubleSpacedVectorIndexed()) return false;
1254    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1255  }
1256
1257  bool isVecListThreeQHWordIndexed() const {
1258    if (!isDoubleSpacedVectorIndexed()) return false;
1259    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1260  }
1261
1262  bool isVecListThreeDWordIndexed() const {
1263    if (!isSingleSpacedVectorIndexed()) return false;
1264    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1265  }
1266
1267  bool isVecListFourDByteIndexed() const {
1268    if (!isSingleSpacedVectorIndexed()) return false;
1269    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1270  }
1271
1272  bool isVecListFourDHWordIndexed() const {
1273    if (!isSingleSpacedVectorIndexed()) return false;
1274    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1275  }
1276
1277  bool isVecListFourQWordIndexed() const {
1278    if (!isDoubleSpacedVectorIndexed()) return false;
1279    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1280  }
1281
1282  bool isVecListFourQHWordIndexed() const {
1283    if (!isDoubleSpacedVectorIndexed()) return false;
1284    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1285  }
1286
1287  bool isVecListFourDWordIndexed() const {
1288    if (!isSingleSpacedVectorIndexed()) return false;
1289    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1290  }
1291
1292  bool isVectorIndex8() const {
1293    if (Kind != k_VectorIndex) return false;
1294    return VectorIndex.Val < 8;
1295  }
1296  bool isVectorIndex16() const {
1297    if (Kind != k_VectorIndex) return false;
1298    return VectorIndex.Val < 4;
1299  }
1300  bool isVectorIndex32() const {
1301    if (Kind != k_VectorIndex) return false;
1302    return VectorIndex.Val < 2;
1303  }
1304
1305  bool isNEONi8splat() const {
1306    if (!isImm()) return false;
1307    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1308    // Must be a constant.
1309    if (!CE) return false;
1310    int64_t Value = CE->getValue();
1311    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1312    // value.
1313    return Value >= 0 && Value < 256;
1314  }
1315
1316  bool isNEONi16splat() const {
1317    if (!isImm()) return false;
1318    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1319    // Must be a constant.
1320    if (!CE) return false;
1321    int64_t Value = CE->getValue();
1322    // i16 value in the range [0,255] or [0x0100, 0xff00]
1323    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1324  }
1325
1326  bool isNEONi32splat() const {
1327    if (!isImm()) return false;
1328    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1329    // Must be a constant.
1330    if (!CE) return false;
1331    int64_t Value = CE->getValue();
1332    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1333    return (Value >= 0 && Value < 256) ||
1334      (Value >= 0x0100 && Value <= 0xff00) ||
1335      (Value >= 0x010000 && Value <= 0xff0000) ||
1336      (Value >= 0x01000000 && Value <= 0xff000000);
1337  }
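  // For example, 0x0000003f and 0x00ff0000 fall inside the intervals above,
  // whereas 0xffff0000 does not.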
1338
1339  bool isNEONi32vmov() const {
1340    if (!isImm()) return false;
1341    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1342    // Must be a constant.
1343    if (!CE) return false;
1344    int64_t Value = CE->getValue();
1345    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1346    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1347    return (Value >= 0 && Value < 256) ||
1348      (Value >= 0x0100 && Value <= 0xff00) ||
1349      (Value >= 0x010000 && Value <= 0xff0000) ||
1350      (Value >= 0x01000000 && Value <= 0xff000000) ||
1351      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1352      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1353  }
1354  bool isNEONi32vmovNeg() const {
1355    if (!isImm()) return false;
1356    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1357    // Must be a constant.
1358    if (!CE) return false;
1359    int64_t Value = ~CE->getValue();
1360    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1361    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1362    return (Value >= 0 && Value < 256) ||
1363      (Value >= 0x0100 && Value <= 0xff00) ||
1364      (Value >= 0x010000 && Value <= 0xff0000) ||
1365      (Value >= 0x01000000 && Value <= 0xff000000) ||
1366      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1367      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1368  }
1369
1370  bool isNEONi64splat() const {
1371    if (!isImm()) return false;
1372    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1373    // Must be a constant.
1374    if (!CE) return false;
1375    uint64_t Value = CE->getValue();
1376    // i64 value with each byte being either 0 or 0xff.
1377    for (unsigned i = 0; i < 8; ++i)
1378      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1379    return true;
1380  }
1381
1382  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1383    // Add as immediates when possible.  Null MCExpr = 0.
1384    if (Expr == 0)
1385      Inst.addOperand(MCOperand::CreateImm(0));
1386    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1387      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1388    else
1389      Inst.addOperand(MCOperand::CreateExpr(Expr));
1390  }
1391
1392  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1393    assert(N == 2 && "Invalid number of operands!");
1394    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1395    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1396    Inst.addOperand(MCOperand::CreateReg(RegNum));
1397  }
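  // e.g. an "eq" predicate is emitted by addCondCodeOperands as the operand
  // pair (ARMCC::EQ, CPSR), while the unconditional "al" becomes
  // (ARMCC::AL, 0).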
1398
1399  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1400    assert(N == 1 && "Invalid number of operands!");
1401    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1402  }
1403
1404  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1405    assert(N == 1 && "Invalid number of operands!");
1406    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1407  }
1408
1409  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1410    assert(N == 1 && "Invalid number of operands!");
1411    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1412  }
1413
1414  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1415    assert(N == 1 && "Invalid number of operands!");
1416    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1417  }
1418
1419  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1420    assert(N == 1 && "Invalid number of operands!");
1421    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1422  }
1423
1424  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1425    assert(N == 1 && "Invalid number of operands!");
1426    Inst.addOperand(MCOperand::CreateReg(getReg()));
1427  }
1428
1429  void addRegOperands(MCInst &Inst, unsigned N) const {
1430    assert(N == 1 && "Invalid number of operands!");
1431    Inst.addOperand(MCOperand::CreateReg(getReg()));
1432  }
1433
1434  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1435    assert(N == 3 && "Invalid number of operands!");
1436    assert(isRegShiftedReg() &&
1437           "addRegShiftedRegOperands() on non RegShiftedReg!");
1438    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1439    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1440    Inst.addOperand(MCOperand::CreateImm(
1441      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1442  }
1443
1444  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1445    assert(N == 2 && "Invalid number of operands!");
1446    assert(isRegShiftedImm() &&
1447           "addRegShiftedImmOperands() on non RegShiftedImm!");
1448    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1449    // Shift of #32 is encoded as 0 where permitted
1450    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1451    Inst.addOperand(MCOperand::CreateImm(
1452      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1453  }
1454
1455  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1456    assert(N == 1 && "Invalid number of operands!");
1457    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1458                                         ShifterImm.Imm));
1459  }
1460
1461  void addRegListOperands(MCInst &Inst, unsigned N) const {
1462    assert(N == 1 && "Invalid number of operands!");
1463    const SmallVectorImpl<unsigned> &RegList = getRegList();
1464    for (SmallVectorImpl<unsigned>::const_iterator
1465           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1466      Inst.addOperand(MCOperand::CreateReg(*I));
1467  }
1468
1469  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1470    addRegListOperands(Inst, N);
1471  }
1472
1473  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1474    addRegListOperands(Inst, N);
1475  }
1476
1477  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1478    assert(N == 1 && "Invalid number of operands!");
1479    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1480    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1481  }
1482
1483  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1484    assert(N == 1 && "Invalid number of operands!");
1485    // Munge the lsb/width into a bitfield mask.
1486    unsigned lsb = Bitfield.LSB;
1487    unsigned width = Bitfield.Width;
1488    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1489    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1490                      (32 - (lsb + width)));
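    // Worked example: lsb = 8, width = 4 gives
    //   ~(((0xffffffff >> 8) << 28) >> 20) == ~0x00000f00 == 0xfffff0ff,
    // i.e. bits [11:8] clear and all other bits set.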
1491    Inst.addOperand(MCOperand::CreateImm(Mask));
1492  }
1493
1494  void addImmOperands(MCInst &Inst, unsigned N) const {
1495    assert(N == 1 && "Invalid number of operands!");
1496    addExpr(Inst, getImm());
1497  }
1498
1499  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1500    assert(N == 1 && "Invalid number of operands!");
1501    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1502    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1503  }
1504
1505  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1506    assert(N == 1 && "Invalid number of operands!");
1507    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1508    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1509  }
1510
1511  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1512    assert(N == 1 && "Invalid number of operands!");
1513    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1514    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1515    Inst.addOperand(MCOperand::CreateImm(Val));
1516  }
1517
1518  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1519    assert(N == 1 && "Invalid number of operands!");
1520    // FIXME: We really want to scale the value here, but the LDRD/STRD
1521    // instructions don't encode operands that way yet.
1522    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1523    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1524  }
1525
1526  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1527    assert(N == 1 && "Invalid number of operands!");
1528    // The immediate is scaled by four in the encoding and is stored
1529    // in the MCInst as such. Lop off the low two bits here.
1530    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1531    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1532  }
1533
1534  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1535    assert(N == 1 && "Invalid number of operands!");
1536    // The immediate is scaled by four in the encoding and is stored
1537    // in the MCInst as such. Lop off the low two bits here.
1538    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1539    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1540  }
1541
1542  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1543    assert(N == 1 && "Invalid number of operands!");
1544    // The immediate is scaled by four in the encoding and is stored
1545    // in the MCInst as such. Lop off the low two bits here.
1546    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1547    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1548  }
1549
1550  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1551    assert(N == 1 && "Invalid number of operands!");
1552    // The constant encodes as the immediate-1, and we store in the instruction
1553    // the bits as encoded, so subtract off one here.
1554    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1555    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1556  }
1557
1558  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1559    assert(N == 1 && "Invalid number of operands!");
1560    // The constant encodes as the immediate-1, and we store in the instruction
1561    // the bits as encoded, so subtract off one here.
1562    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1563    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1564  }
1565
1566  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1567    assert(N == 1 && "Invalid number of operands!");
1568    // The constant encodes as the immediate, except for 32, which encodes as
1569    // zero.
1570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1571    unsigned Imm = CE->getValue();
1572    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1573  }
1574
1575  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1576    assert(N == 1 && "Invalid number of operands!");
1577    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1578    // the instruction as well.
1579    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1580    int Val = CE->getValue();
1581    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1582  }
1583
1584  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1585    assert(N == 1 && "Invalid number of operands!");
1586    // The operand is actually a t2_so_imm, but we have its bitwise
1587    // negation in the assembly source, so twiddle it here.
1588    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1589    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1590  }
1591
1592  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1593    assert(N == 1 && "Invalid number of operands!");
1594    // The operand is actually a t2_so_imm, but we have its
1595    // negation in the assembly source, so twiddle it here.
1596    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1597    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1598  }
1599
1600  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1601    assert(N == 1 && "Invalid number of operands!");
1602    // The operand is actually an imm0_4095, but we have its
1603    // negation in the assembly source, so twiddle it here.
1604    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1605    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1606  }
1607
1608  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1609    assert(N == 1 && "Invalid number of operands!");
1610    // The operand is actually a so_imm, but we have its bitwise
1611    // negation in the assembly source, so twiddle it here.
1612    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1613    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1614  }
1615
1616  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1617    assert(N == 1 && "Invalid number of operands!");
1618    // The operand is actually a so_imm, but we have its
1619    // negation in the assembly source, so twiddle it here.
1620    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1621    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1622  }
1623
1624  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1625    assert(N == 1 && "Invalid number of operands!");
1626    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1627  }
1628
1629  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1630    assert(N == 1 && "Invalid number of operands!");
1631    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1632  }
1633
1634  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1635    assert(N == 1 && "Invalid number of operands!");
1636    int32_t Imm = Memory.OffsetImm->getValue();
1637    // FIXME: Handle #-0
1638    if (Imm == INT32_MIN) Imm = 0;
1639    Inst.addOperand(MCOperand::CreateImm(Imm));
1640  }
1641
1642  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1643    assert(N == 2 && "Invalid number of operands!");
1644    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1645    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1646  }
1647
1648  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1649    assert(N == 3 && "Invalid number of operands!");
1650    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1651    if (!Memory.OffsetRegNum) {
1652      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1653      // Special case for #-0
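      // ('#-0' reaches this point as INT32_MIN, which is negative, so AddSub
      // above is already 'sub'; only the magnitude is normalized to zero.)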
1654      if (Val == INT32_MIN) Val = 0;
1655      if (Val < 0) Val = -Val;
1656      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1657    } else {
1658      // For register offset, we encode the shift type and negation flag
1659      // here.
1660      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1661                              Memory.ShiftImm, Memory.ShiftType);
1662    }
1663    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1664    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1665    Inst.addOperand(MCOperand::CreateImm(Val));
1666  }
1667
1668  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1669    assert(N == 2 && "Invalid number of operands!");
1670    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1671    assert(CE && "non-constant AM2OffsetImm operand!");
1672    int32_t Val = CE->getValue();
1673    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1674    // Special case for #-0
1675    if (Val == INT32_MIN) Val = 0;
1676    if (Val < 0) Val = -Val;
1677    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1678    Inst.addOperand(MCOperand::CreateReg(0));
1679    Inst.addOperand(MCOperand::CreateImm(Val));
1680  }
1681
1682  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1683    assert(N == 3 && "Invalid number of operands!");
1684    // If we have an immediate that's not a constant, treat it as a label
1685    // reference needing a fixup. If it is a constant, it's something else
1686    // and we reject it.
1687    if (isImm()) {
1688      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1689      Inst.addOperand(MCOperand::CreateReg(0));
1690      Inst.addOperand(MCOperand::CreateImm(0));
1691      return;
1692    }
1693
1694    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1695    if (!Memory.OffsetRegNum) {
1696      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1697      // Special case for #-0
1698      if (Val == INT32_MIN) Val = 0;
1699      if (Val < 0) Val = -Val;
1700      Val = ARM_AM::getAM3Opc(AddSub, Val);
1701    } else {
1702      // For register offset, we encode the shift type and negation flag
1703      // here.
1704      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1705    }
1706    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1707    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1708    Inst.addOperand(MCOperand::CreateImm(Val));
1709  }
1710
1711  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1712    assert(N == 2 && "Invalid number of operands!");
1713    if (Kind == k_PostIndexRegister) {
1714      int32_t Val =
1715        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1716      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1717      Inst.addOperand(MCOperand::CreateImm(Val));
1718      return;
1719    }
1720
1721    // Constant offset.
1722    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1723    int32_t Val = CE->getValue();
1724    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1725    // Special case for #-0
1726    if (Val == INT32_MIN) Val = 0;
1727    if (Val < 0) Val = -Val;
1728    Val = ARM_AM::getAM3Opc(AddSub, Val);
1729    Inst.addOperand(MCOperand::CreateReg(0));
1730    Inst.addOperand(MCOperand::CreateImm(Val));
1731  }
1732
1733  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1734    assert(N == 2 && "Invalid number of operands!");
1735    // If we have an immediate that's not a constant, treat it as a label
1736    // reference needing a fixup. If it is a constant, it's something else
1737    // and we reject it.
1738    if (isImm()) {
1739      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1740      Inst.addOperand(MCOperand::CreateImm(0));
1741      return;
1742    }
1743
1744    // The lower two bits are always zero and as such are not encoded.
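    // (e.g. an offset of '#-8' becomes a subtract of 2 word-sized units)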
1745    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1746    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1747    // Special case for #-0
1748    if (Val == INT32_MIN) Val = 0;
1749    if (Val < 0) Val = -Val;
1750    Val = ARM_AM::getAM5Opc(AddSub, Val);
1751    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1752    Inst.addOperand(MCOperand::CreateImm(Val));
1753  }
1754
1755  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1756    assert(N == 2 && "Invalid number of operands!");
1757    // If we have an immediate that's not a constant, treat it as a label
1758    // reference needing a fixup. If it is a constant, it's something else
1759    // and we reject it.
1760    if (isImm()) {
1761      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1762      Inst.addOperand(MCOperand::CreateImm(0));
1763      return;
1764    }
1765
1766    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1767    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1768    Inst.addOperand(MCOperand::CreateImm(Val));
1769  }
1770
1771  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1772    assert(N == 2 && "Invalid number of operands!");
1773    // The lower two bits are always zero and as such are not encoded.
1774    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1775    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1776    Inst.addOperand(MCOperand::CreateImm(Val));
1777  }
1778
1779  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1780    assert(N == 2 && "Invalid number of operands!");
1781    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1782    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1783    Inst.addOperand(MCOperand::CreateImm(Val));
1784  }
1785
1786  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1787    addMemImm8OffsetOperands(Inst, N);
1788  }
1789
1790  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1791    addMemImm8OffsetOperands(Inst, N);
1792  }
1793
1794  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 2 && "Invalid number of operands!");
1796    // If this is an immediate, it's a label reference.
1797    if (isImm()) {
1798      addExpr(Inst, getImm());
1799      Inst.addOperand(MCOperand::CreateImm(0));
1800      return;
1801    }
1802
1803    // Otherwise, it's a normal memory reg+offset.
1804    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1805    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1806    Inst.addOperand(MCOperand::CreateImm(Val));
1807  }
1808
1809  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1810    assert(N == 2 && "Invalid number of operands!");
1811    // If this is an immediate, it's a label reference.
1812    if (isImm()) {
1813      addExpr(Inst, getImm());
1814      Inst.addOperand(MCOperand::CreateImm(0));
1815      return;
1816    }
1817
1818    // Otherwise, it's a normal memory reg+offset.
1819    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1820    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1821    Inst.addOperand(MCOperand::CreateImm(Val));
1822  }
1823
1824  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1825    assert(N == 2 && "Invalid number of operands!");
1826    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1827    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1828  }
1829
1830  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1831    assert(N == 2 && "Invalid number of operands!");
1832    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1833    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1834  }
1835
1836  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1837    assert(N == 3 && "Invalid number of operands!");
1838    unsigned Val =
1839      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1840                        Memory.ShiftImm, Memory.ShiftType);
1841    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1842    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1843    Inst.addOperand(MCOperand::CreateImm(Val));
1844  }
1845
1846  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1847    assert(N == 3 && "Invalid number of operands!");
1848    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1849    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1850    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1851  }
1852
1853  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1854    assert(N == 2 && "Invalid number of operands!");
1855    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1856    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1857  }
1858
1859  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1860    assert(N == 2 && "Invalid number of operands!");
1861    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1862    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1863    Inst.addOperand(MCOperand::CreateImm(Val));
1864  }
1865
1866  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1867    assert(N == 2 && "Invalid number of operands!");
1868    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1869    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1870    Inst.addOperand(MCOperand::CreateImm(Val));
1871  }
1872
1873  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1874    assert(N == 2 && "Invalid number of operands!");
1875    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1876    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1877    Inst.addOperand(MCOperand::CreateImm(Val));
1878  }
1879
1880  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1881    assert(N == 2 && "Invalid number of operands!");
1882    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1883    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1884    Inst.addOperand(MCOperand::CreateImm(Val));
1885  }
1886
1887  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1888    assert(N == 1 && "Invalid number of operands!");
1889    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1890    assert(CE && "non-constant post-idx-imm8 operand!");
1891    int Imm = CE->getValue();
1892    bool isAdd = Imm >= 0;
1893    if (Imm == INT32_MIN) Imm = 0;
1894    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
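    // The result holds the magnitude in bits [7:0] and the add/subtract flag
    // in bit 8.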
1895    Inst.addOperand(MCOperand::CreateImm(Imm));
1896  }
1897
1898  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1899    assert(N == 1 && "Invalid number of operands!");
1900    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1901    assert(CE && "non-constant post-idx-imm8s4 operand!");
1902    int Imm = CE->getValue();
1903    bool isAdd = Imm >= 0;
1904    if (Imm == INT32_MIN) Imm = 0;
1905    // Immediate is scaled by 4.
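    // (e.g. '#-16' is stored as a magnitude of 4 with the add bit clear)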
1906    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1907    Inst.addOperand(MCOperand::CreateImm(Imm));
1908  }
1909
1910  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1911    assert(N == 2 && "Invalid number of operands!");
1912    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1913    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1914  }
1915
1916  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1917    assert(N == 2 && "Invalid number of operands!");
1918    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1919    // The sign, shift type, and shift amount are encoded in a single operand
1920    // using the AM2 encoding helpers.
1921    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1922    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1923                                     PostIdxReg.ShiftTy);
1924    Inst.addOperand(MCOperand::CreateImm(Imm));
1925  }
1926
1927  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1928    assert(N == 1 && "Invalid number of operands!");
1929    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1930  }
1931
1932  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1933    assert(N == 1 && "Invalid number of operands!");
1934    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1935  }
1936
1937  void addVecListOperands(MCInst &Inst, unsigned N) const {
1938    assert(N == 1 && "Invalid number of operands!");
1939    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1940  }
1941
1942  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1943    assert(N == 2 && "Invalid number of operands!");
1944    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1945    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1946  }
1947
1948  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1949    assert(N == 1 && "Invalid number of operands!");
1950    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1951  }
1952
1953  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1954    assert(N == 1 && "Invalid number of operands!");
1955    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1956  }
1957
1958  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1959    assert(N == 1 && "Invalid number of operands!");
1960    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1961  }
1962
1963  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1964    assert(N == 1 && "Invalid number of operands!");
1965    // The immediate encodes the type of constant as well as the value.
1966    // Mask in that this is an i8 splat.
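    // (The 0xe00 bits here, like the similar masks in the splat/vmov helpers
    // below, appear to correspond to the NEON modified-immediate cmode/op
    // field; the low eight bits carry the payload itself.)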
1967    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1968    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1969  }
1970
1971  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1972    assert(N == 1 && "Invalid number of operands!");
1973    // The immediate encodes the type of constant as well as the value.
1974    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1975    unsigned Value = CE->getValue();
1976    if (Value >= 256)
1977      Value = (Value >> 8) | 0xa00;
1978    else
1979      Value |= 0x800;
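    // The 0x800 form appears to place the payload in the low byte of each
    // halfword and the 0xa00 form in the high byte (value shifted down
    // accordingly).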
1980    Inst.addOperand(MCOperand::CreateImm(Value));
1981  }
1982
1983  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1984    assert(N == 1 && "Invalid number of operands!");
1985    // The immediate encodes the type of constant as well as the value.
1986    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1987    unsigned Value = CE->getValue();
1988    if (Value >= 256 && Value <= 0xff00)
1989      Value = (Value >> 8) | 0x200;
1990    else if (Value > 0xffff && Value <= 0xff0000)
1991      Value = (Value >> 16) | 0x400;
1992    else if (Value > 0xffffff)
1993      Value = (Value >> 24) | 0x600;
1994    Inst.addOperand(MCOperand::CreateImm(Value));
1995  }
1996
1997  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1998    assert(N == 1 && "Invalid number of operands!");
1999    // The immediate encodes the type of constant as well as the value.
2000    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2001    unsigned Value = CE->getValue();
2002    if (Value >= 256 && Value <= 0xffff)
2003      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2004    else if (Value > 0xffff && Value <= 0xffffff)
2005      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2006    else if (Value > 0xffffff)
2007      Value = (Value >> 24) | 0x600;
2008    Inst.addOperand(MCOperand::CreateImm(Value));
2009  }
2010
2011  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2012    assert(N == 1 && "Invalid number of operands!");
2013    // The immediate encodes the type of constant as well as the value.
2014    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2015    unsigned Value = ~CE->getValue();
2016    if (Value >= 256 && Value <= 0xffff)
2017      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2018    else if (Value > 0xffff && Value <= 0xffffff)
2019      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2020    else if (Value > 0xffffff)
2021      Value = (Value >> 24) | 0x600;
2022    Inst.addOperand(MCOperand::CreateImm(Value));
2023  }
2024
2025  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2026    assert(N == 1 && "Invalid number of operands!");
2027    // The immediate encodes the type of constant as well as the value.
2028    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2029    uint64_t Value = CE->getValue();
2030    unsigned Imm = 0;
2031    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2032      Imm |= (Value & 1) << i;
2033    }
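    // Each byte of a valid i64 splat value is either 0x00 or 0xff (presumably
    // checked when the operand was classified), so sampling the low bit of
    // every byte produces the expected one-bit-per-byte immediate.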
2034    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2035  }
2036
2037  virtual void print(raw_ostream &OS) const;
2038
2039  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2040    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2041    Op->ITMask.Mask = Mask;
2042    Op->StartLoc = S;
2043    Op->EndLoc = S;
2044    return Op;
2045  }
2046
2047  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2048    ARMOperand *Op = new ARMOperand(k_CondCode);
2049    Op->CC.Val = CC;
2050    Op->StartLoc = S;
2051    Op->EndLoc = S;
2052    return Op;
2053  }
2054
2055  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2056    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2057    Op->Cop.Val = CopVal;
2058    Op->StartLoc = S;
2059    Op->EndLoc = S;
2060    return Op;
2061  }
2062
2063  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2064    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2065    Op->Cop.Val = CopVal;
2066    Op->StartLoc = S;
2067    Op->EndLoc = S;
2068    return Op;
2069  }
2070
2071  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2072    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2073    Op->Cop.Val = Val;
2074    Op->StartLoc = S;
2075    Op->EndLoc = E;
2076    return Op;
2077  }
2078
2079  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2080    ARMOperand *Op = new ARMOperand(k_CCOut);
2081    Op->Reg.RegNum = RegNum;
2082    Op->StartLoc = S;
2083    Op->EndLoc = S;
2084    return Op;
2085  }
2086
2087  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2088    ARMOperand *Op = new ARMOperand(k_Token);
2089    Op->Tok.Data = Str.data();
2090    Op->Tok.Length = Str.size();
2091    Op->StartLoc = S;
2092    Op->EndLoc = S;
2093    return Op;
2094  }
2095
2096  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2097    ARMOperand *Op = new ARMOperand(k_Register);
2098    Op->Reg.RegNum = RegNum;
2099    Op->StartLoc = S;
2100    Op->EndLoc = E;
2101    return Op;
2102  }
2103
2104  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2105                                           unsigned SrcReg,
2106                                           unsigned ShiftReg,
2107                                           unsigned ShiftImm,
2108                                           SMLoc S, SMLoc E) {
2109    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2110    Op->RegShiftedReg.ShiftTy = ShTy;
2111    Op->RegShiftedReg.SrcReg = SrcReg;
2112    Op->RegShiftedReg.ShiftReg = ShiftReg;
2113    Op->RegShiftedReg.ShiftImm = ShiftImm;
2114    Op->StartLoc = S;
2115    Op->EndLoc = E;
2116    return Op;
2117  }
2118
2119  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2120                                            unsigned SrcReg,
2121                                            unsigned ShiftImm,
2122                                            SMLoc S, SMLoc E) {
2123    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2124    Op->RegShiftedImm.ShiftTy = ShTy;
2125    Op->RegShiftedImm.SrcReg = SrcReg;
2126    Op->RegShiftedImm.ShiftImm = ShiftImm;
2127    Op->StartLoc = S;
2128    Op->EndLoc = E;
2129    return Op;
2130  }
2131
2132  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2133                                   SMLoc S, SMLoc E) {
2134    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2135    Op->ShifterImm.isASR = isASR;
2136    Op->ShifterImm.Imm = Imm;
2137    Op->StartLoc = S;
2138    Op->EndLoc = E;
2139    return Op;
2140  }
2141
2142  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2143    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2144    Op->RotImm.Imm = Imm;
2145    Op->StartLoc = S;
2146    Op->EndLoc = E;
2147    return Op;
2148  }
2149
2150  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2151                                    SMLoc S, SMLoc E) {
2152    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2153    Op->Bitfield.LSB = LSB;
2154    Op->Bitfield.Width = Width;
2155    Op->StartLoc = S;
2156    Op->EndLoc = E;
2157    return Op;
2158  }
2159
2160  static ARMOperand *
2161  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2162                SMLoc StartLoc, SMLoc EndLoc) {
2163    KindTy Kind = k_RegisterList;
2164
2165    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2166      Kind = k_DPRRegisterList;
2167    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2168             contains(Regs.front().first))
2169      Kind = k_SPRRegisterList;
2170
2171    ARMOperand *Op = new ARMOperand(Kind);
2172    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2173           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2174      Op->Registers.push_back(I->first);
2175    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2176    Op->StartLoc = StartLoc;
2177    Op->EndLoc = EndLoc;
2178    return Op;
2179  }
2180
2181  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2182                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2183    ARMOperand *Op = new ARMOperand(k_VectorList);
2184    Op->VectorList.RegNum = RegNum;
2185    Op->VectorList.Count = Count;
2186    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2187    Op->StartLoc = S;
2188    Op->EndLoc = E;
2189    return Op;
2190  }
2191
2192  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2193                                              bool isDoubleSpaced,
2194                                              SMLoc S, SMLoc E) {
2195    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2196    Op->VectorList.RegNum = RegNum;
2197    Op->VectorList.Count = Count;
2198    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2199    Op->StartLoc = S;
2200    Op->EndLoc = E;
2201    return Op;
2202  }
2203
2204  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2205                                             unsigned Index,
2206                                             bool isDoubleSpaced,
2207                                             SMLoc S, SMLoc E) {
2208    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2209    Op->VectorList.RegNum = RegNum;
2210    Op->VectorList.Count = Count;
2211    Op->VectorList.LaneIndex = Index;
2212    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2213    Op->StartLoc = S;
2214    Op->EndLoc = E;
2215    return Op;
2216  }
2217
2218  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2219                                       MCContext &Ctx) {
2220    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2221    Op->VectorIndex.Val = Idx;
2222    Op->StartLoc = S;
2223    Op->EndLoc = E;
2224    return Op;
2225  }
2226
2227  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2228    ARMOperand *Op = new ARMOperand(k_Immediate);
2229    Op->Imm.Val = Val;
2230    Op->StartLoc = S;
2231    Op->EndLoc = E;
2232    return Op;
2233  }
2234
2235  static ARMOperand *CreateMem(unsigned BaseRegNum,
2236                               const MCConstantExpr *OffsetImm,
2237                               unsigned OffsetRegNum,
2238                               ARM_AM::ShiftOpc ShiftType,
2239                               unsigned ShiftImm,
2240                               unsigned Alignment,
2241                               bool isNegative,
2242                               SMLoc S, SMLoc E) {
2243    ARMOperand *Op = new ARMOperand(k_Memory);
2244    Op->Memory.BaseRegNum = BaseRegNum;
2245    Op->Memory.OffsetImm = OffsetImm;
2246    Op->Memory.OffsetRegNum = OffsetRegNum;
2247    Op->Memory.ShiftType = ShiftType;
2248    Op->Memory.ShiftImm = ShiftImm;
2249    Op->Memory.Alignment = Alignment;
2250    Op->Memory.isNegative = isNegative;
2251    Op->StartLoc = S;
2252    Op->EndLoc = E;
2253    return Op;
2254  }
2255
2256  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2257                                      ARM_AM::ShiftOpc ShiftTy,
2258                                      unsigned ShiftImm,
2259                                      SMLoc S, SMLoc E) {
2260    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2261    Op->PostIdxReg.RegNum = RegNum;
2262    Op->PostIdxReg.isAdd = isAdd;
2263    Op->PostIdxReg.ShiftTy = ShiftTy;
2264    Op->PostIdxReg.ShiftImm = ShiftImm;
2265    Op->StartLoc = S;
2266    Op->EndLoc = E;
2267    return Op;
2268  }
2269
2270  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2271    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2272    Op->MBOpt.Val = Opt;
2273    Op->StartLoc = S;
2274    Op->EndLoc = S;
2275    return Op;
2276  }
2277
2278  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2279    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2280    Op->IFlags.Val = IFlags;
2281    Op->StartLoc = S;
2282    Op->EndLoc = S;
2283    return Op;
2284  }
2285
2286  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2287    ARMOperand *Op = new ARMOperand(k_MSRMask);
2288    Op->MMask.Val = MMask;
2289    Op->StartLoc = S;
2290    Op->EndLoc = S;
2291    return Op;
2292  }
2293};
2294
2295} // end anonymous namespace.
2296
2297void ARMOperand::print(raw_ostream &OS) const {
2298  switch (Kind) {
2299  case k_CondCode:
2300    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2301    break;
2302  case k_CCOut:
2303    OS << "<ccout " << getReg() << ">";
2304    break;
2305  case k_ITCondMask: {
2306    static const char *MaskStr[] = {
2307      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2308      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2309    };
2310    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2311    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2312    break;
2313  }
2314  case k_CoprocNum:
2315    OS << "<coprocessor number: " << getCoproc() << ">";
2316    break;
2317  case k_CoprocReg:
2318    OS << "<coprocessor register: " << getCoproc() << ">";
2319    break;
2320  case k_CoprocOption:
2321    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2322    break;
2323  case k_MSRMask:
2324    OS << "<mask: " << getMSRMask() << ">";
2325    break;
2326  case k_Immediate:
2327    getImm()->print(OS);
2328    break;
2329  case k_MemBarrierOpt:
2330    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2331    break;
2332  case k_Memory:
2333    OS << "<memory "
2334       << " base:" << Memory.BaseRegNum;
2335    OS << ">";
2336    break;
2337  case k_PostIndexRegister:
2338    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2339       << PostIdxReg.RegNum;
2340    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2341      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2342         << PostIdxReg.ShiftImm;
2343    OS << ">";
2344    break;
2345  case k_ProcIFlags: {
2346    OS << "<ARM_PROC::";
2347    unsigned IFlags = getProcIFlags();
2348    for (int i=2; i >= 0; --i)
2349      if (IFlags & (1 << i))
2350        OS << ARM_PROC::IFlagsToString(1 << i);
2351    OS << ">";
2352    break;
2353  }
2354  case k_Register:
2355    OS << "<register " << getReg() << ">";
2356    break;
2357  case k_ShifterImmediate:
2358    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2359       << " #" << ShifterImm.Imm << ">";
2360    break;
2361  case k_ShiftedRegister:
2362    OS << "<so_reg_reg "
2363       << RegShiftedReg.SrcReg << " "
2364       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2365       << " " << RegShiftedReg.ShiftReg << ">";
2366    break;
2367  case k_ShiftedImmediate:
2368    OS << "<so_reg_imm "
2369       << RegShiftedImm.SrcReg << " "
2370       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2371       << " #" << RegShiftedImm.ShiftImm << ">";
2372    break;
2373  case k_RotateImmediate:
2374    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2375    break;
2376  case k_BitfieldDescriptor:
2377    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2378       << ", width: " << Bitfield.Width << ">";
2379    break;
2380  case k_RegisterList:
2381  case k_DPRRegisterList:
2382  case k_SPRRegisterList: {
2383    OS << "<register_list ";
2384
2385    const SmallVectorImpl<unsigned> &RegList = getRegList();
2386    for (SmallVectorImpl<unsigned>::const_iterator
2387           I = RegList.begin(), E = RegList.end(); I != E; ) {
2388      OS << *I;
2389      if (++I < E) OS << ", ";
2390    }
2391
2392    OS << ">";
2393    break;
2394  }
2395  case k_VectorList:
2396    OS << "<vector_list " << VectorList.Count << " * "
2397       << VectorList.RegNum << ">";
2398    break;
2399  case k_VectorListAllLanes:
2400    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2401       << VectorList.RegNum << ">";
2402    break;
2403  case k_VectorListIndexed:
2404    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2405       << VectorList.Count << " * " << VectorList.RegNum << ">";
2406    break;
2407  case k_Token:
2408    OS << "'" << getToken() << "'";
2409    break;
2410  case k_VectorIndex:
2411    OS << "<vectorindex " << getVectorIndex() << ">";
2412    break;
2413  }
2414}
2415
2416/// @name Auto-generated Match Functions
2417/// {
2418
2419static unsigned MatchRegisterName(StringRef Name);
2420
2421/// }
2422
2423bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2424                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2425  StartLoc = Parser.getTok().getLoc();
2426  RegNo = tryParseRegister();
2427  EndLoc = Parser.getTok().getLoc();
2428
2429  return (RegNo == (unsigned)-1);
2430}
2431
2432/// Try to parse a register name.  The token must be an Identifier when called,
2433/// and if it is a register name the token is eaten and the register number is
2434/// returned.  Otherwise return -1.
2435///
2436int ARMAsmParser::tryParseRegister() {
2437  const AsmToken &Tok = Parser.getTok();
2438  if (Tok.isNot(AsmToken::Identifier)) return -1;
2439
2440  std::string lowerCase = Tok.getString().lower();
2441  unsigned RegNum = MatchRegisterName(lowerCase);
2442  if (!RegNum) {
2443    RegNum = StringSwitch<unsigned>(lowerCase)
2444      .Case("r13", ARM::SP)
2445      .Case("r14", ARM::LR)
2446      .Case("r15", ARM::PC)
2447      .Case("ip", ARM::R12)
2448      // Additional register name aliases for 'gas' compatibility.
2449      .Case("a1", ARM::R0)
2450      .Case("a2", ARM::R1)
2451      .Case("a3", ARM::R2)
2452      .Case("a4", ARM::R3)
2453      .Case("v1", ARM::R4)
2454      .Case("v2", ARM::R5)
2455      .Case("v3", ARM::R6)
2456      .Case("v4", ARM::R7)
2457      .Case("v5", ARM::R8)
2458      .Case("v6", ARM::R9)
2459      .Case("v7", ARM::R10)
2460      .Case("v8", ARM::R11)
2461      .Case("sb", ARM::R9)
2462      .Case("sl", ARM::R10)
2463      .Case("fp", ARM::R11)
2464      .Default(0);
2465  }
2466  if (!RegNum) {
2467    // Check for aliases registered via .req. Canonicalize to lower case.
2468    // That's more consistent since register names are case insensitive, and
2469    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2470    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2471    // If no match, return failure.
2472    if (Entry == RegisterReqs.end())
2473      return -1;
2474    Parser.Lex(); // Eat identifier token.
2475    return Entry->getValue();
2476  }
2477
2478  Parser.Lex(); // Eat identifier token.
2479
2480  return RegNum;
2481}
2482
2483// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2484// If a recoverable error occurs, return 1. If an irrecoverable error
2485// occurs, return -1. An irrecoverable error is one where tokens have been
2486// consumed in the process of trying to parse the shifter (i.e., when it is
2487// indeed a shifter operand, but malformed).
2488int ARMAsmParser::tryParseShiftRegister(
2489                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2490  SMLoc S = Parser.getTok().getLoc();
2491  const AsmToken &Tok = Parser.getTok();
2492  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2493
2494  std::string lowerCase = Tok.getString().lower();
2495  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2496      .Case("asl", ARM_AM::lsl)
2497      .Case("lsl", ARM_AM::lsl)
2498      .Case("lsr", ARM_AM::lsr)
2499      .Case("asr", ARM_AM::asr)
2500      .Case("ror", ARM_AM::ror)
2501      .Case("rrx", ARM_AM::rrx)
2502      .Default(ARM_AM::no_shift);
2503
2504  if (ShiftTy == ARM_AM::no_shift)
2505    return 1;
2506
2507  Parser.Lex(); // Eat the operator.
2508
2509  // The source register for the shift has already been added to the
2510  // operand list, so we need to pop it off and combine it into the shifted
2511  // register operand instead.
2512  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2513  if (!PrevOp->isReg())
2514    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2515  int SrcReg = PrevOp->getReg();
2516  int64_t Imm = 0;
2517  int ShiftReg = 0;
2518  if (ShiftTy == ARM_AM::rrx) {
2519    // RRX doesn't have an explicit shift amount. The encoder expects
2520    // the shift register to be the same as the source register. Seems odd,
2521    // but OK.
2522    ShiftReg = SrcReg;
2523  } else {
2524    // Figure out if this is shifted by a constant or a register (for non-RRX).
2525    if (Parser.getTok().is(AsmToken::Hash) ||
2526        Parser.getTok().is(AsmToken::Dollar)) {
2527      Parser.Lex(); // Eat hash.
2528      SMLoc ImmLoc = Parser.getTok().getLoc();
2529      const MCExpr *ShiftExpr = 0;
2530      if (getParser().ParseExpression(ShiftExpr)) {
2531        Error(ImmLoc, "invalid immediate shift value");
2532        return -1;
2533      }
2534      // The expression must be evaluatable as an immediate.
2535      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2536      if (!CE) {
2537        Error(ImmLoc, "invalid immediate shift value");
2538        return -1;
2539      }
2540      // Range check the immediate.
2541      // lsl, ror: 0 <= imm <= 31
2542      // lsr, asr: 0 <= imm <= 32
2543      Imm = CE->getValue();
2544      if (Imm < 0 ||
2545          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2546          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2547        Error(ImmLoc, "immediate shift value out of range");
2548        return -1;
2549      }
2550      // Shift by zero is a nop. Always send it through as lsl
2551      // (for 'as' compatibility).
2552      if (Imm == 0)
2553        ShiftTy = ARM_AM::lsl;
2554    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2555      ShiftReg = tryParseRegister();
2556      SMLoc L = Parser.getTok().getLoc();
2557      if (ShiftReg == -1) {
2558        Error(L, "expected immediate or register in shift operand");
2559        return -1;
2560      }
2561    } else {
2562      Error(Parser.getTok().getLoc(),
2563            "expected immediate or register in shift operand");
2564      return -1;
2565    }
2566  }
2567
2568  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2569    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2570                                                         ShiftReg, Imm,
2571                                               S, Parser.getTok().getLoc()));
2572  else
2573    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2574                                               S, Parser.getTok().getLoc()));
2575
2576  return 0;
2577}
2578
2579
2580/// Try to parse a register name.  The token must be an Identifier when called.
2581/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2582/// if there is a "writeback". Returns 'true' if it's not a register.
2583///
2584/// TODO this is likely to change to allow different register types and or to
2585/// parse for a specific register type.
2586bool ARMAsmParser::
2587tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2588  SMLoc S = Parser.getTok().getLoc();
2589  int RegNo = tryParseRegister();
2590  if (RegNo == -1)
2591    return true;
2592
2593  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2594
2595  const AsmToken &ExclaimTok = Parser.getTok();
2596  if (ExclaimTok.is(AsmToken::Exclaim)) {
2597    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2598                                               ExclaimTok.getLoc()));
2599    Parser.Lex(); // Eat exclaim token
2600    return false;
2601  }
2602
2603  // Also check for an index operand. This is only legal for vector registers,
2604  // but that'll get caught OK in operand matching, so we don't need to
2605  // explicitly filter everything else out here.
2606  if (Parser.getTok().is(AsmToken::LBrac)) {
2607    SMLoc SIdx = Parser.getTok().getLoc();
2608    Parser.Lex(); // Eat left bracket token.
2609
2610    const MCExpr *ImmVal;
2611    if (getParser().ParseExpression(ImmVal))
2612      return true;
2613    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2614    if (!MCE)
2615      return TokError("immediate value expected for vector index");
2616
2617    SMLoc E = Parser.getTok().getLoc();
2618    if (Parser.getTok().isNot(AsmToken::RBrac))
2619      return Error(E, "']' expected");
2620
2621    Parser.Lex(); // Eat right bracket token.
2622
2623    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2624                                                     SIdx, E,
2625                                                     getContext()));
2626  }
2627
2628  return false;
2629}
2630
2631/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2632/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2633/// "c5", ...
2634static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2635  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2636  // but efficient.
2637  switch (Name.size()) {
2638  default: return -1;
2639  case 2:
2640    if (Name[0] != CoprocOp)
2641      return -1;
2642    switch (Name[1]) {
2643    default:  return -1;
2644    case '0': return 0;
2645    case '1': return 1;
2646    case '2': return 2;
2647    case '3': return 3;
2648    case '4': return 4;
2649    case '5': return 5;
2650    case '6': return 6;
2651    case '7': return 7;
2652    case '8': return 8;
2653    case '9': return 9;
2654    }
2655  case 3:
2656    if (Name[0] != CoprocOp || Name[1] != '1')
2657      return -1;
2658    switch (Name[2]) {
2659    default:  return -1;
2660    case '0': return 10;
2661    case '1': return 11;
2662    case '2': return 12;
2663    case '3': return 13;
2664    case '4': return 14;
2665    case '5': return 15;
2666    }
2667  }
2668}
2669
2670/// parseITCondCode - Try to parse a condition code for an IT instruction.
2671ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2672parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2673  SMLoc S = Parser.getTok().getLoc();
2674  const AsmToken &Tok = Parser.getTok();
2675  if (!Tok.is(AsmToken::Identifier))
2676    return MatchOperand_NoMatch;
2677  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2678    .Case("eq", ARMCC::EQ)
2679    .Case("ne", ARMCC::NE)
2680    .Case("hs", ARMCC::HS)
2681    .Case("cs", ARMCC::HS)
2682    .Case("lo", ARMCC::LO)
2683    .Case("cc", ARMCC::LO)
2684    .Case("mi", ARMCC::MI)
2685    .Case("pl", ARMCC::PL)
2686    .Case("vs", ARMCC::VS)
2687    .Case("vc", ARMCC::VC)
2688    .Case("hi", ARMCC::HI)
2689    .Case("ls", ARMCC::LS)
2690    .Case("ge", ARMCC::GE)
2691    .Case("lt", ARMCC::LT)
2692    .Case("gt", ARMCC::GT)
2693    .Case("le", ARMCC::LE)
2694    .Case("al", ARMCC::AL)
2695    .Default(~0U);
2696  if (CC == ~0U)
2697    return MatchOperand_NoMatch;
2698  Parser.Lex(); // Eat the token.
2699
2700  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2701
2702  return MatchOperand_Success;
2703}
2704
2705/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2706/// token must be an Identifier when called, and if it is a coprocessor
2707/// number, the token is eaten and the operand is added to the operand list.
2708ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2709parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2710  SMLoc S = Parser.getTok().getLoc();
2711  const AsmToken &Tok = Parser.getTok();
2712  if (Tok.isNot(AsmToken::Identifier))
2713    return MatchOperand_NoMatch;
2714
2715  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2716  if (Num == -1)
2717    return MatchOperand_NoMatch;
2718
2719  Parser.Lex(); // Eat identifier token.
2720  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2721  return MatchOperand_Success;
2722}
2723
2724/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2725/// token must be an Identifier when called, and if it is a coprocessor
2726/// register, the token is eaten and the operand is added to the operand list.
2727ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2728parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2729  SMLoc S = Parser.getTok().getLoc();
2730  const AsmToken &Tok = Parser.getTok();
2731  if (Tok.isNot(AsmToken::Identifier))
2732    return MatchOperand_NoMatch;
2733
2734  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2735  if (Reg == -1)
2736    return MatchOperand_NoMatch;
2737
2738  Parser.Lex(); // Eat identifier token.
2739  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2740  return MatchOperand_Success;
2741}
2742
2743/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2744/// coproc_option : '{' imm0_255 '}'
2745ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2746parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2747  SMLoc S = Parser.getTok().getLoc();
2748
2749  // If this isn't a '{', this isn't a coprocessor immediate operand.
2750  if (Parser.getTok().isNot(AsmToken::LCurly))
2751    return MatchOperand_NoMatch;
2752  Parser.Lex(); // Eat the '{'
2753
2754  const MCExpr *Expr;
2755  SMLoc Loc = Parser.getTok().getLoc();
2756  if (getParser().ParseExpression(Expr)) {
2757    Error(Loc, "illegal expression");
2758    return MatchOperand_ParseFail;
2759  }
2760  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2761  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2762    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2763    return MatchOperand_ParseFail;
2764  }
2765  int Val = CE->getValue();
2766
2767  // Check for and consume the closing '}'
2768  if (Parser.getTok().isNot(AsmToken::RCurly))
2769    return MatchOperand_ParseFail;
2770  SMLoc E = Parser.getTok().getLoc();
2771  Parser.Lex(); // Eat the '}'
2772
2773  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2774  return MatchOperand_Success;
2775}
2776
2777// For register list parsing, we need to map from raw GPR register numbering
2778// to the enumeration values. The enumeration values aren't sorted by
2779// register number due to our using "sp", "lr" and "pc" as canonical names.
2780static unsigned getNextRegister(unsigned Reg) {
2781  // If this is a GPR, we need to do it manually, otherwise we can rely
2782  // on the sort ordering of the enumeration since the other reg-classes
2783  // are sane.
2784  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2785    return Reg + 1;
2786  switch(Reg) {
2787  default: llvm_unreachable("Invalid GPR number!");
2788  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2789  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2790  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2791  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2792  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2793  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2794  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2795  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2796  }
2797}
2798
2799// Return the low-subreg of a given Q register.
2800static unsigned getDRegFromQReg(unsigned QReg) {
2801  switch (QReg) {
2802  default: llvm_unreachable("expected a Q register!");
2803  case ARM::Q0:  return ARM::D0;
2804  case ARM::Q1:  return ARM::D2;
2805  case ARM::Q2:  return ARM::D4;
2806  case ARM::Q3:  return ARM::D6;
2807  case ARM::Q4:  return ARM::D8;
2808  case ARM::Q5:  return ARM::D10;
2809  case ARM::Q6:  return ARM::D12;
2810  case ARM::Q7:  return ARM::D14;
2811  case ARM::Q8:  return ARM::D16;
2812  case ARM::Q9:  return ARM::D18;
2813  case ARM::Q10: return ARM::D20;
2814  case ARM::Q11: return ARM::D22;
2815  case ARM::Q12: return ARM::D24;
2816  case ARM::Q13: return ARM::D26;
2817  case ARM::Q14: return ARM::D28;
2818  case ARM::Q15: return ARM::D30;
2819  }
2820}
2821
2822/// Parse a register list.
2823bool ARMAsmParser::
2824parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2825  assert(Parser.getTok().is(AsmToken::LCurly) &&
2826         "Token is not a Left Curly Brace");
2827  SMLoc S = Parser.getTok().getLoc();
2828  Parser.Lex(); // Eat '{' token.
2829  SMLoc RegLoc = Parser.getTok().getLoc();
2830
2831  // Check the first register in the list to see what register class
2832  // this is a list of.
2833  int Reg = tryParseRegister();
2834  if (Reg == -1)
2835    return Error(RegLoc, "register expected");
2836
2837  // The reglist instructions have at most 16 registers, so reserve
2838  // space for that many.
2839  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2840
2841  // Allow Q regs and just interpret them as the two D sub-registers.
2842  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2843    Reg = getDRegFromQReg(Reg);
2844    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2845    ++Reg;
2846  }
2847  const MCRegisterClass *RC;
2848  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2849    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2850  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2851    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2852  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2853    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2854  else
2855    return Error(RegLoc, "invalid register in register list");
2856
2857  // Store the register.
2858  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2859
2860  // This starts immediately after the first register token in the list,
2861  // so we can see either a comma or a minus (range separator) as a legal
2862  // next token.
2863  while (Parser.getTok().is(AsmToken::Comma) ||
2864         Parser.getTok().is(AsmToken::Minus)) {
2865    if (Parser.getTok().is(AsmToken::Minus)) {
2866      Parser.Lex(); // Eat the minus.
2867      SMLoc EndLoc = Parser.getTok().getLoc();
2868      int EndReg = tryParseRegister();
2869      if (EndReg == -1)
2870        return Error(EndLoc, "register expected");
2871      // Allow Q regs and just interpret them as the two D sub-registers.
2872      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2873        EndReg = getDRegFromQReg(EndReg) + 1;
2874      // If the register is the same as the start reg, there's nothing
2875      // more to do.
2876      if (Reg == EndReg)
2877        continue;
2878      // The register must be in the same register class as the first.
2879      if (!RC->contains(EndReg))
2880        return Error(EndLoc, "invalid register in register list");
2881      // Ranges must go from low to high.
2882      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2883        return Error(EndLoc, "bad range in register list");
2884
2885      // Add all the registers in the range to the register list.
2886      while (Reg != EndReg) {
2887        Reg = getNextRegister(Reg);
2888        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2889      }
2890      continue;
2891    }
2892    Parser.Lex(); // Eat the comma.
2893    RegLoc = Parser.getTok().getLoc();
2894    int OldReg = Reg;
2895    const AsmToken RegTok = Parser.getTok();
2896    Reg = tryParseRegister();
2897    if (Reg == -1)
2898      return Error(RegLoc, "register expected");
2899    // Allow Q regs and just interpret them as the two D sub-registers.
2900    bool isQReg = false;
2901    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2902      Reg = getDRegFromQReg(Reg);
2903      isQReg = true;
2904    }
2905    // The register must be in the same register class as the first.
2906    if (!RC->contains(Reg))
2907      return Error(RegLoc, "invalid register in register list");
2908    // List must be monotonically increasing.
2909    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2910      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2911        Warning(RegLoc, "register list not in ascending order");
2912      else
2913        return Error(RegLoc, "register list not in ascending order");
2914    }
2915    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2916      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2917              ") in register list");
2918      continue;
2919    }
2920    // VFP register lists must also be contiguous.
2921    // It's OK to use the enumeration values directly here, as the
2922    // VFP register classes have the enum sorted properly.
2923    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2924        Reg != OldReg + 1)
2925      return Error(RegLoc, "non-contiguous register range");
2926    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2927    if (isQReg)
2928      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2929  }
2930
2931  SMLoc E = Parser.getTok().getLoc();
2932  if (Parser.getTok().isNot(AsmToken::RCurly))
2933    return Error(E, "'}' expected");
2934  Parser.Lex(); // Eat '}' token.
2935
2936  // Push the register list operand.
2937  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2938
2939  // The ARM system instruction variants for LDM/STM have a '^' token here.
2940  if (Parser.getTok().is(AsmToken::Caret)) {
2941    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2942    Parser.Lex(); // Eat '^' token.
2943  }
2944
2945  return false;
2946}
2947
2948// Helper function to parse the lane index for vector lists.
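// Illustrative examples of the lane suffixes handled here:
//   d0[2]  -> IndexedLane, Index = 2
//   d0[]   -> AllLanes
//   d0     -> NoLanes (no '[' present)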
2949ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2950parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2951  Index = 0; // Always return a defined index value.
2952  if (Parser.getTok().is(AsmToken::LBrac)) {
2953    Parser.Lex(); // Eat the '['.
2954    if (Parser.getTok().is(AsmToken::RBrac)) {
2955      // "Dn[]" is the 'all lanes' syntax.
2956      LaneKind = AllLanes;
2957      Parser.Lex(); // Eat the ']'.
2958      return MatchOperand_Success;
2959    }
2960
2961    // There's an optional '#' token here. Normally there wouldn't be, but
2962    // inline assembly puts one in, and it's friendly to accept that.
2963    if (Parser.getTok().is(AsmToken::Hash))
2964      Parser.Lex(); // Eat the '#'
2965
2966    const MCExpr *LaneIndex;
2967    SMLoc Loc = Parser.getTok().getLoc();
2968    if (getParser().ParseExpression(LaneIndex)) {
2969      Error(Loc, "illegal expression");
2970      return MatchOperand_ParseFail;
2971    }
2972    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2973    if (!CE) {
2974      Error(Loc, "lane index must be empty or an integer");
2975      return MatchOperand_ParseFail;
2976    }
2977    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2978      Error(Parser.getTok().getLoc(), "']' expected");
2979      return MatchOperand_ParseFail;
2980    }
2981    Parser.Lex(); // Eat the ']'.
2982    int64_t Val = CE->getValue();
2983
2984    // FIXME: Make this range check context sensitive for .8, .16, .32.
2985    if (Val < 0 || Val > 7) {
2986      Error(Parser.getTok().getLoc(), "lane index out of range");
2987      return MatchOperand_ParseFail;
2988    }
2989    Index = Val;
2990    LaneKind = IndexedLane;
2991    return MatchOperand_Success;
2992  }
2993  LaneKind = NoLanes;
2994  return MatchOperand_Success;
2995}
2996
2997// Parse a vector register list.
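// Illustrative examples (not exhaustive):
//   {d0, d1, d2, d3}     @ single-spaced list
//   {d0, d2, d4, d6}     @ double-spaced list
//   {d0-d3}              @ a range implies single spacing
//   {d0[], d1[]}         @ all-lanes form
//   {d0[1], d1[1]}       @ indexed-lane form
//   q0                   @ bare D or Q register, accepted as a gas extension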
2998ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2999parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3000  VectorLaneTy LaneKind;
3001  unsigned LaneIndex;
3002  SMLoc S = Parser.getTok().getLoc();
3003  // As an extension (to match gas), support a plain D register or Q register
3004  // (without enclosing curly braces) as a single or double entry list,
3005  // respectively.
3006  if (Parser.getTok().is(AsmToken::Identifier)) {
3007    int Reg = tryParseRegister();
3008    if (Reg == -1)
3009      return MatchOperand_NoMatch;
3010    SMLoc E = Parser.getTok().getLoc();
3011    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3012      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3013      if (Res != MatchOperand_Success)
3014        return Res;
3015      switch (LaneKind) {
3016      case NoLanes:
3017        E = Parser.getTok().getLoc();
3018        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3019        break;
3020      case AllLanes:
3021        E = Parser.getTok().getLoc();
3022        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3023                                                                S, E));
3024        break;
3025      case IndexedLane:
3026        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3027                                                               LaneIndex,
3028                                                               false, S, E));
3029        break;
3030      }
3031      return MatchOperand_Success;
3032    }
3033    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3034      Reg = getDRegFromQReg(Reg);
3035      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3036      if (Res != MatchOperand_Success)
3037        return Res;
3038      switch (LaneKind) {
3039      case NoLanes:
3040        E = Parser.getTok().getLoc();
3041        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3042                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3043        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3044        break;
3045      case AllLanes:
3046        E = Parser.getTok().getLoc();
3047        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3048                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3049        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3050                                                                S, E));
3051        break;
3052      case IndexedLane:
3053        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3054                                                               LaneIndex,
3055                                                               false, S, E));
3056        break;
3057      }
3058      return MatchOperand_Success;
3059    }
3060    Error(S, "vector register expected");
3061    return MatchOperand_ParseFail;
3062  }
3063
3064  if (Parser.getTok().isNot(AsmToken::LCurly))
3065    return MatchOperand_NoMatch;
3066
3067  Parser.Lex(); // Eat '{' token.
3068  SMLoc RegLoc = Parser.getTok().getLoc();
3069
3070  int Reg = tryParseRegister();
3071  if (Reg == -1) {
3072    Error(RegLoc, "register expected");
3073    return MatchOperand_ParseFail;
3074  }
3075  unsigned Count = 1;
3076  int Spacing = 0;
3077  unsigned FirstReg = Reg;
3078  // The list is of D registers, but we also allow Q regs and just interpret
3079  // them as the two D sub-registers.
3080  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3081    FirstReg = Reg = getDRegFromQReg(Reg);
3082    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3083                 // it's ambiguous with four-register single spaced.
3084    ++Reg;
3085    ++Count;
3086  }
3087  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3088    return MatchOperand_ParseFail;
3089
3090  while (Parser.getTok().is(AsmToken::Comma) ||
3091         Parser.getTok().is(AsmToken::Minus)) {
3092    if (Parser.getTok().is(AsmToken::Minus)) {
3093      if (!Spacing)
3094        Spacing = 1; // Register range implies a single spaced list.
3095      else if (Spacing == 2) {
3096        Error(Parser.getTok().getLoc(),
3097              "sequential registers in double spaced list");
3098        return MatchOperand_ParseFail;
3099      }
3100      Parser.Lex(); // Eat the minus.
3101      SMLoc EndLoc = Parser.getTok().getLoc();
3102      int EndReg = tryParseRegister();
3103      if (EndReg == -1) {
3104        Error(EndLoc, "register expected");
3105        return MatchOperand_ParseFail;
3106      }
3107      // Allow Q regs and just interpret them as the two D sub-registers.
3108      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3109        EndReg = getDRegFromQReg(EndReg) + 1;
3110      // If the register is the same as the start reg, there's nothing
3111      // more to do.
3112      if (Reg == EndReg)
3113        continue;
3114      // The register must be in the same register class as the first.
3115      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3116        Error(EndLoc, "invalid register in register list");
3117        return MatchOperand_ParseFail;
3118      }
3119      // Ranges must go from low to high.
3120      if (Reg > EndReg) {
3121        Error(EndLoc, "bad range in register list");
3122        return MatchOperand_ParseFail;
3123      }
3124      // Parse the lane specifier if present.
3125      VectorLaneTy NextLaneKind;
3126      unsigned NextLaneIndex;
3127      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3128        return MatchOperand_ParseFail;
3129      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3130        Error(EndLoc, "mismatched lane index in register list");
3131        return MatchOperand_ParseFail;
3132      }
3133      EndLoc = Parser.getTok().getLoc();
3134
3135      // Add all the registers in the range to the register list.
3136      Count += EndReg - Reg;
3137      Reg = EndReg;
3138      continue;
3139    }
3140    Parser.Lex(); // Eat the comma.
3141    RegLoc = Parser.getTok().getLoc();
3142    int OldReg = Reg;
3143    Reg = tryParseRegister();
3144    if (Reg == -1) {
3145      Error(RegLoc, "register expected");
3146      return MatchOperand_ParseFail;
3147    }
3148    // Vector register lists must be contiguous.
3149    // It's OK to use the enumeration values directly here, as the
3150    // VFP register classes have the enum sorted properly.
3151    //
3152    // The list is of D registers, but we also allow Q regs and just interpret
3153    // them as the two D sub-registers.
3154    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3155      if (!Spacing)
3156        Spacing = 1; // Register range implies a single spaced list.
3157      else if (Spacing == 2) {
3158        Error(RegLoc,
3159              "invalid register in double-spaced list (must be 'D' register)");
3160        return MatchOperand_ParseFail;
3161      }
3162      Reg = getDRegFromQReg(Reg);
3163      if (Reg != OldReg + 1) {
3164        Error(RegLoc, "non-contiguous register range");
3165        return MatchOperand_ParseFail;
3166      }
3167      ++Reg;
3168      Count += 2;
3169      // Parse the lane specifier if present.
3170      VectorLaneTy NextLaneKind;
3171      unsigned NextLaneIndex;
3172      SMLoc EndLoc = Parser.getTok().getLoc();
3173      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3174        return MatchOperand_ParseFail;
3175      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3176        Error(EndLoc, "mismatched lane index in register list");
3177        return MatchOperand_ParseFail;
3178      }
3179      continue;
3180    }
3181    // Normal D register.
3182    // Figure out the register spacing (single or double) of the list if
3183    // we don't know it already.
3184    if (!Spacing)
3185      Spacing = 1 + (Reg == OldReg + 2);
3186
3187    // Just check that it's contiguous and keep going.
3188    if (Reg != OldReg + Spacing) {
3189      Error(RegLoc, "non-contiguous register range");
3190      return MatchOperand_ParseFail;
3191    }
3192    ++Count;
3193    // Parse the lane specifier if present.
3194    VectorLaneTy NextLaneKind;
3195    unsigned NextLaneIndex;
3196    SMLoc EndLoc = Parser.getTok().getLoc();
3197    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3198      return MatchOperand_ParseFail;
3199    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3200      Error(EndLoc, "mismatched lane index in register list");
3201      return MatchOperand_ParseFail;
3202    }
3203  }
3204
3205  SMLoc E = Parser.getTok().getLoc();
3206  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3207    Error(E, "'}' expected");
3208    return MatchOperand_ParseFail;
3209  }
3210  Parser.Lex(); // Eat '}' token.
3211
3212  switch (LaneKind) {
3213  case NoLanes:
3214    // Two-register operands have been converted to the
3215    // composite register classes.
3216    if (Count == 2) {
3217      const MCRegisterClass *RC = (Spacing == 1) ?
3218        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3219        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3220      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3221    }
3222
3223    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3224                                                    (Spacing == 2), S, E));
3225    break;
3226  case AllLanes:
3227    // Two-register operands have been converted to the
3228    // composite register classes.
3229    if (Count == 2) {
3230      const MCRegisterClass *RC = (Spacing == 1) ?
3231        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3232        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3233      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3234    }
3235    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3236                                                            (Spacing == 2),
3237                                                            S, E));
3238    break;
3239  case IndexedLane:
3240    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3241                                                           LaneIndex,
3242                                                           (Spacing == 2),
3243                                                           S, E));
3244    break;
3245  }
3246  return MatchOperand_Success;
3247}
3248
3249/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
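/// For example (illustrative): "dmb ish", "dsb ishst", "dmb sy". The older
/// aliases "sh", "shst", "un" and "unst" map to the same encodings.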
3250ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3251parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3252  SMLoc S = Parser.getTok().getLoc();
3253  const AsmToken &Tok = Parser.getTok();
3254  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3255  StringRef OptStr = Tok.getString();
3256
3257  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3258    .Case("sy",    ARM_MB::SY)
3259    .Case("st",    ARM_MB::ST)
3260    .Case("sh",    ARM_MB::ISH)
3261    .Case("ish",   ARM_MB::ISH)
3262    .Case("shst",  ARM_MB::ISHST)
3263    .Case("ishst", ARM_MB::ISHST)
3264    .Case("nsh",   ARM_MB::NSH)
3265    .Case("un",    ARM_MB::NSH)
3266    .Case("nshst", ARM_MB::NSHST)
3267    .Case("unst",  ARM_MB::NSHST)
3268    .Case("osh",   ARM_MB::OSH)
3269    .Case("oshst", ARM_MB::OSHST)
3270    .Default(~0U);
3271
3272  if (Opt == ~0U)
3273    return MatchOperand_NoMatch;
3274
3275  Parser.Lex(); // Eat identifier token.
3276  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3277  return MatchOperand_Success;
3278}
3279
3280/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
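/// For example (illustrative): "cpsie if" yields IFlags = I|F, "cpsid aif"
/// yields A|I|F, and "none" is accepted with no AIF bits set.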
3281ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3282parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3283  SMLoc S = Parser.getTok().getLoc();
3284  const AsmToken &Tok = Parser.getTok();
3285  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3286  StringRef IFlagsStr = Tok.getString();
3287
3288  // An iflags string of "none" is interpreted to mean that none of the AIF
3289  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3290  unsigned IFlags = 0;
3291  if (IFlagsStr != "none") {
3292    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3293      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3294        .Case("a", ARM_PROC::A)
3295        .Case("i", ARM_PROC::I)
3296        .Case("f", ARM_PROC::F)
3297        .Default(~0U);
3298
3299      // If some specific iflag is already set, it means that some letter is
3300      // present more than once; that is not acceptable.
3301      if (Flag == ~0U || (IFlags & Flag))
3302        return MatchOperand_NoMatch;
3303
3304      IFlags |= Flag;
3305    }
3306  }
3307
3308  Parser.Lex(); // Eat identifier token.
3309  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3310  return MatchOperand_Success;
3311}
3312
3313/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
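/// For example (illustrative):
///   msr apsr_nzcvq, r0   @ mask 0x8, same as CPSR_f
///   msr cpsr_fc, r0      @ "cpsr" and "cpsr_all" also mean "fc"
///   msr spsr_fsxc, r1    @ bit 4 selects SPSR
///   msr primask, r0      @ M-class special register (value 16)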
3314ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3315parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3316  SMLoc S = Parser.getTok().getLoc();
3317  const AsmToken &Tok = Parser.getTok();
3318  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3319  StringRef Mask = Tok.getString();
3320
3321  if (isMClass()) {
3322    // See ARMv6-M 10.1.1
3323    std::string Name = Mask.lower();
3324    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3325      .Case("apsr", 0)
3326      .Case("iapsr", 1)
3327      .Case("eapsr", 2)
3328      .Case("xpsr", 3)
3329      .Case("ipsr", 5)
3330      .Case("epsr", 6)
3331      .Case("iepsr", 7)
3332      .Case("msp", 8)
3333      .Case("psp", 9)
3334      .Case("primask", 16)
3335      .Case("basepri", 17)
3336      .Case("basepri_max", 18)
3337      .Case("faultmask", 19)
3338      .Case("control", 20)
3339      .Default(~0U);
3340
3341    if (FlagsVal == ~0U)
3342      return MatchOperand_NoMatch;
3343
3344    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3345      // basepri, basepri_max and faultmask are only valid for v7-M.
3346      return MatchOperand_NoMatch;
3347
3348    Parser.Lex(); // Eat identifier token.
3349    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3350    return MatchOperand_Success;
3351  }
3352
3353  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3354  size_t Start = 0, Next = Mask.find('_');
3355  StringRef Flags = "";
3356  std::string SpecReg = Mask.slice(Start, Next).lower();
3357  if (Next != StringRef::npos)
3358    Flags = Mask.slice(Next+1, Mask.size());
3359
3360  // FlagsVal contains the complete mask:
3361  // 3-0: Mask
3362  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3363  unsigned FlagsVal = 0;
3364
3365  if (SpecReg == "apsr") {
3366    FlagsVal = StringSwitch<unsigned>(Flags)
3367    .Case("nzcvq",  0x8) // same as CPSR_f
3368    .Case("g",      0x4) // same as CPSR_s
3369    .Case("nzcvqg", 0xc) // same as CPSR_fs
3370    .Default(~0U);
3371
3372    if (FlagsVal == ~0U) {
3373      if (!Flags.empty())
3374        return MatchOperand_NoMatch;
3375      else
3376        FlagsVal = 8; // No flag
3377    }
3378  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3379    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3380    if (Flags == "all" || Flags == "")
3381      Flags = "fc";
3382    for (int i = 0, e = Flags.size(); i != e; ++i) {
3383      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3384      .Case("c", 1)
3385      .Case("x", 2)
3386      .Case("s", 4)
3387      .Case("f", 8)
3388      .Default(~0U);
3389
3390      // If some specific flag is already set, it means that some letter is
3391      // present more than once; that is not acceptable.
3392      if (FlagsVal == ~0U || (FlagsVal & Flag))
3393        return MatchOperand_NoMatch;
3394      FlagsVal |= Flag;
3395    }
3396  } else // No match for special register.
3397    return MatchOperand_NoMatch;
3398
3399  // Special register without flags is NOT equivalent to "fc" flags.
3400  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3401  // two lines would enable gas compatibility at the expense of breaking
3402  // round-tripping.
3403  //
3404  // if (!FlagsVal)
3405  //  FlagsVal = 0x9;
3406
3407  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3408  if (SpecReg == "spsr")
3409    FlagsVal |= 16;
3410
3411  Parser.Lex(); // Eat identifier token.
3412  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3413  return MatchOperand_Success;
3414}
3415
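// parsePKHImm - Parse the shift immediate for the PKH instructions, e.g. the
// "lsl #8" in "pkhbt r0, r1, r2, lsl #8" (illustrative). 'Op' names the
// required shift keyword and [Low, High] bounds the constant.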
3416ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3417parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3418            int Low, int High) {
3419  const AsmToken &Tok = Parser.getTok();
3420  if (Tok.isNot(AsmToken::Identifier)) {
3421    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3422    return MatchOperand_ParseFail;
3423  }
3424  StringRef ShiftName = Tok.getString();
3425  std::string LowerOp = Op.lower();
3426  std::string UpperOp = Op.upper();
3427  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3428    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3429    return MatchOperand_ParseFail;
3430  }
3431  Parser.Lex(); // Eat shift type token.
3432
3433  // There must be a '#' and a shift amount.
3434  if (Parser.getTok().isNot(AsmToken::Hash) &&
3435      Parser.getTok().isNot(AsmToken::Dollar)) {
3436    Error(Parser.getTok().getLoc(), "'#' expected");
3437    return MatchOperand_ParseFail;
3438  }
3439  Parser.Lex(); // Eat hash token.
3440
3441  const MCExpr *ShiftAmount;
3442  SMLoc Loc = Parser.getTok().getLoc();
3443  if (getParser().ParseExpression(ShiftAmount)) {
3444    Error(Loc, "illegal expression");
3445    return MatchOperand_ParseFail;
3446  }
3447  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3448  if (!CE) {
3449    Error(Loc, "constant expression expected");
3450    return MatchOperand_ParseFail;
3451  }
3452  int Val = CE->getValue();
3453  if (Val < Low || Val > High) {
3454    Error(Loc, "immediate value out of range");
3455    return MatchOperand_ParseFail;
3456  }
3457
3458  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3459
3460  return MatchOperand_Success;
3461}
3462
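// parseSetEndImm - Parse the endianness specifier for SETEND, e.g.
// "setend be" or "setend le" (illustrative); "be" encodes as 1, "le" as 0.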
3463ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3464parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3465  const AsmToken &Tok = Parser.getTok();
3466  SMLoc S = Tok.getLoc();
3467  if (Tok.isNot(AsmToken::Identifier)) {
3468    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3469    return MatchOperand_ParseFail;
3470  }
3471  int Val = StringSwitch<int>(Tok.getString())
3472    .Case("be", 1)
3473    .Case("le", 0)
3474    .Default(-1);
3475  Parser.Lex(); // Eat the token.
3476
3477  if (Val == -1) {
3478    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3479    return MatchOperand_ParseFail;
3480  }
3481  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3482                                                                  getContext()),
3483                                           S, Parser.getTok().getLoc()));
3484  return MatchOperand_Success;
3485}
3486
3487/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3488/// instructions. Legal values are:
3489///     lsl #n  'n' in [0,31]
3490///     asr #n  'n' in [1,32]
3491///             n == 32 encoded as n == 0.
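/// For example (illustrative): "ssat r0, #8, r1, lsl #4" and
/// "usat r0, #8, r1, asr #2".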
3492ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3493parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3494  const AsmToken &Tok = Parser.getTok();
3495  SMLoc S = Tok.getLoc();
3496  if (Tok.isNot(AsmToken::Identifier)) {
3497    Error(S, "shift operator 'asr' or 'lsl' expected");
3498    return MatchOperand_ParseFail;
3499  }
3500  StringRef ShiftName = Tok.getString();
3501  bool isASR;
3502  if (ShiftName == "lsl" || ShiftName == "LSL")
3503    isASR = false;
3504  else if (ShiftName == "asr" || ShiftName == "ASR")
3505    isASR = true;
3506  else {
3507    Error(S, "shift operator 'asr' or 'lsl' expected");
3508    return MatchOperand_ParseFail;
3509  }
3510  Parser.Lex(); // Eat the operator.
3511
3512  // A '#' and a shift amount.
3513  if (Parser.getTok().isNot(AsmToken::Hash) &&
3514      Parser.getTok().isNot(AsmToken::Dollar)) {
3515    Error(Parser.getTok().getLoc(), "'#' expected");
3516    return MatchOperand_ParseFail;
3517  }
3518  Parser.Lex(); // Eat hash token.
3519
3520  const MCExpr *ShiftAmount;
3521  SMLoc E = Parser.getTok().getLoc();
3522  if (getParser().ParseExpression(ShiftAmount)) {
3523    Error(E, "malformed shift expression");
3524    return MatchOperand_ParseFail;
3525  }
3526  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3527  if (!CE) {
3528    Error(E, "shift amount must be an immediate");
3529    return MatchOperand_ParseFail;
3530  }
3531
3532  int64_t Val = CE->getValue();
3533  if (isASR) {
3534    // Shift amount must be in [1,32]
3535    if (Val < 1 || Val > 32) {
3536      Error(E, "'asr' shift amount must be in range [1,32]");
3537      return MatchOperand_ParseFail;
3538    }
3539    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3540    if (isThumb() && Val == 32) {
3541      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3542      return MatchOperand_ParseFail;
3543    }
3544    if (Val == 32) Val = 0;
3545  } else {
3546    // Shift amount must be in [0,31].
3547    if (Val < 0 || Val > 31) {
3548      Error(E, "'lsl' shift amount must be in range [0,31]");
3549      return MatchOperand_ParseFail;
3550    }
3551  }
3552
3553  E = Parser.getTok().getLoc();
3554  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3555
3556  return MatchOperand_Success;
3557}
3558
3559/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3560/// of instructions. Legal values are:
3561///     ror #n  'n' in {0, 8, 16, 24}
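/// For example (illustrative): "sxtb r0, r1, ror #8" and
/// "uxtah r0, r1, r2, ror #16".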
3562ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3563parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3564  const AsmToken &Tok = Parser.getTok();
3565  SMLoc S = Tok.getLoc();
3566  if (Tok.isNot(AsmToken::Identifier))
3567    return MatchOperand_NoMatch;
3568  StringRef ShiftName = Tok.getString();
3569  if (ShiftName != "ror" && ShiftName != "ROR")
3570    return MatchOperand_NoMatch;
3571  Parser.Lex(); // Eat the operator.
3572
3573  // A '#' and a rotate amount.
3574  if (Parser.getTok().isNot(AsmToken::Hash) &&
3575      Parser.getTok().isNot(AsmToken::Dollar)) {
3576    Error(Parser.getTok().getLoc(), "'#' expected");
3577    return MatchOperand_ParseFail;
3578  }
3579  Parser.Lex(); // Eat hash token.
3580
3581  const MCExpr *ShiftAmount;
3582  SMLoc E = Parser.getTok().getLoc();
3583  if (getParser().ParseExpression(ShiftAmount)) {
3584    Error(E, "malformed rotate expression");
3585    return MatchOperand_ParseFail;
3586  }
3587  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3588  if (!CE) {
3589    Error(E, "rotate amount must be an immediate");
3590    return MatchOperand_ParseFail;
3591  }
3592
3593  int64_t Val = CE->getValue();
3594  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3595  // normally, zero is represented in asm by omitting the rotate operand
3596  // entirely.
3597  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3598    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3599    return MatchOperand_ParseFail;
3600  }
3601
3602  E = Parser.getTok().getLoc();
3603  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3604
3605  return MatchOperand_Success;
3606}
3607
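// parseBitfield - Parse the "#lsb, #width" operand pair used by the bitfield
// instructions, e.g. "bfi r0, r1, #4, #8" (illustrative).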
3608ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3609parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3610  SMLoc S = Parser.getTok().getLoc();
3611  // The bitfield descriptor is really two operands, the LSB and the width.
3612  if (Parser.getTok().isNot(AsmToken::Hash) &&
3613      Parser.getTok().isNot(AsmToken::Dollar)) {
3614    Error(Parser.getTok().getLoc(), "'#' expected");
3615    return MatchOperand_ParseFail;
3616  }
3617  Parser.Lex(); // Eat hash token.
3618
3619  const MCExpr *LSBExpr;
3620  SMLoc E = Parser.getTok().getLoc();
3621  if (getParser().ParseExpression(LSBExpr)) {
3622    Error(E, "malformed immediate expression");
3623    return MatchOperand_ParseFail;
3624  }
3625  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3626  if (!CE) {
3627    Error(E, "'lsb' operand must be an immediate");
3628    return MatchOperand_ParseFail;
3629  }
3630
3631  int64_t LSB = CE->getValue();
3632  // The LSB must be in the range [0,31]
3633  if (LSB < 0 || LSB > 31) {
3634    Error(E, "'lsb' operand must be in the range [0,31]");
3635    return MatchOperand_ParseFail;
3636  }
3637  E = Parser.getTok().getLoc();
3638
3639  // Expect another immediate operand.
3640  if (Parser.getTok().isNot(AsmToken::Comma)) {
3641    Error(Parser.getTok().getLoc(), "too few operands");
3642    return MatchOperand_ParseFail;
3643  }
3644  Parser.Lex(); // Eat comma token.
3645  if (Parser.getTok().isNot(AsmToken::Hash) &&
3646      Parser.getTok().isNot(AsmToken::Dollar)) {
3647    Error(Parser.getTok().getLoc(), "'#' expected");
3648    return MatchOperand_ParseFail;
3649  }
3650  Parser.Lex(); // Eat hash token.
3651
3652  const MCExpr *WidthExpr;
3653  if (getParser().ParseExpression(WidthExpr)) {
3654    Error(E, "malformed immediate expression");
3655    return MatchOperand_ParseFail;
3656  }
3657  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3658  if (!CE) {
3659    Error(E, "'width' operand must be an immediate");
3660    return MatchOperand_ParseFail;
3661  }
3662
3663  int64_t Width = CE->getValue();
3664  // The width must be in the range [1,32-lsb]
3665  if (Width < 1 || Width > 32 - LSB) {
3666    Error(E, "'width' operand must be in the range [1,32-lsb]");
3667    return MatchOperand_ParseFail;
3668  }
3669  E = Parser.getTok().getLoc();
3670
3671  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3672
3673  return MatchOperand_Success;
3674}
3675
3676ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3677parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3678  // Check for a post-index addressing register operand. Specifically:
3679  // postidx_reg := '+' register {, shift}
3680  //              | '-' register {, shift}
3681  //              | register {, shift}
3682
3683  // This method must return MatchOperand_NoMatch without consuming any tokens
3684  // in the case where there is no match, as other alternatives take other
3685  // parse methods.
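  // For example (illustrative), the post-indexed register (with optional
  // shift) in "ldrbt r0, [r1], r2, lsl #2" or "strt r0, [r1], -r2" is what
  // gets parsed here; the bracketed base has already been consumed.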
3686  AsmToken Tok = Parser.getTok();
3687  SMLoc S = Tok.getLoc();
3688  bool haveEaten = false;
3689  bool isAdd = true;
3690  int Reg = -1;
3691  if (Tok.is(AsmToken::Plus)) {
3692    Parser.Lex(); // Eat the '+' token.
3693    haveEaten = true;
3694  } else if (Tok.is(AsmToken::Minus)) {
3695    Parser.Lex(); // Eat the '-' token.
3696    isAdd = false;
3697    haveEaten = true;
3698  }
3699  if (Parser.getTok().is(AsmToken::Identifier))
3700    Reg = tryParseRegister();
3701  if (Reg == -1) {
3702    if (!haveEaten)
3703      return MatchOperand_NoMatch;
3704    Error(Parser.getTok().getLoc(), "register expected");
3705    return MatchOperand_ParseFail;
3706  }
3707  SMLoc E = Parser.getTok().getLoc();
3708
3709  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3710  unsigned ShiftImm = 0;
3711  if (Parser.getTok().is(AsmToken::Comma)) {
3712    Parser.Lex(); // Eat the ','.
3713    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3714      return MatchOperand_ParseFail;
3715  }
3716
3717  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3718                                                  ShiftImm, S, E));
3719
3720  return MatchOperand_Success;
3721}
3722
3723ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3724parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3725  // Check for a post-index addressing register operand. Specifically:
3726  // am3offset := '+' register
3727  //              | '-' register
3728  //              | register
3729  //              | # imm
3730  //              | # + imm
3731  //              | # - imm
3732
3733  // This method must return MatchOperand_NoMatch without consuming any tokens
3734  // in the case where there is no match, as other alternatives take other
3735  // parse methods.
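  // For example (illustrative), the trailing "#8" in "ldrd r0, r1, [r2], #8"
  // or the "-r2" in "strh r0, [r1], -r2" is the am3offset parsed here.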
3736  AsmToken Tok = Parser.getTok();
3737  SMLoc S = Tok.getLoc();
3738
3739  // Do immediates first, as we always parse those if we have a '#'.
3740  if (Parser.getTok().is(AsmToken::Hash) ||
3741      Parser.getTok().is(AsmToken::Dollar)) {
3742    Parser.Lex(); // Eat the '#'.
3743    // Explicitly look for a '-', as we need to encode negative zero
3744    // differently.
3745    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3746    const MCExpr *Offset;
3747    if (getParser().ParseExpression(Offset))
3748      return MatchOperand_ParseFail;
3749    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3750    if (!CE) {
3751      Error(S, "constant expression expected");
3752      return MatchOperand_ParseFail;
3753    }
3754    SMLoc E = Tok.getLoc();
3755    // Negative zero is encoded as the flag value INT32_MIN.
3756    int32_t Val = CE->getValue();
3757    if (isNegative && Val == 0)
3758      Val = INT32_MIN;
3759
3760    Operands.push_back(
3761      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3762
3763    return MatchOperand_Success;
3764  }
3765
3766
3767  bool haveEaten = false;
3768  bool isAdd = true;
3769  int Reg = -1;
3770  if (Tok.is(AsmToken::Plus)) {
3771    Parser.Lex(); // Eat the '+' token.
3772    haveEaten = true;
3773  } else if (Tok.is(AsmToken::Minus)) {
3774    Parser.Lex(); // Eat the '-' token.
3775    isAdd = false;
3776    haveEaten = true;
3777  }
3778  if (Parser.getTok().is(AsmToken::Identifier))
3779    Reg = tryParseRegister();
3780  if (Reg == -1) {
3781    if (!haveEaten)
3782      return MatchOperand_NoMatch;
3783    Error(Parser.getTok().getLoc(), "register expected");
3784    return MatchOperand_ParseFail;
3785  }
3786  SMLoc E = Parser.getTok().getLoc();
3787
3788  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3789                                                  0, S, E));
3790
3791  return MatchOperand_Success;
3792}
3793
3794/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3795/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3796/// when they refer to multiple MIOperands inside a single one.
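/// For example (illustrative), for "ldrd r0, r1, [r2, #8]!" the parsed
/// Operands are [0] mnemonic token, [1] predicate, [2] Rt, [3] Rt2 and
/// [4] the memory operand; the writeback result gets the dummy placeholder
/// created below.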
3797bool ARMAsmParser::
3798cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3799             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3800  // Rt, Rt2
3801  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3802  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3803  // Create a writeback register dummy placeholder.
3804  Inst.addOperand(MCOperand::CreateReg(0));
3805  // addr
3806  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3807  // pred
3808  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3809  return true;
3810}
3811
3812/// cvtT2StrdPre - Convert parsed operands to MCInst.
3813/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3814/// when they refer to multiple MIOperands inside a single one.
3815bool ARMAsmParser::
3816cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3817             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3818  // Create a writeback register dummy placeholder.
3819  Inst.addOperand(MCOperand::CreateReg(0));
3820  // Rt, Rt2
3821  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3822  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3823  // addr
3824  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3825  // pred
3826  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3827  return true;
3828}
3829
3830/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3831/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3832/// when they refer to multiple MIOperands inside a single one.
3833bool ARMAsmParser::
3834cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3835                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3836  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3837
3838  // Create a writeback register dummy placeholder.
3839  Inst.addOperand(MCOperand::CreateImm(0));
3840
3841  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3842  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3843  return true;
3844}
3845
3846/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3847/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3848/// when they refer to multiple MIOperands inside a single one.
3849bool ARMAsmParser::
3850cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3851                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3852  // Create a writeback register dummy placeholder.
3853  Inst.addOperand(MCOperand::CreateImm(0));
3854  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3855  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3856  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3857  return true;
3858}
3859
3860/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3861/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3862/// when they refer to multiple MIOperands inside a single one.
3863bool ARMAsmParser::
3864cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3865                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3866  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3867
3868  // Create a writeback register dummy placeholder.
3869  Inst.addOperand(MCOperand::CreateImm(0));
3870
3871  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3872  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3873  return true;
3874}
3875
3876/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3877/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3878/// when they refer to multiple MIOperands inside a single one.
3879bool ARMAsmParser::
3880cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3881                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3882  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3883
3884  // Create a writeback register dummy placeholder.
3885  Inst.addOperand(MCOperand::CreateImm(0));
3886
3887  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3888  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3889  return true;
3890}
3891
3892
3893/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3894/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3895/// when they refer to multiple MIOperands inside a single one.
3896bool ARMAsmParser::
3897cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3898                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3899  // Create a writeback register dummy placeholder.
3900  Inst.addOperand(MCOperand::CreateImm(0));
3901  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3902  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3903  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3904  return true;
3905}
3906
3907/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3908/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3909/// when they refer to multiple MIOperands inside a single one.
3910bool ARMAsmParser::
3911cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3912                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3913  // Create a writeback register dummy placeholder.
3914  Inst.addOperand(MCOperand::CreateImm(0));
3915  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3916  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3917  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3918  return true;
3919}
3920
3921/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3922/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3923/// when they refer to multiple MIOperands inside a single one.
3924bool ARMAsmParser::
3925cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3926                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3927  // Create a writeback register dummy placeholder.
3928  Inst.addOperand(MCOperand::CreateImm(0));
3929  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3930  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3931  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3932  return true;
3933}
3934
3935/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3936/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3937/// when they refer to multiple MIOperands inside a single one.
3938bool ARMAsmParser::
3939cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3940                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3941  // Rt
3942  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3943  // Create a writeback register dummy placeholder.
3944  Inst.addOperand(MCOperand::CreateImm(0));
3945  // addr
3946  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3947  // offset
3948  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3949  // pred
3950  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3951  return true;
3952}
3953
3954/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3955/// when they refer to multiple MIOperands inside a single one.
3956/// when they refer multiple MIOperands inside a single one.
3957bool ARMAsmParser::
3958cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3959                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3960  // Rt
3961  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3962  // Create a writeback register dummy placeholder.
3963  Inst.addOperand(MCOperand::CreateImm(0));
3964  // addr
3965  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3966  // offset
3967  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3968  // pred
3969  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3970  return true;
3971}
3972
3973/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3974/// when they refer to multiple MIOperands inside a single one.
3975/// when they refer multiple MIOperands inside a single one.
3976bool ARMAsmParser::
3977cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3978                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3979  // Create a writeback register dummy placeholder.
3980  Inst.addOperand(MCOperand::CreateImm(0));
3981  // Rt
3982  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3983  // addr
3984  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3985  // offset
3986  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3987  // pred
3988  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3989  return true;
3990}
3991
3992/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3993/// when they refer to multiple MIOperands inside a single one.
3994/// when they refer multiple MIOperands inside a single one.
3995bool ARMAsmParser::
3996cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3997                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3998  // Create a writeback register dummy placeholder.
3999  Inst.addOperand(MCOperand::CreateImm(0));
4000  // Rt
4001  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4002  // addr
4003  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4004  // offset
4005  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4006  // pred
4007  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4008  return true;
4009}
4010
4011/// cvtLdrdPre - Convert parsed operands to MCInst.
4012/// when they refer to multiple MIOperands inside a single one.
4013/// when they refer multiple MIOperands inside a single one.
4014bool ARMAsmParser::
4015cvtLdrdPre(MCInst &Inst, unsigned Opcode,
4016           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4017  // Rt, Rt2
4018  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4019  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4020  // Create a writeback register dummy placeholder.
4021  Inst.addOperand(MCOperand::CreateImm(0));
4022  // addr
4023  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4024  // pred
4025  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4026  return true;
4027}
4028
4029/// cvtStrdPre - Convert parsed operands to MCInst.
4030/// when they refer to multiple MIOperands inside a single one.
4031/// when they refer multiple MIOperands inside a single one.
4032bool ARMAsmParser::
4033cvtStrdPre(MCInst &Inst, unsigned Opcode,
4034           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4035  // Create a writeback register dummy placeholder.
4036  Inst.addOperand(MCOperand::CreateImm(0));
4037  // Rt, Rt2
4038  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4039  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4040  // addr
4041  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4042  // pred
4043  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4044  return true;
4045}
4046
4047/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4048/// when they refer to multiple MIOperands inside a single one.
4049/// when they refer multiple MIOperands inside a single one.
4050bool ARMAsmParser::
4051cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4052                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4053  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4054  // Create a writeback register dummy placeholder.
4055  Inst.addOperand(MCOperand::CreateImm(0));
4056  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4057  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4058  return true;
4059}
4060
4061/// cvtThumbMultiply - Convert parsed operands to MCInst.
4062/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4063/// when they refer to multiple MIOperands inside a single one.
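/// For example (illustrative), "muls r0, r1, r0" requires the destination to
/// match one of the sources; the code below picks the source that is not Rd
/// as Rn and re-adds Rd as the remaining source operand.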
4064bool ARMAsmParser::
4065cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4066           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4067  // The second source operand must be the same register as the destination
4068  // operand.
4069  if (Operands.size() == 6 &&
4070      (((ARMOperand*)Operands[3])->getReg() !=
4071       ((ARMOperand*)Operands[5])->getReg()) &&
4072      (((ARMOperand*)Operands[3])->getReg() !=
4073       ((ARMOperand*)Operands[4])->getReg())) {
4074    Error(Operands[3]->getStartLoc(),
4075          "destination register must match source register");
4076    return false;
4077  }
4078  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4079  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4080  // If we have a three-operand form, make sure to set Rn to be the operand
4081  // that isn't the same as Rd.
4082  unsigned RegOp = 4;
4083  if (Operands.size() == 6 &&
4084      ((ARMOperand*)Operands[4])->getReg() ==
4085        ((ARMOperand*)Operands[3])->getReg())
4086    RegOp = 5;
4087  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4088  Inst.addOperand(Inst.getOperand(0));
4089  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4090
4091  return true;
4092}
4093
4094bool ARMAsmParser::
4095cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4096              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4097  // Vd
4098  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4099  // Create a writeback register dummy placeholder.
4100  Inst.addOperand(MCOperand::CreateImm(0));
4101  // Vn
4102  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4103  // pred
4104  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4105  return true;
4106}
4107
4108bool ARMAsmParser::
4109cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4110                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4111  // Vd
4112  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4113  // Create a writeback register dummy placeholder.
4114  Inst.addOperand(MCOperand::CreateImm(0));
4115  // Vn
4116  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4117  // Vm
4118  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4119  // pred
4120  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4121  return true;
4122}
4123
4124bool ARMAsmParser::
4125cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4126              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4127  // Create a writeback register dummy placeholder.
4128  Inst.addOperand(MCOperand::CreateImm(0));
4129  // Vn
4130  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4131  // Vt
4132  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4133  // pred
4134  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4135  return true;
4136}
4137
4138bool ARMAsmParser::
4139cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4140                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4141  // Create a writeback register dummy placeholder.
4142  Inst.addOperand(MCOperand::CreateImm(0));
4143  // Vn
4144  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4145  // Vm
4146  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4147  // Vt
4148  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4149  // pred
4150  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4151  return true;
4152}
4153
4154/// Parse an ARM memory expression. Returns false on success, true on error
4155/// (with a diagnostic emitted). The first token must be a '[' when called.
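/// Illustrative examples of the forms handled below:
///   [r0]                @ register only
///   [r0, #4]            @ immediate offset
///   [r0, #-4]!          @ pre-index writeback ('!' is added as a token)
///   [r0, r1, lsl #2]    @ register offset with shift
///   [r0, :128]          @ alignment specifier, in bits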
4156bool ARMAsmParser::
4157parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4158  SMLoc S, E;
4159  assert(Parser.getTok().is(AsmToken::LBrac) &&
4160         "Token is not a Left Bracket");
4161  S = Parser.getTok().getLoc();
4162  Parser.Lex(); // Eat left bracket token.
4163
4164  const AsmToken &BaseRegTok = Parser.getTok();
4165  int BaseRegNum = tryParseRegister();
4166  if (BaseRegNum == -1)
4167    return Error(BaseRegTok.getLoc(), "register expected");
4168
4169  // The next token must either be a comma or a closing bracket.
4170  const AsmToken &Tok = Parser.getTok();
4171  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4172    return Error(Tok.getLoc(), "malformed memory operand");
4173
4174  if (Tok.is(AsmToken::RBrac)) {
4175    E = Tok.getLoc();
4176    Parser.Lex(); // Eat right bracket token.
4177
4178    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4179                                             0, 0, false, S, E));
4180
4181    // If there's a pre-indexing writeback marker, '!', just add it as a token
4182    // operand. It's rather odd, but syntactically valid.
4183    if (Parser.getTok().is(AsmToken::Exclaim)) {
4184      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4185      Parser.Lex(); // Eat the '!'.
4186    }
4187
4188    return false;
4189  }
4190
4191  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4192  Parser.Lex(); // Eat the comma.
4193
4194  // If we have a ':', it's an alignment specifier.
4195  if (Parser.getTok().is(AsmToken::Colon)) {
4196    Parser.Lex(); // Eat the ':'.
4197    E = Parser.getTok().getLoc();
4198
4199    const MCExpr *Expr;
4200    if (getParser().ParseExpression(Expr))
4201     return true;
4202
4203    // The expression has to be a constant. Memory references with relocations
4204    // don't come through here, as they use the <label> forms of the relevant
4205    // instructions.
4206    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4207    if (!CE)
4208      return Error (E, "constant expression expected");
4209
4210    unsigned Align = 0;
4211    switch (CE->getValue()) {
4212    default:
4213      return Error(E,
4214                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4215    case 16:  Align = 2; break;
4216    case 32:  Align = 4; break;
4217    case 64:  Align = 8; break;
4218    case 128: Align = 16; break;
4219    case 256: Align = 32; break;
4220    }
4221
4222    // Now we should have the closing ']'
4223    E = Parser.getTok().getLoc();
4224    if (Parser.getTok().isNot(AsmToken::RBrac))
4225      return Error(E, "']' expected");
4226    Parser.Lex(); // Eat right bracket token.
4227
4228    // Don't worry about range checking the value here. That's handled by
4229    // the is*() predicates.
4230    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4231                                             ARM_AM::no_shift, 0, Align,
4232                                             false, S, E));
4233
4234    // If there's a pre-indexing writeback marker, '!', just add it as a token
4235    // operand.
4236    if (Parser.getTok().is(AsmToken::Exclaim)) {
4237      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4238      Parser.Lex(); // Eat the '!'.
4239    }
4240
4241    return false;
4242  }
4243
4244  // If we have a '#', it's an immediate offset, else assume it's a register
4245  // offset. Be friendly and also accept a plain integer (without a leading
4246  // hash) for gas compatibility.
4247  if (Parser.getTok().is(AsmToken::Hash) ||
4248      Parser.getTok().is(AsmToken::Dollar) ||
4249      Parser.getTok().is(AsmToken::Integer)) {
4250    if (Parser.getTok().isNot(AsmToken::Integer))
4251      Parser.Lex(); // Eat the '#'.
4252    E = Parser.getTok().getLoc();
4253
4254    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4255    const MCExpr *Offset;
4256    if (getParser().ParseExpression(Offset))
4257     return true;
4258
4259    // The expression has to be a constant. Memory references with relocations
4260    // don't come through here, as they use the <label> forms of the relevant
4261    // instructions.
4262    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4263    if (!CE)
4264      return Error (E, "constant expression expected");
4265
4266    // If the constant was #-0, represent it as INT32_MIN.
4267    int32_t Val = CE->getValue();
4268    if (isNegative && Val == 0)
4269      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4270
4271    // Now we should have the closing ']'
4272    E = Parser.getTok().getLoc();
4273    if (Parser.getTok().isNot(AsmToken::RBrac))
4274      return Error(E, "']' expected");
4275    Parser.Lex(); // Eat right bracket token.
4276
4277    // Don't worry about range checking the value here. That's handled by
4278    // the is*() predicates.
4279    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4280                                             ARM_AM::no_shift, 0, 0,
4281                                             false, S, E));
4282
4283    // If there's a pre-indexing writeback marker, '!', just add it as a token
4284    // operand.
4285    if (Parser.getTok().is(AsmToken::Exclaim)) {
4286      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4287      Parser.Lex(); // Eat the '!'.
4288    }
4289
4290    return false;
4291  }
4292
4293  // The register offset is optionally preceded by a '+' or '-'
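  // e.g. "[r1, -r2]" marks the offset register as subtracted.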
4294  bool isNegative = false;
4295  if (Parser.getTok().is(AsmToken::Minus)) {
4296    isNegative = true;
4297    Parser.Lex(); // Eat the '-'.
4298  } else if (Parser.getTok().is(AsmToken::Plus)) {
4299    // Nothing to do.
4300    Parser.Lex(); // Eat the '+'.
4301  }
4302
4303  E = Parser.getTok().getLoc();
4304  int OffsetRegNum = tryParseRegister();
4305  if (OffsetRegNum == -1)
4306    return Error(E, "register expected");
4307
4308  // If there's a shift operator, handle it.
4309  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4310  unsigned ShiftImm = 0;
4311  if (Parser.getTok().is(AsmToken::Comma)) {
4312    Parser.Lex(); // Eat the ','.
4313    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4314      return true;
4315  }
4316
4317  // Now we should have the closing ']'
4318  E = Parser.getTok().getLoc();
4319  if (Parser.getTok().isNot(AsmToken::RBrac))
4320    return Error(E, "']' expected");
4321  Parser.Lex(); // Eat right bracket token.
4322
4323  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4324                                           ShiftType, ShiftImm, 0, isNegative,
4325                                           S, E));
4326
4327  // If there's a pre-indexing writeback marker, '!', just add it as a token
4328  // operand.
4329  if (Parser.getTok().is(AsmToken::Exclaim)) {
4330    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4331    Parser.Lex(); // Eat the '!'.
4332  }
4333
4334  return false;
4335}
4336
4337/// parseMemRegOffsetShift - one of these two:
4338///   ( lsl | lsr | asr | ror ) , # shift_amount
4339///   rrx
4340/// Returns true on error; returns false if a shift was successfully parsed.
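/// For example, this handles the trailing ", lsl #2" in
/// "ldr r0, [r1, r2, lsl #2]"; "rrx" stands alone with no shift amount.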
4341bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4342                                          unsigned &Amount) {
4343  SMLoc Loc = Parser.getTok().getLoc();
4344  const AsmToken &Tok = Parser.getTok();
4345  if (Tok.isNot(AsmToken::Identifier))
4346    return true;
4347  StringRef ShiftName = Tok.getString();
4348  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4349      ShiftName == "asl" || ShiftName == "ASL")
4350    St = ARM_AM::lsl;
4351  else if (ShiftName == "lsr" || ShiftName == "LSR")
4352    St = ARM_AM::lsr;
4353  else if (ShiftName == "asr" || ShiftName == "ASR")
4354    St = ARM_AM::asr;
4355  else if (ShiftName == "ror" || ShiftName == "ROR")
4356    St = ARM_AM::ror;
4357  else if (ShiftName == "rrx" || ShiftName == "RRX")
4358    St = ARM_AM::rrx;
4359  else
4360    return Error(Loc, "illegal shift operator");
4361  Parser.Lex(); // Eat shift type token.
4362
4363  // rrx stands alone.
4364  Amount = 0;
4365  if (St != ARM_AM::rrx) {
4366    Loc = Parser.getTok().getLoc();
4367    // A '#' and a shift amount.
4368    const AsmToken &HashTok = Parser.getTok();
4369    if (HashTok.isNot(AsmToken::Hash) &&
4370        HashTok.isNot(AsmToken::Dollar))
4371      return Error(HashTok.getLoc(), "'#' expected");
4372    Parser.Lex(); // Eat hash token.
4373
4374    const MCExpr *Expr;
4375    if (getParser().ParseExpression(Expr))
4376      return true;
4377    // Range check the immediate.
4378    // lsl, ror: 0 <= imm <= 31
4379    // lsr, asr: 0 <= imm <= 32
4380    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4381    if (!CE)
4382      return Error(Loc, "shift amount must be an immediate");
4383    int64_t Imm = CE->getValue();
4384    if (Imm < 0 ||
4385        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4386        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4387      return Error(Loc, "immediate shift value out of range");
4388    Amount = Imm;
4389  }
4390
4391  return false;
4392}
4393
4394/// parseFPImm - A floating point immediate expression operand.
4395ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4396parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4397  // Anything that can accept a floating point constant as an operand
4398  // needs to go through here, as the regular ParseExpression is
4399  // integer only.
4400  //
4401  // This routine still creates a generic Immediate operand, containing
4402  // a bitcast of the 64-bit floating point value. The various operands
4403  // that accept floats can check whether the value is valid for them
4404  // via the standard is*() predicates.
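  // For example, "vmov.f32 s0, #1.0" is parsed here, as is the raw encoded
  // 8-bit form such as "vmov.f32 s0, #112".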
4405
4406  SMLoc S = Parser.getTok().getLoc();
4407
4408  if (Parser.getTok().isNot(AsmToken::Hash) &&
4409      Parser.getTok().isNot(AsmToken::Dollar))
4410    return MatchOperand_NoMatch;
4411
4412  // Disambiguate the VMOV forms that can accept an FP immediate.
4413  // vmov.f32 <sreg>, #imm
4414  // vmov.f64 <dreg>, #imm
4415  // vmov.f32 <dreg>, #imm  @ vector f32x2
4416  // vmov.f32 <qreg>, #imm  @ vector f32x4
4417  //
4418  // There are also the NEON VMOV instructions which expect an
4419  // integer constant. Make sure we don't try to parse an FPImm
4420  // for these:
4421  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4422  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4423  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4424                           TyOp->getToken() != ".f64"))
4425    return MatchOperand_NoMatch;
4426
4427  Parser.Lex(); // Eat the '#'.
4428
4429  // Handle negation, as that still comes through as a separate token.
4430  bool isNegative = false;
4431  if (Parser.getTok().is(AsmToken::Minus)) {
4432    isNegative = true;
4433    Parser.Lex();
4434  }
4435  const AsmToken &Tok = Parser.getTok();
4436  SMLoc Loc = Tok.getLoc();
4437  if (Tok.is(AsmToken::Real)) {
4438    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4439    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4440    // If we had a '-' in front, toggle the sign bit.
4441    IntVal ^= (uint64_t)isNegative << 31;
4442    Parser.Lex(); // Eat the token.
4443    Operands.push_back(ARMOperand::CreateImm(
4444          MCConstantExpr::Create(IntVal, getContext()),
4445          S, Parser.getTok().getLoc()));
4446    return MatchOperand_Success;
4447  }
4448  // Also handle plain integers. Instructions which allow floating point
4449  // immediates also allow a raw encoded 8-bit value.
4450  if (Tok.is(AsmToken::Integer)) {
4451    int64_t Val = Tok.getIntVal();
4452    Parser.Lex(); // Eat the token.
4453    if (Val > 255 || Val < 0) {
4454      Error(Loc, "encoded floating point value out of range");
4455      return MatchOperand_ParseFail;
4456    }
4457    double RealVal = ARM_AM::getFPImmFloat(Val);
4458    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4459    Operands.push_back(ARMOperand::CreateImm(
4460        MCConstantExpr::Create(Val, getContext()), S,
4461        Parser.getTok().getLoc()));
4462    return MatchOperand_Success;
4463  }
4464
4465  Error(Loc, "invalid floating point immediate");
4466  return MatchOperand_ParseFail;
4467}
4468
4469/// Parse an ARM instruction operand.  For now this parses the operand regardless
4470/// of the mnemonic.
4471bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4472                                StringRef Mnemonic) {
4473  SMLoc S, E;
4474
4475  // Check if the current operand has a custom associated parser, if so, try to
4476  // custom parse the operand, or fallback to the general approach.
4477  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4478  if (ResTy == MatchOperand_Success)
4479    return false;
4480  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4481  // there was a match, but an error occurred, in which case, just return that
4482  // the operand parsing failed.
4483  if (ResTy == MatchOperand_ParseFail)
4484    return true;
4485
4486  switch (getLexer().getKind()) {
4487  default:
4488    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4489    return true;
4490  case AsmToken::Identifier: {
4491    if (!tryParseRegisterWithWriteBack(Operands))
4492      return false;
4493    int Res = tryParseShiftRegister(Operands);
4494    if (Res == 0) // success
4495      return false;
4496    else if (Res == -1) // irrecoverable error
4497      return true;
4498    // If this is VMRS, check for the apsr_nzcv operand.
4499    if (Mnemonic == "vmrs" &&
4500        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4501      S = Parser.getTok().getLoc();
4502      Parser.Lex();
4503      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4504      return false;
4505    }
4506
4507    // Fall through for the Identifier case that is not a register or a
4508    // special name.
4509  }
4510  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4511  case AsmToken::Integer: // things like 1f and 2b as branch targets
4512  case AsmToken::String:  // quoted label names.
4513  case AsmToken::Dot: {   // . as a branch target
4514    // This was not a register so parse other operands that start with an
4515    // identifier (like labels) as expressions and create them as immediates.
4516    const MCExpr *IdVal;
4517    S = Parser.getTok().getLoc();
4518    if (getParser().ParseExpression(IdVal))
4519      return true;
4520    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4521    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4522    return false;
4523  }
4524  case AsmToken::LBrac:
4525    return parseMemory(Operands);
4526  case AsmToken::LCurly:
4527    return parseRegisterList(Operands);
4528  case AsmToken::Dollar:
4529  case AsmToken::Hash: {
4530    // #42 -> immediate.
4531    S = Parser.getTok().getLoc();
4532    Parser.Lex();
4533
4534    if (Parser.getTok().isNot(AsmToken::Colon)) {
4535      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4536      const MCExpr *ImmVal;
4537      if (getParser().ParseExpression(ImmVal))
4538        return true;
4539      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4540      if (CE) {
4541        int32_t Val = CE->getValue();
4542        if (isNegative && Val == 0)
4543          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4544      }
4545      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4546      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4547      return false;
4548    }
4549    // w/ a ':' after the '#', it's just like a plain ':'.
4550    // FALLTHROUGH
4551  }
4552  case AsmToken::Colon: {
4553    // ":lower16:" and ":upper16:" expression prefixes
4554    // FIXME: Check it's an expression prefix,
4555    // e.g. (FOO - :lower16:BAR) isn't legal.
4556    ARMMCExpr::VariantKind RefKind;
4557    if (parsePrefix(RefKind))
4558      return true;
4559
4560    const MCExpr *SubExprVal;
4561    if (getParser().ParseExpression(SubExprVal))
4562      return true;
4563
4564    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4565                                                   getContext());
4566    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4567    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4568    return false;
4569  }
4570  }
4571}
4572
4573// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4574//  :lower16: and :upper16:.
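//  e.g. "movw r0, :lower16:sym" and "movt r0, :upper16:sym", where "sym" is
//  just an illustrative symbol name.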
4575bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4576  RefKind = ARMMCExpr::VK_ARM_None;
4577
4578  // :lower16: and :upper16: modifiers
4579  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4580  Parser.Lex(); // Eat ':'
4581
4582  if (getLexer().isNot(AsmToken::Identifier)) {
4583    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4584    return true;
4585  }
4586
4587  StringRef IDVal = Parser.getTok().getIdentifier();
4588  if (IDVal == "lower16") {
4589    RefKind = ARMMCExpr::VK_ARM_LO16;
4590  } else if (IDVal == "upper16") {
4591    RefKind = ARMMCExpr::VK_ARM_HI16;
4592  } else {
4593    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4594    return true;
4595  }
4596  Parser.Lex();
4597
4598  if (getLexer().isNot(AsmToken::Colon)) {
4599    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4600    return true;
4601  }
4602  Parser.Lex(); // Eat the last ':'
4603  return false;
4604}
4605
4606/// \brief Given a mnemonic, split out possible predication code and carry
4607/// setting letters to form a canonical mnemonic and flags.
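/// For example, "addseq" splits into the canonical mnemonic "add" with
/// CarrySetting set and PredicationCode == ARMCC::EQ.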
4608//
4609// FIXME: Would be nice to autogen this.
4610// FIXME: This is a bit of a maze of special cases.
4611StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4612                                      unsigned &PredicationCode,
4613                                      bool &CarrySetting,
4614                                      unsigned &ProcessorIMod,
4615                                      StringRef &ITMask) {
4616  PredicationCode = ARMCC::AL;
4617  CarrySetting = false;
4618  ProcessorIMod = 0;
4619
4620  // Ignore some mnemonics we know aren't predicated forms.
4621  //
4622  // FIXME: Would be nice to autogen this.
4623  if ((Mnemonic == "movs" && isThumb()) ||
4624      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4625      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4626      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4627      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4628      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4629      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4630      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4631      Mnemonic == "fmuls")
4632    return Mnemonic;
4633
4634  // First, split out any predication code. Ignore mnemonics we know aren't
4635  // predicated but do have a carry-set and so weren't caught above.
4636  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4637      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4638      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4639      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4640    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4641      .Case("eq", ARMCC::EQ)
4642      .Case("ne", ARMCC::NE)
4643      .Case("hs", ARMCC::HS)
4644      .Case("cs", ARMCC::HS)
4645      .Case("lo", ARMCC::LO)
4646      .Case("cc", ARMCC::LO)
4647      .Case("mi", ARMCC::MI)
4648      .Case("pl", ARMCC::PL)
4649      .Case("vs", ARMCC::VS)
4650      .Case("vc", ARMCC::VC)
4651      .Case("hi", ARMCC::HI)
4652      .Case("ls", ARMCC::LS)
4653      .Case("ge", ARMCC::GE)
4654      .Case("lt", ARMCC::LT)
4655      .Case("gt", ARMCC::GT)
4656      .Case("le", ARMCC::LE)
4657      .Case("al", ARMCC::AL)
4658      .Default(~0U);
4659    if (CC != ~0U) {
4660      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4661      PredicationCode = CC;
4662    }
4663  }
4664
4665  // Next, determine if we have a carry setting bit. We explicitly ignore all
4666  // the instructions we know end in 's'.
4667  if (Mnemonic.endswith("s") &&
4668      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4669        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4670        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4671        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4672        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4673        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4674        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4675        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4676        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4677        (Mnemonic == "movs" && isThumb()))) {
4678    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4679    CarrySetting = true;
4680  }
4681
4682  // The "cps" instruction can have an interrupt mode operand glued into the
4683  // mnemonic. Check for that, split it out, and parse the imod operand.
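  // e.g. "cpsie" is split into "cps" with ProcessorIMod == ARM_PROC::IE.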
4684  if (Mnemonic.startswith("cps")) {
4685    // Split out any imod code.
4686    unsigned IMod =
4687      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4688      .Case("ie", ARM_PROC::IE)
4689      .Case("id", ARM_PROC::ID)
4690      .Default(~0U);
4691    if (IMod != ~0U) {
4692      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4693      ProcessorIMod = IMod;
4694    }
4695  }
4696
4697  // The "it" instruction has the condition mask on the end of the mnemonic.
4698  if (Mnemonic.startswith("it")) {
4699    ITMask = Mnemonic.slice(2, Mnemonic.size());
4700    Mnemonic = Mnemonic.slice(0, 2);
4701  }
4702
4703  return Mnemonic;
4704}
4705
4706/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4707/// inclusion of carry set or predication code operands.
4708//
4709// FIXME: It would be nice to autogen this.
4710void ARMAsmParser::
4711getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4712                      bool &CanAcceptPredicationCode) {
4713  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4714      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4715      Mnemonic == "add" || Mnemonic == "adc" ||
4716      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4717      Mnemonic == "orr" || Mnemonic == "mvn" ||
4718      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4719      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4720      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4721      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4722                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4723                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4724    CanAcceptCarrySet = true;
4725  } else
4726    CanAcceptCarrySet = false;
4727
4728  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4729      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4730      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4731      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4732      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4733      (Mnemonic == "clrex" && !isThumb()) ||
4734      (Mnemonic == "nop" && isThumbOne()) ||
4735      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4736        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4737        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4738      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4739       !isThumb()) ||
4740      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4741    CanAcceptPredicationCode = false;
4742  } else
4743    CanAcceptPredicationCode = true;
4744
4745  if (isThumb()) {
4746    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4747        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4748      CanAcceptPredicationCode = false;
4749  }
4750}
4751
4752bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4753                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4754  // FIXME: This is all horribly hacky. We really need a better way to deal
4755  // with optional operands like this in the matcher table.
4756
4757  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4758  // another does not. Specifically, the MOVW instruction does not. So we
4759  // special case it here and remove the defaulted (non-setting) cc_out
4760  // operand if that's the instruction we're trying to match.
4761  //
4762  // We do this as post-processing of the explicit operands rather than just
4763  // conditionally adding the cc_out in the first place because we need
4764  // to check the type of the parsed immediate operand.
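  // For example, "mov r0, #0x1234" (ARM mode) is not encodable as an ARM
  // SO-immediate but fits imm0_65535, so only MOVW can match and the cc_out
  // operand is dropped.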
4765  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4766      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4767      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4768      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4769    return true;
4770
4771  // Register-register 'add' for thumb does not have a cc_out operand
4772  // when there are only two register operands.
4773  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4774      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4775      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4776      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4777    return true;
4778  // Register-register 'add' for thumb does not have a cc_out operand
4779  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4780  // have to check the immediate range here since Thumb2 has a variant
4781  // that can handle a different range and has a cc_out operand.
4782  if (((isThumb() && Mnemonic == "add") ||
4783       (isThumbTwo() && Mnemonic == "sub")) &&
4784      Operands.size() == 6 &&
4785      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4786      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4787      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4788      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4789      ((Mnemonic == "add" && static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4790       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4791    return true;
4792  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4793  // imm0_4095 variant. That's the least-preferred variant when
4794  // selecting via the generic "add" mnemonic, so to know that we
4795  // should remove the cc_out operand, we have to explicitly check that
4796  // it's not one of the other variants. Ugh.
4797  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4798      Operands.size() == 6 &&
4799      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4800      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4801      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4802    // Nest conditions rather than one big 'if' statement for readability.
4803    //
4804    // If either register is a high reg, it's either one of the SP
4805    // variants (handled above) or a 32-bit encoding, so we just
4806    // check against T3. If the second register is the PC, this is an
4807    // alternate form of ADR, which uses encoding T4, so check for that too.
4808    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4809         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4810        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4811        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4812      return false;
4813    // If both registers are low, we're in an IT block, and the immediate is
4814    // in range, we should use encoding T1 instead, which has a cc_out.
4815    if (inITBlock() &&
4816        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4817        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4818        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4819      return false;
4820
4821    // Otherwise, we use encoding T4, which does not have a cc_out
4822    // operand.
4823    return true;
4824  }
4825
4826  // The thumb2 multiply instruction doesn't have a CCOut register, so
4827  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4828  // use the 16-bit encoding or not.
4829  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4830      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4831      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4832      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4833      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4834      // If the registers aren't low regs, the destination reg isn't the
4835      // same as one of the source regs, or the cc_out operand is zero
4836      // outside of an IT block, we have to use the 32-bit encoding, so
4837      // remove the cc_out operand.
4838      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4839       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4840       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4841       !inITBlock() ||
4842       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4843        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4844        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4845        static_cast<ARMOperand*>(Operands[4])->getReg())))
4846    return true;
4847
4848  // Also check the 'mul' syntax variant that doesn't specify an explicit
4849  // destination register.
4850  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4851      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4852      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4853      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4854      // If the registers aren't low regs or the cc_out operand is zero
4855      // outside of an IT block, we have to use the 32-bit encoding, so
4856      // remove the cc_out operand.
4857      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4858       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4859       !inITBlock()))
4860    return true;
4861
4862
4863
4864  // Register-register 'add/sub' for thumb does not have a cc_out operand
4865  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4866  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4867  // right, this will result in better diagnostics (which operand is off)
4868  // anyway.
4869  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4870      (Operands.size() == 5 || Operands.size() == 6) &&
4871      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4872      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4873      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4874      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4875       (Operands.size() == 6 &&
4876        static_cast<ARMOperand*>(Operands[5])->isImm())))
4877    return true;
4878
4879  return false;
4880}
4881
4882static bool isDataTypeToken(StringRef Tok) {
4883  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4884    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4885    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4886    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4887    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4888    Tok == ".f" || Tok == ".d";
4889}
4890
4891// FIXME: This bit should probably be handled via an explicit match class
4892// in the .td files that matches the suffix instead of having it be
4893// a literal string token the way it is now.
4894static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4895  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4896}
4897
4898static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4899/// Parse an ARM instruction mnemonic followed by its operands.
4900bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4901                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4902  // Apply mnemonic aliases before doing anything else, as the destination
4903  // mnemonic may include suffixes and we want to handle them normally.
4904  // The generic tblgen'erated code does this later, at the start of
4905  // MatchInstructionImpl(), but that's too late for aliases that include
4906  // any sort of suffix.
4907  unsigned AvailableFeatures = getAvailableFeatures();
4908  applyMnemonicAliases(Name, AvailableFeatures);
4909
4910  // First check for the ARM-specific .req directive.
4911  if (Parser.getTok().is(AsmToken::Identifier) &&
4912      Parser.getTok().getIdentifier() == ".req") {
4913    parseDirectiveReq(Name, NameLoc);
4914    // We always return 'error' for this, as we're done with this
4915    // statement and don't need to match the instruction.
4916    return true;
4917  }
4918
4919  // Create the leading tokens for the mnemonic, split by '.' characters.
4920  size_t Start = 0, Next = Name.find('.');
4921  StringRef Mnemonic = Name.slice(Start, Next);
4922
4923  // Split out the predication code and carry setting flag from the mnemonic.
4924  unsigned PredicationCode;
4925  unsigned ProcessorIMod;
4926  bool CarrySetting;
4927  StringRef ITMask;
4928  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4929                           ProcessorIMod, ITMask);
4930
4931  // In Thumb1, only the branch (B) instruction can be predicated.
4932  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4933    Parser.EatToEndOfStatement();
4934    return Error(NameLoc, "conditional execution not supported in Thumb1");
4935  }
4936
4937  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4938
4939  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4940  // is the mask as it will be for the IT encoding if the conditional
4941  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4942  // where the conditional bit0 is zero, the instruction post-processing
4943  // will adjust the mask accordingly.
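  // For example, "itte" leaves ITMask == "te" and computes Mask == 0b1010 here.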
4944  if (Mnemonic == "it") {
4945    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4946    if (ITMask.size() > 3) {
4947      Parser.EatToEndOfStatement();
4948      return Error(Loc, "too many conditions on IT instruction");
4949    }
4950    unsigned Mask = 8;
4951    for (unsigned i = ITMask.size(); i != 0; --i) {
4952      char pos = ITMask[i - 1];
4953      if (pos != 't' && pos != 'e') {
4954        Parser.EatToEndOfStatement();
4955        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4956      }
4957      Mask >>= 1;
4958      if (ITMask[i - 1] == 't')
4959        Mask |= 8;
4960    }
4961    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4962  }
4963
4964  // FIXME: This is all a pretty gross hack. We should automatically handle
4965  // optional operands like this via tblgen.
4966
4967  // Next, add the CCOut and ConditionCode operands, if needed.
4968  //
4969  // For mnemonics which can ever incorporate a carry setting bit or predication
4970  // code, our matching model involves us always generating CCOut and
4971  // ConditionCode operands to match the mnemonic "as written" and then we let
4972  // the matcher deal with finding the right instruction or generating an
4973  // appropriate error.
4974  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4975  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4976
4977  // If we had a carry-set on an instruction that can't do that, issue an
4978  // error.
4979  if (!CanAcceptCarrySet && CarrySetting) {
4980    Parser.EatToEndOfStatement();
4981    return Error(NameLoc, "instruction '" + Mnemonic +
4982                 "' can not set flags, but 's' suffix specified");
4983  }
4984  // If we had a predication code on an instruction that can't do that, issue an
4985  // error.
4986  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4987    Parser.EatToEndOfStatement();
4988    return Error(NameLoc, "instruction '" + Mnemonic +
4989                 "' is not predicable, but condition code specified");
4990  }
4991
4992  // Add the carry setting operand, if necessary.
4993  if (CanAcceptCarrySet) {
4994    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4995    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4996                                               Loc));
4997  }
4998
4999  // Add the predication code operand, if necessary.
5000  if (CanAcceptPredicationCode) {
5001    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5002                                      CarrySetting);
5003    Operands.push_back(ARMOperand::CreateCondCode(
5004                         ARMCC::CondCodes(PredicationCode), Loc));
5005  }
5006
5007  // Add the processor imod operand, if necessary.
5008  if (ProcessorIMod) {
5009    Operands.push_back(ARMOperand::CreateImm(
5010          MCConstantExpr::Create(ProcessorIMod, getContext()),
5011                                 NameLoc, NameLoc));
5012  }
5013
5014  // Add the remaining tokens in the mnemonic.
5015  while (Next != StringRef::npos) {
5016    Start = Next;
5017    Next = Name.find('.', Start + 1);
5018    StringRef ExtraToken = Name.slice(Start, Next);
5019
5020    // Some NEON instructions have an optional datatype suffix that is
5021    // completely ignored. Check for that.
5022    if (isDataTypeToken(ExtraToken) &&
5023        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5024      continue;
5025
5026    if (ExtraToken != ".n") {
5027      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5028      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5029    }
5030  }
5031
5032  // Read the remaining operands.
5033  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5034    // Read the first operand.
5035    if (parseOperand(Operands, Mnemonic)) {
5036      Parser.EatToEndOfStatement();
5037      return true;
5038    }
5039
5040    while (getLexer().is(AsmToken::Comma)) {
5041      Parser.Lex();  // Eat the comma.
5042
5043      // Parse and remember the operand.
5044      if (parseOperand(Operands, Mnemonic)) {
5045        Parser.EatToEndOfStatement();
5046        return true;
5047      }
5048    }
5049  }
5050
5051  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5052    SMLoc Loc = getLexer().getLoc();
5053    Parser.EatToEndOfStatement();
5054    return Error(Loc, "unexpected token in argument list");
5055  }
5056
5057  Parser.Lex(); // Consume the EndOfStatement
5058
5059  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5060  // do and don't have a cc_out optional-def operand. With some spot-checks
5061  // of the operand list, we can figure out which variant we're trying to
5062  // parse and adjust accordingly before actually matching. We shouldn't ever
5063  // try to remove a cc_out operand that was explicitly set on the
5064  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5065  // table-driven matcher doesn't fit well with the ARM instruction set.
5066  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5067    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5068    Operands.erase(Operands.begin() + 1);
5069    delete Op;
5070  }
5071
5072  // ARM mode 'blx' needs special handling, as the register operand version
5073  // is predicable, but the label operand version is not. So, we can't rely
5074  // on the Mnemonic based checking to correctly figure out when to put
5075  // a k_CondCode operand in the list. If we're trying to match the label
5076  // version, remove the k_CondCode operand here.
5077  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5078      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5079    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5080    Operands.erase(Operands.begin() + 1);
5081    delete Op;
5082  }
5083
5084  // The vector-compare-to-zero instructions have a literal token "#0" at
5085  // the end that comes to here as an immediate operand. Convert it to a
5086  // token to play nicely with the matcher.
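  // e.g. the "#0" in "vceq.i32 d0, d1, #0" becomes a "#0" token operand.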
5087  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5088      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5089      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5090    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5091    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5092    if (CE && CE->getValue() == 0) {
5093      Operands.erase(Operands.begin() + 5);
5094      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5095      delete Op;
5096    }
5097  }
5098  // VCMP{E} does the same thing, but with a different operand count.
5099  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5100      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5101    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5102    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5103    if (CE && CE->getValue() == 0) {
5104      Operands.erase(Operands.begin() + 4);
5105      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5106      delete Op;
5107    }
5108  }
5109  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5110  // end. Convert it to a token here. Take care not to convert those
5111  // that should hit the Thumb2 encoding.
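  // e.g. "rsbs r0, r1, #0", the Thumb1 negate idiom.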
5112  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5113      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5114      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5115      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5116    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5117    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5118    if (CE && CE->getValue() == 0 &&
5119        (isThumbOne() ||
5120         // The cc_out operand matches the IT block.
5121         ((inITBlock() != CarrySetting) &&
5122         // Neither register operand is a high register.
5123         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5124          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5125      Operands.erase(Operands.begin() + 5);
5126      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5127      delete Op;
5128    }
5129  }
5130
5131  return false;
5132}
5133
5134// Validate context-sensitive operand constraints.
5135
5136// Return 'true' if the register list contains non-low GPR registers (other
5137// than the permitted HiReg), 'false' otherwise. If Reg is in the register
5138// list, set 'containsReg' to true.
5139static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5140                                 unsigned HiReg, bool &containsReg) {
5141  containsReg = false;
5142  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5143    unsigned OpReg = Inst.getOperand(i).getReg();
5144    if (OpReg == Reg)
5145      containsReg = true;
5146    // Anything other than a low register isn't legal here.
5147    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5148      return true;
5149  }
5150  return false;
5151}
5152
5153// Check if the specified register is in the register list of the inst,
5154// starting at the indicated operand number.
5155static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5156  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5157    unsigned OpReg = Inst.getOperand(i).getReg();
5158    if (OpReg == Reg)
5159      return true;
5160  }
5161  return false;
5162}
5163
5164// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5165// the ARMInsts array) instead. Getting that here requires awkward
5166// API changes, though. Better way?
5167namespace llvm {
5168extern const MCInstrDesc ARMInsts[];
5169}
5170static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5171  return ARMInsts[Opcode];
5172}
5173
5174// FIXME: We would really like to be able to tablegen'erate this.
5175bool ARMAsmParser::
5176validateInstruction(MCInst &Inst,
5177                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5178  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5179  SMLoc Loc = Operands[0]->getStartLoc();
5180  // Check the IT block state first.
5181  // NOTE: BKPT instruction has the interesting property of being
5182  // allowed in IT blocks, but not being predicable.  It just always
5183  // executes.
5184  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5185      Inst.getOpcode() != ARM::BKPT) {
5186    unsigned bit = 1;
5187    if (ITState.FirstCond)
5188      ITState.FirstCond = false;
5189    else
5190      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5191    // The instruction must be predicable.
5192    if (!MCID.isPredicable())
5193      return Error(Loc, "instructions in IT block must be predicable");
5194    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5195    unsigned ITCond = bit ? ITState.Cond :
5196      ARMCC::getOppositeCondition(ITState.Cond);
5197    if (Cond != ITCond) {
5198      // Find the condition code Operand to get its SMLoc information.
5199      SMLoc CondLoc;
5200      for (unsigned i = 1; i < Operands.size(); ++i)
5201        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5202          CondLoc = Operands[i]->getStartLoc();
5203      return Error(CondLoc, "incorrect condition in IT block; got '" +
5204                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5205                   "', but expected '" +
5206                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5207    }
5208  // Check for non-'al' condition codes outside of the IT block.
5209  } else if (isThumbTwo() && MCID.isPredicable() &&
5210             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5211             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5212             Inst.getOpcode() != ARM::t2B)
5213    return Error(Loc, "predicated instructions must be in IT block");
5214
5215  switch (Inst.getOpcode()) {
5216  case ARM::LDRD:
5217  case ARM::LDRD_PRE:
5218  case ARM::LDRD_POST:
5219  case ARM::LDREXD: {
5220    // Rt2 must be Rt + 1.
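    // e.g. "ldrd r0, r1, [r2]" is accepted, while "ldrd r0, r2, [r3]" is
    // rejected here because the destination registers are not sequential.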
5221    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5222    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5223    if (Rt2 != Rt + 1)
5224      return Error(Operands[3]->getStartLoc(),
5225                   "destination operands must be sequential");
5226    return false;
5227  }
5228  case ARM::STRD: {
5229    // Rt2 must be Rt + 1.
5230    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5231    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5232    if (Rt2 != Rt + 1)
5233      return Error(Operands[3]->getStartLoc(),
5234                   "source operands must be sequential");
5235    return false;
5236  }
5237  case ARM::STRD_PRE:
5238  case ARM::STRD_POST:
5239  case ARM::STREXD: {
5240    // Rt2 must be Rt + 1.
5241    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5242    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5243    if (Rt2 != Rt + 1)
5244      return Error(Operands[3]->getStartLoc(),
5245                   "source operands must be sequential");
5246    return false;
5247  }
5248  case ARM::SBFX:
5249  case ARM::UBFX: {
5250    // width must be in range [1, 32-lsb]
5251    unsigned lsb = Inst.getOperand(2).getImm();
5252    unsigned widthm1 = Inst.getOperand(3).getImm();
5253    if (widthm1 >= 32 - lsb)
5254      return Error(Operands[5]->getStartLoc(),
5255                   "bitfield width must be in range [1,32-lsb]");
5256    return false;
5257  }
5258  case ARM::tLDMIA: {
5259    // If we're parsing Thumb2, the .w variant is available and handles
5260    // most cases that are normally illegal for a Thumb1 LDM
5261    // instruction. We'll make the transformation in processInstruction()
5262    // if necessary.
5263    //
5264    // Thumb LDM instructions are writeback iff the base register is not
5265    // in the register list.
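    // e.g. "ldm r3, {r2, r3}" has the base in the list and takes no '!',
    // while "ldm r3!, {r0, r2}" must carry the writeback '!'.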
5266    unsigned Rn = Inst.getOperand(0).getReg();
5267    bool hasWritebackToken =
5268      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5269       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5270    bool listContainsBase;
5271    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5272      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5273                   "registers must be in range r0-r7");
5274    // If we should have writeback, then there should be a '!' token.
5275    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5276      return Error(Operands[2]->getStartLoc(),
5277                   "writeback operator '!' expected");
5278    // If we should not have writeback, there must not be a '!'. This is
5279    // true even for the 32-bit wide encodings.
5280    if (listContainsBase && hasWritebackToken)
5281      return Error(Operands[3]->getStartLoc(),
5282                   "writeback operator '!' not allowed when base register "
5283                   "in register list");
5284
5285    break;
5286  }
5287  case ARM::t2LDMIA_UPD: {
5288    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5289      return Error(Operands[4]->getStartLoc(),
5290                   "writeback operator '!' not allowed when base register "
5291                   "in register list");
5292    break;
5293  }
5294  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5295  // so only issue a diagnostic for Thumb1. The instructions will be
5296  // switched to the t2 encodings in processInstruction() if necessary.
5297  case ARM::tPOP: {
5298    bool listContainsBase;
5299    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5300        !isThumbTwo())
5301      return Error(Operands[2]->getStartLoc(),
5302                   "registers must be in range r0-r7 or pc");
5303    break;
5304  }
5305  case ARM::tPUSH: {
5306    bool listContainsBase;
5307    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5308        !isThumbTwo())
5309      return Error(Operands[2]->getStartLoc(),
5310                   "registers must be in range r0-r7 or lr");
5311    break;
5312  }
5313  case ARM::tSTMIA_UPD: {
5314    bool listContainsBase;
5315    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5316      return Error(Operands[4]->getStartLoc(),
5317                   "registers must be in range r0-r7");
5318    break;
5319  }
5320  }
5321
5322  return false;
5323}
5324
5325static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5326  switch(Opc) {
5327  default: llvm_unreachable("unexpected opcode!");
5328  // VST1LN
5329  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5330  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5331  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5332  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5333  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5334  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5335  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5336  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5337  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5338
5339  // VST2LN
5340  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5341  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5342  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5343  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5344  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5345
5346  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5347  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5348  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5349  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5350  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5351
5352  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5353  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5354  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5355  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5356  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5357
5358  // VST3LN
5359  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5360  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5361  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5362  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5363  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5364  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5365  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5366  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5367  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5368  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5369  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5370  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5371  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5372  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5373  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5374
5375  // VST3
5376  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5377  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5378  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5379  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5380  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5381  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5382  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5383  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5384  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5385  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5386  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5387  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5388  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5389  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5390  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5391  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5392  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5393  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5394
5395  // VST4LN
5396  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5397  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5398  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5399  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5400  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5401  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5402  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5403  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5404  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5405  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5406  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5407  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5408  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5409  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5410  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5411
5412  // VST4
5413  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5414  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5415  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5416  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5417  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5418  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5419  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5420  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5421  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5422  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5423  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5424  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5425  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5426  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5427  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5428  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5429  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5430  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5431  }
5432}
5433
5434static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5435  switch(Opc) {
5436  default: llvm_unreachable("unexpected opcode!");
5437  // VLD1LN
5438  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5439  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5440  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5441  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5442  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5443  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5444  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5445  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5446  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5447
5448  // VLD2LN
5449  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5450  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5451  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5452  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5453  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5454  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5455  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5456  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5457  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5458  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5459  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5460  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5461  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5462  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5463  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5464
5465  // VLD3DUP
5466  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5467  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5468  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5469  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
5470  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
5471  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5472  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5473  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5474  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5475  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5476  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5477  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5478  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5479  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5480  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5481  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5482  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5483  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5484
5485  // VLD3LN
5486  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5487  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5488  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5489  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5490  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5491  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5492  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5493  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5494  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5495  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5496  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5497  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5498  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5499  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5500  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5501
5502  // VLD3
5503  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5504  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5505  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5506  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5507  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5508  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5509  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5510  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5511  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5512  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5513  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5514  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5515  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5516  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5517  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5518  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5519  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5520  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5521
5522  // VLD4LN
5523  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5524  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5525  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5526  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5527  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5528  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5529  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5530  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5531  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5532  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5533  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5534  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5535  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5536  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5537  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5538
5539  // VLD4DUP
5540  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5541  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5542  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5543  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5544  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5545  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5546  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5547  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5548  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5549  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5550  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5551  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5552  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5553  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5554  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5555  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5556  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5557  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5558
5559  // VLD4
5560  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5561  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5562  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5563  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5564  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5565  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5566  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5567  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5568  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5569  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5570  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5571  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5572  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5573  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5574  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5575  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5576  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5577  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5578  }
5579}
5580
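// Post-process a matched instruction: expand the complex assembly aliases
// (the NEON VLD/VST pseudos above, the PC-relative LDR forms, etc.) and
// choose between narrow and wide Thumb encodings.
// Returns true if Inst was rewritten.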
5581bool ARMAsmParser::
5582processInstruction(MCInst &Inst,
5583                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5584  switch (Inst.getOpcode()) {
5585  // Aliases for alternate PC+imm syntax of LDR instructions.
5586  case ARM::t2LDRpcrel:
5587    Inst.setOpcode(ARM::t2LDRpci);
5588    return true;
5589  case ARM::t2LDRBpcrel:
5590    Inst.setOpcode(ARM::t2LDRBpci);
5591    return true;
5592  case ARM::t2LDRHpcrel:
5593    Inst.setOpcode(ARM::t2LDRHpci);
5594    return true;
5595  case ARM::t2LDRSBpcrel:
5596    Inst.setOpcode(ARM::t2LDRSBpci);
5597    return true;
5598  case ARM::t2LDRSHpcrel:
5599    Inst.setOpcode(ARM::t2LDRSHpci);
5600    return true;
5601  // Handle NEON VST complex aliases.
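  // The lane VST aliases are matched with the register list first
  // (Vd, lane, Rn, alignment[, Rm], pred), but the real instructions want the
  // address operands up front. Each case below rebuilds the MCInst in the
  // right order and materializes the remaining registers of the list from Vd
  // plus the Spacing reported by getRealVSTOpcode.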
5602  case ARM::VST1LNdWB_register_Asm_8:
5603  case ARM::VST1LNdWB_register_Asm_16:
5604  case ARM::VST1LNdWB_register_Asm_32: {
5605    MCInst TmpInst;
5606    // Shuffle the operands around so the lane index operand is in the
5607    // right place.
5608    unsigned Spacing;
5609    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5610    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5611    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5612    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5613    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5614    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5615    TmpInst.addOperand(Inst.getOperand(1)); // lane
5616    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5617    TmpInst.addOperand(Inst.getOperand(6));
5618    Inst = TmpInst;
5619    return true;
5620  }
5621
5622  case ARM::VST2LNdWB_register_Asm_8:
5623  case ARM::VST2LNdWB_register_Asm_16:
5624  case ARM::VST2LNdWB_register_Asm_32:
5625  case ARM::VST2LNqWB_register_Asm_16:
5626  case ARM::VST2LNqWB_register_Asm_32: {
5627    MCInst TmpInst;
5628    // Shuffle the operands around so the lane index operand is in the
5629    // right place.
5630    unsigned Spacing;
5631    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5632    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5633    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5634    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5635    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5636    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5637    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5638                                            Spacing));
5639    TmpInst.addOperand(Inst.getOperand(1)); // lane
5640    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5641    TmpInst.addOperand(Inst.getOperand(6));
5642    Inst = TmpInst;
5643    return true;
5644  }
5645
5646  case ARM::VST3LNdWB_register_Asm_8:
5647  case ARM::VST3LNdWB_register_Asm_16:
5648  case ARM::VST3LNdWB_register_Asm_32:
5649  case ARM::VST3LNqWB_register_Asm_16:
5650  case ARM::VST3LNqWB_register_Asm_32: {
5651    MCInst TmpInst;
5652    // Shuffle the operands around so the lane index operand is in the
5653    // right place.
5654    unsigned Spacing;
5655    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5656    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5657    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5658    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5659    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5660    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5661    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5662                                            Spacing));
5663    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5664                                            Spacing * 2));
5665    TmpInst.addOperand(Inst.getOperand(1)); // lane
5666    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5667    TmpInst.addOperand(Inst.getOperand(6));
5668    Inst = TmpInst;
5669    return true;
5670  }
5671
5672  case ARM::VST4LNdWB_register_Asm_8:
5673  case ARM::VST4LNdWB_register_Asm_16:
5674  case ARM::VST4LNdWB_register_Asm_32:
5675  case ARM::VST4LNqWB_register_Asm_16:
5676  case ARM::VST4LNqWB_register_Asm_32: {
5677    MCInst TmpInst;
5678    // Shuffle the operands around so the lane index operand is in the
5679    // right place.
5680    unsigned Spacing;
5681    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5682    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5683    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5684    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5685    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5686    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5687    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5688                                            Spacing));
5689    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5690                                            Spacing * 2));
5691    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5692                                            Spacing * 3));
5693    TmpInst.addOperand(Inst.getOperand(1)); // lane
5694    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5695    TmpInst.addOperand(Inst.getOperand(6));
5696    Inst = TmpInst;
5697    return true;
5698  }
5699
5700  case ARM::VST1LNdWB_fixed_Asm_8:
5701  case ARM::VST1LNdWB_fixed_Asm_16:
5702  case ARM::VST1LNdWB_fixed_Asm_32: {
5703    MCInst TmpInst;
5704    // Shuffle the operands around so the lane index operand is in the
5705    // right place.
5706    unsigned Spacing;
5707    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5708    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5709    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5710    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5711    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5712    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5713    TmpInst.addOperand(Inst.getOperand(1)); // lane
5714    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5715    TmpInst.addOperand(Inst.getOperand(5));
5716    Inst = TmpInst;
5717    return true;
5718  }
5719
5720  case ARM::VST2LNdWB_fixed_Asm_8:
5721  case ARM::VST2LNdWB_fixed_Asm_16:
5722  case ARM::VST2LNdWB_fixed_Asm_32:
5723  case ARM::VST2LNqWB_fixed_Asm_16:
5724  case ARM::VST2LNqWB_fixed_Asm_32: {
5725    MCInst TmpInst;
5726    // Shuffle the operands around so the lane index operand is in the
5727    // right place.
5728    unsigned Spacing;
5729    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5730    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5731    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5732    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5733    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5734    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5735    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5736                                            Spacing));
5737    TmpInst.addOperand(Inst.getOperand(1)); // lane
5738    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5739    TmpInst.addOperand(Inst.getOperand(5));
5740    Inst = TmpInst;
5741    return true;
5742  }
5743
5744  case ARM::VST3LNdWB_fixed_Asm_8:
5745  case ARM::VST3LNdWB_fixed_Asm_16:
5746  case ARM::VST3LNdWB_fixed_Asm_32:
5747  case ARM::VST3LNqWB_fixed_Asm_16:
5748  case ARM::VST3LNqWB_fixed_Asm_32: {
5749    MCInst TmpInst;
5750    // Shuffle the operands around so the lane index operand is in the
5751    // right place.
5752    unsigned Spacing;
5753    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5754    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5755    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5756    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5757    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5758    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5759    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5760                                            Spacing));
5761    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5762                                            Spacing * 2));
5763    TmpInst.addOperand(Inst.getOperand(1)); // lane
5764    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5765    TmpInst.addOperand(Inst.getOperand(5));
5766    Inst = TmpInst;
5767    return true;
5768  }
5769
5770  case ARM::VST4LNdWB_fixed_Asm_8:
5771  case ARM::VST4LNdWB_fixed_Asm_16:
5772  case ARM::VST4LNdWB_fixed_Asm_32:
5773  case ARM::VST4LNqWB_fixed_Asm_16:
5774  case ARM::VST4LNqWB_fixed_Asm_32: {
5775    MCInst TmpInst;
5776    // Shuffle the operands around so the lane index operand is in the
5777    // right place.
5778    unsigned Spacing;
5779    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5780    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5781    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5782    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5783    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5784    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5785    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5786                                            Spacing));
5787    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5788                                            Spacing * 2));
5789    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5790                                            Spacing * 3));
5791    TmpInst.addOperand(Inst.getOperand(1)); // lane
5792    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5793    TmpInst.addOperand(Inst.getOperand(5));
5794    Inst = TmpInst;
5795    return true;
5796  }
5797
5798  case ARM::VST1LNdAsm_8:
5799  case ARM::VST1LNdAsm_16:
5800  case ARM::VST1LNdAsm_32: {
5801    MCInst TmpInst;
5802    // Shuffle the operands around so the lane index operand is in the
5803    // right place.
5804    unsigned Spacing;
5805    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5806    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5807    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5808    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5809    TmpInst.addOperand(Inst.getOperand(1)); // lane
5810    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5811    TmpInst.addOperand(Inst.getOperand(5));
5812    Inst = TmpInst;
5813    return true;
5814  }
5815
5816  case ARM::VST2LNdAsm_8:
5817  case ARM::VST2LNdAsm_16:
5818  case ARM::VST2LNdAsm_32:
5819  case ARM::VST2LNqAsm_16:
5820  case ARM::VST2LNqAsm_32: {
5821    MCInst TmpInst;
5822    // Shuffle the operands around so the lane index operand is in the
5823    // right place.
5824    unsigned Spacing;
5825    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5826    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5827    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5828    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5829    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5830                                            Spacing));
5831    TmpInst.addOperand(Inst.getOperand(1)); // lane
5832    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5833    TmpInst.addOperand(Inst.getOperand(5));
5834    Inst = TmpInst;
5835    return true;
5836  }
5837
5838  case ARM::VST3LNdAsm_8:
5839  case ARM::VST3LNdAsm_16:
5840  case ARM::VST3LNdAsm_32:
5841  case ARM::VST3LNqAsm_16:
5842  case ARM::VST3LNqAsm_32: {
5843    MCInst TmpInst;
5844    // Shuffle the operands around so the lane index operand is in the
5845    // right place.
5846    unsigned Spacing;
5847    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5848    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5849    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5850    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5851    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5852                                            Spacing));
5853    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5854                                            Spacing * 2));
5855    TmpInst.addOperand(Inst.getOperand(1)); // lane
5856    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5857    TmpInst.addOperand(Inst.getOperand(5));
5858    Inst = TmpInst;
5859    return true;
5860  }
5861
5862  case ARM::VST4LNdAsm_8:
5863  case ARM::VST4LNdAsm_16:
5864  case ARM::VST4LNdAsm_32:
5865  case ARM::VST4LNqAsm_16:
5866  case ARM::VST4LNqAsm_32: {
5867    MCInst TmpInst;
5868    // Shuffle the operands around so the lane index operand is in the
5869    // right place.
5870    unsigned Spacing;
5871    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5872    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5873    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5874    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5875    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5876                                            Spacing));
5877    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5878                                            Spacing * 2));
5879    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5880                                            Spacing * 3));
5881    TmpInst.addOperand(Inst.getOperand(1)); // lane
5882    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5883    TmpInst.addOperand(Inst.getOperand(5));
5884    Inst = TmpInst;
5885    return true;
5886  }
5887
5888  // Handle NEON VLD complex aliases.
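  // The VLD aliases are rebuilt the same way as the VST ones above, except
  // that the loaded registers are added as definitions at the front and, for
  // the lane forms, the Vd list is repeated as the tied source operands.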
5889  case ARM::VLD1LNdWB_register_Asm_8:
5890  case ARM::VLD1LNdWB_register_Asm_16:
5891  case ARM::VLD1LNdWB_register_Asm_32: {
5892    MCInst TmpInst;
5893    // Shuffle the operands around so the lane index operand is in the
5894    // right place.
5895    unsigned Spacing;
5896    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5897    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5898    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5899    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5900    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5901    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5902    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5903    TmpInst.addOperand(Inst.getOperand(1)); // lane
5904    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5905    TmpInst.addOperand(Inst.getOperand(6));
5906    Inst = TmpInst;
5907    return true;
5908  }
5909
5910  case ARM::VLD2LNdWB_register_Asm_8:
5911  case ARM::VLD2LNdWB_register_Asm_16:
5912  case ARM::VLD2LNdWB_register_Asm_32:
5913  case ARM::VLD2LNqWB_register_Asm_16:
5914  case ARM::VLD2LNqWB_register_Asm_32: {
5915    MCInst TmpInst;
5916    // Shuffle the operands around so the lane index operand is in the
5917    // right place.
5918    unsigned Spacing;
5919    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5920    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5921    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5922                                            Spacing));
5923    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5924    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5925    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5926    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5927    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5928    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5929                                            Spacing));
5930    TmpInst.addOperand(Inst.getOperand(1)); // lane
5931    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5932    TmpInst.addOperand(Inst.getOperand(6));
5933    Inst = TmpInst;
5934    return true;
5935  }
5936
5937  case ARM::VLD3LNdWB_register_Asm_8:
5938  case ARM::VLD3LNdWB_register_Asm_16:
5939  case ARM::VLD3LNdWB_register_Asm_32:
5940  case ARM::VLD3LNqWB_register_Asm_16:
5941  case ARM::VLD3LNqWB_register_Asm_32: {
5942    MCInst TmpInst;
5943    // Shuffle the operands around so the lane index operand is in the
5944    // right place.
5945    unsigned Spacing;
5946    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5947    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5948    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5949                                            Spacing));
5950    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5951                                            Spacing * 2));
5952    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5953    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5954    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5955    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5956    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5957    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5958                                            Spacing));
5959    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5960                                            Spacing * 2));
5961    TmpInst.addOperand(Inst.getOperand(1)); // lane
5962    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5963    TmpInst.addOperand(Inst.getOperand(6));
5964    Inst = TmpInst;
5965    return true;
5966  }
5967
5968  case ARM::VLD4LNdWB_register_Asm_8:
5969  case ARM::VLD4LNdWB_register_Asm_16:
5970  case ARM::VLD4LNdWB_register_Asm_32:
5971  case ARM::VLD4LNqWB_register_Asm_16:
5972  case ARM::VLD4LNqWB_register_Asm_32: {
5973    MCInst TmpInst;
5974    // Shuffle the operands around so the lane index operand is in the
5975    // right place.
5976    unsigned Spacing;
5977    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5978    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5979    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5980                                            Spacing));
5981    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5982                                            Spacing * 2));
5983    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5984                                            Spacing * 3));
5985    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5986    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5987    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5988    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5989    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5990    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5991                                            Spacing));
5992    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5993                                            Spacing * 2));
5994    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5995                                            Spacing * 3));
5996    TmpInst.addOperand(Inst.getOperand(1)); // lane
5997    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5998    TmpInst.addOperand(Inst.getOperand(6));
5999    Inst = TmpInst;
6000    return true;
6001  }
6002
6003  case ARM::VLD1LNdWB_fixed_Asm_8:
6004  case ARM::VLD1LNdWB_fixed_Asm_16:
6005  case ARM::VLD1LNdWB_fixed_Asm_32: {
6006    MCInst TmpInst;
6007    // Shuffle the operands around so the lane index operand is in the
6008    // right place.
6009    unsigned Spacing;
6010    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6011    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6012    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6013    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6014    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6015    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6016    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6017    TmpInst.addOperand(Inst.getOperand(1)); // lane
6018    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6019    TmpInst.addOperand(Inst.getOperand(5));
6020    Inst = TmpInst;
6021    return true;
6022  }
6023
6024  case ARM::VLD2LNdWB_fixed_Asm_8:
6025  case ARM::VLD2LNdWB_fixed_Asm_16:
6026  case ARM::VLD2LNdWB_fixed_Asm_32:
6027  case ARM::VLD2LNqWB_fixed_Asm_16:
6028  case ARM::VLD2LNqWB_fixed_Asm_32: {
6029    MCInst TmpInst;
6030    // Shuffle the operands around so the lane index operand is in the
6031    // right place.
6032    unsigned Spacing;
6033    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6034    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6035    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6036                                            Spacing));
6037    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6038    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6039    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6040    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6041    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6042    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6043                                            Spacing));
6044    TmpInst.addOperand(Inst.getOperand(1)); // lane
6045    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6046    TmpInst.addOperand(Inst.getOperand(5));
6047    Inst = TmpInst;
6048    return true;
6049  }
6050
6051  case ARM::VLD3LNdWB_fixed_Asm_8:
6052  case ARM::VLD3LNdWB_fixed_Asm_16:
6053  case ARM::VLD3LNdWB_fixed_Asm_32:
6054  case ARM::VLD3LNqWB_fixed_Asm_16:
6055  case ARM::VLD3LNqWB_fixed_Asm_32: {
6056    MCInst TmpInst;
6057    // Shuffle the operands around so the lane index operand is in the
6058    // right place.
6059    unsigned Spacing;
6060    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6061    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6062    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6063                                            Spacing));
6064    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6065                                            Spacing * 2));
6066    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6067    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6068    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6069    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6070    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6071    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6072                                            Spacing));
6073    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6074                                            Spacing * 2));
6075    TmpInst.addOperand(Inst.getOperand(1)); // lane
6076    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6077    TmpInst.addOperand(Inst.getOperand(5));
6078    Inst = TmpInst;
6079    return true;
6080  }
6081
6082  case ARM::VLD4LNdWB_fixed_Asm_8:
6083  case ARM::VLD4LNdWB_fixed_Asm_16:
6084  case ARM::VLD4LNdWB_fixed_Asm_32:
6085  case ARM::VLD4LNqWB_fixed_Asm_16:
6086  case ARM::VLD4LNqWB_fixed_Asm_32: {
6087    MCInst TmpInst;
6088    // Shuffle the operands around so the lane index operand is in the
6089    // right place.
6090    unsigned Spacing;
6091    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6092    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6093    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6094                                            Spacing));
6095    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6096                                            Spacing * 2));
6097    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6098                                            Spacing * 3));
6099    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6100    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6101    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6102    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6103    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6104    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6105                                            Spacing));
6106    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6107                                            Spacing * 2));
6108    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6109                                            Spacing * 3));
6110    TmpInst.addOperand(Inst.getOperand(1)); // lane
6111    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6112    TmpInst.addOperand(Inst.getOperand(5));
6113    Inst = TmpInst;
6114    return true;
6115  }
6116
6117  case ARM::VLD1LNdAsm_8:
6118  case ARM::VLD1LNdAsm_16:
6119  case ARM::VLD1LNdAsm_32: {
6120    MCInst TmpInst;
6121    // Shuffle the operands around so the lane index operand is in the
6122    // right place.
6123    unsigned Spacing;
6124    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6125    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6126    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6127    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6128    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6129    TmpInst.addOperand(Inst.getOperand(1)); // lane
6130    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6131    TmpInst.addOperand(Inst.getOperand(5));
6132    Inst = TmpInst;
6133    return true;
6134  }
6135
6136  case ARM::VLD2LNdAsm_8:
6137  case ARM::VLD2LNdAsm_16:
6138  case ARM::VLD2LNdAsm_32:
6139  case ARM::VLD2LNqAsm_16:
6140  case ARM::VLD2LNqAsm_32: {
6141    MCInst TmpInst;
6142    // Shuffle the operands around so the lane index operand is in the
6143    // right place.
6144    unsigned Spacing;
6145    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6146    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6147    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6148                                            Spacing));
6149    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6150    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6151    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6152    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6153                                            Spacing));
6154    TmpInst.addOperand(Inst.getOperand(1)); // lane
6155    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6156    TmpInst.addOperand(Inst.getOperand(5));
6157    Inst = TmpInst;
6158    return true;
6159  }
6160
6161  case ARM::VLD3LNdAsm_8:
6162  case ARM::VLD3LNdAsm_16:
6163  case ARM::VLD3LNdAsm_32:
6164  case ARM::VLD3LNqAsm_16:
6165  case ARM::VLD3LNqAsm_32: {
6166    MCInst TmpInst;
6167    // Shuffle the operands around so the lane index operand is in the
6168    // right place.
6169    unsigned Spacing;
6170    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6171    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6172    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6173                                            Spacing));
6174    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6175                                            Spacing * 2));
6176    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6177    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6178    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6179    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6180                                            Spacing));
6181    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6182                                            Spacing * 2));
6183    TmpInst.addOperand(Inst.getOperand(1)); // lane
6184    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6185    TmpInst.addOperand(Inst.getOperand(5));
6186    Inst = TmpInst;
6187    return true;
6188  }
6189
6190  case ARM::VLD4LNdAsm_8:
6191  case ARM::VLD4LNdAsm_16:
6192  case ARM::VLD4LNdAsm_32:
6193  case ARM::VLD4LNqAsm_16:
6194  case ARM::VLD4LNqAsm_32: {
6195    MCInst TmpInst;
6196    // Shuffle the operands around so the lane index operand is in the
6197    // right place.
6198    unsigned Spacing;
6199    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6200    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6201    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6202                                            Spacing));
6203    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6204                                            Spacing * 2));
6205    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6206                                            Spacing * 3));
6207    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6208    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6209    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6210    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6211                                            Spacing));
6212    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6213                                            Spacing * 2));
6214    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6215                                            Spacing * 3));
6216    TmpInst.addOperand(Inst.getOperand(1)); // lane
6217    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6218    TmpInst.addOperand(Inst.getOperand(5));
6219    Inst = TmpInst;
6220    return true;
6221  }
6222
6223  // VLD3DUP single 3-element structure to all lanes instructions.
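  // e.g. "vld3.8 {d0[], d1[], d2[]}, [r2]" (Spacing == 1) or the
  // double-spaced "vld3.16 {d0[], d2[], d4[]}, [r2]" (Spacing == 2).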
6224  case ARM::VLD3DUPdAsm_8:
6225  case ARM::VLD3DUPdAsm_16:
6226  case ARM::VLD3DUPdAsm_32:
6227  case ARM::VLD3DUPqAsm_8:
6228  case ARM::VLD3DUPqAsm_16:
6229  case ARM::VLD3DUPqAsm_32: {
6230    MCInst TmpInst;
6231    unsigned Spacing;
6232    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6233    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6234    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6235                                            Spacing));
6236    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6237                                            Spacing * 2));
6238    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6239    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6240    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6241    TmpInst.addOperand(Inst.getOperand(4));
6242    Inst = TmpInst;
6243    return true;
6244  }
6245
6246  case ARM::VLD3DUPdWB_fixed_Asm_8:
6247  case ARM::VLD3DUPdWB_fixed_Asm_16:
6248  case ARM::VLD3DUPdWB_fixed_Asm_32:
6249  case ARM::VLD3DUPqWB_fixed_Asm_8:
6250  case ARM::VLD3DUPqWB_fixed_Asm_16:
6251  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6252    MCInst TmpInst;
6253    unsigned Spacing;
6254    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6255    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6256    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6257                                            Spacing));
6258    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6259                                            Spacing * 2));
6260    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6261    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6262    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6263    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6264    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6265    TmpInst.addOperand(Inst.getOperand(4));
6266    Inst = TmpInst;
6267    return true;
6268  }
6269
6270  case ARM::VLD3DUPdWB_register_Asm_8:
6271  case ARM::VLD3DUPdWB_register_Asm_16:
6272  case ARM::VLD3DUPdWB_register_Asm_32:
6273  case ARM::VLD3DUPqWB_register_Asm_8:
6274  case ARM::VLD3DUPqWB_register_Asm_16:
6275  case ARM::VLD3DUPqWB_register_Asm_32: {
6276    MCInst TmpInst;
6277    unsigned Spacing;
6278    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6279    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6280    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6281                                            Spacing));
6282    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6283                                            Spacing * 2));
6284    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6285    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6286    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6287    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6288    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6289    TmpInst.addOperand(Inst.getOperand(5));
6290    Inst = TmpInst;
6291    return true;
6292  }
6293
6294  // VLD3 multiple 3-element structure instructions.
6295  case ARM::VLD3dAsm_8:
6296  case ARM::VLD3dAsm_16:
6297  case ARM::VLD3dAsm_32:
6298  case ARM::VLD3qAsm_8:
6299  case ARM::VLD3qAsm_16:
6300  case ARM::VLD3qAsm_32: {
6301    MCInst TmpInst;
6302    unsigned Spacing;
6303    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6304    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6305    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6306                                            Spacing));
6307    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6308                                            Spacing * 2));
6309    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6310    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6311    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6312    TmpInst.addOperand(Inst.getOperand(4));
6313    Inst = TmpInst;
6314    return true;
6315  }
6316
6317  case ARM::VLD3dWB_fixed_Asm_8:
6318  case ARM::VLD3dWB_fixed_Asm_16:
6319  case ARM::VLD3dWB_fixed_Asm_32:
6320  case ARM::VLD3qWB_fixed_Asm_8:
6321  case ARM::VLD3qWB_fixed_Asm_16:
6322  case ARM::VLD3qWB_fixed_Asm_32: {
6323    MCInst TmpInst;
6324    unsigned Spacing;
6325    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6326    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6327    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6328                                            Spacing));
6329    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6330                                            Spacing * 2));
6331    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6332    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6333    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6334    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6335    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6336    TmpInst.addOperand(Inst.getOperand(4));
6337    Inst = TmpInst;
6338    return true;
6339  }
6340
6341  case ARM::VLD3dWB_register_Asm_8:
6342  case ARM::VLD3dWB_register_Asm_16:
6343  case ARM::VLD3dWB_register_Asm_32:
6344  case ARM::VLD3qWB_register_Asm_8:
6345  case ARM::VLD3qWB_register_Asm_16:
6346  case ARM::VLD3qWB_register_Asm_32: {
6347    MCInst TmpInst;
6348    unsigned Spacing;
6349    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6350    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6351    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6352                                            Spacing));
6353    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6354                                            Spacing * 2));
6355    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6356    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6357    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6358    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6359    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6360    TmpInst.addOperand(Inst.getOperand(5));
6361    Inst = TmpInst;
6362    return true;
6363  }
6364
6365  // VLD4DUP single 4-element structure to all lanes instructions.
6366  case ARM::VLD4DUPdAsm_8:
6367  case ARM::VLD4DUPdAsm_16:
6368  case ARM::VLD4DUPdAsm_32:
6369  case ARM::VLD4DUPqAsm_8:
6370  case ARM::VLD4DUPqAsm_16:
6371  case ARM::VLD4DUPqAsm_32: {
6372    MCInst TmpInst;
6373    unsigned Spacing;
6374    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6375    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6376    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6377                                            Spacing));
6378    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6379                                            Spacing * 2));
6380    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6381                                            Spacing * 3));
6382    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6383    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6384    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6385    TmpInst.addOperand(Inst.getOperand(4));
6386    Inst = TmpInst;
6387    return true;
6388  }
6389
6390  case ARM::VLD4DUPdWB_fixed_Asm_8:
6391  case ARM::VLD4DUPdWB_fixed_Asm_16:
6392  case ARM::VLD4DUPdWB_fixed_Asm_32:
6393  case ARM::VLD4DUPqWB_fixed_Asm_8:
6394  case ARM::VLD4DUPqWB_fixed_Asm_16:
6395  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6396    MCInst TmpInst;
6397    unsigned Spacing;
6398    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6399    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6400    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6401                                            Spacing));
6402    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6403                                            Spacing * 2));
6404    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6405                                            Spacing * 3));
6406    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6407    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6408    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6409    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6410    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6411    TmpInst.addOperand(Inst.getOperand(4));
6412    Inst = TmpInst;
6413    return true;
6414  }
6415
6416  case ARM::VLD4DUPdWB_register_Asm_8:
6417  case ARM::VLD4DUPdWB_register_Asm_16:
6418  case ARM::VLD4DUPdWB_register_Asm_32:
6419  case ARM::VLD4DUPqWB_register_Asm_8:
6420  case ARM::VLD4DUPqWB_register_Asm_16:
6421  case ARM::VLD4DUPqWB_register_Asm_32: {
6422    MCInst TmpInst;
6423    unsigned Spacing;
6424    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6425    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6426    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6427                                            Spacing));
6428    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6429                                            Spacing * 2));
6430    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6431                                            Spacing * 3));
6432    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6433    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6434    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6435    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6436    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6437    TmpInst.addOperand(Inst.getOperand(5));
6438    Inst = TmpInst;
6439    return true;
6440  }
6441
6442  // VLD4 multiple 4-element structure instructions.
6443  case ARM::VLD4dAsm_8:
6444  case ARM::VLD4dAsm_16:
6445  case ARM::VLD4dAsm_32:
6446  case ARM::VLD4qAsm_8:
6447  case ARM::VLD4qAsm_16:
6448  case ARM::VLD4qAsm_32: {
6449    MCInst TmpInst;
6450    unsigned Spacing;
6451    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6452    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6453    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6454                                            Spacing));
6455    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6456                                            Spacing * 2));
6457    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6458                                            Spacing * 3));
6459    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6460    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6461    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6462    TmpInst.addOperand(Inst.getOperand(4));
6463    Inst = TmpInst;
6464    return true;
6465  }
6466
6467  case ARM::VLD4dWB_fixed_Asm_8:
6468  case ARM::VLD4dWB_fixed_Asm_16:
6469  case ARM::VLD4dWB_fixed_Asm_32:
6470  case ARM::VLD4qWB_fixed_Asm_8:
6471  case ARM::VLD4qWB_fixed_Asm_16:
6472  case ARM::VLD4qWB_fixed_Asm_32: {
6473    MCInst TmpInst;
6474    unsigned Spacing;
6475    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6476    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6477    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6478                                            Spacing));
6479    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6480                                            Spacing * 2));
6481    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6482                                            Spacing * 3));
6483    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6484    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6485    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6486    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6487    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6488    TmpInst.addOperand(Inst.getOperand(4));
6489    Inst = TmpInst;
6490    return true;
6491  }
6492
6493  case ARM::VLD4dWB_register_Asm_8:
6494  case ARM::VLD4dWB_register_Asm_16:
6495  case ARM::VLD4dWB_register_Asm_32:
6496  case ARM::VLD4qWB_register_Asm_8:
6497  case ARM::VLD4qWB_register_Asm_16:
6498  case ARM::VLD4qWB_register_Asm_32: {
6499    MCInst TmpInst;
6500    unsigned Spacing;
6501    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6502    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6503    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6504                                            Spacing));
6505    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6506                                            Spacing * 2));
6507    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6508                                            Spacing * 3));
6509    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6510    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6511    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6512    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6513    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6514    TmpInst.addOperand(Inst.getOperand(5));
6515    Inst = TmpInst;
6516    return true;
6517  }
6518
6519  // VST3 multiple 3-element structure instructions.
6520  case ARM::VST3dAsm_8:
6521  case ARM::VST3dAsm_16:
6522  case ARM::VST3dAsm_32:
6523  case ARM::VST3qAsm_8:
6524  case ARM::VST3qAsm_16:
6525  case ARM::VST3qAsm_32: {
6526    MCInst TmpInst;
6527    unsigned Spacing;
6528    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6529    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6530    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6531    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6532    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6533                                            Spacing));
6534    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6535                                            Spacing * 2));
6536    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6537    TmpInst.addOperand(Inst.getOperand(4));
6538    Inst = TmpInst;
6539    return true;
6540  }
6541
6542  case ARM::VST3dWB_fixed_Asm_8:
6543  case ARM::VST3dWB_fixed_Asm_16:
6544  case ARM::VST3dWB_fixed_Asm_32:
6545  case ARM::VST3qWB_fixed_Asm_8:
6546  case ARM::VST3qWB_fixed_Asm_16:
6547  case ARM::VST3qWB_fixed_Asm_32: {
6548    MCInst TmpInst;
6549    unsigned Spacing;
6550    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6551    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6552    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6553    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6554    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6555    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6556    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6557                                            Spacing));
6558    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6559                                            Spacing * 2));
6560    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6561    TmpInst.addOperand(Inst.getOperand(4));
6562    Inst = TmpInst;
6563    return true;
6564  }
6565
6566  case ARM::VST3dWB_register_Asm_8:
6567  case ARM::VST3dWB_register_Asm_16:
6568  case ARM::VST3dWB_register_Asm_32:
6569  case ARM::VST3qWB_register_Asm_8:
6570  case ARM::VST3qWB_register_Asm_16:
6571  case ARM::VST3qWB_register_Asm_32: {
6572    MCInst TmpInst;
6573    unsigned Spacing;
6574    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6575    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6576    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6577    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6578    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6579    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6580    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6581                                            Spacing));
6582    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6583                                            Spacing * 2));
6584    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6585    TmpInst.addOperand(Inst.getOperand(5));
6586    Inst = TmpInst;
6587    return true;
6588  }
6589
6590  // VST4 multiple 4-element structure instructions.
6591  case ARM::VST4dAsm_8:
6592  case ARM::VST4dAsm_16:
6593  case ARM::VST4dAsm_32:
6594  case ARM::VST4qAsm_8:
6595  case ARM::VST4qAsm_16:
6596  case ARM::VST4qAsm_32: {
6597    MCInst TmpInst;
6598    unsigned Spacing;
6599    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6600    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6601    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6602    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6603    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6604                                            Spacing));
6605    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6606                                            Spacing * 2));
6607    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6608                                            Spacing * 3));
6609    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6610    TmpInst.addOperand(Inst.getOperand(4));
6611    Inst = TmpInst;
6612    return true;
6613  }
6614
6615  case ARM::VST4dWB_fixed_Asm_8:
6616  case ARM::VST4dWB_fixed_Asm_16:
6617  case ARM::VST4dWB_fixed_Asm_32:
6618  case ARM::VST4qWB_fixed_Asm_8:
6619  case ARM::VST4qWB_fixed_Asm_16:
6620  case ARM::VST4qWB_fixed_Asm_32: {
6621    MCInst TmpInst;
6622    unsigned Spacing;
6623    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6624    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6625    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6626    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6627    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6628    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6629    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6630                                            Spacing));
6631    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6632                                            Spacing * 2));
6633    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6634                                            Spacing * 3));
6635    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6636    TmpInst.addOperand(Inst.getOperand(4));
6637    Inst = TmpInst;
6638    return true;
6639  }
6640
6641  case ARM::VST4dWB_register_Asm_8:
6642  case ARM::VST4dWB_register_Asm_16:
6643  case ARM::VST4dWB_register_Asm_32:
6644  case ARM::VST4qWB_register_Asm_8:
6645  case ARM::VST4qWB_register_Asm_16:
6646  case ARM::VST4qWB_register_Asm_32: {
6647    MCInst TmpInst;
6648    unsigned Spacing;
6649    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6650    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6651    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6652    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6653    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6654    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6655    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6656                                            Spacing));
6657    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6658                                            Spacing * 2));
6659    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6660                                            Spacing * 3));
6661    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6662    TmpInst.addOperand(Inst.getOperand(5));
6663    Inst = TmpInst;
6664    return true;
6665  }
6666
6667  // Handle encoding choice for the shift-immediate instructions.
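  // For example, 'lsls r2, r2, #3' outside an IT block (or 'lsl r2, r2, #3'
  // inside one) can use the 16-bit Thumb1 encoding when Rd == Rm, both are
  // low registers and no '.w' qualifier was given (operands illustrative).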
6668  case ARM::t2LSLri:
6669  case ARM::t2LSRri:
6670  case ARM::t2ASRri: {
6671    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6672        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6673        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6674        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6675         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6676      unsigned NewOpc;
6677      switch (Inst.getOpcode()) {
6678      default: llvm_unreachable("unexpected opcode");
6679      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6680      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6681      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6682      }
6683      // The Thumb1 operands aren't in the same order. Awesome, eh?
6684      MCInst TmpInst;
6685      TmpInst.setOpcode(NewOpc);
6686      TmpInst.addOperand(Inst.getOperand(0));
6687      TmpInst.addOperand(Inst.getOperand(5));
6688      TmpInst.addOperand(Inst.getOperand(1));
6689      TmpInst.addOperand(Inst.getOperand(2));
6690      TmpInst.addOperand(Inst.getOperand(3));
6691      TmpInst.addOperand(Inst.getOperand(4));
6692      Inst = TmpInst;
6693      return true;
6694    }
6695    return false;
6696  }
6697
6698  // Handle the Thumb2 mode MOV complex aliases.
6699  case ARM::t2MOVsr:
6700  case ARM::t2MOVSsr: {
6701    // Which instruction to expand to depends on the CCOut operand and,
6702    // when the register operands are low registers, on whether we're in
6703    // an IT block.
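    // For example, 'movs r1, r1, lsr r2' outside an IT block can narrow to
    // the 16-bit tLSRrr form; otherwise the wide t2LSRrr form is used
    // (registers illustrative).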
6704    bool isNarrow = false;
6705    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6706        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6707        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6708        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6709        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6710      isNarrow = true;
6711    MCInst TmpInst;
6712    unsigned newOpc;
6713    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6714    default: llvm_unreachable("unexpected opcode!");
6715    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6716    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6717    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6718    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6719    }
6720    TmpInst.setOpcode(newOpc);
6721    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6722    if (isNarrow)
6723      TmpInst.addOperand(MCOperand::CreateReg(
6724          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6725    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6726    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6727    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6728    TmpInst.addOperand(Inst.getOperand(5));
6729    if (!isNarrow)
6730      TmpInst.addOperand(MCOperand::CreateReg(
6731          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6732    Inst = TmpInst;
6733    return true;
6734  }
6735  case ARM::t2MOVsi:
6736  case ARM::t2MOVSsi: {
6737    // Which instruction to expand to depends on the CCOut operand and,
6738    // when the register operands are low registers, on whether we're in
6739    // an IT block.
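    // For example, 'mov r0, r1, lsl #3' maps to tLSLri or t2LSLri depending
    // on the checks below, and 'mov r0, r1, rrx' always maps to t2RRX
    // (registers illustrative).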
6740    bool isNarrow = false;
6741    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6742        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6743        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6744      isNarrow = true;
6745    MCInst TmpInst;
6746    unsigned newOpc;
6747    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6748    default: llvm_unreachable("unexpected opcode!");
6749    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6750    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6751    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6752    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6753    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6754    }
6755    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6756    if (Ammount == 32) Ammount = 0;
6757    TmpInst.setOpcode(newOpc);
6758    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6759    if (isNarrow)
6760      TmpInst.addOperand(MCOperand::CreateReg(
6761          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6762    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6763    if (newOpc != ARM::t2RRX)
6764      TmpInst.addOperand(MCOperand::CreateImm(Ammount));
6765    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6766    TmpInst.addOperand(Inst.getOperand(4));
6767    if (!isNarrow)
6768      TmpInst.addOperand(MCOperand::CreateReg(
6769          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6770    Inst = TmpInst;
6771    return true;
6772  }
6773  // Handle the ARM mode MOV complex aliases.
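  // For example, 'asr r0, r1, r2' is really 'mov r0, r1, asr r2' and is
  // emitted as MOVsr; the immediate forms below similarly become MOVsi or,
  // for a zero shift, plain MOVr (registers illustrative).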
6774  case ARM::ASRr:
6775  case ARM::LSRr:
6776  case ARM::LSLr:
6777  case ARM::RORr: {
6778    ARM_AM::ShiftOpc ShiftTy;
6779    switch(Inst.getOpcode()) {
6780    default: llvm_unreachable("unexpected opcode!");
6781    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6782    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6783    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6784    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6785    }
6786    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6787    MCInst TmpInst;
6788    TmpInst.setOpcode(ARM::MOVsr);
6789    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6790    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6791    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6792    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6793    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6794    TmpInst.addOperand(Inst.getOperand(4));
6795    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6796    Inst = TmpInst;
6797    return true;
6798  }
6799  case ARM::ASRi:
6800  case ARM::LSRi:
6801  case ARM::LSLi:
6802  case ARM::RORi: {
6803    ARM_AM::ShiftOpc ShiftTy;
6804    switch(Inst.getOpcode()) {
6805    default: llvm_unreachable("unexpected opcode!");
6806    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6807    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6808    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6809    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6810    }
6811    // A shift by zero is a plain MOVr, not a MOVsi.
6812    unsigned Amt = Inst.getOperand(2).getImm();
6813    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6814    // A shift by 32 should be encoded as 0 when permitted
6815    if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
6816      Amt = 0;
6817    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6818    MCInst TmpInst;
6819    TmpInst.setOpcode(Opc);
6820    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6821    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6822    if (Opc == ARM::MOVsi)
6823      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6824    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6825    TmpInst.addOperand(Inst.getOperand(4));
6826    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6827    Inst = TmpInst;
6828    return true;
6829  }
6830  case ARM::RRXi: {
6831    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6832    MCInst TmpInst;
6833    TmpInst.setOpcode(ARM::MOVsi);
6834    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6835    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6836    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6837    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6838    TmpInst.addOperand(Inst.getOperand(3));
6839    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6840    Inst = TmpInst;
6841    return true;
6842  }
6843  case ARM::t2LDMIA_UPD: {
6844    // If this is a load of a single register, then we should use
6845    // a post-indexed LDR instruction instead, per the ARM ARM.
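    // For example, 'ldmia r3!, {r4}' is emitted as 'ldr r4, [r3], #4'
    // (registers illustrative).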
6846    if (Inst.getNumOperands() != 5)
6847      return false;
6848    MCInst TmpInst;
6849    TmpInst.setOpcode(ARM::t2LDR_POST);
6850    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6851    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6852    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6853    TmpInst.addOperand(MCOperand::CreateImm(4));
6854    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6855    TmpInst.addOperand(Inst.getOperand(3));
6856    Inst = TmpInst;
6857    return true;
6858  }
6859  case ARM::t2STMDB_UPD: {
6860    // If this is a store of a single register, then we should use
6861    // a pre-indexed STR instruction instead, per the ARM ARM.
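    // For example, 'stmdb r3!, {r4}' is emitted as 'str r4, [r3, #-4]!'
    // (registers illustrative).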
6862    if (Inst.getNumOperands() != 5)
6863      return false;
6864    MCInst TmpInst;
6865    TmpInst.setOpcode(ARM::t2STR_PRE);
6866    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6867    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6868    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6869    TmpInst.addOperand(MCOperand::CreateImm(-4));
6870    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6871    TmpInst.addOperand(Inst.getOperand(3));
6872    Inst = TmpInst;
6873    return true;
6874  }
6875  case ARM::LDMIA_UPD:
6876    // If this is a load of a single register via a 'pop', then we should use
6877    // a post-indexed LDR instruction instead, per the ARM ARM.
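    // For example, 'pop {r4}' becomes 'ldr r4, [sp], #4' (register
    // illustrative).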
6878    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6879        Inst.getNumOperands() == 5) {
6880      MCInst TmpInst;
6881      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6882      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6883      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6884      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6885      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6886      TmpInst.addOperand(MCOperand::CreateImm(4));
6887      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6888      TmpInst.addOperand(Inst.getOperand(3));
6889      Inst = TmpInst;
6890      return true;
6891    }
6892    break;
6893  case ARM::STMDB_UPD:
6894    // If this is a store of a single register via a 'push', then we should use
6895    // a pre-indexed STR instruction instead, per the ARM ARM.
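    // For example, 'push {r4}' becomes 'str r4, [sp, #-4]!' (register
    // illustrative).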
6896    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6897        Inst.getNumOperands() == 5) {
6898      MCInst TmpInst;
6899      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6900      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6901      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6902      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6903      TmpInst.addOperand(MCOperand::CreateImm(-4));
6904      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6905      TmpInst.addOperand(Inst.getOperand(3));
6906      Inst = TmpInst;
6907    }
6908    break;
6909  case ARM::t2ADDri12:
6910    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6911    // mnemonic was used (not "addw"), encoding T3 is preferred.
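    // For example, if 'add r0, r1, #16' matched as t2ADDri12, it is switched
    // here to the T3 t2ADDri encoding; 'addw r0, r1, #16' is left as T4
    // (operands illustrative).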
6912    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6913        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6914      break;
6915    Inst.setOpcode(ARM::t2ADDri);
6916    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6917    break;
6918  case ARM::t2SUBri12:
6919    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6920    // mnemonic was used (not "subw"), encoding T3 is preferred.
6921    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6922        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6923      break;
6924    Inst.setOpcode(ARM::t2SUBri);
6925    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6926    break;
6927  case ARM::tADDi8:
6928    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6929    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6930    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6931    // to encoding T1 if <Rd> is omitted."
6932    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6933      Inst.setOpcode(ARM::tADDi3);
6934      return true;
6935    }
6936    break;
6937  case ARM::tSUBi8:
6938    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6939    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6940    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6941    // to encoding T1 if <Rd> is omitted."
6942    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6943      Inst.setOpcode(ARM::tSUBi3);
6944      return true;
6945    }
6946    break;
6947  case ARM::t2ADDri:
6948  case ARM::t2SUBri: {
6949    // If the destination and first source operand are the same, and
6950    // the flags are compatible with the current IT status, use encoding T2
6951    // instead of T3. For compatibility with the system 'as'. Make sure the
6952    // wide encoding wasn't explicit.
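    // For example, 'adds r2, r2, #200' outside an IT block can use the
    // 16-bit tADDi8 encoding (operands illustrative).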
6953    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6954        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
6955        (unsigned)Inst.getOperand(2).getImm() > 255 ||
6956        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
6957        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
6958        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6959         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6960      break;
6961    MCInst TmpInst;
6962    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
6963                      ARM::tADDi8 : ARM::tSUBi8);
6964    TmpInst.addOperand(Inst.getOperand(0));
6965    TmpInst.addOperand(Inst.getOperand(5));
6966    TmpInst.addOperand(Inst.getOperand(0));
6967    TmpInst.addOperand(Inst.getOperand(2));
6968    TmpInst.addOperand(Inst.getOperand(3));
6969    TmpInst.addOperand(Inst.getOperand(4));
6970    Inst = TmpInst;
6971    return true;
6972  }
6973  case ARM::t2ADDrr: {
6974    // If the destination and first source operand are the same, and
6975    // there's no setting of the flags, use encoding T2 instead of T3.
6976    // Note that this is only for ADD, not SUB. This mirrors the system
6977    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
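    // For example, 'add r0, r0, r8' with no flag setting can use the 16-bit
    // tADDhirr encoding (registers illustrative).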
6978    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6979        Inst.getOperand(5).getReg() != 0 ||
6980        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6981         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6982      break;
6983    MCInst TmpInst;
6984    TmpInst.setOpcode(ARM::tADDhirr);
6985    TmpInst.addOperand(Inst.getOperand(0));
6986    TmpInst.addOperand(Inst.getOperand(0));
6987    TmpInst.addOperand(Inst.getOperand(2));
6988    TmpInst.addOperand(Inst.getOperand(3));
6989    TmpInst.addOperand(Inst.getOperand(4));
6990    Inst = TmpInst;
6991    return true;
6992  }
6993  case ARM::tB:
6994    // A Thumb conditional branch outside of an IT block is a tBcc.
6995    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6996      Inst.setOpcode(ARM::tBcc);
6997      return true;
6998    }
6999    break;
7000  case ARM::t2B:
7001    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7002    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7003      Inst.setOpcode(ARM::t2Bcc);
7004      return true;
7005    }
7006    break;
7007  case ARM::t2Bcc:
7008    // If the conditional is AL or we're in an IT block, we really want t2B.
7009    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7010      Inst.setOpcode(ARM::t2B);
7011      return true;
7012    }
7013    break;
7014  case ARM::tBcc:
7015    // If the conditional is AL, we really want tB.
7016    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7017      Inst.setOpcode(ARM::tB);
7018      return true;
7019    }
7020    break;
7021  case ARM::tLDMIA: {
7022    // If the register list contains any high registers, or if the writeback
7023    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7024    // instead if we're in Thumb2. Otherwise, this should have generated
7025    // an error in validateInstruction().
7026    unsigned Rn = Inst.getOperand(0).getReg();
7027    bool hasWritebackToken =
7028      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7029       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7030    bool listContainsBase;
7031    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7032        (!listContainsBase && !hasWritebackToken) ||
7033        (listContainsBase && hasWritebackToken)) {
7034      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7035      assert (isThumbTwo());
7036      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7037      // If we're switching to the updating version, we need to insert
7038      // the writeback tied operand.
7039      if (hasWritebackToken)
7040        Inst.insert(Inst.begin(),
7041                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7042      return true;
7043    }
7044    break;
7045  }
7046  case ARM::tSTMIA_UPD: {
7047    // If the register list contains any high registers, we need to use
7048    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7049    // should have generated an error in validateInstruction().
7050    unsigned Rn = Inst.getOperand(0).getReg();
7051    bool listContainsBase;
7052    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7053      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7054      assert (isThumbTwo());
7055      Inst.setOpcode(ARM::t2STMIA_UPD);
7056      return true;
7057    }
7058    break;
7059  }
7060  case ARM::tPOP: {
7061    bool listContainsBase;
7062    // If the register list contains any high registers, we need to use
7063    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7064    // should have generated an error in validateInstruction().
7065    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7066      return false;
7067    assert (isThumbTwo());
7068    Inst.setOpcode(ARM::t2LDMIA_UPD);
7069    // Add the base register and writeback operands.
7070    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7071    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7072    return true;
7073  }
7074  case ARM::tPUSH: {
7075    bool listContainsBase;
7076    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7077      return false;
7078    assert (isThumbTwo());
7079    Inst.setOpcode(ARM::t2STMDB_UPD);
7080    // Add the base register and writeback operands.
7081    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7082    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7083    return true;
7084  }
7085  case ARM::t2MOVi: {
7086    // If we can use the 16-bit encoding and the user didn't explicitly
7087    // request the 32-bit variant, transform it here.
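    // For example, 'movs r3, #42' outside an IT block becomes tMOVi8
    // (operands illustrative).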
7088    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7089        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7090        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7091         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7092        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7093        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7094         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7095      // The operands aren't in the same order for tMOVi8...
7096      MCInst TmpInst;
7097      TmpInst.setOpcode(ARM::tMOVi8);
7098      TmpInst.addOperand(Inst.getOperand(0));
7099      TmpInst.addOperand(Inst.getOperand(4));
7100      TmpInst.addOperand(Inst.getOperand(1));
7101      TmpInst.addOperand(Inst.getOperand(2));
7102      TmpInst.addOperand(Inst.getOperand(3));
7103      Inst = TmpInst;
7104      return true;
7105    }
7106    break;
7107  }
7108  case ARM::t2MOVr: {
7109    // If we can use the 16-bit encoding and the user didn't explicitly
7110    // request the 32-bit variant, transform it here.
7111    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7112        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7113        Inst.getOperand(2).getImm() == ARMCC::AL &&
7114        Inst.getOperand(4).getReg() == ARM::CPSR &&
7115        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7116         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7117      // The operands aren't the same for tMOV[S]r... (no cc_out)
7118      MCInst TmpInst;
7119      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7120      TmpInst.addOperand(Inst.getOperand(0));
7121      TmpInst.addOperand(Inst.getOperand(1));
7122      TmpInst.addOperand(Inst.getOperand(2));
7123      TmpInst.addOperand(Inst.getOperand(3));
7124      Inst = TmpInst;
7125      return true;
7126    }
7127    break;
7128  }
7129  case ARM::t2SXTH:
7130  case ARM::t2SXTB:
7131  case ARM::t2UXTH:
7132  case ARM::t2UXTB: {
7133    // If we can use the 16-bit encoding and the user didn't explicitly
7134    // request the 32-bit variant, transform it here.
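    // For example, 'sxth r0, r1' (no rotation) becomes the 16-bit tSXTH
    // (registers illustrative).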
7135    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7136        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7137        Inst.getOperand(2).getImm() == 0 &&
7138        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7139         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7140      unsigned NewOpc;
7141      switch (Inst.getOpcode()) {
7142      default: llvm_unreachable("Illegal opcode!");
7143      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7144      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7145      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7146      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7147      }
7148      // The operands aren't the same for thumb1 (no rotate operand).
7149      MCInst TmpInst;
7150      TmpInst.setOpcode(NewOpc);
7151      TmpInst.addOperand(Inst.getOperand(0));
7152      TmpInst.addOperand(Inst.getOperand(1));
7153      TmpInst.addOperand(Inst.getOperand(3));
7154      TmpInst.addOperand(Inst.getOperand(4));
7155      Inst = TmpInst;
7156      return true;
7157    }
7158    break;
7159  }
7160  case ARM::MOVsi: {
7161    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7162    // rrx shifts and asr/lsr of #32 is encoded as 0
7163    if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7164      return false;
7165    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7166      // Shifting by zero is accepted as a vanilla 'MOVr'
7167      MCInst TmpInst;
7168      TmpInst.setOpcode(ARM::MOVr);
7169      TmpInst.addOperand(Inst.getOperand(0));
7170      TmpInst.addOperand(Inst.getOperand(1));
7171      TmpInst.addOperand(Inst.getOperand(3));
7172      TmpInst.addOperand(Inst.getOperand(4));
7173      TmpInst.addOperand(Inst.getOperand(5));
7174      Inst = TmpInst;
7175      return true;
7176    }
7177    return false;
7178  }
7179  case ARM::ANDrsi:
7180  case ARM::ORRrsi:
7181  case ARM::EORrsi:
7182  case ARM::BICrsi:
7183  case ARM::SUBrsi:
7184  case ARM::ADDrsi: {
7185    unsigned newOpc;
7186    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7187    if (SOpc == ARM_AM::rrx) return false;
7188    switch (Inst.getOpcode()) {
7189    default: llvm_unreachable("unexpected opcode!");
7190    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7191    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7192    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7193    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7194    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7195    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7196    }
7197    // If the shift is by zero, use the non-shifted instruction definition.
7198    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7199      MCInst TmpInst;
7200      TmpInst.setOpcode(newOpc);
7201      TmpInst.addOperand(Inst.getOperand(0));
7202      TmpInst.addOperand(Inst.getOperand(1));
7203      TmpInst.addOperand(Inst.getOperand(2));
7204      TmpInst.addOperand(Inst.getOperand(4));
7205      TmpInst.addOperand(Inst.getOperand(5));
7206      TmpInst.addOperand(Inst.getOperand(6));
7207      Inst = TmpInst;
7208      return true;
7209    }
7210    return false;
7211  }
7212  case ARM::ITasm:
7213  case ARM::t2IT: {
7214    // In the mask operand, the bits for all but the first condition are
7215    // interpreted relative to the low bit of the condition code: that bit
7216    // implies 't'. The parser always builds the mask with 1 meaning 't',
7217    // so XOR-toggle the bits if the low bit of the condition code is zero.
7218    // The encoding also expects the low bit of the condition to be encoded
7219    // as bit 4 of the mask operand, so mask that in if needed.
7220    MCOperand &MO = Inst.getOperand(1);
7221    unsigned Mask = MO.getImm();
7222    unsigned OrigMask = Mask;
7223    unsigned TZ = CountTrailingZeros_32(Mask);
7224    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7225      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7226      for (unsigned i = 3; i != TZ; --i)
7227        Mask ^= 1 << i;
7228    } else
7229      Mask |= 0x10;
7230    MO.setImm(Mask);
7231
7232    // Set up the IT block state according to the IT instruction we just
7233    // matched.
7234    assert(!inITBlock() && "nested IT blocks?!");
7235    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7236    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7237    ITState.CurPosition = 0;
7238    ITState.FirstCond = true;
7239    break;
7240  }
7241  }
7242  return false;
7243}
7244
7245unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7246  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7247  // suffix depending on whether they're in an IT block or not.
7248  unsigned Opc = Inst.getOpcode();
7249  const MCInstrDesc &MCID = getInstDesc(Opc);
7250  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7251    assert(MCID.hasOptionalDef() &&
7252           "optionally flag setting instruction missing optional def operand");
7253    assert(MCID.NumOperands == Inst.getNumOperands() &&
7254           "operand count mismatch!");
7255    // Find the optional-def operand (cc_out).
7256    unsigned OpNo;
7257    for (OpNo = 0;
7258         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
7259         ++OpNo)
7260      ;
7261    // If we're parsing Thumb1, reject it completely.
7262    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7263      return Match_MnemonicFail;
7264    // If we're parsing Thumb2, which form is legal depends on whether we're
7265    // in an IT block.
7266    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7267        !inITBlock())
7268      return Match_RequiresITBlock;
7269    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7270        inITBlock())
7271      return Match_RequiresNotITBlock;
7272  }
7273  // Some high-register supporting Thumb1 encodings only allow both registers
7274  // to be from r0-r7 when in Thumb2.
7275  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7276           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7277           isARMLowRegister(Inst.getOperand(2).getReg()))
7278    return Match_RequiresThumb2;
7279  // Others only require ARMv6 or later.
7280  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7281           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7282           isARMLowRegister(Inst.getOperand(1).getReg()))
7283    return Match_RequiresV6;
7284  return Match_Success;
7285}
7286
7287static const char *getSubtargetFeatureName(unsigned Val);
7288bool ARMAsmParser::
7289MatchAndEmitInstruction(SMLoc IDLoc,
7290                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7291                        MCStreamer &Out) {
7292  MCInst Inst;
7293  unsigned ErrorInfo;
7294  unsigned MatchResult;
7295  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7296  switch (MatchResult) {
7297  default: break;
7298  case Match_Success:
7299    // Context sensitive operand constraints aren't handled by the matcher,
7300    // so check them here.
7301    if (validateInstruction(Inst, Operands)) {
7302      // Still progress the IT block, otherwise one wrong condition causes
7303      // nasty cascading errors.
7304      forwardITPosition();
7305      return true;
7306    }
7307
7308    // Some instructions need post-processing to, for example, tweak which
7309    // encoding is selected. Loop on it while changes happen so the
7310    // individual transformations can chain off each other. E.g.,
7311    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
7312    while (processInstruction(Inst, Operands))
7313      ;
7314
7315    // Only move forward at the very end so that everything in validate
7316    // and process gets a consistent answer about whether we're in an IT
7317    // block.
7318    forwardITPosition();
7319
7320    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7321    // doesn't actually encode.
7322    if (Inst.getOpcode() == ARM::ITasm)
7323      return false;
7324
7325    Inst.setLoc(IDLoc);
7326    Out.EmitInstruction(Inst);
7327    return false;
7328  case Match_MissingFeature: {
7329    assert(ErrorInfo && "Unknown missing feature!");
7330    // Special case the error message for the very common case where only
7331    // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7332    std::string Msg = "instruction requires:";
7333    unsigned Mask = 1;
7334    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7335      if (ErrorInfo & Mask) {
7336        Msg += " ";
7337        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7338      }
7339      Mask <<= 1;
7340    }
7341    return Error(IDLoc, Msg);
7342  }
7343  case Match_InvalidOperand: {
7344    SMLoc ErrorLoc = IDLoc;
7345    if (ErrorInfo != ~0U) {
7346      if (ErrorInfo >= Operands.size())
7347        return Error(IDLoc, "too few operands for instruction");
7348
7349      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7350      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7351    }
7352
7353    return Error(ErrorLoc, "invalid operand for instruction");
7354  }
7355  case Match_MnemonicFail:
7356    return Error(IDLoc, "invalid instruction",
7357                 ((ARMOperand*)Operands[0])->getLocRange());
7358  case Match_ConversionFail:
7359    // The converter function will have already emited a diagnostic.
7360    return true;
7361  case Match_RequiresNotITBlock:
7362    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7363  case Match_RequiresITBlock:
7364    return Error(IDLoc, "instruction only valid inside IT block");
7365  case Match_RequiresV6:
7366    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7367  case Match_RequiresThumb2:
7368    return Error(IDLoc, "instruction variant requires Thumb2");
7369  }
7370
7371  llvm_unreachable("Implement any new match types added!");
7372}
7373
7374/// ParseDirective parses the ARM-specific directives.
7375bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7376  StringRef IDVal = DirectiveID.getIdentifier();
7377  if (IDVal == ".word")
7378    return parseDirectiveWord(4, DirectiveID.getLoc());
7379  else if (IDVal == ".thumb")
7380    return parseDirectiveThumb(DirectiveID.getLoc());
7381  else if (IDVal == ".arm")
7382    return parseDirectiveARM(DirectiveID.getLoc());
7383  else if (IDVal == ".thumb_func")
7384    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7385  else if (IDVal == ".code")
7386    return parseDirectiveCode(DirectiveID.getLoc());
7387  else if (IDVal == ".syntax")
7388    return parseDirectiveSyntax(DirectiveID.getLoc());
7389  else if (IDVal == ".unreq")
7390    return parseDirectiveUnreq(DirectiveID.getLoc());
7391  else if (IDVal == ".arch")
7392    return parseDirectiveArch(DirectiveID.getLoc());
7393  else if (IDVal == ".eabi_attribute")
7394    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7395  return true;
7396}
7397
7398/// parseDirectiveWord
7399///  ::= .word [ expression (, expression)* ]
7400bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7401  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7402    for (;;) {
7403      const MCExpr *Value;
7404      if (getParser().ParseExpression(Value))
7405        return true;
7406
7407      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7408
7409      if (getLexer().is(AsmToken::EndOfStatement))
7410        break;
7411
7412      // FIXME: Improve diagnostic.
7413      if (getLexer().isNot(AsmToken::Comma))
7414        return Error(L, "unexpected token in directive");
7415      Parser.Lex();
7416    }
7417  }
7418
7419  Parser.Lex();
7420  return false;
7421}
7422
7423/// parseDirectiveThumb
7424///  ::= .thumb
7425bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7426  if (getLexer().isNot(AsmToken::EndOfStatement))
7427    return Error(L, "unexpected token in directive");
7428  Parser.Lex();
7429
7430  if (!isThumb())
7431    SwitchMode();
7432  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7433  return false;
7434}
7435
7436/// parseDirectiveARM
7437///  ::= .arm
7438bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7439  if (getLexer().isNot(AsmToken::EndOfStatement))
7440    return Error(L, "unexpected token in directive");
7441  Parser.Lex();
7442
7443  if (isThumb())
7444    SwitchMode();
7445  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7446  return false;
7447}
7448
7449/// parseDirectiveThumbFunc
7450///  ::= .thumbfunc symbol_name
7451bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7452  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7453  bool isMachO = MAI.hasSubsectionsViaSymbols();
7454  StringRef Name;
7455  bool needFuncName = true;
7456
7457  // Darwin asm has an optional function name after the .thumb_func directive;
7458  // ELF doesn't.
7459  if (isMachO) {
7460    const AsmToken &Tok = Parser.getTok();
7461    if (Tok.isNot(AsmToken::EndOfStatement)) {
7462      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7463        return Error(L, "unexpected token in .thumb_func directive");
7464      Name = Tok.getIdentifier();
7465      Parser.Lex(); // Consume the identifier token.
7466      needFuncName = false;
7467    }
7468  }
7469
7470  if (getLexer().isNot(AsmToken::EndOfStatement))
7471    return Error(L, "unexpected token in directive");
7472
7473  // Eat the end of statement and any blank lines that follow.
7474  while (getLexer().is(AsmToken::EndOfStatement))
7475    Parser.Lex();
7476
7477  // FIXME: assuming function name will be the line following .thumb_func
7478  // We really should be checking the next symbol definition even if there's
7479  // stuff in between.
7480  if (needFuncName) {
7481    Name = Parser.getTok().getIdentifier();
7482  }
7483
7484  // Mark symbol as a thumb symbol.
7485  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7486  getParser().getStreamer().EmitThumbFunc(Func);
7487  return false;
7488}
7489
7490/// parseDirectiveSyntax
7491///  ::= .syntax unified | divided
7492bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7493  const AsmToken &Tok = Parser.getTok();
7494  if (Tok.isNot(AsmToken::Identifier))
7495    return Error(L, "unexpected token in .syntax directive");
7496  StringRef Mode = Tok.getString();
7497  if (Mode == "unified" || Mode == "UNIFIED")
7498    Parser.Lex();
7499  else if (Mode == "divided" || Mode == "DIVIDED")
7500    return Error(L, "'.syntax divided' ARM assembly not supported");
7501  else
7502    return Error(L, "unrecognized syntax mode in .syntax directive");
7503
7504  if (getLexer().isNot(AsmToken::EndOfStatement))
7505    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7506  Parser.Lex();
7507
7508  // TODO tell the MC streamer the mode
7509  // getParser().getStreamer().Emit???();
7510  return false;
7511}
7512
7513/// parseDirectiveCode
7514///  ::= .code 16 | 32
7515bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7516  const AsmToken &Tok = Parser.getTok();
7517  if (Tok.isNot(AsmToken::Integer))
7518    return Error(L, "unexpected token in .code directive");
7519  int64_t Val = Parser.getTok().getIntVal();
7520  if (Val == 16)
7521    Parser.Lex();
7522  else if (Val == 32)
7523    Parser.Lex();
7524  else
7525    return Error(L, "invalid operand to .code directive");
7526
7527  if (getLexer().isNot(AsmToken::EndOfStatement))
7528    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7529  Parser.Lex();
7530
7531  if (Val == 16) {
7532    if (!isThumb())
7533      SwitchMode();
7534    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7535  } else {
7536    if (isThumb())
7537      SwitchMode();
7538    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7539  }
7540
7541  return false;
7542}
7543
7544/// parseDirectiveReq
7545///  ::= name .req registername
7546bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7547  Parser.Lex(); // Eat the '.req' token.
7548  unsigned Reg;
7549  SMLoc SRegLoc, ERegLoc;
7550  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7551    Parser.EatToEndOfStatement();
7552    return Error(SRegLoc, "register name expected");
7553  }
7554
7555  // Shouldn't be anything else.
7556  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7557    Parser.EatToEndOfStatement();
7558    return Error(Parser.getTok().getLoc(),
7559                 "unexpected input in .req directive.");
7560  }
7561
7562  Parser.Lex(); // Consume the EndOfStatement
7563
7564  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7565    return Error(SRegLoc, "redefinition of '" + Name +
7566                          "' does not match original.");
7567
7568  return false;
7569}
7570
7571/// parseDirectiveUnreq
7572///  ::= .unreq registername
7573bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7574  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7575    Parser.EatToEndOfStatement();
7576    return Error(L, "unexpected input in .unreq directive.");
7577  }
7578  RegisterReqs.erase(Parser.getTok().getIdentifier());
7579  Parser.Lex(); // Eat the identifier.
7580  return false;
7581}
7582
7583/// parseDirectiveArch
7584///  ::= .arch token
7585bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7586  return true;
7587}
7588
7589/// parseDirectiveEabiAttr
7590///  ::= .eabi_attribute int, int
7591bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7592  return true;
7593}
7594
7595extern "C" void LLVMInitializeARMAsmLexer();
7596
7597/// Force static initialization.
7598extern "C" void LLVMInitializeARMAsmParser() {
7599  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7600  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7601  LLVMInitializeARMAsmLexer();
7602}
7603
7604#define GET_REGISTER_MATCHER
7605#define GET_SUBTARGET_FEATURE_NAME
7606#define GET_MATCHER_IMPLEMENTATION
7607#include "ARMGenAsmMatcher.inc"
7608