ARMAsmParser.cpp revision 74423e32ce7f426b624bfb0c31481bcf6a36394d
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
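                              // For example, Mask == 0b1000 has three
                              // trailing zeros, so the block covers one
                              // instruction; Mask == 0b0110 has one
                              // trailing zero, so it covers three.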
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_MemBarrierOpt,
274    k_Memory,
275    k_PostIndexRegister,
276    k_MSRMask,
277    k_ProcIFlags,
278    k_VectorIndex,
279    k_Register,
280    k_RegisterList,
281    k_DPRRegisterList,
282    k_SPRRegisterList,
283    k_VectorList,
284    k_VectorListAllLanes,
285    k_VectorListIndexed,
286    k_ShiftedRegister,
287    k_ShiftedImmediate,
288    k_ShifterImmediate,
289    k_RotateImmediate,
290    k_BitfieldDescriptor,
291    k_Token
292  } Kind;
293
294  SMLoc StartLoc, EndLoc;
295  SmallVector<unsigned, 8> Registers;
296
297  union {
298    struct {
299      ARMCC::CondCodes Val;
300    } CC;
301
302    struct {
303      unsigned Val;
304    } Cop;
305
306    struct {
307      unsigned Val;
308    } CoprocOption;
309
310    struct {
311      unsigned Mask:4;
312    } ITMask;
313
314    struct {
315      ARM_MB::MemBOpt Val;
316    } MBOpt;
317
318    struct {
319      ARM_PROC::IFlags Val;
320    } IFlags;
321
322    struct {
323      unsigned Val;
324    } MMask;
325
326    struct {
327      const char *Data;
328      unsigned Length;
329    } Tok;
330
331    struct {
332      unsigned RegNum;
333    } Reg;
334
335    // A vector register list is a sequential list of 1 to 4 registers.
336    struct {
337      unsigned RegNum;
338      unsigned Count;
339      unsigned LaneIndex;
340      bool isDoubleSpaced;
341    } VectorList;
342
343    struct {
344      unsigned Val;
345    } VectorIndex;
346
347    struct {
348      const MCExpr *Val;
349    } Imm;
350
351    /// Combined record for all forms of ARM address expressions.
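    /// For example, the operand "[r0, r1, lsl #2]" is recorded with
    /// BaseRegNum = r0, OffsetRegNum = r1, ShiftType = lsl, ShiftImm = 2,
    /// OffsetImm = 0 and Alignment = 0.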
352    struct {
353      unsigned BaseRegNum;
354      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
355      // was specified.
356      const MCConstantExpr *OffsetImm;  // Offset immediate value
357      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
358      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
359      unsigned ShiftImm;        // shift for OffsetReg.
360      unsigned Alignment;       // 0 = no alignment specified
361                                // n = alignment in bytes (2, 4, 8, 16, or 32)
362      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
363    } Memory;
364
365    struct {
366      unsigned RegNum;
367      bool isAdd;
368      ARM_AM::ShiftOpc ShiftTy;
369      unsigned ShiftImm;
370    } PostIdxReg;
371
372    struct {
373      bool isASR;
374      unsigned Imm;
375    } ShifterImm;
376    struct {
377      ARM_AM::ShiftOpc ShiftTy;
378      unsigned SrcReg;
379      unsigned ShiftReg;
380      unsigned ShiftImm;
381    } RegShiftedReg;
382    struct {
383      ARM_AM::ShiftOpc ShiftTy;
384      unsigned SrcReg;
385      unsigned ShiftImm;
386    } RegShiftedImm;
387    struct {
388      unsigned Imm;
389    } RotImm;
390    struct {
391      unsigned LSB;
392      unsigned Width;
393    } Bitfield;
394  };
395
396  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
397public:
398  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
399    Kind = o.Kind;
400    StartLoc = o.StartLoc;
401    EndLoc = o.EndLoc;
402    switch (Kind) {
403    case k_CondCode:
404      CC = o.CC;
405      break;
406    case k_ITCondMask:
407      ITMask = o.ITMask;
408      break;
409    case k_Token:
410      Tok = o.Tok;
411      break;
412    case k_CCOut:
413    case k_Register:
414      Reg = o.Reg;
415      break;
416    case k_RegisterList:
417    case k_DPRRegisterList:
418    case k_SPRRegisterList:
419      Registers = o.Registers;
420      break;
421    case k_VectorList:
422    case k_VectorListAllLanes:
423    case k_VectorListIndexed:
424      VectorList = o.VectorList;
425      break;
426    case k_CoprocNum:
427    case k_CoprocReg:
428      Cop = o.Cop;
429      break;
430    case k_CoprocOption:
431      CoprocOption = o.CoprocOption;
432      break;
433    case k_Immediate:
434      Imm = o.Imm;
435      break;
436    case k_MemBarrierOpt:
437      MBOpt = o.MBOpt;
438      break;
439    case k_Memory:
440      Memory = o.Memory;
441      break;
442    case k_PostIndexRegister:
443      PostIdxReg = o.PostIdxReg;
444      break;
445    case k_MSRMask:
446      MMask = o.MMask;
447      break;
448    case k_ProcIFlags:
449      IFlags = o.IFlags;
450      break;
451    case k_ShifterImmediate:
452      ShifterImm = o.ShifterImm;
453      break;
454    case k_ShiftedRegister:
455      RegShiftedReg = o.RegShiftedReg;
456      break;
457    case k_ShiftedImmediate:
458      RegShiftedImm = o.RegShiftedImm;
459      break;
460    case k_RotateImmediate:
461      RotImm = o.RotImm;
462      break;
463    case k_BitfieldDescriptor:
464      Bitfield = o.Bitfield;
465      break;
466    case k_VectorIndex:
467      VectorIndex = o.VectorIndex;
468      break;
469    }
470  }
471
472  /// getStartLoc - Get the location of the first token of this operand.
473  SMLoc getStartLoc() const { return StartLoc; }
474  /// getEndLoc - Get the location of the last token of this operand.
475  SMLoc getEndLoc() const { return EndLoc; }
476
477  ARMCC::CondCodes getCondCode() const {
478    assert(Kind == k_CondCode && "Invalid access!");
479    return CC.Val;
480  }
481
482  unsigned getCoproc() const {
483    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
484    return Cop.Val;
485  }
486
487  StringRef getToken() const {
488    assert(Kind == k_Token && "Invalid access!");
489    return StringRef(Tok.Data, Tok.Length);
490  }
491
492  unsigned getReg() const {
493    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
494    return Reg.RegNum;
495  }
496
497  const SmallVectorImpl<unsigned> &getRegList() const {
498    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
499            Kind == k_SPRRegisterList) && "Invalid access!");
500    return Registers;
501  }
502
503  const MCExpr *getImm() const {
504    assert(isImm() && "Invalid access!");
505    return Imm.Val;
506  }
507
508  unsigned getVectorIndex() const {
509    assert(Kind == k_VectorIndex && "Invalid access!");
510    return VectorIndex.Val;
511  }
512
513  ARM_MB::MemBOpt getMemBarrierOpt() const {
514    assert(Kind == k_MemBarrierOpt && "Invalid access!");
515    return MBOpt.Val;
516  }
517
518  ARM_PROC::IFlags getProcIFlags() const {
519    assert(Kind == k_ProcIFlags && "Invalid access!");
520    return IFlags.Val;
521  }
522
523  unsigned getMSRMask() const {
524    assert(Kind == k_MSRMask && "Invalid access!");
525    return MMask.Val;
526  }
527
528  bool isCoprocNum() const { return Kind == k_CoprocNum; }
529  bool isCoprocReg() const { return Kind == k_CoprocReg; }
530  bool isCoprocOption() const { return Kind == k_CoprocOption; }
531  bool isCondCode() const { return Kind == k_CondCode; }
532  bool isCCOut() const { return Kind == k_CCOut; }
533  bool isITMask() const { return Kind == k_ITCondMask; }
534  bool isITCondCode() const { return Kind == k_CondCode; }
535  bool isImm() const { return Kind == k_Immediate; }
536  bool isFPImm() const {
537    if (!isImm()) return false;
538    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
539    if (!CE) return false;
540    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
541    return Val != -1;
542  }
543  bool isFBits16() const {
544    if (!isImm()) return false;
545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
546    if (!CE) return false;
547    int64_t Value = CE->getValue();
548    return Value >= 0 && Value <= 16;
549  }
550  bool isFBits32() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 1 && Value <= 32;
556  }
557  bool isImm8s4() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
563  }
564  bool isImm0_1020s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
570  }
571  bool isImm0_508s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
577  }
578  bool isImm0_255() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return Value >= 0 && Value < 256;
584  }
585  bool isImm0_1() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 2;
591  }
592  bool isImm0_3() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 4;
598  }
599  bool isImm0_7() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 16;
612  }
613  bool isImm0_31() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 64;
626  }
627  bool isImm8() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value == 8;
633  }
634  bool isImm16() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 16;
640  }
641  bool isImm32() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 32;
647  }
648  bool isShrImm8() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value > 0 && Value <= 8;
654  }
655  bool isShrImm16() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 16;
661  }
662  bool isShrImm32() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 32;
668  }
669  bool isShrImm64() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 64;
675  }
676  bool isImm1_7() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value < 8;
682  }
683  bool isImm1_15() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 16;
689  }
690  bool isImm1_31() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 32;
696  }
697  bool isImm1_16() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 17;
703  }
704  bool isImm1_32() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 33;
710  }
711  bool isImm0_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value >= 0 && Value < 33;
717  }
718  bool isImm0_65535() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 65536;
724  }
725  bool isImm0_65535Expr() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    // If it's not a constant expression, it'll generate a fixup and be
729    // handled later.
730    if (!CE) return true;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 65536;
733  }
734  bool isImm24bit() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value <= 0xffffff;
740  }
741  bool isImmThumbSR() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value > 0 && Value < 33;
747  }
748  bool isPKHLSLImm() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value >= 0 && Value < 32;
754  }
755  bool isPKHASRImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value > 0 && Value <= 32;
761  }
762  bool isARMSOImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return ARM_AM::getSOImmVal(Value) != -1;
768  }
769  bool isARMSOImmNot() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(~Value) != -1;
775  }
776  bool isARMSOImmNeg() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(-Value) != -1;
782  }
783  bool isT2SOImm() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getT2SOImmVal(Value) != -1;
789  }
790  bool isT2SOImmNot() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(~Value) != -1;
796  }
797  bool isT2SOImmNeg() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(-Value) != -1;
803  }
804  bool isSetEndImm() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value == 1 || Value == 0;
810  }
811  bool isReg() const { return Kind == k_Register; }
812  bool isRegList() const { return Kind == k_RegisterList; }
813  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
814  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
815  bool isToken() const { return Kind == k_Token; }
816  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
817  bool isMemory() const { return Kind == k_Memory; }
818  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
819  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
820  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
821  bool isRotImm() const { return Kind == k_RotateImmediate; }
822  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
823  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
824  bool isPostIdxReg() const {
825    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
826  }
827  bool isMemNoOffset(bool alignOK = false) const {
828    if (!isMemory())
829      return false;
830    // No offset of any kind.
831    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
832     (alignOK || Memory.Alignment == 0);
833  }
834  bool isMemPCRelImm12() const {
835    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
836      return false;
837    // Base register must be PC.
838    if (Memory.BaseRegNum != ARM::PC)
839      return false;
840    // Immediate offset in range [-4095, 4095].
841    if (!Memory.OffsetImm) return true;
842    int64_t Val = Memory.OffsetImm->getValue();
843    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
844  }
845  bool isAlignedMemory() const {
846    return isMemNoOffset(true);
847  }
848  bool isAddrMode2() const {
849    if (!isMemory() || Memory.Alignment != 0) return false;
850    // Check for register offset.
851    if (Memory.OffsetRegNum) return true;
852    // Immediate offset in range [-4095, 4095].
853    if (!Memory.OffsetImm) return true;
854    int64_t Val = Memory.OffsetImm->getValue();
855    return Val > -4096 && Val < 4096;
856  }
857  bool isAM2OffsetImm() const {
858    if (!isImm()) return false;
859    // Immediate offset in range [-4095, 4095].
860    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
861    if (!CE) return false;
862    int64_t Val = CE->getValue();
863    return Val > -4096 && Val < 4096;
864  }
865  bool isAddrMode3() const {
866    // If we have an immediate that's not a constant, treat it as a label
867    // reference needing a fixup. If it is a constant, it's something else
868    // and we reject it.
869    if (isImm() && !isa<MCConstantExpr>(getImm()))
870      return true;
871    if (!isMemory() || Memory.Alignment != 0) return false;
872    // No shifts are legal for AM3.
873    if (Memory.ShiftType != ARM_AM::no_shift) return false;
874    // Check for register offset.
875    if (Memory.OffsetRegNum) return true;
876    // Immediate offset in range [-255, 255].
877    if (!Memory.OffsetImm) return true;
878    int64_t Val = Memory.OffsetImm->getValue();
879    return Val > -256 && Val < 256;
880  }
881  bool isAM3Offset() const {
882    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
883      return false;
884    if (Kind == k_PostIndexRegister)
885      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
886    // Immediate offset in range [-255, 255].
887    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
888    if (!CE) return false;
889    int64_t Val = CE->getValue();
890    // Special case, #-0 is INT32_MIN.
891    return (Val > -256 && Val < 256) || Val == INT32_MIN;
892  }
893  bool isAddrMode5() const {
894    // If we have an immediate that's not a constant, treat it as a label
895    // reference needing a fixup. If it is a constant, it's something else
896    // and we reject it.
897    if (isImm() && !isa<MCConstantExpr>(getImm()))
898      return true;
899    if (!isMemory() || Memory.Alignment != 0) return false;
900    // Check for register offset.
901    if (Memory.OffsetRegNum) return false;
902    // Immediate offset in range [-1020, 1020] and a multiple of 4.
903    if (!Memory.OffsetImm) return true;
904    int64_t Val = Memory.OffsetImm->getValue();
905    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
906      Val == INT32_MIN;
907  }
908  bool isMemTBB() const {
909    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
910        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
911      return false;
912    return true;
913  }
914  bool isMemTBH() const {
915    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
916        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
917        Memory.Alignment != 0)
918      return false;
919    return true;
920  }
921  bool isMemRegOffset() const {
922    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
923      return false;
924    return true;
925  }
926  bool isT2MemRegOffset() const {
927    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
928        Memory.Alignment != 0)
929      return false;
930    // Only lsl #{0, 1, 2, 3} allowed.
931    if (Memory.ShiftType == ARM_AM::no_shift)
932      return true;
933    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
934      return false;
935    return true;
936  }
937  bool isMemThumbRR() const {
938    // Thumb reg+reg addressing is simple. Just two registers, a base and
939    // an offset. No shifts, negations or any other complicating factors.
940    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
941        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
942      return false;
943    return isARMLowRegister(Memory.BaseRegNum) &&
944      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
945  }
946  bool isMemThumbRIs4() const {
947    if (!isMemory() || Memory.OffsetRegNum != 0 ||
948        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
949      return false;
950    // Immediate offset, multiple of 4 in range [0, 124].
951    if (!Memory.OffsetImm) return true;
952    int64_t Val = Memory.OffsetImm->getValue();
953    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
954  }
955  bool isMemThumbRIs2() const {
956    if (!isMemory() || Memory.OffsetRegNum != 0 ||
957        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
958      return false;
959    // Immediate offset, multiple of 2 in range [0, 62].
960    if (!Memory.OffsetImm) return true;
961    int64_t Val = Memory.OffsetImm->getValue();
962    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
963  }
964  bool isMemThumbRIs1() const {
965    if (!isMemory() || Memory.OffsetRegNum != 0 ||
966        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
967      return false;
968    // Immediate offset in range [0, 31].
969    if (!Memory.OffsetImm) return true;
970    int64_t Val = Memory.OffsetImm->getValue();
971    return Val >= 0 && Val <= 31;
972  }
973  bool isMemThumbSPI() const {
974    if (!isMemory() || Memory.OffsetRegNum != 0 ||
975        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
976      return false;
977    // Immediate offset, multiple of 4 in range [0, 1020].
978    if (!Memory.OffsetImm) return true;
979    int64_t Val = Memory.OffsetImm->getValue();
980    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
981  }
982  bool isMemImm8s4Offset() const {
983    // If we have an immediate that's not a constant, treat it as a label
984    // reference needing a fixup. If it is a constant, it's something else
985    // and we reject it.
986    if (isImm() && !isa<MCConstantExpr>(getImm()))
987      return true;
988    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
989      return false;
990    // Immediate offset a multiple of 4 in range [-1020, 1020].
991    if (!Memory.OffsetImm) return true;
992    int64_t Val = Memory.OffsetImm->getValue();
993    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
994  }
995  bool isMemImm0_1020s4Offset() const {
996    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
997      return false;
998    // Immediate offset a multiple of 4 in range [0, 1020].
999    if (!Memory.OffsetImm) return true;
1000    int64_t Val = Memory.OffsetImm->getValue();
1001    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1002  }
1003  bool isMemImm8Offset() const {
1004    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1005      return false;
1006    // Base reg of PC isn't allowed for these encodings.
1007    if (Memory.BaseRegNum == ARM::PC) return false;
1008    // Immediate offset in range [-255, 255].
1009    if (!Memory.OffsetImm) return true;
1010    int64_t Val = Memory.OffsetImm->getValue();
1011    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1012  }
1013  bool isMemPosImm8Offset() const {
1014    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1015      return false;
1016    // Immediate offset in range [0, 255].
1017    if (!Memory.OffsetImm) return true;
1018    int64_t Val = Memory.OffsetImm->getValue();
1019    return Val >= 0 && Val < 256;
1020  }
1021  bool isMemNegImm8Offset() const {
1022    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1023      return false;
1024    // Base reg of PC isn't allowed for these encodings.
1025    if (Memory.BaseRegNum == ARM::PC) return false;
1026    // Immediate offset in range [-255, -1].
1027    if (!Memory.OffsetImm) return false;
1028    int64_t Val = Memory.OffsetImm->getValue();
1029    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1030  }
1031  bool isMemUImm12Offset() const {
1032    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1033      return false;
1034    // Immediate offset in range [0, 4095].
1035    if (!Memory.OffsetImm) return true;
1036    int64_t Val = Memory.OffsetImm->getValue();
1037    return (Val >= 0 && Val < 4096);
1038  }
1039  bool isMemImm12Offset() const {
1040    // If we have an immediate that's not a constant, treat it as a label
1041    // reference needing a fixup. If it is a constant, it's something else
1042    // and we reject it.
1043    if (isImm() && !isa<MCConstantExpr>(getImm()))
1044      return true;
1045
1046    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1047      return false;
1048    // Immediate offset in range [-4095, 4095].
1049    if (!Memory.OffsetImm) return true;
1050    int64_t Val = Memory.OffsetImm->getValue();
1051    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1052  }
1053  bool isPostIdxImm8() const {
1054    if (!isImm()) return false;
1055    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1056    if (!CE) return false;
1057    int64_t Val = CE->getValue();
1058    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1059  }
1060  bool isPostIdxImm8s4() const {
1061    if (!isImm()) return false;
1062    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1063    if (!CE) return false;
1064    int64_t Val = CE->getValue();
1065    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1066      (Val == INT32_MIN);
1067  }
1068
1069  bool isMSRMask() const { return Kind == k_MSRMask; }
1070  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1071
1072  // NEON operands.
1073  bool isSingleSpacedVectorList() const {
1074    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1075  }
1076  bool isDoubleSpacedVectorList() const {
1077    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1078  }
1079  bool isVecListOneD() const {
1080    if (!isSingleSpacedVectorList()) return false;
1081    return VectorList.Count == 1;
1082  }
1083
1084  bool isVecListTwoD() const {
1085    if (!isSingleSpacedVectorList()) return false;
1086    return VectorList.Count == 2;
1087  }
1088
1089  bool isVecListThreeD() const {
1090    if (!isSingleSpacedVectorList()) return false;
1091    return VectorList.Count == 3;
1092  }
1093
1094  bool isVecListFourD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 4;
1097  }
1098
1099  bool isVecListTwoQ() const {
1100    if (!isDoubleSpacedVectorList()) return false;
1101    return VectorList.Count == 2;
1102  }
1103
1104  bool isVecListThreeQ() const {
1105    if (!isDoubleSpacedVectorList()) return false;
1106    return VectorList.Count == 3;
1107  }
1108
1109  bool isVecListFourQ() const {
1110    if (!isDoubleSpacedVectorList()) return false;
1111    return VectorList.Count == 4;
1112  }
1113
1114  bool isSingleSpacedVectorAllLanes() const {
1115    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1116  }
1117  bool isDoubleSpacedVectorAllLanes() const {
1118    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1119  }
1120  bool isVecListOneDAllLanes() const {
1121    if (!isSingleSpacedVectorAllLanes()) return false;
1122    return VectorList.Count == 1;
1123  }
1124
1125  bool isVecListTwoDAllLanes() const {
1126    if (!isSingleSpacedVectorAllLanes()) return false;
1127    return VectorList.Count == 2;
1128  }
1129
1130  bool isVecListTwoQAllLanes() const {
1131    if (!isDoubleSpacedVectorAllLanes()) return false;
1132    return VectorList.Count == 2;
1133  }
1134
1135  bool isVecListThreeDAllLanes() const {
1136    if (!isSingleSpacedVectorAllLanes()) return false;
1137    return VectorList.Count == 3;
1138  }
1139
1140  bool isVecListThreeQAllLanes() const {
1141    if (!isDoubleSpacedVectorAllLanes()) return false;
1142    return VectorList.Count == 3;
1143  }
1144
1145  bool isVecListFourDAllLanes() const {
1146    if (!isSingleSpacedVectorAllLanes()) return false;
1147    return VectorList.Count == 4;
1148  }
1149
1150  bool isVecListFourQAllLanes() const {
1151    if (!isDoubleSpacedVectorAllLanes()) return false;
1152    return VectorList.Count == 4;
1153  }
1154
1155  bool isSingleSpacedVectorIndexed() const {
1156    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1157  }
1158  bool isDoubleSpacedVectorIndexed() const {
1159    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1160  }
1161  bool isVecListOneDByteIndexed() const {
1162    if (!isSingleSpacedVectorIndexed()) return false;
1163    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1164  }
1165
1166  bool isVecListOneDHWordIndexed() const {
1167    if (!isSingleSpacedVectorIndexed()) return false;
1168    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1169  }
1170
1171  bool isVecListOneDWordIndexed() const {
1172    if (!isSingleSpacedVectorIndexed()) return false;
1173    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1174  }
1175
1176  bool isVecListTwoDByteIndexed() const {
1177    if (!isSingleSpacedVectorIndexed()) return false;
1178    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1179  }
1180
1181  bool isVecListTwoDHWordIndexed() const {
1182    if (!isSingleSpacedVectorIndexed()) return false;
1183    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1184  }
1185
1186  bool isVecListTwoQWordIndexed() const {
1187    if (!isDoubleSpacedVectorIndexed()) return false;
1188    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1189  }
1190
1191  bool isVecListTwoQHWordIndexed() const {
1192    if (!isDoubleSpacedVectorIndexed()) return false;
1193    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1194  }
1195
1196  bool isVecListTwoDWordIndexed() const {
1197    if (!isSingleSpacedVectorIndexed()) return false;
1198    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1199  }
1200
1201  bool isVecListThreeDByteIndexed() const {
1202    if (!isSingleSpacedVectorIndexed()) return false;
1203    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1204  }
1205
1206  bool isVecListThreeDHWordIndexed() const {
1207    if (!isSingleSpacedVectorIndexed()) return false;
1208    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1209  }
1210
1211  bool isVecListThreeQWordIndexed() const {
1212    if (!isDoubleSpacedVectorIndexed()) return false;
1213    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1214  }
1215
1216  bool isVecListThreeQHWordIndexed() const {
1217    if (!isDoubleSpacedVectorIndexed()) return false;
1218    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1219  }
1220
1221  bool isVecListThreeDWordIndexed() const {
1222    if (!isSingleSpacedVectorIndexed()) return false;
1223    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1224  }
1225
1226  bool isVecListFourDByteIndexed() const {
1227    if (!isSingleSpacedVectorIndexed()) return false;
1228    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1229  }
1230
1231  bool isVecListFourDHWordIndexed() const {
1232    if (!isSingleSpacedVectorIndexed()) return false;
1233    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1234  }
1235
1236  bool isVecListFourQWordIndexed() const {
1237    if (!isDoubleSpacedVectorIndexed()) return false;
1238    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1239  }
1240
1241  bool isVecListFourQHWordIndexed() const {
1242    if (!isDoubleSpacedVectorIndexed()) return false;
1243    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1244  }
1245
1246  bool isVecListFourDWordIndexed() const {
1247    if (!isSingleSpacedVectorIndexed()) return false;
1248    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1249  }
1250
1251  bool isVectorIndex8() const {
1252    if (Kind != k_VectorIndex) return false;
1253    return VectorIndex.Val < 8;
1254  }
1255  bool isVectorIndex16() const {
1256    if (Kind != k_VectorIndex) return false;
1257    return VectorIndex.Val < 4;
1258  }
1259  bool isVectorIndex32() const {
1260    if (Kind != k_VectorIndex) return false;
1261    return VectorIndex.Val < 2;
1262  }
1263
1264  bool isNEONi8splat() const {
1265    if (!isImm()) return false;
1266    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1267    // Must be a constant.
1268    if (!CE) return false;
1269    int64_t Value = CE->getValue();
1270    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1271    // value.
1272    return Value >= 0 && Value < 256;
1273  }
1274
1275  bool isNEONi16splat() const {
1276    if (!isImm()) return false;
1277    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1278    // Must be a constant.
1279    if (!CE) return false;
1280    int64_t Value = CE->getValue();
1281    // i16 value in the range [0,255] or [0x0100, 0xff00]
1282    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1283  }
1284
1285  bool isNEONi32splat() const {
1286    if (!isImm()) return false;
1287    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1288    // Must be a constant.
1289    if (!CE) return false;
1290    int64_t Value = CE->getValue();
1291    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1292    return (Value >= 0 && Value < 256) ||
1293      (Value >= 0x0100 && Value <= 0xff00) ||
1294      (Value >= 0x010000 && Value <= 0xff0000) ||
1295      (Value >= 0x01000000 && Value <= 0xff000000);
1296  }
1297
1298  bool isNEONi32vmov() const {
1299    if (!isImm()) return false;
1300    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1301    // Must be a constant.
1302    if (!CE) return false;
1303    int64_t Value = CE->getValue();
1304    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1305    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1306    return (Value >= 0 && Value < 256) ||
1307      (Value >= 0x0100 && Value <= 0xff00) ||
1308      (Value >= 0x010000 && Value <= 0xff0000) ||
1309      (Value >= 0x01000000 && Value <= 0xff000000) ||
1310      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1311      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1312  }
1313  bool isNEONi32vmovNeg() const {
1314    if (!isImm()) return false;
1315    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1316    // Must be a constant.
1317    if (!CE) return false;
1318    int64_t Value = ~CE->getValue();
1319    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1320    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1321    return (Value >= 0 && Value < 256) ||
1322      (Value >= 0x0100 && Value <= 0xff00) ||
1323      (Value >= 0x010000 && Value <= 0xff0000) ||
1324      (Value >= 0x01000000 && Value <= 0xff000000) ||
1325      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1326      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1327  }
1328
1329  bool isNEONi64splat() const {
1330    if (!isImm()) return false;
1331    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1332    // Must be a constant.
1333    if (!CE) return false;
1334    uint64_t Value = CE->getValue();
1335    // i64 value with each byte being either 0 or 0xff.
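    // For example, 0x00ff00ff00ff00ff is a valid i64 splat immediate,
    // while 0x0102030405060708 is not.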
1336    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1337      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1338    return true;
1339  }
1340
1341  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1342    // Add as immediates when possible.  Null MCExpr = 0.
1343    if (Expr == 0)
1344      Inst.addOperand(MCOperand::CreateImm(0));
1345    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1346      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1347    else
1348      Inst.addOperand(MCOperand::CreateExpr(Expr));
1349  }
1350
1351  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1352    assert(N == 2 && "Invalid number of operands!");
1353    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1354    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1355    Inst.addOperand(MCOperand::CreateReg(RegNum));
1356  }
1357
1358  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1359    assert(N == 1 && "Invalid number of operands!");
1360    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1361  }
1362
1363  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1364    assert(N == 1 && "Invalid number of operands!");
1365    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1366  }
1367
1368  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1369    assert(N == 1 && "Invalid number of operands!");
1370    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1371  }
1372
1373  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1374    assert(N == 1 && "Invalid number of operands!");
1375    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1376  }
1377
1378  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1379    assert(N == 1 && "Invalid number of operands!");
1380    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1381  }
1382
1383  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1384    assert(N == 1 && "Invalid number of operands!");
1385    Inst.addOperand(MCOperand::CreateReg(getReg()));
1386  }
1387
1388  void addRegOperands(MCInst &Inst, unsigned N) const {
1389    assert(N == 1 && "Invalid number of operands!");
1390    Inst.addOperand(MCOperand::CreateReg(getReg()));
1391  }
1392
1393  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1394    assert(N == 3 && "Invalid number of operands!");
1395    assert(isRegShiftedReg() &&
1396           "addRegShiftedRegOperands() on non RegShiftedReg!");
1397    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1398    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1399    Inst.addOperand(MCOperand::CreateImm(
1400      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1401  }
1402
1403  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1404    assert(N == 2 && "Invalid number of operands!");
1405    assert(isRegShiftedImm() &&
1406           "addRegShiftedImmOperands() on non RegShiftedImm!");
1407    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1408    Inst.addOperand(MCOperand::CreateImm(
1409      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1410  }
1411
1412  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1413    assert(N == 1 && "Invalid number of operands!");
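    // For example, ASR #4 encodes as (1 << 5) | 4 == 36, while LSL #4
    // encodes simply as 4.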
1414    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1415                                         ShifterImm.Imm));
1416  }
1417
1418  void addRegListOperands(MCInst &Inst, unsigned N) const {
1419    assert(N == 1 && "Invalid number of operands!");
1420    const SmallVectorImpl<unsigned> &RegList = getRegList();
1421    for (SmallVectorImpl<unsigned>::const_iterator
1422           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1423      Inst.addOperand(MCOperand::CreateReg(*I));
1424  }
1425
1426  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1427    addRegListOperands(Inst, N);
1428  }
1429
1430  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1431    addRegListOperands(Inst, N);
1432  }
1433
1434  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1435    assert(N == 1 && "Invalid number of operands!");
1436    // Encoded as val>>3. The printer handles display as 8, 16, 24.
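    // For example, a rotation of 24 is added as 24 >> 3 == 3.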
1437    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1438  }
1439
1440  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1441    assert(N == 1 && "Invalid number of operands!");
1442    // Munge the lsb/width into a bitfield mask.
1443    unsigned lsb = Bitfield.LSB;
1444    unsigned width = Bitfield.Width;
1445    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
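    // For example, lsb = 8 and width = 8 yield the mask 0xffff00ff, i.e.
    // bits [15:8] clear and all other bits set.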
1446    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1447                      (32 - (lsb + width)));
1448    Inst.addOperand(MCOperand::CreateImm(Mask));
1449  }
1450
1451  void addImmOperands(MCInst &Inst, unsigned N) const {
1452    assert(N == 1 && "Invalid number of operands!");
1453    addExpr(Inst, getImm());
1454  }
1455
1456  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1457    assert(N == 1 && "Invalid number of operands!");
1458    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1459    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1460  }
1461
1462  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1463    assert(N == 1 && "Invalid number of operands!");
1464    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1465    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1466  }
1467
1468  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1469    assert(N == 1 && "Invalid number of operands!");
1470    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1471    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1472    Inst.addOperand(MCOperand::CreateImm(Val));
1473  }
1474
1475  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1476    assert(N == 1 && "Invalid number of operands!");
1477    // FIXME: We really want to scale the value here, but the LDRD/STRD
1478    // instructions don't encode operands that way yet.
1479    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1480    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1481  }
1482
1483  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1484    assert(N == 1 && "Invalid number of operands!");
1485    // The immediate is scaled by four in the encoding and is stored
1486    // in the MCInst as such. Lop off the low two bits here.
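    // e.g. an assembly immediate of #508 is stored in the MCInst as 127
    // (illustrative).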
1487    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1488    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1489  }
1490
1491  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1492    assert(N == 1 && "Invalid number of operands!");
1493    // The immediate is scaled by four in the encoding and is stored
1494    // in the MCInst as such. Lop off the low two bits here.
1495    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1496    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1497  }
1498
1499  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1500    assert(N == 1 && "Invalid number of operands!");
1501    // The constant encodes as the immediate-1, and we store in the instruction
1502    // the bits as encoded, so subtract off one here.
1503    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1504    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1505  }
1506
1507  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1508    assert(N == 1 && "Invalid number of operands!");
1509    // The constant encodes as the immediate-1, and we store in the instruction
1510    // the bits as encoded, so subtract off one here.
1511    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1512    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1513  }
1514
1515  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1516    assert(N == 1 && "Invalid number of operands!");
1517    // The constant encodes as the immediate, except for 32, which encodes as
1518    // zero.
1519    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1520    unsigned Imm = CE->getValue();
1521    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1522  }
1523
1524  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1525    assert(N == 1 && "Invalid number of operands!");
1526    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1527    // the instruction as well.
1528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1529    int Val = CE->getValue();
1530    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1531  }
1532
1533  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1534    assert(N == 1 && "Invalid number of operands!");
1535    // The operand is actually a t2_so_imm, but we have its bitwise
1536    // negation in the assembly source, so twiddle it here.
1537    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1538    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1539  }
1540
1541  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1542    assert(N == 1 && "Invalid number of operands!");
1543    // The operand is actually a t2_so_imm, but we have its
1544    // negation in the assembly source, so twiddle it here.
1545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1546    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1547  }
1548
1549  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1550    assert(N == 1 && "Invalid number of operands!");
1551    // The operand is actually a so_imm, but we have its bitwise
1552    // negation in the assembly source, so twiddle it here.
1553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1554    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1555  }
1556
1557  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1558    assert(N == 1 && "Invalid number of operands!");
1559    // The operand is actually a so_imm, but we have its
1560    // negation in the assembly source, so twiddle it here.
1561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1562    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1563  }
1564
1565  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1566    assert(N == 1 && "Invalid number of operands!");
1567    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1568  }
1569
1570  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1571    assert(N == 1 && "Invalid number of operands!");
1572    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1573  }
1574
1575  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1576    assert(N == 1 && "Invalid number of operands!");
1577    int32_t Imm = Memory.OffsetImm->getValue();
1578    // FIXME: Handle #-0
1579    if (Imm == INT32_MIN) Imm = 0;
1580    Inst.addOperand(MCOperand::CreateImm(Imm));
1581  }
1582
1583  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1584    assert(N == 2 && "Invalid number of operands!");
1585    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1586    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1587  }
1588
1589  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1590    assert(N == 3 && "Invalid number of operands!");
1591    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1592    if (!Memory.OffsetRegNum) {
1593      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1594      // Special case for #-0
1595      if (Val == INT32_MIN) Val = 0;
1596      if (Val < 0) Val = -Val;
1597      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1598    } else {
1599      // For register offset, we encode the shift type and negation flag
1600      // here.
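      // e.g. '[r1, -r2, lsl #3]' packs (sub, 3, lsl) into a single immediate
      // via ARM_AM::getAM2Opc (illustrative).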
1601      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1602                              Memory.ShiftImm, Memory.ShiftType);
1603    }
1604    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1605    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1606    Inst.addOperand(MCOperand::CreateImm(Val));
1607  }
1608
1609  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1610    assert(N == 2 && "Invalid number of operands!");
1611    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1612    assert(CE && "non-constant AM2OffsetImm operand!");
1613    int32_t Val = CE->getValue();
1614    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1615    // Special case for #-0
1616    if (Val == INT32_MIN) Val = 0;
1617    if (Val < 0) Val = -Val;
1618    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1619    Inst.addOperand(MCOperand::CreateReg(0));
1620    Inst.addOperand(MCOperand::CreateImm(Val));
1621  }
1622
1623  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1624    assert(N == 3 && "Invalid number of operands!");
1625    // If we have an immediate that's not a constant, treat it as a label
1626    // reference needing a fixup. If it is a constant, it's something else
1627    // and we reject it.
1628    if (isImm()) {
1629      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1630      Inst.addOperand(MCOperand::CreateReg(0));
1631      Inst.addOperand(MCOperand::CreateImm(0));
1632      return;
1633    }
1634
1635    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1636    if (!Memory.OffsetRegNum) {
1637      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1638      // Special case for #-0
1639      if (Val == INT32_MIN) Val = 0;
1640      if (Val < 0) Val = -Val;
1641      Val = ARM_AM::getAM3Opc(AddSub, Val);
1642    } else {
1643      // For register offset, we encode the shift type and negation flag
1644      // here.
1645      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1646    }
1647    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1648    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1649    Inst.addOperand(MCOperand::CreateImm(Val));
1650  }
1651
1652  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1653    assert(N == 2 && "Invalid number of operands!");
1654    if (Kind == k_PostIndexRegister) {
1655      int32_t Val =
1656        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1657      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1658      Inst.addOperand(MCOperand::CreateImm(Val));
1659      return;
1660    }
1661
1662    // Constant offset.
1663    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1664    int32_t Val = CE->getValue();
1665    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1666    // Special case for #-0
1667    if (Val == INT32_MIN) Val = 0;
1668    if (Val < 0) Val = -Val;
1669    Val = ARM_AM::getAM3Opc(AddSub, Val);
1670    Inst.addOperand(MCOperand::CreateReg(0));
1671    Inst.addOperand(MCOperand::CreateImm(Val));
1672  }
1673
1674  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1675    assert(N == 2 && "Invalid number of operands!");
1676    // If we have an immediate that's not a constant, treat it as a label
1677    // reference needing a fixup. If it is a constant, it's something else
1678    // and we reject it.
1679    if (isImm()) {
1680      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1681      Inst.addOperand(MCOperand::CreateImm(0));
1682      return;
1683    }
1684
1685    // The lower two bits are always zero and as such are not encoded.
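    // e.g. an offset of #-20 becomes ARM_AM::getAM5Opc(ARM_AM::sub, 5)
    // (illustrative).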
1686    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1687    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1688    // Special case for #-0
1689    if (Val == INT32_MIN) Val = 0;
1690    if (Val < 0) Val = -Val;
1691    Val = ARM_AM::getAM5Opc(AddSub, Val);
1692    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1693    Inst.addOperand(MCOperand::CreateImm(Val));
1694  }
1695
1696  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1697    assert(N == 2 && "Invalid number of operands!");
1698    // If we have an immediate that's not a constant, treat it as a label
1699    // reference needing a fixup. If it is a constant, it's something else
1700    // and we reject it.
1701    if (isImm()) {
1702      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1703      Inst.addOperand(MCOperand::CreateImm(0));
1704      return;
1705    }
1706
1707    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1708    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1709    Inst.addOperand(MCOperand::CreateImm(Val));
1710  }
1711
1712  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1713    assert(N == 2 && "Invalid number of operands!");
1714    // The lower two bits are always zero and as such are not encoded.
1715    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1716    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1717    Inst.addOperand(MCOperand::CreateImm(Val));
1718  }
1719
1720  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1721    assert(N == 2 && "Invalid number of operands!");
1722    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1723    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1724    Inst.addOperand(MCOperand::CreateImm(Val));
1725  }
1726
1727  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1728    addMemImm8OffsetOperands(Inst, N);
1729  }
1730
1731  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1732    addMemImm8OffsetOperands(Inst, N);
1733  }
1734
1735  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1736    assert(N == 2 && "Invalid number of operands!");
1737    // If this is an immediate, it's a label reference.
1738    if (isImm()) {
1739      addExpr(Inst, getImm());
1740      Inst.addOperand(MCOperand::CreateImm(0));
1741      return;
1742    }
1743
1744    // Otherwise, it's a normal memory reg+offset.
1745    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1746    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1747    Inst.addOperand(MCOperand::CreateImm(Val));
1748  }
1749
1750  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1751    assert(N == 2 && "Invalid number of operands!");
1752    // If this is an immediate, it's a label reference.
1753    if (isImm()) {
1754      addExpr(Inst, getImm());
1755      Inst.addOperand(MCOperand::CreateImm(0));
1756      return;
1757    }
1758
1759    // Otherwise, it's a normal memory reg+offset.
1760    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1761    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1762    Inst.addOperand(MCOperand::CreateImm(Val));
1763  }
1764
1765  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1766    assert(N == 2 && "Invalid number of operands!");
1767    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1768    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1769  }
1770
1771  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1772    assert(N == 2 && "Invalid number of operands!");
1773    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1774    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1775  }
1776
1777  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1778    assert(N == 3 && "Invalid number of operands!");
1779    unsigned Val =
1780      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1781                        Memory.ShiftImm, Memory.ShiftType);
1782    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1783    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1784    Inst.addOperand(MCOperand::CreateImm(Val));
1785  }
1786
1787  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1788    assert(N == 3 && "Invalid number of operands!");
1789    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1790    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1791    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1792  }
1793
1794  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 2 && "Invalid number of operands!");
1796    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1797    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1798  }
1799
1800  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1801    assert(N == 2 && "Invalid number of operands!");
1802    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1803    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1804    Inst.addOperand(MCOperand::CreateImm(Val));
1805  }
1806
1807  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1808    assert(N == 2 && "Invalid number of operands!");
1809    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1810    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1811    Inst.addOperand(MCOperand::CreateImm(Val));
1812  }
1813
1814  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1815    assert(N == 2 && "Invalid number of operands!");
1816    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1817    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1818    Inst.addOperand(MCOperand::CreateImm(Val));
1819  }
1820
1821  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1822    assert(N == 2 && "Invalid number of operands!");
1823    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1824    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1825    Inst.addOperand(MCOperand::CreateImm(Val));
1826  }
1827
1828  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1829    assert(N == 1 && "Invalid number of operands!");
1830    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1831    assert(CE && "non-constant post-idx-imm8 operand!");
1832    int Imm = CE->getValue();
1833    bool isAdd = Imm >= 0;
1834    if (Imm == INT32_MIN) Imm = 0;
1835    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1836    Inst.addOperand(MCOperand::CreateImm(Imm));
1837  }
1838
1839  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1840    assert(N == 1 && "Invalid number of operands!");
1841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1842    assert(CE && "non-constant post-idx-imm8s4 operand!");
1843    int Imm = CE->getValue();
1844    bool isAdd = Imm >= 0;
1845    if (Imm == INT32_MIN) Imm = 0;
1846    // Immediate is scaled by 4.
1847    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1848    Inst.addOperand(MCOperand::CreateImm(Imm));
1849  }
1850
1851  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1852    assert(N == 2 && "Invalid number of operands!");
1853    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1854    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1855  }
1856
1857  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1858    assert(N == 2 && "Invalid number of operands!");
1859    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1860    // The sign, shift type, and shift amount are encoded in a single operand
1861    // using the AM2 encoding helpers.
1862    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1863    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1864                                     PostIdxReg.ShiftTy);
1865    Inst.addOperand(MCOperand::CreateImm(Imm));
1866  }
1867
1868  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1869    assert(N == 1 && "Invalid number of operands!");
1870    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1871  }
1872
1873  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1874    assert(N == 1 && "Invalid number of operands!");
1875    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1876  }
1877
1878  void addVecListOperands(MCInst &Inst, unsigned N) const {
1879    assert(N == 1 && "Invalid number of operands!");
1880    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1881  }
1882
1883  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1884    assert(N == 2 && "Invalid number of operands!");
1885    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1886    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1887  }
1888
1889  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1890    assert(N == 1 && "Invalid number of operands!");
1891    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1892  }
1893
1894  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1895    assert(N == 1 && "Invalid number of operands!");
1896    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1897  }
1898
1899  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1900    assert(N == 1 && "Invalid number of operands!");
1901    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1902  }
1903
1904  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1905    assert(N == 1 && "Invalid number of operands!");
1906    // The immediate encodes the type of constant as well as the value.
1907    // Mask in that this is an i8 splat.
1908    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1909    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1910  }
1911
1912  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1913    assert(N == 1 && "Invalid number of operands!");
1914    // The immediate encodes the type of constant as well as the value.
1915    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1916    unsigned Value = CE->getValue();
1917    if (Value >= 256)
1918      Value = (Value >> 8) | 0xa00;
1919    else
1920      Value |= 0x800;
1921    Inst.addOperand(MCOperand::CreateImm(Value));
1922  }
1923
1924  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1925    assert(N == 1 && "Invalid number of operands!");
1926    // The immediate encodes the type of constant as well as the value.
1927    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1928    unsigned Value = CE->getValue();
1929    if (Value >= 256 && Value <= 0xff00)
1930      Value = (Value >> 8) | 0x200;
1931    else if (Value > 0xffff && Value <= 0xff0000)
1932      Value = (Value >> 16) | 0x400;
1933    else if (Value > 0xffffff)
1934      Value = (Value >> 24) | 0x600;
1935    Inst.addOperand(MCOperand::CreateImm(Value));
1936  }
1937
1938  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1939    assert(N == 1 && "Invalid number of operands!");
1940    // The immediate encodes the type of constant as well as the value.
1941    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1942    unsigned Value = CE->getValue();
1943    if (Value >= 256 && Value <= 0xffff)
1944      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1945    else if (Value > 0xffff && Value <= 0xffffff)
1946      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1947    else if (Value > 0xffffff)
1948      Value = (Value >> 24) | 0x600;
1949    Inst.addOperand(MCOperand::CreateImm(Value));
1950  }
1951
1952  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1953    assert(N == 1 && "Invalid number of operands!");
1954    // The immediate encodes the type of constant as well as the value.
1955    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1956    unsigned Value = ~CE->getValue();
1957    if (Value >= 256 && Value <= 0xffff)
1958      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1959    else if (Value > 0xffff && Value <= 0xffffff)
1960      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1961    else if (Value > 0xffffff)
1962      Value = (Value >> 24) | 0x600;
1963    Inst.addOperand(MCOperand::CreateImm(Value));
1964  }
1965
1966  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1967    assert(N == 1 && "Invalid number of operands!");
1968    // The immediate encodes the type of constant as well as the value.
1969    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1970    uint64_t Value = CE->getValue();
1971    unsigned Imm = 0;
1972    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1973      Imm |= (Value & 1) << i;
1974    }
1975    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1976  }
1977
1978  virtual void print(raw_ostream &OS) const;
1979
1980  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1981    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1982    Op->ITMask.Mask = Mask;
1983    Op->StartLoc = S;
1984    Op->EndLoc = S;
1985    return Op;
1986  }
1987
1988  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1989    ARMOperand *Op = new ARMOperand(k_CondCode);
1990    Op->CC.Val = CC;
1991    Op->StartLoc = S;
1992    Op->EndLoc = S;
1993    return Op;
1994  }
1995
1996  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1997    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1998    Op->Cop.Val = CopVal;
1999    Op->StartLoc = S;
2000    Op->EndLoc = S;
2001    return Op;
2002  }
2003
2004  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2005    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2006    Op->Cop.Val = CopVal;
2007    Op->StartLoc = S;
2008    Op->EndLoc = S;
2009    return Op;
2010  }
2011
2012  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2013    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2014    Op->Cop.Val = Val;
2015    Op->StartLoc = S;
2016    Op->EndLoc = E;
2017    return Op;
2018  }
2019
2020  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2021    ARMOperand *Op = new ARMOperand(k_CCOut);
2022    Op->Reg.RegNum = RegNum;
2023    Op->StartLoc = S;
2024    Op->EndLoc = S;
2025    return Op;
2026  }
2027
2028  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2029    ARMOperand *Op = new ARMOperand(k_Token);
2030    Op->Tok.Data = Str.data();
2031    Op->Tok.Length = Str.size();
2032    Op->StartLoc = S;
2033    Op->EndLoc = S;
2034    return Op;
2035  }
2036
2037  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2038    ARMOperand *Op = new ARMOperand(k_Register);
2039    Op->Reg.RegNum = RegNum;
2040    Op->StartLoc = S;
2041    Op->EndLoc = E;
2042    return Op;
2043  }
2044
2045  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2046                                           unsigned SrcReg,
2047                                           unsigned ShiftReg,
2048                                           unsigned ShiftImm,
2049                                           SMLoc S, SMLoc E) {
2050    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2051    Op->RegShiftedReg.ShiftTy = ShTy;
2052    Op->RegShiftedReg.SrcReg = SrcReg;
2053    Op->RegShiftedReg.ShiftReg = ShiftReg;
2054    Op->RegShiftedReg.ShiftImm = ShiftImm;
2055    Op->StartLoc = S;
2056    Op->EndLoc = E;
2057    return Op;
2058  }
2059
2060  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2061                                            unsigned SrcReg,
2062                                            unsigned ShiftImm,
2063                                            SMLoc S, SMLoc E) {
2064    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2065    Op->RegShiftedImm.ShiftTy = ShTy;
2066    Op->RegShiftedImm.SrcReg = SrcReg;
2067    Op->RegShiftedImm.ShiftImm = ShiftImm;
2068    Op->StartLoc = S;
2069    Op->EndLoc = E;
2070    return Op;
2071  }
2072
2073  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2074                                   SMLoc S, SMLoc E) {
2075    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2076    Op->ShifterImm.isASR = isASR;
2077    Op->ShifterImm.Imm = Imm;
2078    Op->StartLoc = S;
2079    Op->EndLoc = E;
2080    return Op;
2081  }
2082
2083  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2084    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2085    Op->RotImm.Imm = Imm;
2086    Op->StartLoc = S;
2087    Op->EndLoc = E;
2088    return Op;
2089  }
2090
2091  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2092                                    SMLoc S, SMLoc E) {
2093    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2094    Op->Bitfield.LSB = LSB;
2095    Op->Bitfield.Width = Width;
2096    Op->StartLoc = S;
2097    Op->EndLoc = E;
2098    return Op;
2099  }
2100
2101  static ARMOperand *
2102  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2103                SMLoc StartLoc, SMLoc EndLoc) {
2104    KindTy Kind = k_RegisterList;
2105
2106    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2107      Kind = k_DPRRegisterList;
2108    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2109             contains(Regs.front().first))
2110      Kind = k_SPRRegisterList;
2111
2112    ARMOperand *Op = new ARMOperand(Kind);
2113    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2114           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2115      Op->Registers.push_back(I->first);
2116    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2117    Op->StartLoc = StartLoc;
2118    Op->EndLoc = EndLoc;
2119    return Op;
2120  }
2121
2122  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2123                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2124    ARMOperand *Op = new ARMOperand(k_VectorList);
2125    Op->VectorList.RegNum = RegNum;
2126    Op->VectorList.Count = Count;
2127    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2128    Op->StartLoc = S;
2129    Op->EndLoc = E;
2130    return Op;
2131  }
2132
2133  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2134                                              bool isDoubleSpaced,
2135                                              SMLoc S, SMLoc E) {
2136    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2137    Op->VectorList.RegNum = RegNum;
2138    Op->VectorList.Count = Count;
2139    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2140    Op->StartLoc = S;
2141    Op->EndLoc = E;
2142    return Op;
2143  }
2144
2145  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2146                                             unsigned Index,
2147                                             bool isDoubleSpaced,
2148                                             SMLoc S, SMLoc E) {
2149    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2150    Op->VectorList.RegNum = RegNum;
2151    Op->VectorList.Count = Count;
2152    Op->VectorList.LaneIndex = Index;
2153    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2154    Op->StartLoc = S;
2155    Op->EndLoc = E;
2156    return Op;
2157  }
2158
2159  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2160                                       MCContext &Ctx) {
2161    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2162    Op->VectorIndex.Val = Idx;
2163    Op->StartLoc = S;
2164    Op->EndLoc = E;
2165    return Op;
2166  }
2167
2168  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2169    ARMOperand *Op = new ARMOperand(k_Immediate);
2170    Op->Imm.Val = Val;
2171    Op->StartLoc = S;
2172    Op->EndLoc = E;
2173    return Op;
2174  }
2175
2176  static ARMOperand *CreateMem(unsigned BaseRegNum,
2177                               const MCConstantExpr *OffsetImm,
2178                               unsigned OffsetRegNum,
2179                               ARM_AM::ShiftOpc ShiftType,
2180                               unsigned ShiftImm,
2181                               unsigned Alignment,
2182                               bool isNegative,
2183                               SMLoc S, SMLoc E) {
2184    ARMOperand *Op = new ARMOperand(k_Memory);
2185    Op->Memory.BaseRegNum = BaseRegNum;
2186    Op->Memory.OffsetImm = OffsetImm;
2187    Op->Memory.OffsetRegNum = OffsetRegNum;
2188    Op->Memory.ShiftType = ShiftType;
2189    Op->Memory.ShiftImm = ShiftImm;
2190    Op->Memory.Alignment = Alignment;
2191    Op->Memory.isNegative = isNegative;
2192    Op->StartLoc = S;
2193    Op->EndLoc = E;
2194    return Op;
2195  }
2196
2197  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2198                                      ARM_AM::ShiftOpc ShiftTy,
2199                                      unsigned ShiftImm,
2200                                      SMLoc S, SMLoc E) {
2201    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2202    Op->PostIdxReg.RegNum = RegNum;
2203    Op->PostIdxReg.isAdd = isAdd;
2204    Op->PostIdxReg.ShiftTy = ShiftTy;
2205    Op->PostIdxReg.ShiftImm = ShiftImm;
2206    Op->StartLoc = S;
2207    Op->EndLoc = E;
2208    return Op;
2209  }
2210
2211  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2212    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2213    Op->MBOpt.Val = Opt;
2214    Op->StartLoc = S;
2215    Op->EndLoc = S;
2216    return Op;
2217  }
2218
2219  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2220    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2221    Op->IFlags.Val = IFlags;
2222    Op->StartLoc = S;
2223    Op->EndLoc = S;
2224    return Op;
2225  }
2226
2227  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2228    ARMOperand *Op = new ARMOperand(k_MSRMask);
2229    Op->MMask.Val = MMask;
2230    Op->StartLoc = S;
2231    Op->EndLoc = S;
2232    return Op;
2233  }
2234};
2235
2236} // end anonymous namespace.
2237
2238void ARMOperand::print(raw_ostream &OS) const {
2239  switch (Kind) {
2240  case k_CondCode:
2241    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2242    break;
2243  case k_CCOut:
2244    OS << "<ccout " << getReg() << ">";
2245    break;
2246  case k_ITCondMask: {
2247    static const char *MaskStr[] = {
2248      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2249      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2250    };
2251    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2252    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2253    break;
2254  }
2255  case k_CoprocNum:
2256    OS << "<coprocessor number: " << getCoproc() << ">";
2257    break;
2258  case k_CoprocReg:
2259    OS << "<coprocessor register: " << getCoproc() << ">";
2260    break;
2261  case k_CoprocOption:
2262    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2263    break;
2264  case k_MSRMask:
2265    OS << "<mask: " << getMSRMask() << ">";
2266    break;
2267  case k_Immediate:
2268    getImm()->print(OS);
2269    break;
2270  case k_MemBarrierOpt:
2271    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2272    break;
2273  case k_Memory:
2274    OS << "<memory "
2275       << " base:" << Memory.BaseRegNum;
2276    OS << ">";
2277    break;
2278  case k_PostIndexRegister:
2279    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2280       << PostIdxReg.RegNum;
2281    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2282      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2283         << PostIdxReg.ShiftImm;
2284    OS << ">";
2285    break;
2286  case k_ProcIFlags: {
2287    OS << "<ARM_PROC::";
2288    unsigned IFlags = getProcIFlags();
2289    for (int i=2; i >= 0; --i)
2290      if (IFlags & (1 << i))
2291        OS << ARM_PROC::IFlagsToString(1 << i);
2292    OS << ">";
2293    break;
2294  }
2295  case k_Register:
2296    OS << "<register " << getReg() << ">";
2297    break;
2298  case k_ShifterImmediate:
2299    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2300       << " #" << ShifterImm.Imm << ">";
2301    break;
2302  case k_ShiftedRegister:
2303    OS << "<so_reg_reg "
2304       << RegShiftedReg.SrcReg << " "
2305       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2306       << " " << RegShiftedReg.ShiftReg << ">";
2307    break;
2308  case k_ShiftedImmediate:
2309    OS << "<so_reg_imm "
2310       << RegShiftedImm.SrcReg << " "
2311       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2312       << " #" << RegShiftedImm.ShiftImm << ">";
2313    break;
2314  case k_RotateImmediate:
2315    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2316    break;
2317  case k_BitfieldDescriptor:
2318    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2319       << ", width: " << Bitfield.Width << ">";
2320    break;
2321  case k_RegisterList:
2322  case k_DPRRegisterList:
2323  case k_SPRRegisterList: {
2324    OS << "<register_list ";
2325
2326    const SmallVectorImpl<unsigned> &RegList = getRegList();
2327    for (SmallVectorImpl<unsigned>::const_iterator
2328           I = RegList.begin(), E = RegList.end(); I != E; ) {
2329      OS << *I;
2330      if (++I < E) OS << ", ";
2331    }
2332
2333    OS << ">";
2334    break;
2335  }
2336  case k_VectorList:
2337    OS << "<vector_list " << VectorList.Count << " * "
2338       << VectorList.RegNum << ">";
2339    break;
2340  case k_VectorListAllLanes:
2341    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2342       << VectorList.RegNum << ">";
2343    break;
2344  case k_VectorListIndexed:
2345    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2346       << VectorList.Count << " * " << VectorList.RegNum << ">";
2347    break;
2348  case k_Token:
2349    OS << "'" << getToken() << "'";
2350    break;
2351  case k_VectorIndex:
2352    OS << "<vectorindex " << getVectorIndex() << ">";
2353    break;
2354  }
2355}
2356
2357/// @name Auto-generated Match Functions
2358/// {
2359
2360static unsigned MatchRegisterName(StringRef Name);
2361
2362/// }
2363
2364bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2365                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2366  StartLoc = Parser.getTok().getLoc();
2367  RegNo = tryParseRegister();
2368  EndLoc = Parser.getTok().getLoc();
2369
2370  return (RegNo == (unsigned)-1);
2371}
2372
2373/// Try to parse a register name.  The token must be an Identifier when called,
2374/// and if it is a register name the token is eaten and the register number is
2375/// returned.  Otherwise return -1.
2376///
2377int ARMAsmParser::tryParseRegister() {
2378  const AsmToken &Tok = Parser.getTok();
2379  if (Tok.isNot(AsmToken::Identifier)) return -1;
2380
2381  std::string lowerCase = Tok.getString().lower();
2382  unsigned RegNum = MatchRegisterName(lowerCase);
2383  if (!RegNum) {
2384    RegNum = StringSwitch<unsigned>(lowerCase)
2385      .Case("r13", ARM::SP)
2386      .Case("r14", ARM::LR)
2387      .Case("r15", ARM::PC)
2388      .Case("ip", ARM::R12)
2389      // Additional register name aliases for 'gas' compatibility.
2390      .Case("a1", ARM::R0)
2391      .Case("a2", ARM::R1)
2392      .Case("a3", ARM::R2)
2393      .Case("a4", ARM::R3)
2394      .Case("v1", ARM::R4)
2395      .Case("v2", ARM::R5)
2396      .Case("v3", ARM::R6)
2397      .Case("v4", ARM::R7)
2398      .Case("v5", ARM::R8)
2399      .Case("v6", ARM::R9)
2400      .Case("v7", ARM::R10)
2401      .Case("v8", ARM::R11)
2402      .Case("sb", ARM::R9)
2403      .Case("sl", ARM::R10)
2404      .Case("fp", ARM::R11)
2405      .Default(0);
2406  }
2407  if (!RegNum) {
2408    // Check for aliases registered via .req. Canonicalize to lower case.
2409    // That's more consistent since register names are case insensitive, and
2410    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2411    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2412    // If no match, return failure.
2413    if (Entry == RegisterReqs.end())
2414      return -1;
2415    Parser.Lex(); // Eat identifier token.
2416    return Entry->getValue();
2417  }
2418
2419  Parser.Lex(); // Eat identifier token.
2420
2421  return RegNum;
2422}
2423
2424// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2425// If a recoverable error occurs, return 1. If an irrecoverable error
2426// occurs, return -1. An irrecoverable error is one where tokens have been
2427// consumed in the process of trying to parse the shifter (i.e., when it is
2428// indeed a shifter operand, but malformed).
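// For example, when parsing 'add r0, r1, r2, lsl #2' the 'r2' operand has
// already been pushed; this folds it together with 'lsl #2' into a single
// shifted-register operand (illustrative).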
2429int ARMAsmParser::tryParseShiftRegister(
2430                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2431  SMLoc S = Parser.getTok().getLoc();
2432  const AsmToken &Tok = Parser.getTok();
2433  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2434
2435  std::string lowerCase = Tok.getString().lower();
2436  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2437      .Case("asl", ARM_AM::lsl)
2438      .Case("lsl", ARM_AM::lsl)
2439      .Case("lsr", ARM_AM::lsr)
2440      .Case("asr", ARM_AM::asr)
2441      .Case("ror", ARM_AM::ror)
2442      .Case("rrx", ARM_AM::rrx)
2443      .Default(ARM_AM::no_shift);
2444
2445  if (ShiftTy == ARM_AM::no_shift)
2446    return 1;
2447
2448  Parser.Lex(); // Eat the operator.
2449
2450  // The source register for the shift has already been added to the
2451  // operand list, so we need to pop it off and combine it into the shifted
2452  // register operand instead.
2453  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2454  if (!PrevOp->isReg())
2455    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2456  int SrcReg = PrevOp->getReg();
2457  int64_t Imm = 0;
2458  int ShiftReg = 0;
2459  if (ShiftTy == ARM_AM::rrx) {
2460    // RRX doesn't have an explicit shift amount. The encoder expects
2461    // the shift register to be the same as the source register. Seems odd,
2462    // but OK.
2463    ShiftReg = SrcReg;
2464  } else {
2465    // Figure out if this is shifted by a constant or a register (for non-RRX).
2466    if (Parser.getTok().is(AsmToken::Hash) ||
2467        Parser.getTok().is(AsmToken::Dollar)) {
2468      Parser.Lex(); // Eat hash.
2469      SMLoc ImmLoc = Parser.getTok().getLoc();
2470      const MCExpr *ShiftExpr = 0;
2471      if (getParser().ParseExpression(ShiftExpr)) {
2472        Error(ImmLoc, "invalid immediate shift value");
2473        return -1;
2474      }
2475      // The expression must be evaluatable as an immediate.
2476      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2477      if (!CE) {
2478        Error(ImmLoc, "invalid immediate shift value");
2479        return -1;
2480      }
2481      // Range check the immediate.
2482      // lsl, ror: 0 <= imm <= 31
2483      // lsr, asr: 0 <= imm <= 32
2484      Imm = CE->getValue();
2485      if (Imm < 0 ||
2486          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2487          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2488        Error(ImmLoc, "immediate shift value out of range");
2489        return -1;
2490      }
2491      // shift by zero is a nop. Always send it through as lsl.
2492      // ('as' compatibility)
2493      if (Imm == 0)
2494        ShiftTy = ARM_AM::lsl;
2495    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2496      ShiftReg = tryParseRegister();
2497      SMLoc L = Parser.getTok().getLoc();
2498      if (ShiftReg == -1) {
2499        Error(L, "expected immediate or register in shift operand");
2500        return -1;
2501      }
2502    } else {
2503      Error(Parser.getTok().getLoc(),
2504            "expected immediate or register in shift operand");
2505      return -1;
2506    }
2507  }
2508
2509  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2510    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2511                                                         ShiftReg, Imm,
2512                                               S, Parser.getTok().getLoc()));
2513  else
2514    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2515                                               S, Parser.getTok().getLoc()));
2516
2517  return 0;
2518}
2519
2520
2521/// Try to parse a register name. The token must be an Identifier when called.
2522/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2523/// if there is a "writeback". Returns 'true' if it's not a register.
2524///
2525/// TODO this is likely to change to allow different register types and or to
2526/// parse for a specific register type.
2527bool ARMAsmParser::
2528tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2529  SMLoc S = Parser.getTok().getLoc();
2530  int RegNo = tryParseRegister();
2531  if (RegNo == -1)
2532    return true;
2533
2534  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2535
2536  const AsmToken &ExclaimTok = Parser.getTok();
2537  if (ExclaimTok.is(AsmToken::Exclaim)) {
2538    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2539                                               ExclaimTok.getLoc()));
2540    Parser.Lex(); // Eat exclaim token
2541    return false;
2542  }
2543
2544  // Also check for an index operand. This is only legal for vector registers,
2545  // but that'll get caught OK in operand matching, so we don't need to
2546  // explicitly filter everything else out here.
2547  if (Parser.getTok().is(AsmToken::LBrac)) {
2548    SMLoc SIdx = Parser.getTok().getLoc();
2549    Parser.Lex(); // Eat left bracket token.
2550
2551    const MCExpr *ImmVal;
2552    if (getParser().ParseExpression(ImmVal))
2553      return true;
2554    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2555    if (!MCE) {
2556      TokError("immediate value expected for vector index");
2557      return true;
2558    }
2559
2560    SMLoc E = Parser.getTok().getLoc();
2561    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2562      Error(E, "']' expected");
2563      return true;
2564    }
2565
2566    Parser.Lex(); // Eat right bracket token.
2567
2568    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2569                                                     SIdx, E,
2570                                                     getContext()));
2571  }
2572
2573  return false;
2574}
2575
2576/// MatchCoprocessorOperandName - Try to match a coprocessor-related
2577/// symbolic operand name. Example: "p1", "p7", "c3",
2578/// "c5", ...
2579static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2580  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2581  // but efficient.
2582  switch (Name.size()) {
2583  default: return -1;
2584  case 2:
2585    if (Name[0] != CoprocOp)
2586      return -1;
2587    switch (Name[1]) {
2588    default:  return -1;
2589    case '0': return 0;
2590    case '1': return 1;
2591    case '2': return 2;
2592    case '3': return 3;
2593    case '4': return 4;
2594    case '5': return 5;
2595    case '6': return 6;
2596    case '7': return 7;
2597    case '8': return 8;
2598    case '9': return 9;
2599    }
2600  case 3:
2601    if (Name[0] != CoprocOp || Name[1] != '1')
2602      return -1;
2603    switch (Name[2]) {
2604    default:  return -1;
2605    case '0': return 10;
2606    case '1': return 11;
2607    case '2': return 12;
2608    case '3': return 13;
2609    case '4': return 14;
2610    case '5': return 15;
2611    }
2612  }
2613}
2614
2615/// parseITCondCode - Try to parse a condition code for an IT instruction.
2616ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2617parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2618  SMLoc S = Parser.getTok().getLoc();
2619  const AsmToken &Tok = Parser.getTok();
2620  if (!Tok.is(AsmToken::Identifier))
2621    return MatchOperand_NoMatch;
2622  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2623    .Case("eq", ARMCC::EQ)
2624    .Case("ne", ARMCC::NE)
2625    .Case("hs", ARMCC::HS)
2626    .Case("cs", ARMCC::HS)
2627    .Case("lo", ARMCC::LO)
2628    .Case("cc", ARMCC::LO)
2629    .Case("mi", ARMCC::MI)
2630    .Case("pl", ARMCC::PL)
2631    .Case("vs", ARMCC::VS)
2632    .Case("vc", ARMCC::VC)
2633    .Case("hi", ARMCC::HI)
2634    .Case("ls", ARMCC::LS)
2635    .Case("ge", ARMCC::GE)
2636    .Case("lt", ARMCC::LT)
2637    .Case("gt", ARMCC::GT)
2638    .Case("le", ARMCC::LE)
2639    .Case("al", ARMCC::AL)
2640    .Default(~0U);
2641  if (CC == ~0U)
2642    return MatchOperand_NoMatch;
2643  Parser.Lex(); // Eat the token.
2644
2645  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2646
2647  return MatchOperand_Success;
2648}
2649
2650/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2651/// token must be an Identifier when called, and if it is a coprocessor
2652/// number, the token is eaten and the operand is added to the operand list.
2653ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2654parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2655  SMLoc S = Parser.getTok().getLoc();
2656  const AsmToken &Tok = Parser.getTok();
2657  if (Tok.isNot(AsmToken::Identifier))
2658    return MatchOperand_NoMatch;
2659
2660  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2661  if (Num == -1)
2662    return MatchOperand_NoMatch;
2663
2664  Parser.Lex(); // Eat identifier token.
2665  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2666  return MatchOperand_Success;
2667}
2668
2669/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2670/// token must be an Identifier when called, and if it is a coprocessor
2671/// register, the token is eaten and the operand is added to the operand list.
2672ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2673parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2674  SMLoc S = Parser.getTok().getLoc();
2675  const AsmToken &Tok = Parser.getTok();
2676  if (Tok.isNot(AsmToken::Identifier))
2677    return MatchOperand_NoMatch;
2678
2679  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2680  if (Reg == -1)
2681    return MatchOperand_NoMatch;
2682
2683  Parser.Lex(); // Eat identifier token.
2684  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2685  return MatchOperand_Success;
2686}
2687
2688/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2689/// coproc_option : '{' imm0_255 '}'
2690ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2691parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2692  SMLoc S = Parser.getTok().getLoc();
2693
2694  // If this isn't a '{', this isn't a coprocessor immediate operand.
2695  if (Parser.getTok().isNot(AsmToken::LCurly))
2696    return MatchOperand_NoMatch;
2697  Parser.Lex(); // Eat the '{'
2698
2699  const MCExpr *Expr;
2700  SMLoc Loc = Parser.getTok().getLoc();
2701  if (getParser().ParseExpression(Expr)) {
2702    Error(Loc, "illegal expression");
2703    return MatchOperand_ParseFail;
2704  }
2705  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2706  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2707    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2708    return MatchOperand_ParseFail;
2709  }
2710  int Val = CE->getValue();
2711
2712  // Check for and consume the closing '}'
2713  if (Parser.getTok().isNot(AsmToken::RCurly))
2714    return MatchOperand_ParseFail;
2715  SMLoc E = Parser.getTok().getLoc();
2716  Parser.Lex(); // Eat the '}'
2717
2718  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2719  return MatchOperand_Success;
2720}
2721
2722// For register list parsing, we need to map from raw GPR register numbering
2723// to the enumeration values. The enumeration values aren't sorted by
2724// register number due to our using "sp", "lr" and "pc" as canonical names.
2725static unsigned getNextRegister(unsigned Reg) {
2726  // If this is a GPR, we need to do it manually, otherwise we can rely
2727  // on the sort ordering of the enumeration since the other reg-classes
2728  // are sane.
2729  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2730    return Reg + 1;
2731  switch(Reg) {
2732  default: llvm_unreachable("Invalid GPR number!");
2733  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2734  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2735  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2736  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2737  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2738  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2739  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2740  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2741  }
2742}
2743
2744// Return the low-subreg of a given Q register.
2745static unsigned getDRegFromQReg(unsigned QReg) {
2746  switch (QReg) {
2747  default: llvm_unreachable("expected a Q register!");
2748  case ARM::Q0:  return ARM::D0;
2749  case ARM::Q1:  return ARM::D2;
2750  case ARM::Q2:  return ARM::D4;
2751  case ARM::Q3:  return ARM::D6;
2752  case ARM::Q4:  return ARM::D8;
2753  case ARM::Q5:  return ARM::D10;
2754  case ARM::Q6:  return ARM::D12;
2755  case ARM::Q7:  return ARM::D14;
2756  case ARM::Q8:  return ARM::D16;
2757  case ARM::Q9:  return ARM::D18;
2758  case ARM::Q10: return ARM::D20;
2759  case ARM::Q11: return ARM::D22;
2760  case ARM::Q12: return ARM::D24;
2761  case ARM::Q13: return ARM::D26;
2762  case ARM::Q14: return ARM::D28;
2763  case ARM::Q15: return ARM::D30;
2764  }
2765}
2766
2767/// Parse a register list.
2768bool ARMAsmParser::
2769parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2770  assert(Parser.getTok().is(AsmToken::LCurly) &&
2771         "Token is not a Left Curly Brace");
2772  SMLoc S = Parser.getTok().getLoc();
2773  Parser.Lex(); // Eat '{' token.
2774  SMLoc RegLoc = Parser.getTok().getLoc();
2775
2776  // Check the first register in the list to see what register class
2777  // this is a list of.
2778  int Reg = tryParseRegister();
2779  if (Reg == -1)
2780    return Error(RegLoc, "register expected");
2781
2782  // The reglist instructions have at most 16 registers, so reserve
2783  // space for that many.
2784  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2785
2786  // Allow Q regs and just interpret them as the two D sub-registers.
2787  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2788    Reg = getDRegFromQReg(Reg);
2789    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2790    ++Reg;
2791  }
2792  const MCRegisterClass *RC;
2793  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2794    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2795  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2796    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2797  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2798    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2799  else
2800    return Error(RegLoc, "invalid register in register list");
2801
2802  // Store the register.
2803  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2804
2805  // This starts immediately after the first register token in the list,
2806  // so we can see either a comma or a minus (range separator) as a legal
2807  // next token.
2808  while (Parser.getTok().is(AsmToken::Comma) ||
2809         Parser.getTok().is(AsmToken::Minus)) {
2810    if (Parser.getTok().is(AsmToken::Minus)) {
2811      Parser.Lex(); // Eat the minus.
2812      SMLoc EndLoc = Parser.getTok().getLoc();
2813      int EndReg = tryParseRegister();
2814      if (EndReg == -1)
2815        return Error(EndLoc, "register expected");
2816      // Allow Q regs and just interpret them as the two D sub-registers.
2817      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2818        EndReg = getDRegFromQReg(EndReg) + 1;
2819      // If the register is the same as the start reg, there's nothing
2820      // more to do.
2821      if (Reg == EndReg)
2822        continue;
2823      // The register must be in the same register class as the first.
2824      if (!RC->contains(EndReg))
2825        return Error(EndLoc, "invalid register in register list");
2826      // Ranges must go from low to high.
2827      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2828        return Error(EndLoc, "bad range in register list");
2829
2830      // Add all the registers in the range to the register list.
2831      while (Reg != EndReg) {
2832        Reg = getNextRegister(Reg);
2833        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2834      }
2835      continue;
2836    }
2837    Parser.Lex(); // Eat the comma.
2838    RegLoc = Parser.getTok().getLoc();
2839    int OldReg = Reg;
2840    const AsmToken RegTok = Parser.getTok();
2841    Reg = tryParseRegister();
2842    if (Reg == -1)
2843      return Error(RegLoc, "register expected");
2844    // Allow Q regs and just interpret them as the two D sub-registers.
2845    bool isQReg = false;
2846    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2847      Reg = getDRegFromQReg(Reg);
2848      isQReg = true;
2849    }
2850    // The register must be in the same register class as the first.
2851    if (!RC->contains(Reg))
2852      return Error(RegLoc, "invalid register in register list");
2853    // List must be monotonically increasing.
2854    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2855      return Error(RegLoc, "register list not in ascending order");
2856    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2857      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2858              ") in register list");
2859      continue;
2860    }
2861    // VFP register lists must also be contiguous.
2862    // It's OK to use the enumeration values directly here, as the
2863    // VFP register classes have the enum sorted properly.
2864    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2865        Reg != OldReg + 1)
2866      return Error(RegLoc, "non-contiguous register range");
2867    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2868    if (isQReg)
2869      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2870  }
2871
2872  SMLoc E = Parser.getTok().getLoc();
2873  if (Parser.getTok().isNot(AsmToken::RCurly))
2874    return Error(E, "'}' expected");
2875  Parser.Lex(); // Eat '}' token.
2876
2877  // Push the register list operand.
2878  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2879
2880  // The ARM system instruction variants for LDM/STM have a '^' token here.
2881  if (Parser.getTok().is(AsmToken::Caret)) {
2882    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2883    Parser.Lex(); // Eat '^' token.
2884  }
2885
2886  return false;
2887}
2888
2889// Helper function to parse the lane index for vector lists.
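// For example, given "d3[1]" the bracketed suffix parses as IndexedLane with
// Index == 1, "d3[]" parses as AllLanes, and a plain register with no
// brackets at all parses as NoLanes.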
2890ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2891parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2892  Index = 0; // Always return a defined index value.
2893  if (Parser.getTok().is(AsmToken::LBrac)) {
2894    Parser.Lex(); // Eat the '['.
2895    if (Parser.getTok().is(AsmToken::RBrac)) {
2896      // "Dn[]" is the 'all lanes' syntax.
2897      LaneKind = AllLanes;
2898      Parser.Lex(); // Eat the ']'.
2899      return MatchOperand_Success;
2900    }
2901    const MCExpr *LaneIndex;
2902    SMLoc Loc = Parser.getTok().getLoc();
2903    if (getParser().ParseExpression(LaneIndex)) {
2904      Error(Loc, "illegal expression");
2905      return MatchOperand_ParseFail;
2906    }
2907    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2908    if (!CE) {
2909      Error(Loc, "lane index must be empty or an integer");
2910      return MatchOperand_ParseFail;
2911    }
2912    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2913      Error(Parser.getTok().getLoc(), "']' expected");
2914      return MatchOperand_ParseFail;
2915    }
2916    Parser.Lex(); // Eat the ']'.
2917    int64_t Val = CE->getValue();
2918
2919    // FIXME: Make this range check context sensitive for .8, .16, .32.
2920    if (Val < 0 || Val > 7) {
2921      Error(Parser.getTok().getLoc(), "lane index out of range");
2922      return MatchOperand_ParseFail;
2923    }
2924    Index = Val;
2925    LaneKind = IndexedLane;
2926    return MatchOperand_Success;
2927  }
2928  LaneKind = NoLanes;
2929  return MatchOperand_Success;
2930}
2931
2932// parse a vector register list
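// For example, "{d0, d1, d2, d3}" and "{d0-d3}" are four-register single
// spaced lists, "{q0, q1}" is interpreted as the same four D registers,
// "{d0, d2}" is a double spaced pair, and a lane suffix as in "{d0[1], d1[1]}"
// must match across all entries. As noted below, a bare "d0" or "q0" without
// braces is also accepted as a gas extension.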
2933ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2934parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2935  VectorLaneTy LaneKind;
2936  unsigned LaneIndex;
2937  SMLoc S = Parser.getTok().getLoc();
2938  // As an extension (to match gas), support a plain D register or Q register
2939  // (without enclosing curly braces) as a single or double entry list,
2940  // respectively.
2941  if (Parser.getTok().is(AsmToken::Identifier)) {
2942    int Reg = tryParseRegister();
2943    if (Reg == -1)
2944      return MatchOperand_NoMatch;
2945    SMLoc E = Parser.getTok().getLoc();
2946    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2947      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2948      if (Res != MatchOperand_Success)
2949        return Res;
2950      switch (LaneKind) {
2951      case NoLanes:
2952        E = Parser.getTok().getLoc();
2953        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2954        break;
2955      case AllLanes:
2956        E = Parser.getTok().getLoc();
2957        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2958                                                                S, E));
2959        break;
2960      case IndexedLane:
2961        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2962                                                               LaneIndex,
2963                                                               false, S, E));
2964        break;
2965      }
2966      return MatchOperand_Success;
2967    }
2968    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2969      Reg = getDRegFromQReg(Reg);
2970      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2971      if (Res != MatchOperand_Success)
2972        return Res;
2973      switch (LaneKind) {
2974      case NoLanes:
2975        E = Parser.getTok().getLoc();
2976        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2977        break;
2978      case AllLanes:
2979        E = Parser.getTok().getLoc();
2980        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2981                                                                S, E));
2982        break;
2983      case IndexedLane:
2984        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2985                                                               LaneIndex,
2986                                                               false, S, E));
2987        break;
2988      }
2989      return MatchOperand_Success;
2990    }
2991    Error(S, "vector register expected");
2992    return MatchOperand_ParseFail;
2993  }
2994
2995  if (Parser.getTok().isNot(AsmToken::LCurly))
2996    return MatchOperand_NoMatch;
2997
2998  Parser.Lex(); // Eat '{' token.
2999  SMLoc RegLoc = Parser.getTok().getLoc();
3000
3001  int Reg = tryParseRegister();
3002  if (Reg == -1) {
3003    Error(RegLoc, "register expected");
3004    return MatchOperand_ParseFail;
3005  }
3006  unsigned Count = 1;
3007  int Spacing = 0;
3008  unsigned FirstReg = Reg;
3009  // The list is of D registers, but we also allow Q regs and just interpret
3010  // them as the two D sub-registers.
3011  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3012    FirstReg = Reg = getDRegFromQReg(Reg);
3013    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3014                 // it's ambiguous with four-register single spaced.
3015    ++Reg;
3016    ++Count;
3017  }
3018  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3019    return MatchOperand_ParseFail;
3020
3021  while (Parser.getTok().is(AsmToken::Comma) ||
3022         Parser.getTok().is(AsmToken::Minus)) {
3023    if (Parser.getTok().is(AsmToken::Minus)) {
3024      if (!Spacing)
3025        Spacing = 1; // Register range implies a single spaced list.
3026      else if (Spacing == 2) {
3027        Error(Parser.getTok().getLoc(),
3028              "sequential registers in double spaced list");
3029        return MatchOperand_ParseFail;
3030      }
3031      Parser.Lex(); // Eat the minus.
3032      SMLoc EndLoc = Parser.getTok().getLoc();
3033      int EndReg = tryParseRegister();
3034      if (EndReg == -1) {
3035        Error(EndLoc, "register expected");
3036        return MatchOperand_ParseFail;
3037      }
3038      // Allow Q regs and just interpret them as the two D sub-registers.
3039      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3040        EndReg = getDRegFromQReg(EndReg) + 1;
3041      // If the register is the same as the start reg, there's nothing
3042      // more to do.
3043      if (Reg == EndReg)
3044        continue;
3045      // The register must be in the same register class as the first.
3046      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3047        Error(EndLoc, "invalid register in register list");
3048        return MatchOperand_ParseFail;
3049      }
3050      // Ranges must go from low to high.
3051      if (Reg > EndReg) {
3052        Error(EndLoc, "bad range in register list");
3053        return MatchOperand_ParseFail;
3054      }
3055      // Parse the lane specifier if present.
3056      VectorLaneTy NextLaneKind;
3057      unsigned NextLaneIndex;
3058      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3059        return MatchOperand_ParseFail;
3060      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3061        Error(EndLoc, "mismatched lane index in register list");
3062        return MatchOperand_ParseFail;
3063      }
3064      EndLoc = Parser.getTok().getLoc();
3065
3066      // Add all the registers in the range to the register list.
3067      Count += EndReg - Reg;
3068      Reg = EndReg;
3069      continue;
3070    }
3071    Parser.Lex(); // Eat the comma.
3072    RegLoc = Parser.getTok().getLoc();
3073    int OldReg = Reg;
3074    Reg = tryParseRegister();
3075    if (Reg == -1) {
3076      Error(RegLoc, "register expected");
3077      return MatchOperand_ParseFail;
3078    }
3079    // Vector register lists must be contiguous.
3080    // It's OK to use the enumeration values directly here, as the
3081    // VFP register classes have the enum sorted properly.
3082    //
3083    // The list is of D registers, but we also allow Q regs and just interpret
3084    // them as the two D sub-registers.
3085    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3086      if (!Spacing)
3087        Spacing = 1; // Register range implies a single spaced list.
3088      else if (Spacing == 2) {
3089        Error(RegLoc,
3090              "invalid register in double-spaced list (must be 'D' register')");
3091        return MatchOperand_ParseFail;
3092      }
3093      Reg = getDRegFromQReg(Reg);
3094      if (Reg != OldReg + 1) {
3095        Error(RegLoc, "non-contiguous register range");
3096        return MatchOperand_ParseFail;
3097      }
3098      ++Reg;
3099      Count += 2;
3100      // Parse the lane specifier if present.
3101      VectorLaneTy NextLaneKind;
3102      unsigned NextLaneIndex;
3103      SMLoc EndLoc = Parser.getTok().getLoc();
3104      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3105        return MatchOperand_ParseFail;
3106      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3107        Error(EndLoc, "mismatched lane index in register list");
3108        return MatchOperand_ParseFail;
3109      }
3110      continue;
3111    }
3112    // Normal D register.
3113    // Figure out the register spacing (single or double) of the list if
3114    // we don't know it already.
3115    if (!Spacing)
3116      Spacing = 1 + (Reg == OldReg + 2);
3117
3118    // Just check that it's contiguous and keep going.
3119    if (Reg != OldReg + Spacing) {
3120      Error(RegLoc, "non-contiguous register range");
3121      return MatchOperand_ParseFail;
3122    }
3123    ++Count;
3124    // Parse the lane specifier if present.
3125    VectorLaneTy NextLaneKind;
3126    unsigned NextLaneIndex;
3127    SMLoc EndLoc = Parser.getTok().getLoc();
3128    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3129      return MatchOperand_ParseFail;
3130    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3131      Error(EndLoc, "mismatched lane index in register list");
3132      return MatchOperand_ParseFail;
3133    }
3134  }
3135
3136  SMLoc E = Parser.getTok().getLoc();
3137  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3138    Error(E, "'}' expected");
3139    return MatchOperand_ParseFail;
3140  }
3141  Parser.Lex(); // Eat '}' token.
3142
3143  switch (LaneKind) {
3144  case NoLanes:
3145    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3146                                                    (Spacing == 2), S, E));
3147    break;
3148  case AllLanes:
3149    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3150                                                            (Spacing == 2),
3151                                                            S, E));
3152    break;
3153  case IndexedLane:
3154    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3155                                                           LaneIndex,
3156                                                           (Spacing == 2),
3157                                                           S, E));
3158    break;
3159  }
3160  return MatchOperand_Success;
3161}
3162
3163/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
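/// For example, the "ish" in "dmb ish" or the "sy" in "dsb sy"; the shorter
/// spellings ("sh", "st", "un", ...) are accepted as aliases for the
/// corresponding full option names.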
3164ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3165parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3166  SMLoc S = Parser.getTok().getLoc();
3167  const AsmToken &Tok = Parser.getTok();
3168  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3169  StringRef OptStr = Tok.getString();
3170
3171  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3172    .Case("sy",    ARM_MB::SY)
3173    .Case("st",    ARM_MB::ST)
3174    .Case("sh",    ARM_MB::ISH)
3175    .Case("ish",   ARM_MB::ISH)
3176    .Case("shst",  ARM_MB::ISHST)
3177    .Case("ishst", ARM_MB::ISHST)
3178    .Case("nsh",   ARM_MB::NSH)
3179    .Case("un",    ARM_MB::NSH)
3180    .Case("nshst", ARM_MB::NSHST)
3181    .Case("unst",  ARM_MB::NSHST)
3182    .Case("osh",   ARM_MB::OSH)
3183    .Case("oshst", ARM_MB::OSHST)
3184    .Default(~0U);
3185
3186  if (Opt == ~0U)
3187    return MatchOperand_NoMatch;
3188
3189  Parser.Lex(); // Eat identifier token.
3190  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3191  return MatchOperand_Success;
3192}
3193
3194/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
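/// For example, the "aif" in "cpsid aif": any combination of 'a', 'i' and 'f'
/// with each letter appearing at most once, or "none" for an empty flag set.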
3195ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3196parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3197  SMLoc S = Parser.getTok().getLoc();
3198  const AsmToken &Tok = Parser.getTok();
3199  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3200  StringRef IFlagsStr = Tok.getString();
3201
3202  // An iflags string of "none" is interpreted to mean that none of the AIF
3203  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3204  unsigned IFlags = 0;
3205  if (IFlagsStr != "none") {
3206    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3207      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3208        .Case("a", ARM_PROC::A)
3209        .Case("i", ARM_PROC::I)
3210        .Case("f", ARM_PROC::F)
3211        .Default(~0U);
3212
3213      // If some specific iflag is already set, it means that some letter is
3214      // present more than once, which is not acceptable.
3215      if (Flag == ~0U || (IFlags & Flag))
3216        return MatchOperand_NoMatch;
3217
3218      IFlags |= Flag;
3219    }
3220  }
3221
3222  Parser.Lex(); // Eat identifier token.
3223  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3224  return MatchOperand_Success;
3225}
3226
3227/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
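/// For example, the "apsr_nzcvq" in "msr apsr_nzcvq, r0", the "cpsr_fc" in
/// "msr cpsr_fc, r0", or, for M-class cores, a bare special register name
/// such as "primask".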
3228ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3229parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3230  SMLoc S = Parser.getTok().getLoc();
3231  const AsmToken &Tok = Parser.getTok();
3232  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3233  StringRef Mask = Tok.getString();
3234
3235  if (isMClass()) {
3236    // See ARMv6-M 10.1.1
3237    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3238      .Case("apsr", 0)
3239      .Case("iapsr", 1)
3240      .Case("eapsr", 2)
3241      .Case("xpsr", 3)
3242      .Case("ipsr", 5)
3243      .Case("epsr", 6)
3244      .Case("iepsr", 7)
3245      .Case("msp", 8)
3246      .Case("psp", 9)
3247      .Case("primask", 16)
3248      .Case("basepri", 17)
3249      .Case("basepri_max", 18)
3250      .Case("faultmask", 19)
3251      .Case("control", 20)
3252      .Default(~0U);
3253
3254    if (FlagsVal == ~0U)
3255      return MatchOperand_NoMatch;
3256
3257    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3258      // basepri, basepri_max and faultmask only valid for V7m.
3259      return MatchOperand_NoMatch;
3260
3261    Parser.Lex(); // Eat identifier token.
3262    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3263    return MatchOperand_Success;
3264  }
3265
3266  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3267  size_t Start = 0, Next = Mask.find('_');
3268  StringRef Flags = "";
3269  std::string SpecReg = Mask.slice(Start, Next).lower();
3270  if (Next != StringRef::npos)
3271    Flags = Mask.slice(Next+1, Mask.size());
3272
3273  // FlagsVal contains the complete mask:
3274  // 3-0: Mask
3275  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3276  unsigned FlagsVal = 0;
3277
3278  if (SpecReg == "apsr") {
3279    FlagsVal = StringSwitch<unsigned>(Flags)
3280    .Case("nzcvq",  0x8) // same as CPSR_f
3281    .Case("g",      0x4) // same as CPSR_s
3282    .Case("nzcvqg", 0xc) // same as CPSR_fs
3283    .Default(~0U);
3284
3285    if (FlagsVal == ~0U) {
3286      if (!Flags.empty())
3287        return MatchOperand_NoMatch;
3288      else
3289        FlagsVal = 8; // No flag
3290    }
3291  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3292    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3293      Flags = "fc";
3294    for (int i = 0, e = Flags.size(); i != e; ++i) {
3295      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3296      .Case("c", 1)
3297      .Case("x", 2)
3298      .Case("s", 4)
3299      .Case("f", 8)
3300      .Default(~0U);
3301
3302      // If some specific flag is already set, it means that some letter is
3303      // present more than once, which is not acceptable.
3304      if (Flag == ~0U || (FlagsVal & Flag))
3305        return MatchOperand_NoMatch;
3306      FlagsVal |= Flag;
3307    }
3308  } else // No match for special register.
3309    return MatchOperand_NoMatch;
3310
3311  // Special register without flags is NOT equivalent to "fc" flags.
3312  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3313  // two lines would enable gas compatibility at the expense of breaking
3314  // round-tripping.
3315  //
3316  // if (!FlagsVal)
3317  //  FlagsVal = 0x9;
3318
3319  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3320  if (SpecReg == "spsr")
3321    FlagsVal |= 16;
3322
3323  Parser.Lex(); // Eat identifier token.
3324  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3325  return MatchOperand_Success;
3326}
3327
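/// parsePKHImm - Parse the shift operand of the PKH instructions, e.g. the
/// "lsl #8" in "pkhbt r0, r1, r2, lsl #8" (illustrative). Op names the
/// required shift operator ("lsl" or "asr") and [Low, High] bounds the
/// accepted shift amount.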
3328ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3329parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3330            int Low, int High) {
3331  const AsmToken &Tok = Parser.getTok();
3332  if (Tok.isNot(AsmToken::Identifier)) {
3333    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3334    return MatchOperand_ParseFail;
3335  }
3336  StringRef ShiftName = Tok.getString();
3337  std::string LowerOp = Op.lower();
3338  std::string UpperOp = Op.upper();
3339  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3340    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3341    return MatchOperand_ParseFail;
3342  }
3343  Parser.Lex(); // Eat shift type token.
3344
3345  // There must be a '#' and a shift amount.
3346  if (Parser.getTok().isNot(AsmToken::Hash) &&
3347      Parser.getTok().isNot(AsmToken::Dollar)) {
3348    Error(Parser.getTok().getLoc(), "'#' expected");
3349    return MatchOperand_ParseFail;
3350  }
3351  Parser.Lex(); // Eat hash token.
3352
3353  const MCExpr *ShiftAmount;
3354  SMLoc Loc = Parser.getTok().getLoc();
3355  if (getParser().ParseExpression(ShiftAmount)) {
3356    Error(Loc, "illegal expression");
3357    return MatchOperand_ParseFail;
3358  }
3359  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3360  if (!CE) {
3361    Error(Loc, "constant expression expected");
3362    return MatchOperand_ParseFail;
3363  }
3364  int Val = CE->getValue();
3365  if (Val < Low || Val > High) {
3366    Error(Loc, "immediate value out of range");
3367    return MatchOperand_ParseFail;
3368  }
3369
3370  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3371
3372  return MatchOperand_Success;
3373}
3374
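/// parseSetEndImm - Parse the endianness operand of the SETEND instruction,
/// i.e. the "be" or "le" in "setend be" / "setend le"; "be" encodes as 1 and
/// "le" as 0.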
3375ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3376parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3377  const AsmToken &Tok = Parser.getTok();
3378  SMLoc S = Tok.getLoc();
3379  if (Tok.isNot(AsmToken::Identifier)) {
3380    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3381    return MatchOperand_ParseFail;
3382  }
3383  int Val = StringSwitch<int>(Tok.getString())
3384    .Case("be", 1)
3385    .Case("le", 0)
3386    .Default(-1);
3387  Parser.Lex(); // Eat the token.
3388
3389  if (Val == -1) {
3390    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3391    return MatchOperand_ParseFail;
3392  }
3393  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3394                                                                  getContext()),
3395                                           S, Parser.getTok().getLoc()));
3396  return MatchOperand_Success;
3397}
3398
3399/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3400/// instructions. Legal values are:
3401///     lsl #n  'n' in [0,31]
3402///     asr #n  'n' in [1,32]
3403///             n == 32 encoded as n == 0.
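/// For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4" (illustrative).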
3404ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3405parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3406  const AsmToken &Tok = Parser.getTok();
3407  SMLoc S = Tok.getLoc();
3408  if (Tok.isNot(AsmToken::Identifier)) {
3409    Error(S, "shift operator 'asr' or 'lsl' expected");
3410    return MatchOperand_ParseFail;
3411  }
3412  StringRef ShiftName = Tok.getString();
3413  bool isASR;
3414  if (ShiftName == "lsl" || ShiftName == "LSL")
3415    isASR = false;
3416  else if (ShiftName == "asr" || ShiftName == "ASR")
3417    isASR = true;
3418  else {
3419    Error(S, "shift operator 'asr' or 'lsl' expected");
3420    return MatchOperand_ParseFail;
3421  }
3422  Parser.Lex(); // Eat the operator.
3423
3424  // A '#' and a shift amount.
3425  if (Parser.getTok().isNot(AsmToken::Hash) &&
3426      Parser.getTok().isNot(AsmToken::Dollar)) {
3427    Error(Parser.getTok().getLoc(), "'#' expected");
3428    return MatchOperand_ParseFail;
3429  }
3430  Parser.Lex(); // Eat hash token.
3431
3432  const MCExpr *ShiftAmount;
3433  SMLoc E = Parser.getTok().getLoc();
3434  if (getParser().ParseExpression(ShiftAmount)) {
3435    Error(E, "malformed shift expression");
3436    return MatchOperand_ParseFail;
3437  }
3438  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3439  if (!CE) {
3440    Error(E, "shift amount must be an immediate");
3441    return MatchOperand_ParseFail;
3442  }
3443
3444  int64_t Val = CE->getValue();
3445  if (isASR) {
3446    // Shift amount must be in [1,32]
3447    if (Val < 1 || Val > 32) {
3448      Error(E, "'asr' shift amount must be in range [1,32]");
3449      return MatchOperand_ParseFail;
3450    }
3451    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3452    if (isThumb() && Val == 32) {
3453      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3454      return MatchOperand_ParseFail;
3455    }
3456    if (Val == 32) Val = 0;
3457  } else {
3458    // Shift amount must be in [0,31]
3459    if (Val < 0 || Val > 31) {
3460      Error(E, "'lsl' shift amount must be in range [0,31]");
3461      return MatchOperand_ParseFail;
3462    }
3463  }
3464
3465  E = Parser.getTok().getLoc();
3466  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3467
3468  return MatchOperand_Success;
3469}
3470
3471/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3472/// of instructions. Legal values are:
3473///     ror #n  'n' in {0, 8, 16, 24}
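/// For example, the "ror #8" in "sxtb r0, r1, ror #8" (illustrative).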
3474ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3475parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3476  const AsmToken &Tok = Parser.getTok();
3477  SMLoc S = Tok.getLoc();
3478  if (Tok.isNot(AsmToken::Identifier))
3479    return MatchOperand_NoMatch;
3480  StringRef ShiftName = Tok.getString();
3481  if (ShiftName != "ror" && ShiftName != "ROR")
3482    return MatchOperand_NoMatch;
3483  Parser.Lex(); // Eat the operator.
3484
3485  // A '#' and a rotate amount.
3486  if (Parser.getTok().isNot(AsmToken::Hash) &&
3487      Parser.getTok().isNot(AsmToken::Dollar)) {
3488    Error(Parser.getTok().getLoc(), "'#' expected");
3489    return MatchOperand_ParseFail;
3490  }
3491  Parser.Lex(); // Eat hash token.
3492
3493  const MCExpr *ShiftAmount;
3494  SMLoc E = Parser.getTok().getLoc();
3495  if (getParser().ParseExpression(ShiftAmount)) {
3496    Error(E, "malformed rotate expression");
3497    return MatchOperand_ParseFail;
3498  }
3499  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3500  if (!CE) {
3501    Error(E, "rotate amount must be an immediate");
3502    return MatchOperand_ParseFail;
3503  }
3504
3505  int64_t Val = CE->getValue();
3506  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3507  // normally, zero is represented in asm by omitting the rotate operand
3508  // entirely.
3509  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3510    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3511    return MatchOperand_ParseFail;
3512  }
3513
3514  E = Parser.getTok().getLoc();
3515  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3516
3517  return MatchOperand_Success;
3518}
3519
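/// parseBitfield - Parse the "#lsb, #width" operand pair used by BFC/BFI-style
/// bitfield instructions, e.g. the "#8, #4" in "bfi r0, r1, #8, #4"
/// (illustrative). The LSB must be in [0,31] and the width in [1,32-lsb].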
3520ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3521parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3522  SMLoc S = Parser.getTok().getLoc();
3523  // The bitfield descriptor is really two operands, the LSB and the width.
3524  if (Parser.getTok().isNot(AsmToken::Hash) &&
3525      Parser.getTok().isNot(AsmToken::Dollar)) {
3526    Error(Parser.getTok().getLoc(), "'#' expected");
3527    return MatchOperand_ParseFail;
3528  }
3529  Parser.Lex(); // Eat hash token.
3530
3531  const MCExpr *LSBExpr;
3532  SMLoc E = Parser.getTok().getLoc();
3533  if (getParser().ParseExpression(LSBExpr)) {
3534    Error(E, "malformed immediate expression");
3535    return MatchOperand_ParseFail;
3536  }
3537  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3538  if (!CE) {
3539    Error(E, "'lsb' operand must be an immediate");
3540    return MatchOperand_ParseFail;
3541  }
3542
3543  int64_t LSB = CE->getValue();
3544  // The LSB must be in the range [0,31]
3545  if (LSB < 0 || LSB > 31) {
3546    Error(E, "'lsb' operand must be in the range [0,31]");
3547    return MatchOperand_ParseFail;
3548  }
3549  E = Parser.getTok().getLoc();
3550
3551  // Expect another immediate operand.
3552  if (Parser.getTok().isNot(AsmToken::Comma)) {
3553    Error(Parser.getTok().getLoc(), "too few operands");
3554    return MatchOperand_ParseFail;
3555  }
3556  Parser.Lex(); // Eat comma token.
3557  if (Parser.getTok().isNot(AsmToken::Hash) &&
3558      Parser.getTok().isNot(AsmToken::Dollar)) {
3559    Error(Parser.getTok().getLoc(), "'#' expected");
3560    return MatchOperand_ParseFail;
3561  }
3562  Parser.Lex(); // Eat hash token.
3563
3564  const MCExpr *WidthExpr;
3565  if (getParser().ParseExpression(WidthExpr)) {
3566    Error(E, "malformed immediate expression");
3567    return MatchOperand_ParseFail;
3568  }
3569  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3570  if (!CE) {
3571    Error(E, "'width' operand must be an immediate");
3572    return MatchOperand_ParseFail;
3573  }
3574
3575  int64_t Width = CE->getValue();
3576  // The width must be in the range [1,32-lsb]
3577  if (Width < 1 || Width > 32 - LSB) {
3578    Error(E, "'width' operand must be in the range [1,32-lsb]");
3579    return MatchOperand_ParseFail;
3580  }
3581  E = Parser.getTok().getLoc();
3582
3583  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3584
3585  return MatchOperand_Success;
3586}
3587
3588ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3589parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3590  // Check for a post-index addressing register operand. Specifically:
3591  // postidx_reg := '+' register {, shift}
3592  //              | '-' register {, shift}
3593  //              | register {, shift}
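  // For example, the "r2, lsl #2" in "str r0, [r1], r2, lsl #2" or the "-r2"
  // in "ldr r0, [r1], -r2" (illustrative).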
3594
3595  // This method must return MatchOperand_NoMatch without consuming any tokens
3596  // in the case where there is no match, as other alternatives are handled by
3597  // other parse methods.
3598  AsmToken Tok = Parser.getTok();
3599  SMLoc S = Tok.getLoc();
3600  bool haveEaten = false;
3601  bool isAdd = true;
3602  int Reg = -1;
3603  if (Tok.is(AsmToken::Plus)) {
3604    Parser.Lex(); // Eat the '+' token.
3605    haveEaten = true;
3606  } else if (Tok.is(AsmToken::Minus)) {
3607    Parser.Lex(); // Eat the '-' token.
3608    isAdd = false;
3609    haveEaten = true;
3610  }
3611  if (Parser.getTok().is(AsmToken::Identifier))
3612    Reg = tryParseRegister();
3613  if (Reg == -1) {
3614    if (!haveEaten)
3615      return MatchOperand_NoMatch;
3616    Error(Parser.getTok().getLoc(), "register expected");
3617    return MatchOperand_ParseFail;
3618  }
3619  SMLoc E = Parser.getTok().getLoc();
3620
3621  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3622  unsigned ShiftImm = 0;
3623  if (Parser.getTok().is(AsmToken::Comma)) {
3624    Parser.Lex(); // Eat the ','.
3625    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3626      return MatchOperand_ParseFail;
3627  }
3628
3629  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3630                                                  ShiftImm, S, E));
3631
3632  return MatchOperand_Success;
3633}
3634
3635ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3636parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3637  // Check for a post-index addressing register operand. Specifically:
3638  // am3offset := '+' register
3639  //              | '-' register
3640  //              | register
3641  //              | # imm
3642  //              | # + imm
3643  //              | # - imm
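  // For example, the "#8" in "ldrd r0, r1, [r2], #8" or the "-r3" in
  // "strh r0, [r1], -r3" (illustrative).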
3644
3645  // This method must return MatchOperand_NoMatch without consuming any tokens
3646  // in the case where there is no match, as other alternatives are handled by
3647  // other parse methods.
3648  AsmToken Tok = Parser.getTok();
3649  SMLoc S = Tok.getLoc();
3650
3651  // Do immediates first, as we always parse those if we have a '#'.
3652  if (Parser.getTok().is(AsmToken::Hash) ||
3653      Parser.getTok().is(AsmToken::Dollar)) {
3654    Parser.Lex(); // Eat the '#'.
3655    // Explicitly look for a '-', as we need to encode negative zero
3656    // differently.
3657    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3658    const MCExpr *Offset;
3659    if (getParser().ParseExpression(Offset))
3660      return MatchOperand_ParseFail;
3661    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3662    if (!CE) {
3663      Error(S, "constant expression expected");
3664      return MatchOperand_ParseFail;
3665    }
3666    SMLoc E = Tok.getLoc();
3667    // Negative zero is encoded as the flag value INT32_MIN.
3668    int32_t Val = CE->getValue();
3669    if (isNegative && Val == 0)
3670      Val = INT32_MIN;
3671
3672    Operands.push_back(
3673      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3674
3675    return MatchOperand_Success;
3676  }
3677
3678
3679  bool haveEaten = false;
3680  bool isAdd = true;
3681  int Reg = -1;
3682  if (Tok.is(AsmToken::Plus)) {
3683    Parser.Lex(); // Eat the '+' token.
3684    haveEaten = true;
3685  } else if (Tok.is(AsmToken::Minus)) {
3686    Parser.Lex(); // Eat the '-' token.
3687    isAdd = false;
3688    haveEaten = true;
3689  }
3690  if (Parser.getTok().is(AsmToken::Identifier))
3691    Reg = tryParseRegister();
3692  if (Reg == -1) {
3693    if (!haveEaten)
3694      return MatchOperand_NoMatch;
3695    Error(Parser.getTok().getLoc(), "register expected");
3696    return MatchOperand_ParseFail;
3697  }
3698  SMLoc E = Parser.getTok().getLoc();
3699
3700  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3701                                                  0, S, E));
3702
3703  return MatchOperand_Success;
3704}
3705
3706/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3707/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3708/// when they refer to multiple MIOperands inside a single one.
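/// (This and the other cvt* routines below follow the same pattern: the parsed
/// operands are emitted in the order the MCInst expects, with a dummy zero
/// operand inserted as a placeholder for the tied writeback register.)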
3709bool ARMAsmParser::
3710cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3711             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3712  // Rt, Rt2
3713  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3714  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3715  // Create a writeback register dummy placeholder.
3716  Inst.addOperand(MCOperand::CreateReg(0));
3717  // addr
3718  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3719  // pred
3720  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3721  return true;
3722}
3723
3724/// cvtT2StrdPre - Convert parsed operands to MCInst.
3725/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3726/// when they refer to multiple MIOperands inside a single one.
3727bool ARMAsmParser::
3728cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3729             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3730  // Create a writeback register dummy placeholder.
3731  Inst.addOperand(MCOperand::CreateReg(0));
3732  // Rt, Rt2
3733  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3734  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3735  // addr
3736  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3737  // pred
3738  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3739  return true;
3740}
3741
3742/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3743/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3744/// when they refer to multiple MIOperands inside a single one.
3745bool ARMAsmParser::
3746cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3747                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3748  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3749
3750  // Create a writeback register dummy placeholder.
3751  Inst.addOperand(MCOperand::CreateImm(0));
3752
3753  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3754  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3755  return true;
3756}
3757
3758/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3759/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3760/// when they refer to multiple MIOperands inside a single one.
3761bool ARMAsmParser::
3762cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3763                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3764  // Create a writeback register dummy placeholder.
3765  Inst.addOperand(MCOperand::CreateImm(0));
3766  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3767  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3768  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3769  return true;
3770}
3771
3772/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3773/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3774/// when they refer to multiple MIOperands inside a single one.
3775bool ARMAsmParser::
3776cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3777                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3778  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3779
3780  // Create a writeback register dummy placeholder.
3781  Inst.addOperand(MCOperand::CreateImm(0));
3782
3783  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3784  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3785  return true;
3786}
3787
3788/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3789/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3790/// when they refer to multiple MIOperands inside a single one.
3791bool ARMAsmParser::
3792cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3793                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3794  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3795
3796  // Create a writeback register dummy placeholder.
3797  Inst.addOperand(MCOperand::CreateImm(0));
3798
3799  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3800  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3801  return true;
3802}
3803
3804
3805/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3806/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3807/// when they refer to multiple MIOperands inside a single one.
3808bool ARMAsmParser::
3809cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3810                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3811  // Create a writeback register dummy placeholder.
3812  Inst.addOperand(MCOperand::CreateImm(0));
3813  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3814  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3815  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3816  return true;
3817}
3818
3819/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3820/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3821/// when they refer to multiple MIOperands inside a single one.
3822bool ARMAsmParser::
3823cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3824                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3825  // Create a writeback register dummy placeholder.
3826  Inst.addOperand(MCOperand::CreateImm(0));
3827  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3828  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3829  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3830  return true;
3831}
3832
3833/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3834/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3835/// when they refer to multiple MIOperands inside a single one.
3836bool ARMAsmParser::
3837cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3838                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3839  // Create a writeback register dummy placeholder.
3840  Inst.addOperand(MCOperand::CreateImm(0));
3841  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3842  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3843  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3844  return true;
3845}
3846
3847/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3848/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3849/// when they refer to multiple MIOperands inside a single one.
3850bool ARMAsmParser::
3851cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3852                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3853  // Rt
3854  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3855  // Create a writeback register dummy placeholder.
3856  Inst.addOperand(MCOperand::CreateImm(0));
3857  // addr
3858  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3859  // offset
3860  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3861  // pred
3862  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3863  return true;
3864}
3865
3866/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3867/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3868/// when they refer to multiple MIOperands inside a single one.
3869bool ARMAsmParser::
3870cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3871                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3872  // Rt
3873  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3874  // Create a writeback register dummy placeholder.
3875  Inst.addOperand(MCOperand::CreateImm(0));
3876  // addr
3877  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3878  // offset
3879  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3880  // pred
3881  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3882  return true;
3883}
3884
3885/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3886/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3887/// when they refer to multiple MIOperands inside a single one.
3888bool ARMAsmParser::
3889cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3890                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3891  // Create a writeback register dummy placeholder.
3892  Inst.addOperand(MCOperand::CreateImm(0));
3893  // Rt
3894  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3895  // addr
3896  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3897  // offset
3898  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3899  // pred
3900  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3901  return true;
3902}
3903
3904/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3905/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3906/// when they refer to multiple MIOperands inside a single one.
3907bool ARMAsmParser::
3908cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3909                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3910  // Create a writeback register dummy placeholder.
3911  Inst.addOperand(MCOperand::CreateImm(0));
3912  // Rt
3913  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3914  // addr
3915  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3916  // offset
3917  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3918  // pred
3919  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3920  return true;
3921}
3922
3923/// cvtLdrdPre - Convert parsed operands to MCInst.
3924/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3925/// when they refer to multiple MIOperands inside a single one.
3926bool ARMAsmParser::
3927cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3928           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3929  // Rt, Rt2
3930  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3931  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3932  // Create a writeback register dummy placeholder.
3933  Inst.addOperand(MCOperand::CreateImm(0));
3934  // addr
3935  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3936  // pred
3937  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3938  return true;
3939}
3940
3941/// cvtStrdPre - Convert parsed operands to MCInst.
3942/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3943/// when they refer to multiple MIOperands inside a single one.
3944bool ARMAsmParser::
3945cvtStrdPre(MCInst &Inst, unsigned Opcode,
3946           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3947  // Create a writeback register dummy placeholder.
3948  Inst.addOperand(MCOperand::CreateImm(0));
3949  // Rt, Rt2
3950  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3951  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3952  // addr
3953  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3954  // pred
3955  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3956  return true;
3957}
3958
3959/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3960/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3961/// when they refer to multiple MIOperands inside a single one.
3962bool ARMAsmParser::
3963cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3964                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3965  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3966  // Create a writeback register dummy placeholder.
3967  Inst.addOperand(MCOperand::CreateImm(0));
3968  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3969  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3970  return true;
3971}
3972
3973/// cvtThumbMultiply - Convert parsed operands to MCInst.
3974/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3975/// when they refer to multiple MIOperands inside a single one.
3976bool ARMAsmParser::
3977cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3978           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3979  // The second source operand must be the same register as the destination
3980  // operand.
3981  if (Operands.size() == 6 &&
3982      (((ARMOperand*)Operands[3])->getReg() !=
3983       ((ARMOperand*)Operands[5])->getReg()) &&
3984      (((ARMOperand*)Operands[3])->getReg() !=
3985       ((ARMOperand*)Operands[4])->getReg())) {
3986    Error(Operands[3]->getStartLoc(),
3987          "destination register must match source register");
3988    return false;
3989  }
3990  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3991  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3992  // If we have a three-operand form, make sure to set Rn to be the operand
3993  // that isn't the same as Rd.
3994  unsigned RegOp = 4;
3995  if (Operands.size() == 6 &&
3996      ((ARMOperand*)Operands[4])->getReg() ==
3997        ((ARMOperand*)Operands[3])->getReg())
3998    RegOp = 5;
3999  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4000  Inst.addOperand(Inst.getOperand(0));
4001  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4002
4003  return true;
4004}
4005
4006bool ARMAsmParser::
4007cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4008              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4009  // Vd
4010  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4011  // Create a writeback register dummy placeholder.
4012  Inst.addOperand(MCOperand::CreateImm(0));
4013  // Vn
4014  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4015  // pred
4016  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4017  return true;
4018}
4019
4020bool ARMAsmParser::
4021cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4022                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4023  // Vd
4024  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4025  // Create a writeback register dummy placeholder.
4026  Inst.addOperand(MCOperand::CreateImm(0));
4027  // Vn
4028  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4029  // Vm
4030  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4031  // pred
4032  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4033  return true;
4034}
4035
4036bool ARMAsmParser::
4037cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4038              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4039  // Create a writeback register dummy placeholder.
4040  Inst.addOperand(MCOperand::CreateImm(0));
4041  // Vn
4042  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4043  // Vt
4044  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4045  // pred
4046  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4047  return true;
4048}
4049
4050bool ARMAsmParser::
4051cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4052                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4053  // Create a writeback register dummy placeholder.
4054  Inst.addOperand(MCOperand::CreateImm(0));
4055  // Vn
4056  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4057  // Vm
4058  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4059  // Vt
4060  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4061  // pred
4062  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4063  return true;
4064}
4065
4066/// Parse an ARM memory expression. Return false if successful; otherwise emit
4067/// an error and return true.  The first token must be a '[' when called.
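/// Handled forms include, for example, "[r0]", "[r0, #4]", "[r0, #-4]",
/// "[r0, :128]" (alignment specifier), "[r0, r1]", "[r0, -r1, lsl #2]", and
/// any of these followed by a writeback marker "!".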
4068bool ARMAsmParser::
4069parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4070  SMLoc S, E;
4071  assert(Parser.getTok().is(AsmToken::LBrac) &&
4072         "Token is not a Left Bracket");
4073  S = Parser.getTok().getLoc();
4074  Parser.Lex(); // Eat left bracket token.
4075
4076  const AsmToken &BaseRegTok = Parser.getTok();
4077  int BaseRegNum = tryParseRegister();
4078  if (BaseRegNum == -1)
4079    return Error(BaseRegTok.getLoc(), "register expected");
4080
4081  // The next token must either be a comma or a closing bracket.
4082  const AsmToken &Tok = Parser.getTok();
4083  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4084    return Error(Tok.getLoc(), "malformed memory operand");
4085
4086  if (Tok.is(AsmToken::RBrac)) {
4087    E = Tok.getLoc();
4088    Parser.Lex(); // Eat right bracket token.
4089
4090    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4091                                             0, 0, false, S, E));
4092
4093    // If there's a pre-indexing writeback marker, '!', just add it as a token
4094    // operand. It's rather odd, but syntactically valid.
4095    if (Parser.getTok().is(AsmToken::Exclaim)) {
4096      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4097      Parser.Lex(); // Eat the '!'.
4098    }
4099
4100    return false;
4101  }
4102
4103  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4104  Parser.Lex(); // Eat the comma.
4105
4106  // If we have a ':', it's an alignment specifier.
4107  if (Parser.getTok().is(AsmToken::Colon)) {
4108    Parser.Lex(); // Eat the ':'.
4109    E = Parser.getTok().getLoc();
4110
4111    const MCExpr *Expr;
4112    if (getParser().ParseExpression(Expr))
4113     return true;
4114
4115    // The expression has to be a constant. Memory references with relocations
4116    // don't come through here, as they use the <label> forms of the relevant
4117    // instructions.
4118    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4119    if (!CE)
4120      return Error (E, "constant expression expected");
4121
4122    unsigned Align = 0;
4123    switch (CE->getValue()) {
4124    default:
4125      return Error(E,
4126                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4127    case 16:  Align = 2; break;
4128    case 32:  Align = 4; break;
4129    case 64:  Align = 8; break;
4130    case 128: Align = 16; break;
4131    case 256: Align = 32; break;
4132    }
4133
4134    // Now we should have the closing ']'
4135    E = Parser.getTok().getLoc();
4136    if (Parser.getTok().isNot(AsmToken::RBrac))
4137      return Error(E, "']' expected");
4138    Parser.Lex(); // Eat right bracket token.
4139
4140    // Don't worry about range checking the value here. That's handled by
4141    // the is*() predicates.
4142    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4143                                             ARM_AM::no_shift, 0, Align,
4144                                             false, S, E));
4145
4146    // If there's a pre-indexing writeback marker, '!', just add it as a token
4147    // operand.
4148    if (Parser.getTok().is(AsmToken::Exclaim)) {
4149      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4150      Parser.Lex(); // Eat the '!'.
4151    }
4152
4153    return false;
4154  }
4155
4156  // If we have a '#', it's an immediate offset, else assume it's a register
4157  // offset. Be friendly and also accept a plain integer (without a leading
4158  // hash) for gas compatibility.
4159  if (Parser.getTok().is(AsmToken::Hash) ||
4160      Parser.getTok().is(AsmToken::Dollar) ||
4161      Parser.getTok().is(AsmToken::Integer)) {
4162    if (Parser.getTok().isNot(AsmToken::Integer))
4163      Parser.Lex(); // Eat the '#'.
4164    E = Parser.getTok().getLoc();
4165
4166    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4167    const MCExpr *Offset;
4168    if (getParser().ParseExpression(Offset))
4169     return true;
4170
4171    // The expression has to be a constant. Memory references with relocations
4172    // don't come through here, as they use the <label> forms of the relevant
4173    // instructions.
4174    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4175    if (!CE)
4176      return Error (E, "constant expression expected");
4177
4178    // If the constant was #-0, represent it as INT32_MIN.
4179    int32_t Val = CE->getValue();
4180    if (isNegative && Val == 0)
4181      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4182
4183    // Now we should have the closing ']'
4184    E = Parser.getTok().getLoc();
4185    if (Parser.getTok().isNot(AsmToken::RBrac))
4186      return Error(E, "']' expected");
4187    Parser.Lex(); // Eat right bracket token.
4188
4189    // Don't worry about range checking the value here. That's handled by
4190    // the is*() predicates.
4191    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4192                                             ARM_AM::no_shift, 0, 0,
4193                                             false, S, E));
4194
4195    // If there's a pre-indexing writeback marker, '!', just add it as a token
4196    // operand.
4197    if (Parser.getTok().is(AsmToken::Exclaim)) {
4198      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4199      Parser.Lex(); // Eat the '!'.
4200    }
4201
4202    return false;
4203  }
4204
4205  // The register offset is optionally preceded by a '+' or '-'
4206  bool isNegative = false;
4207  if (Parser.getTok().is(AsmToken::Minus)) {
4208    isNegative = true;
4209    Parser.Lex(); // Eat the '-'.
4210  } else if (Parser.getTok().is(AsmToken::Plus)) {
4211    // Nothing to do.
4212    Parser.Lex(); // Eat the '+'.
4213  }
4214
4215  E = Parser.getTok().getLoc();
4216  int OffsetRegNum = tryParseRegister();
4217  if (OffsetRegNum == -1)
4218    return Error(E, "register expected");
4219
4220  // If there's a shift operator, handle it.
4221  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4222  unsigned ShiftImm = 0;
4223  if (Parser.getTok().is(AsmToken::Comma)) {
4224    Parser.Lex(); // Eat the ','.
4225    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4226      return true;
4227  }
4228
4229  // Now we should have the closing ']'
4230  E = Parser.getTok().getLoc();
4231  if (Parser.getTok().isNot(AsmToken::RBrac))
4232    return Error(E, "']' expected");
4233  Parser.Lex(); // Eat right bracket token.
4234
4235  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4236                                           ShiftType, ShiftImm, 0, isNegative,
4237                                           S, E));
4238
4239  // If there's a pre-indexing writeback marker, '!', just add it as a token
4240  // operand.
4241  if (Parser.getTok().is(AsmToken::Exclaim)) {
4242    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4243    Parser.Lex(); // Eat the '!'.
4244  }
4245
4246  return false;
4247}
4248
4249/// parseMemRegOffsetShift - one of these two:
4250///   ( lsl | lsr | asr | ror ) , # shift_amount
4251///   rrx
4252/// Return false if a shift was successfully parsed; return true on error.
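/// For example, the "lsl #2" in "[r0, r1, lsl #2]", or a bare "rrx".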
4253bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4254                                          unsigned &Amount) {
4255  SMLoc Loc = Parser.getTok().getLoc();
4256  const AsmToken &Tok = Parser.getTok();
4257  if (Tok.isNot(AsmToken::Identifier))
4258    return true;
4259  StringRef ShiftName = Tok.getString();
4260  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4261      ShiftName == "asl" || ShiftName == "ASL")
4262    St = ARM_AM::lsl;
4263  else if (ShiftName == "lsr" || ShiftName == "LSR")
4264    St = ARM_AM::lsr;
4265  else if (ShiftName == "asr" || ShiftName == "ASR")
4266    St = ARM_AM::asr;
4267  else if (ShiftName == "ror" || ShiftName == "ROR")
4268    St = ARM_AM::ror;
4269  else if (ShiftName == "rrx" || ShiftName == "RRX")
4270    St = ARM_AM::rrx;
4271  else
4272    return Error(Loc, "illegal shift operator");
4273  Parser.Lex(); // Eat shift type token.
4274
4275  // rrx stands alone.
4276  Amount = 0;
4277  if (St != ARM_AM::rrx) {
4278    Loc = Parser.getTok().getLoc();
4279    // A '#' and a shift amount.
4280    const AsmToken &HashTok = Parser.getTok();
4281    if (HashTok.isNot(AsmToken::Hash) &&
4282        HashTok.isNot(AsmToken::Dollar))
4283      return Error(HashTok.getLoc(), "'#' expected");
4284    Parser.Lex(); // Eat hash token.
4285
4286    const MCExpr *Expr;
4287    if (getParser().ParseExpression(Expr))
4288      return true;
4289    // Range check the immediate.
4290    // lsl, ror: 0 <= imm <= 31
4291    // lsr, asr: 0 <= imm <= 32
4292    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4293    if (!CE)
4294      return Error(Loc, "shift amount must be an immediate");
4295    int64_t Imm = CE->getValue();
4296    if (Imm < 0 ||
4297        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4298        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4299      return Error(Loc, "immediate shift value out of range");
4300    Amount = Imm;
4301  }
4302
4303  return false;
4304}
4305
4306/// parseFPImm - A floating point immediate expression operand.
4307ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4308parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4309  // Anything that can accept a floating point constant as an operand
4310  // needs to go through here, as the regular ParseExpression is
4311  // integer only.
4312  //
4313  // This routine still creates a generic Immediate operand, containing
4314  // a bitcast of the 64-bit floating point value. The various operands
4315  // that accept floats can check whether the value is valid for them
4316  // via the standard is*() predicates.
4317
4318  SMLoc S = Parser.getTok().getLoc();
4319
4320  if (Parser.getTok().isNot(AsmToken::Hash) &&
4321      Parser.getTok().isNot(AsmToken::Dollar))
4322    return MatchOperand_NoMatch;
4323
4324  // Disambiguate the VMOV forms that can accept an FP immediate.
4325  // vmov.f32 <sreg>, #imm
4326  // vmov.f64 <dreg>, #imm
4327  // vmov.f32 <dreg>, #imm  @ vector f32x2
4328  // vmov.f32 <qreg>, #imm  @ vector f32x4
4329  //
4330  // There are also the NEON VMOV instructions which expect an
4331  // integer constant. Make sure we don't try to parse an FPImm
4332  // for these:
4333  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4334  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4335  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4336                           TyOp->getToken() != ".f64"))
4337    return MatchOperand_NoMatch;
4338
4339  Parser.Lex(); // Eat the '#'.
4340
4341  // Handle negation, as that still comes through as a separate token.
4342  bool isNegative = false;
4343  if (Parser.getTok().is(AsmToken::Minus)) {
4344    isNegative = true;
4345    Parser.Lex();
4346  }
4347  const AsmToken &Tok = Parser.getTok();
4348  SMLoc Loc = Tok.getLoc();
4349  if (Tok.is(AsmToken::Real)) {
4350    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4351    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4352    // If we had a '-' in front, toggle the sign bit.
4353    IntVal ^= (uint64_t)isNegative << 31;
4354    Parser.Lex(); // Eat the token.
4355    Operands.push_back(ARMOperand::CreateImm(
4356          MCConstantExpr::Create(IntVal, getContext()),
4357          S, Parser.getTok().getLoc()));
4358    return MatchOperand_Success;
4359  }
4360  // Also handle plain integers. Instructions which allow floating point
4361  // immediates also allow a raw encoded 8-bit value.
4362  if (Tok.is(AsmToken::Integer)) {
4363    int64_t Val = Tok.getIntVal();
4364    Parser.Lex(); // Eat the token.
4365    if (Val > 255 || Val < 0) {
4366      Error(Loc, "encoded floating point value out of range");
4367      return MatchOperand_ParseFail;
4368    }
4369    double RealVal = ARM_AM::getFPImmFloat(Val);
4370    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4371    Operands.push_back(ARMOperand::CreateImm(
4372        MCConstantExpr::Create(Val, getContext()), S,
4373        Parser.getTok().getLoc()));
4374    return MatchOperand_Success;
4375  }
4376
4377  Error(Loc, "invalid floating point immediate");
4378  return MatchOperand_ParseFail;
4379}
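// For illustration: with a ".f32"/".f64" type suffix, both a literal float and
// its raw 8-bit encoded form are accepted here, e.g. "vmov.f32 s0, #1.0" and,
// assuming the standard VFP modified-immediate encoding, "vmov.f32 s0, #112"
// (0x70, which expands to 1.0).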
4380
4381/// Parse an ARM instruction operand. For now this parses the operand regardless
4382/// of the mnemonic.
4383bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4384                                StringRef Mnemonic) {
4385  SMLoc S, E;
4386
4387  // Check if the current operand has a custom associated parser, if so, try to
4388  // custom parse the operand, or fallback to the general approach.
4389  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4390  if (ResTy == MatchOperand_Success)
4391    return false;
4392  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4393  // there was a match, but an error occurred, in which case, just return that
4394  // the operand parsing failed.
4395  if (ResTy == MatchOperand_ParseFail)
4396    return true;
4397
4398  switch (getLexer().getKind()) {
4399  default:
4400    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4401    return true;
4402  case AsmToken::Identifier: {
4403    if (!tryParseRegisterWithWriteBack(Operands))
4404      return false;
4405    int Res = tryParseShiftRegister(Operands);
4406    if (Res == 0) // success
4407      return false;
4408    else if (Res == -1) // irrecoverable error
4409      return true;
4410    // If this is VMRS, check for the apsr_nzcv operand.
4411    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4412      S = Parser.getTok().getLoc();
4413      Parser.Lex();
4414      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4415      return false;
4416    }
4417
4418    // Fall through for the Identifier case that is not a register or a
4419    // special name.
4420  }
4421  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4422  case AsmToken::Integer: // things like 1f and 2b as branch targets
4423  case AsmToken::String:  // quoted label names.
4424  case AsmToken::Dot: {   // . as a branch target
4425    // This was not a register so parse other operands that start with an
4426    // identifier (like labels) as expressions and create them as immediates.
4427    const MCExpr *IdVal;
4428    S = Parser.getTok().getLoc();
4429    if (getParser().ParseExpression(IdVal))
4430      return true;
4431    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4432    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4433    return false;
4434  }
4435  case AsmToken::LBrac:
4436    return parseMemory(Operands);
4437  case AsmToken::LCurly:
4438    return parseRegisterList(Operands);
4439  case AsmToken::Dollar:
4440  case AsmToken::Hash: {
4441    // #42 -> immediate.
4442    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4443    S = Parser.getTok().getLoc();
4444    Parser.Lex();
4445    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4446    const MCExpr *ImmVal;
4447    if (getParser().ParseExpression(ImmVal))
4448      return true;
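    // Note: a literal "#-0" is kept distinct from "#0" by encoding it as
    // INT32_MIN below, since some addressing encodings (the U bit) care about
    // the sign of a zero offset.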
4449    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4450    if (CE) {
4451      int32_t Val = CE->getValue();
4452      if (isNegative && Val == 0)
4453        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4454    }
4455    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4456    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4457    return false;
4458  }
4459  case AsmToken::Colon: {
4460    // ":lower16:" and ":upper16:" expression prefixes
4461    // FIXME: Check it's an expression prefix,
4462    // e.g. (FOO - :lower16:BAR) isn't legal.
4463    ARMMCExpr::VariantKind RefKind;
4464    if (parsePrefix(RefKind))
4465      return true;
4466
4467    const MCExpr *SubExprVal;
4468    if (getParser().ParseExpression(SubExprVal))
4469      return true;
4470
4471    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4472                                                   getContext());
4473    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4474    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4475    return false;
4476  }
4477  }
4478}
4479
4480// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4481//  :lower16: and :upper16:.
4482bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4483  RefKind = ARMMCExpr::VK_ARM_None;
4484
4485  // :lower16: and :upper16: modifiers
4486  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4487  Parser.Lex(); // Eat ':'
4488
4489  if (getLexer().isNot(AsmToken::Identifier)) {
4490    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4491    return true;
4492  }
4493
4494  StringRef IDVal = Parser.getTok().getIdentifier();
4495  if (IDVal == "lower16") {
4496    RefKind = ARMMCExpr::VK_ARM_LO16;
4497  } else if (IDVal == "upper16") {
4498    RefKind = ARMMCExpr::VK_ARM_HI16;
4499  } else {
4500    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4501    return true;
4502  }
4503  Parser.Lex();
4504
4505  if (getLexer().isNot(AsmToken::Colon)) {
4506    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4507    return true;
4508  }
4509  Parser.Lex(); // Eat the last ':'
4510  return false;
4511}
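// For illustration, these prefixes appear in operands such as
//   movw r0, :lower16:some_symbol
//   movt r0, :upper16:some_symbol
// where "some_symbol" is a placeholder label.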
4512
4513/// \brief Given a mnemonic, split out possible predication code and carry
4514/// setting letters to form a canonical mnemonic and flags.
4515//
4516// FIXME: Would be nice to autogen this.
4517// FIXME: This is a bit of a maze of special cases.
4518StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4519                                      unsigned &PredicationCode,
4520                                      bool &CarrySetting,
4521                                      unsigned &ProcessorIMod,
4522                                      StringRef &ITMask) {
4523  PredicationCode = ARMCC::AL;
4524  CarrySetting = false;
4525  ProcessorIMod = 0;
4526
4527  // Ignore some mnemonics we know aren't predicated forms.
4528  //
4529  // FIXME: Would be nice to autogen this.
4530  if ((Mnemonic == "movs" && isThumb()) ||
4531      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4532      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4533      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4534      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4535      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4536      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4537      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4538      Mnemonic == "fmuls")
4539    return Mnemonic;
4540
4541  // First, split out any predication code. Ignore mnemonics we know aren't
4542  // predicated but do have a carry-set and so weren't caught above.
4543  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4544      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4545      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4546      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4547    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4548      .Case("eq", ARMCC::EQ)
4549      .Case("ne", ARMCC::NE)
4550      .Case("hs", ARMCC::HS)
4551      .Case("cs", ARMCC::HS)
4552      .Case("lo", ARMCC::LO)
4553      .Case("cc", ARMCC::LO)
4554      .Case("mi", ARMCC::MI)
4555      .Case("pl", ARMCC::PL)
4556      .Case("vs", ARMCC::VS)
4557      .Case("vc", ARMCC::VC)
4558      .Case("hi", ARMCC::HI)
4559      .Case("ls", ARMCC::LS)
4560      .Case("ge", ARMCC::GE)
4561      .Case("lt", ARMCC::LT)
4562      .Case("gt", ARMCC::GT)
4563      .Case("le", ARMCC::LE)
4564      .Case("al", ARMCC::AL)
4565      .Default(~0U);
4566    if (CC != ~0U) {
4567      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4568      PredicationCode = CC;
4569    }
4570  }
4571
4572  // Next, determine if we have a carry setting bit. We explicitly ignore all
4573  // the instructions we know end in 's'.
4574  if (Mnemonic.endswith("s") &&
4575      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4576        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4577        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4578        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4579        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4580        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4581        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4582        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
4583        (Mnemonic == "movs" && isThumb()))) {
4584    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4585    CarrySetting = true;
4586  }
4587
4588  // The "cps" instruction can have an interrupt mode operand which is glued into
4589  // the mnemonic. Check if this is the case, split it out and parse the imod operand.
4590  if (Mnemonic.startswith("cps")) {
4591    // Split out any imod code.
4592    unsigned IMod =
4593      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4594      .Case("ie", ARM_PROC::IE)
4595      .Case("id", ARM_PROC::ID)
4596      .Default(~0U);
4597    if (IMod != ~0U) {
4598      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4599      ProcessorIMod = IMod;
4600    }
4601  }
4602
4603  // The "it" instruction has the condition mask on the end of the mnemonic.
4604  if (Mnemonic.startswith("it")) {
4605    ITMask = Mnemonic.slice(2, Mnemonic.size());
4606    Mnemonic = Mnemonic.slice(0, 2);
4607  }
4608
4609  return Mnemonic;
4610}
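// Worked examples of the splitting above (illustrative):
//   "addseq" -> mnemonic "add", carry setting, predication EQ
//   "cpsie"  -> mnemonic "cps", imod IE
//   "ittet"  -> mnemonic "it",  ITMask "tet"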
4611
4612/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4613/// inclusion of carry set or predication code operands.
4614//
4615// FIXME: It would be nice to autogen this.
4616void ARMAsmParser::
4617getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4618                      bool &CanAcceptPredicationCode) {
4619  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4620      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4621      Mnemonic == "add" || Mnemonic == "adc" ||
4622      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4623      Mnemonic == "orr" || Mnemonic == "mvn" ||
4624      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4625      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4626      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4627                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4628                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4629    CanAcceptCarrySet = true;
4630  } else
4631    CanAcceptCarrySet = false;
4632
4633  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4634      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4635      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4636      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4637      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4638      (Mnemonic == "clrex" && !isThumb()) ||
4639      (Mnemonic == "nop" && isThumbOne()) ||
4640      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4641        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4642        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4643      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4644       !isThumb()) ||
4645      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4646    CanAcceptPredicationCode = false;
4647  } else
4648    CanAcceptPredicationCode = true;
4649
4650  if (isThumb()) {
4651    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4652        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4653      CanAcceptPredicationCode = false;
4654  }
4655}
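// For illustration: "add" reports (CanAcceptCarrySet = true,
// CanAcceptPredicationCode = true), while "cps" reports (false, false),
// matching the checks above.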
4656
4657bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4658                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4659  // FIXME: This is all horribly hacky. We really need a better way to deal
4660  // with optional operands like this in the matcher table.
4661
4662  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4663  // another does not. Specifically, the MOVW instruction does not. So we
4664  // special case it here and remove the defaulted (non-setting) cc_out
4665  // operand if that's the instruction we're trying to match.
4666  //
4667  // We do this as post-processing of the explicit operands rather than just
4668  // conditionally adding the cc_out in the first place because we need
4669  // to check the type of the parsed immediate operand.
4670  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4671      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4672      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4673      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4674    return true;
4675
4676  // Register-register 'add' for thumb does not have a cc_out operand
4677  // when there are only two register operands.
4678  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4679      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4680      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4681      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4682    return true;
4683  // Register-register 'add' for thumb does not have a cc_out operand
4684  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4685  // have to check the immediate range here since Thumb2 has a variant
4686  // that can handle a different range and has a cc_out operand.
4687  if (((isThumb() && Mnemonic == "add") ||
4688       (isThumbTwo() && Mnemonic == "sub")) &&
4689      Operands.size() == 6 &&
4690      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4691      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4692      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4693      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4694      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4695       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4696    return true;
4697  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4698  // imm0_4095 variant. That's the least-preferred variant when
4699  // selecting via the generic "add" mnemonic, so to know that we
4700  // should remove the cc_out operand, we have to explicitly check that
4701  // it's not one of the other variants. Ugh.
4702  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4703      Operands.size() == 6 &&
4704      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4705      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4706      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4707    // Nest conditions rather than one big 'if' statement for readability.
4708    //
4709    // If either register is a high reg, it's either one of the SP
4710    // variants (handled above) or a 32-bit encoding, so we just
4711    // check against T3. If the second register is the PC, this is an
4712    // alternate form of ADR, which uses encoding T4, so check for that too.
4713    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4714         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4715        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4716        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4717      return false;
4718    // If both registers are low, we're in an IT block, and the immediate is
4719    // in range, we should use encoding T1 instead, which has a cc_out.
4720    if (inITBlock() &&
4721        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4722        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4723        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4724      return false;
4725
4726    // Otherwise, we use encoding T4, which does not have a cc_out
4727    // operand.
4728    return true;
4729  }
4730
4731  // The thumb2 multiply instruction doesn't have a CCOut register, so
4732  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4733  // use the 16-bit encoding or not.
4734  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4735      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4736      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4737      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4738      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4739      // If the registers aren't low regs, the destination reg isn't the
4740      // same as one of the source regs, or the cc_out operand is zero
4741      // outside of an IT block, we have to use the 32-bit encoding, so
4742      // remove the cc_out operand.
4743      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4744       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4745       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4746       !inITBlock() ||
4747       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4748        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4749        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4750        static_cast<ARMOperand*>(Operands[4])->getReg())))
4751    return true;
4752
4753  // Also check the 'mul' syntax variant that doesn't specify an explicit
4754  // destination register.
4755  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4756      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4757      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4758      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4759      // If the registers aren't low regs or the cc_out operand is zero
4760      // outside of an IT block, we have to use the 32-bit encoding, so
4761      // remove the cc_out operand.
4762      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4763       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4764       !inITBlock()))
4765    return true;
4766
4767
4768
4769  // Register-register 'add/sub' for thumb does not have a cc_out operand
4770  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4771  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4772  // right, this will result in better diagnostics (which operand is off)
4773  // anyway.
4774  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4775      (Operands.size() == 5 || Operands.size() == 6) &&
4776      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4777      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4778      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4779    return true;
4780
4781  return false;
4782}
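// For illustration: with the defaulted (register 0) cc_out in place, ARM-mode
// "mov r0, #0x1234" (only encodable as MOVW) makes this return true, so the
// cc_out operand is dropped; Thumb "add r0, r1, r2" returns false and keeps it.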
4783
4784static bool isDataTypeToken(StringRef Tok) {
4785  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4786    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4787    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4788    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4789    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4790    Tok == ".f" || Tok == ".d";
4791}
4792
4793// FIXME: This bit should probably be handled via an explicit match class
4794// in the .td files that matches the suffix instead of having it be
4795// a literal string token the way it is now.
4796static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4797  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4798}
4799
4800static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4801/// Parse an ARM instruction mnemonic followed by its operands.
4802bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4803                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4804  // Apply mnemonic aliases before doing anything else, as the destination
4805  // mnemonic may include suffixes and we want to handle them normally.
4806  // The generic tblgen'erated code does this later, at the start of
4807  // MatchInstructionImpl(), but that's too late for aliases that include
4808  // any sort of suffix.
4809  unsigned AvailableFeatures = getAvailableFeatures();
4810  applyMnemonicAliases(Name, AvailableFeatures);
4811
4812  // First check for the ARM-specific .req directive.
4813  if (Parser.getTok().is(AsmToken::Identifier) &&
4814      Parser.getTok().getIdentifier() == ".req") {
4815    parseDirectiveReq(Name, NameLoc);
4816    // We always return 'error' for this, as we're done with this
4817    // statement and don't need to match the instruction.
4818    return true;
4819  }
4820
4821  // Create the leading tokens for the mnemonic, split by '.' characters.
4822  size_t Start = 0, Next = Name.find('.');
4823  StringRef Mnemonic = Name.slice(Start, Next);
4824
4825  // Split out the predication code and carry setting flag from the mnemonic.
4826  unsigned PredicationCode;
4827  unsigned ProcessorIMod;
4828  bool CarrySetting;
4829  StringRef ITMask;
4830  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4831                           ProcessorIMod, ITMask);
4832
4833  // In Thumb1, only the branch (B) instruction can be predicated.
4834  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4835    Parser.EatToEndOfStatement();
4836    return Error(NameLoc, "conditional execution not supported in Thumb1");
4837  }
4838
4839  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4840
4841  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4842  // is the mask as it will be for the IT encoding if the conditional
4843  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4844  // where the conditional bit0 is zero, the instruction post-processing
4845  // will adjust the mask accordingly.
4846  if (Mnemonic == "it") {
4847    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4848    if (ITMask.size() > 3) {
4849      Parser.EatToEndOfStatement();
4850      return Error(Loc, "too many conditions on IT instruction");
4851    }
4852    unsigned Mask = 8;
4853    for (unsigned i = ITMask.size(); i != 0; --i) {
4854      char pos = ITMask[i - 1];
4855      if (pos != 't' && pos != 'e') {
4856        Parser.EatToEndOfStatement();
4857        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4858      }
4859      Mask >>= 1;
4860      if (ITMask[i - 1] == 't')
4861        Mask |= 8;
4862    }
4863    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4864  }
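  // Worked example: for "ittet" the loop above yields Mask == 0b1011 before
  // the post-processing adjustment mentioned in the comment.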
4865
4866  // FIXME: This is all a pretty gross hack. We should automatically handle
4867  // optional operands like this via tblgen.
4868
4869  // Next, add the CCOut and ConditionCode operands, if needed.
4870  //
4871  // For mnemonics which can ever incorporate a carry setting bit or predication
4872  // code, our matching model involves us always generating CCOut and
4873  // ConditionCode operands to match the mnemonic "as written" and then we let
4874  // the matcher deal with finding the right instruction or generating an
4875  // appropriate error.
4876  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4877  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4878
4879  // If we had a carry-set on an instruction that can't do that, issue an
4880  // error.
4881  if (!CanAcceptCarrySet && CarrySetting) {
4882    Parser.EatToEndOfStatement();
4883    return Error(NameLoc, "instruction '" + Mnemonic +
4884                 "' can not set flags, but 's' suffix specified");
4885  }
4886  // If we had a predication code on an instruction that can't do that, issue an
4887  // error.
4888  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4889    Parser.EatToEndOfStatement();
4890    return Error(NameLoc, "instruction '" + Mnemonic +
4891                 "' is not predicable, but condition code specified");
4892  }
4893
4894  // Add the carry setting operand, if necessary.
4895  if (CanAcceptCarrySet) {
4896    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4897    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4898                                               Loc));
4899  }
4900
4901  // Add the predication code operand, if necessary.
4902  if (CanAcceptPredicationCode) {
4903    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4904                                      CarrySetting);
4905    Operands.push_back(ARMOperand::CreateCondCode(
4906                         ARMCC::CondCodes(PredicationCode), Loc));
4907  }
4908
4909  // Add the processor imod operand, if necessary.
4910  if (ProcessorIMod) {
4911    Operands.push_back(ARMOperand::CreateImm(
4912          MCConstantExpr::Create(ProcessorIMod, getContext()),
4913                                 NameLoc, NameLoc));
4914  }
4915
4916  // Add the remaining tokens in the mnemonic.
4917  while (Next != StringRef::npos) {
4918    Start = Next;
4919    Next = Name.find('.', Start + 1);
4920    StringRef ExtraToken = Name.slice(Start, Next);
4921
4922    // Some NEON instructions have an optional datatype suffix that is
4923    // completely ignored. Check for that.
4924    if (isDataTypeToken(ExtraToken) &&
4925        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4926      continue;
4927
4928    if (ExtraToken != ".n") {
4929      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4930      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4931    }
4932  }
4933
4934  // Read the remaining operands.
4935  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4936    // Read the first operand.
4937    if (parseOperand(Operands, Mnemonic)) {
4938      Parser.EatToEndOfStatement();
4939      return true;
4940    }
4941
4942    while (getLexer().is(AsmToken::Comma)) {
4943      Parser.Lex();  // Eat the comma.
4944
4945      // Parse and remember the operand.
4946      if (parseOperand(Operands, Mnemonic)) {
4947        Parser.EatToEndOfStatement();
4948        return true;
4949      }
4950    }
4951  }
4952
4953  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4954    SMLoc Loc = getLexer().getLoc();
4955    Parser.EatToEndOfStatement();
4956    return Error(Loc, "unexpected token in argument list");
4957  }
4958
4959  Parser.Lex(); // Consume the EndOfStatement
4960
4961  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4962  // do and don't have a cc_out optional-def operand. With some spot-checks
4963  // of the operand list, we can figure out which variant we're trying to
4964  // parse and adjust accordingly before actually matching. We shouldn't ever
4965  // try to remove a cc_out operand that was explicitly set on the
4966  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4967  // table-driven matcher doesn't fit well with the ARM instruction set.
4968  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4969    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4970    Operands.erase(Operands.begin() + 1);
4971    delete Op;
4972  }
4973
4974  // ARM mode 'blx' need special handling, as the register operand version
4975  // is predicable, but the label operand version is not. So, we can't rely
4976  // on the Mnemonic based checking to correctly figure out when to put
4977  // a k_CondCode operand in the list. If we're trying to match the label
4978  // version, remove the k_CondCode operand here.
4979  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4980      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4981    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4982    Operands.erase(Operands.begin() + 1);
4983    delete Op;
4984  }
4985
4986  // The vector-compare-to-zero instructions have a literal token "#0" at
4987  // the end that comes to here as an immediate operand. Convert it to a
4988  // token to play nicely with the matcher.
4989  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4990      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4991      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4992    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4993    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4994    if (CE && CE->getValue() == 0) {
4995      Operands.erase(Operands.begin() + 5);
4996      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4997      delete Op;
4998    }
4999  }
5000  // VCMP{E} does the same thing, but with a different operand count.
5001  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5002      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5003    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5004    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5005    if (CE && CE->getValue() == 0) {
5006      Operands.erase(Operands.begin() + 4);
5007      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5008      delete Op;
5009    }
5010  }
5011  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5012  // end. Convert it to a token here. Take care not to convert those
5013  // that should hit the Thumb2 encoding.
5014  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5015      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5016      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5017      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5018    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5019    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5020    if (CE && CE->getValue() == 0 &&
5021        (isThumbOne() ||
5022         // The cc_out operand matches the IT block.
5023         ((inITBlock() != CarrySetting) &&
5024         // Neither register operand is a high register.
5025         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5026          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5027      Operands.erase(Operands.begin() + 5);
5028      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5029      delete Op;
5030    }
5031  }
5032
5033  return false;
5034}
5035
5036// Validate context-sensitive operand constraints.
5037
5038// Return 'true' if the register list contains non-low GPR registers (HiReg,
5039// if nonzero, is also permitted); 'false' otherwise. If Reg appears in the
5040// register list, set 'containsReg' to true.
5041static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5042                                 unsigned HiReg, bool &containsReg) {
5043  containsReg = false;
5044  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5045    unsigned OpReg = Inst.getOperand(i).getReg();
5046    if (OpReg == Reg)
5047      containsReg = true;
5048    // Anything other than a low register isn't legal here.
5049    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5050      return true;
5051  }
5052  return false;
5053}
5054
5055// Check if the specified register is in the register list of the inst,
5056// starting at the indicated operand number.
5057static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5058  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5059    unsigned OpReg = Inst.getOperand(i).getReg();
5060    if (OpReg == Reg)
5061      return true;
5062  }
5063  return false;
5064}
5065
5066// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5067// the ARMInsts array) instead. Getting that here requires awkward
5068// API changes, though. Better way?
5069namespace llvm {
5070extern const MCInstrDesc ARMInsts[];
5071}
5072static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5073  return ARMInsts[Opcode];
5074}
5075
5076// FIXME: We would really like to be able to tablegen'erate this.
5077bool ARMAsmParser::
5078validateInstruction(MCInst &Inst,
5079                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5080  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5081  SMLoc Loc = Operands[0]->getStartLoc();
5082  // Check the IT block state first.
5083  // NOTE: BKPT instruction has the interesting property of being
5084  // allowed in IT blocks, but not being predicable.  It just always
5085  // executes.
5086  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5087      Inst.getOpcode() != ARM::BKPT) {
5088    unsigned bit = 1;
5089    if (ITState.FirstCond)
5090      ITState.FirstCond = false;
5091    else
5092      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5093    // The instruction must be predicable.
5094    if (!MCID.isPredicable())
5095      return Error(Loc, "instructions in IT block must be predicable");
5096    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5097    unsigned ITCond = bit ? ITState.Cond :
5098      ARMCC::getOppositeCondition(ITState.Cond);
5099    if (Cond != ITCond) {
5100      // Find the condition code Operand to get its SMLoc information.
5101      SMLoc CondLoc;
5102      for (unsigned i = 1; i < Operands.size(); ++i)
5103        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5104          CondLoc = Operands[i]->getStartLoc();
5105      return Error(CondLoc, "incorrect condition in IT block; got '" +
5106                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5107                   "', but expected '" +
5108                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5109    }
5110  // Check for non-'al' condition codes outside of the IT block.
5111  } else if (isThumbTwo() && MCID.isPredicable() &&
5112             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5113             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5114             Inst.getOpcode() != ARM::t2B)
5115    return Error(Loc, "predicated instructions must be in IT block");
5116
5117  switch (Inst.getOpcode()) {
5118  case ARM::LDRD:
5119  case ARM::LDRD_PRE:
5120  case ARM::LDRD_POST:
5121  case ARM::LDREXD: {
5122    // Rt2 must be Rt + 1.
5123    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5124    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5125    if (Rt2 != Rt + 1)
5126      return Error(Operands[3]->getStartLoc(),
5127                   "destination operands must be sequential");
5128    return false;
5129  }
5130  case ARM::STRD: {
5131    // Rt2 must be Rt + 1.
5132    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5133    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5134    if (Rt2 != Rt + 1)
5135      return Error(Operands[3]->getStartLoc(),
5136                   "source operands must be sequential");
5137    return false;
5138  }
5139  case ARM::STRD_PRE:
5140  case ARM::STRD_POST:
5141  case ARM::STREXD: {
5142    // Rt2 must be Rt + 1.
5143    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5144    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5145    if (Rt2 != Rt + 1)
5146      return Error(Operands[3]->getStartLoc(),
5147                   "source operands must be sequential");
5148    return false;
5149  }
5150  case ARM::SBFX:
5151  case ARM::UBFX: {
5152    // width must be in range [1, 32-lsb]
5153    unsigned lsb = Inst.getOperand(2).getImm();
5154    unsigned widthm1 = Inst.getOperand(3).getImm();
5155    if (widthm1 >= 32 - lsb)
5156      return Error(Operands[5]->getStartLoc(),
5157                   "bitfield width must be in range [1,32-lsb]");
5158    return false;
5159  }
5160  case ARM::tLDMIA: {
5161    // If we're parsing Thumb2, the .w variant is available and handles
5162    // most cases that are normally illegal for a Thumb1 LDM
5163    // instruction. We'll make the transformation in processInstruction()
5164    // if necessary.
5165    //
5166    // Thumb LDM instructions are writeback iff the base register is not
5167    // in the register list.
5168    unsigned Rn = Inst.getOperand(0).getReg();
5169    bool hasWritebackToken =
5170      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5171       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5172    bool listContainsBase;
5173    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5174      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5175                   "registers must be in range r0-r7");
5176    // If we should have writeback, then there should be a '!' token.
5177    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5178      return Error(Operands[2]->getStartLoc(),
5179                   "writeback operator '!' expected");
5180    // If we should not have writeback, there must not be a '!'. This is
5181    // true even for the 32-bit wide encodings.
5182    if (listContainsBase && hasWritebackToken)
5183      return Error(Operands[3]->getStartLoc(),
5184                   "writeback operator '!' not allowed when base register "
5185                   "in register list");
5186
5187    break;
5188  }
5189  case ARM::t2LDMIA_UPD: {
5190    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5191      return Error(Operands[4]->getStartLoc(),
5192                   "writeback operator '!' not allowed when base register "
5193                   "in register list");
5194    break;
5195  }
5196  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5197  // so only issue a diagnostic for Thumb1. The instructions will be
5198  // switched to the t2 encodings in processInstruction() if necessary.
5199  case ARM::tPOP: {
5200    bool listContainsBase;
5201    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5202        !isThumbTwo())
5203      return Error(Operands[2]->getStartLoc(),
5204                   "registers must be in range r0-r7 or pc");
5205    break;
5206  }
5207  case ARM::tPUSH: {
5208    bool listContainsBase;
5209    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5210        !isThumbTwo())
5211      return Error(Operands[2]->getStartLoc(),
5212                   "registers must be in range r0-r7 or lr");
5213    break;
5214  }
5215  case ARM::tSTMIA_UPD: {
5216    bool listContainsBase;
5217    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5218      return Error(Operands[4]->getStartLoc(),
5219                   "registers must be in range r0-r7");
5220    break;
5221  }
5222  }
5223
5224  return false;
5225}
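// For illustration: "ldrd r0, r1, [r2]" passes the sequential-register check
// above, while "ldrd r0, r2, [r3]" is rejected with "destination operands must
// be sequential".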
5226
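// Note on the Spacing out-parameter in the helpers below: 1 indicates a list of
// consecutive D registers (e.g. d0,d1,d2), while 2 indicates even/odd spaced
// registers (e.g. d0,d2,d4), as used by the Q-register forms of these
// pseudo-instructions.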
5227static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5228  switch(Opc) {
5229  default: assert(0 && "unexpected opcode!");
5230  // VST1LN
5231  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5232  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5233  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5234  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5235  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5236  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5237  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5238  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5239  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5240
5241  // VST2LN
5242  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5243  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5244  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5245  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5246  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5247
5248  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5249  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5250  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5251  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5252  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5253
5254  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5255  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5256  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5257  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5258  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5259
5260  // VST3LN
5261  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5262  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5263  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5264  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5265  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5266  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5267  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5268  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5269  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5270  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5271  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5272  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5273  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5274  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5275  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5276
5277  // VST3
5278  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5279  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5280  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5281  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5282  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5283  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5284  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5285  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5286  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5287  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5288  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5289  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5290  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5291  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5292  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5293  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5294  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5295  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5296
5297  // VST4LN
5298  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5299  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5300  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5301  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5302  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5303  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5304  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5305  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5306  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5307  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5308  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5309  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5310  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5311  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5312  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5313
5314  // VST4
5315  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5316  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5317  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5318  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5319  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5320  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5321  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5322  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5323  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5324  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5325  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5326  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5327  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5328  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5329  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5330  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5331  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5332  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5333  }
5334}
5335
5336static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5337  switch(Opc) {
5338  default: assert(0 && "unexpected opcode!");
5339  // VLD1LN
5340  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5341  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5342  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5343  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5344  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5345  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5346  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5347  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5348  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5349
5350  // VLD2LN
5351  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5352  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5353  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5354  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5355  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5356  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5357  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5358  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5359  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5360  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5361  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5362  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5363  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5364  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5365  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5366
5367  // VLD3DUP
5368  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5369  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5370  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5371  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
5372  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
5373  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5374  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5375  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5376  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5377  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5378  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5379  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5380  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5381  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5382  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5383  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5384  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5385  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5386
5387  // VLD3LN
5388  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5389  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5390  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5391  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
5392  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5393  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5394  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5395  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5396  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5397  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5398  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5399  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5400  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5401  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5402  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5403
5404  // VLD3
5405  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5406  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5407  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5408  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5409  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5410  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5411  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5412  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5413  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5414  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5415  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5416  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5417  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5418  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5419  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5420  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5421  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5422  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5423
5424  // VLD4LN
5425  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5426  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5427  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5428  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNq16_UPD;
5429  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5430  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5431  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5432  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5433  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5434  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5435  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5436  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5437  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5438  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5439  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5440
5441  // VLD4DUP
5442  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5443  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5444  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5445  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
5446  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
5447  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5448  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5449  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5450  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5451  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5452  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5453  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5454  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5455  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5456  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5457  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5458  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5459  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5460
5461  // VLD4
5462  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5463  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5464  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5465  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5466  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5467  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5468  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5469  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5470  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5471  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5472  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5473  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5474  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5475  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5476  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5477  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5478  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5479  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5480  }
5481}
5482
5483bool ARMAsmParser::
5484processInstruction(MCInst &Inst,
5485                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5486  switch (Inst.getOpcode()) {
5487  // Aliases for alternate PC+imm syntax of LDR instructions.
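  // These *pcrel pseudo-opcodes need no operand rewriting; each case simply
  // re-targets the instruction to the corresponding literal (*pci) opcode.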
5488  case ARM::t2LDRpcrel:
5489    Inst.setOpcode(ARM::t2LDRpci);
5490    return true;
5491  case ARM::t2LDRBpcrel:
5492    Inst.setOpcode(ARM::t2LDRBpci);
5493    return true;
5494  case ARM::t2LDRHpcrel:
5495    Inst.setOpcode(ARM::t2LDRHpci);
5496    return true;
5497  case ARM::t2LDRSBpcrel:
5498    Inst.setOpcode(ARM::t2LDRSBpci);
5499    return true;
5500  case ARM::t2LDRSHpcrel:
5501    Inst.setOpcode(ARM::t2LDRSHpci);
5502    return true;
5503  // Handle NEON VST complex aliases.
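  // The *_Asm pseudo-opcodes below carry their operands in source order:
  // Vd (first register of the list), lane, Rn, alignment[, Rm], predicate.
  // The real instructions want the address operands first and every D
  // register of the list as a separate operand, hence the shuffling.
  // Illustrative input (one of many forms): "vst2.16 {d0[1], d1[1]}, [r2], r3"
  // is parsed as VST2LNdWB_register_Asm_16 and rebuilt as Rn_wb, Rn,
  // alignment, Rm, d0, d1, lane, predicate.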
5504  case ARM::VST1LNdWB_register_Asm_8:
5505  case ARM::VST1LNdWB_register_Asm_16:
5506  case ARM::VST1LNdWB_register_Asm_32: {
5507    MCInst TmpInst;
5508    // Shuffle the operands around so the lane index operand is in the
5509    // right place.
5510    unsigned Spacing;
5511    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5512    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5513    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5514    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5515    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5516    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5517    TmpInst.addOperand(Inst.getOperand(1)); // lane
5518    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5519    TmpInst.addOperand(Inst.getOperand(6));
5520    Inst = TmpInst;
5521    return true;
5522  }
5523
5524  case ARM::VST2LNdWB_register_Asm_8:
5525  case ARM::VST2LNdWB_register_Asm_16:
5526  case ARM::VST2LNdWB_register_Asm_32:
5527  case ARM::VST2LNqWB_register_Asm_16:
5528  case ARM::VST2LNqWB_register_Asm_32: {
5529    MCInst TmpInst;
5530    // Shuffle the operands around so the lane index operand is in the
5531    // right place.
5532    unsigned Spacing;
5533    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5534    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5535    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5536    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5537    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5538    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5539    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5540                                            Spacing));
5541    TmpInst.addOperand(Inst.getOperand(1)); // lane
5542    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5543    TmpInst.addOperand(Inst.getOperand(6));
5544    Inst = TmpInst;
5545    return true;
5546  }
5547
5548  case ARM::VST3LNdWB_register_Asm_8:
5549  case ARM::VST3LNdWB_register_Asm_16:
5550  case ARM::VST3LNdWB_register_Asm_32:
5551  case ARM::VST3LNqWB_register_Asm_16:
5552  case ARM::VST3LNqWB_register_Asm_32: {
5553    MCInst TmpInst;
5554    // Shuffle the operands around so the lane index operand is in the
5555    // right place.
5556    unsigned Spacing;
5557    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5558    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5559    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5560    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5561    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5562    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5563    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5564                                            Spacing));
5565    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5566                                            Spacing * 2));
5567    TmpInst.addOperand(Inst.getOperand(1)); // lane
5568    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5569    TmpInst.addOperand(Inst.getOperand(6));
5570    Inst = TmpInst;
5571    return true;
5572  }
5573
5574  case ARM::VST4LNdWB_register_Asm_8:
5575  case ARM::VST4LNdWB_register_Asm_16:
5576  case ARM::VST4LNdWB_register_Asm_32:
5577  case ARM::VST4LNqWB_register_Asm_16:
5578  case ARM::VST4LNqWB_register_Asm_32: {
5579    MCInst TmpInst;
5580    // Shuffle the operands around so the lane index operand is in the
5581    // right place.
5582    unsigned Spacing;
5583    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5584    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5585    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5586    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5587    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5588    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5589    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5590                                            Spacing));
5591    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5592                                            Spacing * 2));
5593    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5594                                            Spacing * 3));
5595    TmpInst.addOperand(Inst.getOperand(1)); // lane
5596    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5597    TmpInst.addOperand(Inst.getOperand(6));
5598    Inst = TmpInst;
5599    return true;
5600  }
5601
5602  case ARM::VST1LNdWB_fixed_Asm_8:
5603  case ARM::VST1LNdWB_fixed_Asm_16:
5604  case ARM::VST1LNdWB_fixed_Asm_32: {
5605    MCInst TmpInst;
5606    // Shuffle the operands around so the lane index operand is in the
5607    // right place.
5608    unsigned Spacing;
5609    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5610    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5611    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5612    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5613    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5614    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5615    TmpInst.addOperand(Inst.getOperand(1)); // lane
5616    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5617    TmpInst.addOperand(Inst.getOperand(5));
5618    Inst = TmpInst;
5619    return true;
5620  }
5621
5622  case ARM::VST2LNdWB_fixed_Asm_8:
5623  case ARM::VST2LNdWB_fixed_Asm_16:
5624  case ARM::VST2LNdWB_fixed_Asm_32:
5625  case ARM::VST2LNqWB_fixed_Asm_16:
5626  case ARM::VST2LNqWB_fixed_Asm_32: {
5627    MCInst TmpInst;
5628    // Shuffle the operands around so the lane index operand is in the
5629    // right place.
5630    unsigned Spacing;
5631    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5632    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5633    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5634    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5635    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5636    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5637    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5638                                            Spacing));
5639    TmpInst.addOperand(Inst.getOperand(1)); // lane
5640    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5641    TmpInst.addOperand(Inst.getOperand(5));
5642    Inst = TmpInst;
5643    return true;
5644  }
5645
5646  case ARM::VST3LNdWB_fixed_Asm_8:
5647  case ARM::VST3LNdWB_fixed_Asm_16:
5648  case ARM::VST3LNdWB_fixed_Asm_32:
5649  case ARM::VST3LNqWB_fixed_Asm_16:
5650  case ARM::VST3LNqWB_fixed_Asm_32: {
5651    MCInst TmpInst;
5652    // Shuffle the operands around so the lane index operand is in the
5653    // right place.
5654    unsigned Spacing;
5655    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5656    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5657    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5658    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5659    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5660    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5661    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5662                                            Spacing));
5663    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5664                                            Spacing * 2));
5665    TmpInst.addOperand(Inst.getOperand(1)); // lane
5666    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5667    TmpInst.addOperand(Inst.getOperand(5));
5668    Inst = TmpInst;
5669    return true;
5670  }
5671
5672  case ARM::VST4LNdWB_fixed_Asm_8:
5673  case ARM::VST4LNdWB_fixed_Asm_16:
5674  case ARM::VST4LNdWB_fixed_Asm_32:
5675  case ARM::VST4LNqWB_fixed_Asm_16:
5676  case ARM::VST4LNqWB_fixed_Asm_32: {
5677    MCInst TmpInst;
5678    // Shuffle the operands around so the lane index operand is in the
5679    // right place.
5680    unsigned Spacing;
5681    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5682    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5683    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5684    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5685    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5686    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5687    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5688                                            Spacing));
5689    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5690                                            Spacing * 2));
5691    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5692                                            Spacing * 3));
5693    TmpInst.addOperand(Inst.getOperand(1)); // lane
5694    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5695    TmpInst.addOperand(Inst.getOperand(5));
5696    Inst = TmpInst;
5697    return true;
5698  }
5699
5700  case ARM::VST1LNdAsm_8:
5701  case ARM::VST1LNdAsm_16:
5702  case ARM::VST1LNdAsm_32: {
5703    MCInst TmpInst;
5704    // Shuffle the operands around so the lane index operand is in the
5705    // right place.
5706    unsigned Spacing;
5707    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5708    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5709    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5710    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5711    TmpInst.addOperand(Inst.getOperand(1)); // lane
5712    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5713    TmpInst.addOperand(Inst.getOperand(5));
5714    Inst = TmpInst;
5715    return true;
5716  }
5717
5718  case ARM::VST2LNdAsm_8:
5719  case ARM::VST2LNdAsm_16:
5720  case ARM::VST2LNdAsm_32:
5721  case ARM::VST2LNqAsm_16:
5722  case ARM::VST2LNqAsm_32: {
5723    MCInst TmpInst;
5724    // Shuffle the operands around so the lane index operand is in the
5725    // right place.
5726    unsigned Spacing;
5727    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5728    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5729    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5730    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5731    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5732                                            Spacing));
5733    TmpInst.addOperand(Inst.getOperand(1)); // lane
5734    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5735    TmpInst.addOperand(Inst.getOperand(5));
5736    Inst = TmpInst;
5737    return true;
5738  }
5739
5740  case ARM::VST3LNdAsm_8:
5741  case ARM::VST3LNdAsm_16:
5742  case ARM::VST3LNdAsm_32:
5743  case ARM::VST3LNqAsm_16:
5744  case ARM::VST3LNqAsm_32: {
5745    MCInst TmpInst;
5746    // Shuffle the operands around so the lane index operand is in the
5747    // right place.
5748    unsigned Spacing;
5749    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5750    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5751    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5752    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5753    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5754                                            Spacing));
5755    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5756                                            Spacing * 2));
5757    TmpInst.addOperand(Inst.getOperand(1)); // lane
5758    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5759    TmpInst.addOperand(Inst.getOperand(5));
5760    Inst = TmpInst;
5761    return true;
5762  }
5763
5764  case ARM::VST4LNdAsm_8:
5765  case ARM::VST4LNdAsm_16:
5766  case ARM::VST4LNdAsm_32:
5767  case ARM::VST4LNqAsm_16:
5768  case ARM::VST4LNqAsm_32: {
5769    MCInst TmpInst;
5770    // Shuffle the operands around so the lane index operand is in the
5771    // right place.
5772    unsigned Spacing;
5773    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5774    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5775    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5776    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5777    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5778                                            Spacing));
5779    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5780                                            Spacing * 2));
5781    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5782                                            Spacing * 3));
5783    TmpInst.addOperand(Inst.getOperand(1)); // lane
5784    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5785    TmpInst.addOperand(Inst.getOperand(5));
5786    Inst = TmpInst;
5787    return true;
5788  }
5789
5790  // Handle NEON VLD complex aliases.
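  // Same rewriting idea as the VST cases above, with two differences: the
  // destination register list comes first, and Vd is added again as the
  // tied source operand, since a lane load only replaces one element of
  // each destination register.
  // Illustrative input: "vld2.16 {d0[1], d1[1]}, [r2], r3".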
5791  case ARM::VLD1LNdWB_register_Asm_8:
5792  case ARM::VLD1LNdWB_register_Asm_16:
5793  case ARM::VLD1LNdWB_register_Asm_32: {
5794    MCInst TmpInst;
5795    // Shuffle the operands around so the lane index operand is in the
5796    // right place.
5797    unsigned Spacing;
5798    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5799    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5800    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5801    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5802    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5803    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5804    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5805    TmpInst.addOperand(Inst.getOperand(1)); // lane
5806    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5807    TmpInst.addOperand(Inst.getOperand(6));
5808    Inst = TmpInst;
5809    return true;
5810  }
5811
5812  case ARM::VLD2LNdWB_register_Asm_8:
5813  case ARM::VLD2LNdWB_register_Asm_16:
5814  case ARM::VLD2LNdWB_register_Asm_32:
5815  case ARM::VLD2LNqWB_register_Asm_16:
5816  case ARM::VLD2LNqWB_register_Asm_32: {
5817    MCInst TmpInst;
5818    // Shuffle the operands around so the lane index operand is in the
5819    // right place.
5820    unsigned Spacing;
5821    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5822    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5823    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5824                                            Spacing));
5825    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5826    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5827    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5828    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5829    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5830    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5831                                            Spacing));
5832    TmpInst.addOperand(Inst.getOperand(1)); // lane
5833    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5834    TmpInst.addOperand(Inst.getOperand(6));
5835    Inst = TmpInst;
5836    return true;
5837  }
5838
5839  case ARM::VLD3LNdWB_register_Asm_8:
5840  case ARM::VLD3LNdWB_register_Asm_16:
5841  case ARM::VLD3LNdWB_register_Asm_32:
5842  case ARM::VLD3LNqWB_register_Asm_16:
5843  case ARM::VLD3LNqWB_register_Asm_32: {
5844    MCInst TmpInst;
5845    // Shuffle the operands around so the lane index operand is in the
5846    // right place.
5847    unsigned Spacing;
5848    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5849    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5850    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5851                                            Spacing));
5852    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5853                                            Spacing * 2));
5854    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5855    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5856    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5857    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5858    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5859    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5860                                            Spacing));
5861    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5862                                            Spacing * 2));
5863    TmpInst.addOperand(Inst.getOperand(1)); // lane
5864    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5865    TmpInst.addOperand(Inst.getOperand(6));
5866    Inst = TmpInst;
5867    return true;
5868  }
5869
5870  case ARM::VLD4LNdWB_register_Asm_8:
5871  case ARM::VLD4LNdWB_register_Asm_16:
5872  case ARM::VLD4LNdWB_register_Asm_32:
5873  case ARM::VLD4LNqWB_register_Asm_16:
5874  case ARM::VLD4LNqWB_register_Asm_32: {
5875    MCInst TmpInst;
5876    // Shuffle the operands around so the lane index operand is in the
5877    // right place.
5878    unsigned Spacing;
5879    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5880    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5881    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5882                                            Spacing));
5883    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5884                                            Spacing * 2));
5885    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5886                                            Spacing * 3));
5887    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5888    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5889    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5890    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5891    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5892    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5893                                            Spacing));
5894    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5895                                            Spacing * 2));
5896    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5897                                            Spacing * 3));
5898    TmpInst.addOperand(Inst.getOperand(1)); // lane
5899    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5900    TmpInst.addOperand(Inst.getOperand(6));
5901    Inst = TmpInst;
5902    return true;
5903  }
5904
5905  case ARM::VLD1LNdWB_fixed_Asm_8:
5906  case ARM::VLD1LNdWB_fixed_Asm_16:
5907  case ARM::VLD1LNdWB_fixed_Asm_32: {
5908    MCInst TmpInst;
5909    // Shuffle the operands around so the lane index operand is in the
5910    // right place.
5911    unsigned Spacing;
5912    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5913    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5914    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5915    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5916    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5917    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5918    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5919    TmpInst.addOperand(Inst.getOperand(1)); // lane
5920    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5921    TmpInst.addOperand(Inst.getOperand(5));
5922    Inst = TmpInst;
5923    return true;
5924  }
5925
5926  case ARM::VLD2LNdWB_fixed_Asm_8:
5927  case ARM::VLD2LNdWB_fixed_Asm_16:
5928  case ARM::VLD2LNdWB_fixed_Asm_32:
5929  case ARM::VLD2LNqWB_fixed_Asm_16:
5930  case ARM::VLD2LNqWB_fixed_Asm_32: {
5931    MCInst TmpInst;
5932    // Shuffle the operands around so the lane index operand is in the
5933    // right place.
5934    unsigned Spacing;
5935    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5936    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5937    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5938                                            Spacing));
5939    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5940    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5941    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5942    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5943    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5944    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5945                                            Spacing));
5946    TmpInst.addOperand(Inst.getOperand(1)); // lane
5947    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5948    TmpInst.addOperand(Inst.getOperand(5));
5949    Inst = TmpInst;
5950    return true;
5951  }
5952
5953  case ARM::VLD3LNdWB_fixed_Asm_8:
5954  case ARM::VLD3LNdWB_fixed_Asm_16:
5955  case ARM::VLD3LNdWB_fixed_Asm_32:
5956  case ARM::VLD3LNqWB_fixed_Asm_16:
5957  case ARM::VLD3LNqWB_fixed_Asm_32: {
5958    MCInst TmpInst;
5959    // Shuffle the operands around so the lane index operand is in the
5960    // right place.
5961    unsigned Spacing;
5962    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5963    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5964    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5965                                            Spacing));
5966    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5967                                            Spacing * 2));
5968    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5969    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5970    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5971    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5972    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5973    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5974                                            Spacing));
5975    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5976                                            Spacing * 2));
5977    TmpInst.addOperand(Inst.getOperand(1)); // lane
5978    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5979    TmpInst.addOperand(Inst.getOperand(5));
5980    Inst = TmpInst;
5981    return true;
5982  }
5983
5984  case ARM::VLD4LNdWB_fixed_Asm_8:
5985  case ARM::VLD4LNdWB_fixed_Asm_16:
5986  case ARM::VLD4LNdWB_fixed_Asm_32:
5987  case ARM::VLD4LNqWB_fixed_Asm_16:
5988  case ARM::VLD4LNqWB_fixed_Asm_32: {
5989    MCInst TmpInst;
5990    // Shuffle the operands around so the lane index operand is in the
5991    // right place.
5992    unsigned Spacing;
5993    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5994    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5995    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5996                                            Spacing));
5997    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5998                                            Spacing * 2));
5999    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6000                                            Spacing * 3));
6001    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6002    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6003    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6004    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6005    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6006    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6007                                            Spacing));
6008    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6009                                            Spacing * 2));
6010    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6011                                            Spacing * 3));
6012    TmpInst.addOperand(Inst.getOperand(1)); // lane
6013    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6014    TmpInst.addOperand(Inst.getOperand(5));
6015    Inst = TmpInst;
6016    return true;
6017  }
6018
6019  case ARM::VLD1LNdAsm_8:
6020  case ARM::VLD1LNdAsm_16:
6021  case ARM::VLD1LNdAsm_32: {
6022    MCInst TmpInst;
6023    // Shuffle the operands around so the lane index operand is in the
6024    // right place.
6025    unsigned Spacing;
6026    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6027    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6028    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6029    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6030    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6031    TmpInst.addOperand(Inst.getOperand(1)); // lane
6032    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6033    TmpInst.addOperand(Inst.getOperand(5));
6034    Inst = TmpInst;
6035    return true;
6036  }
6037
6038  case ARM::VLD2LNdAsm_8:
6039  case ARM::VLD2LNdAsm_16:
6040  case ARM::VLD2LNdAsm_32:
6041  case ARM::VLD2LNqAsm_16:
6042  case ARM::VLD2LNqAsm_32: {
6043    MCInst TmpInst;
6044    // Shuffle the operands around so the lane index operand is in the
6045    // right place.
6046    unsigned Spacing;
6047    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6048    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6049    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6050                                            Spacing));
6051    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6052    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6053    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6054    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6055                                            Spacing));
6056    TmpInst.addOperand(Inst.getOperand(1)); // lane
6057    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6058    TmpInst.addOperand(Inst.getOperand(5));
6059    Inst = TmpInst;
6060    return true;
6061  }
6062
6063  case ARM::VLD3LNdAsm_8:
6064  case ARM::VLD3LNdAsm_16:
6065  case ARM::VLD3LNdAsm_32:
6066  case ARM::VLD3LNqAsm_16:
6067  case ARM::VLD3LNqAsm_32: {
6068    MCInst TmpInst;
6069    // Shuffle the operands around so the lane index operand is in the
6070    // right place.
6071    unsigned Spacing;
6072    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6073    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6074    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6075                                            Spacing));
6076    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6077                                            Spacing * 2));
6078    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6079    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6080    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6081    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6082                                            Spacing));
6083    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6084                                            Spacing * 2));
6085    TmpInst.addOperand(Inst.getOperand(1)); // lane
6086    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6087    TmpInst.addOperand(Inst.getOperand(5));
6088    Inst = TmpInst;
6089    return true;
6090  }
6091
6092  case ARM::VLD4LNdAsm_8:
6093  case ARM::VLD4LNdAsm_16:
6094  case ARM::VLD4LNdAsm_32:
6095  case ARM::VLD4LNqAsm_16:
6096  case ARM::VLD4LNqAsm_32: {
6097    MCInst TmpInst;
6098    // Shuffle the operands around so the lane index operand is in the
6099    // right place.
6100    unsigned Spacing;
6101    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6102    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6103    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6104                                            Spacing));
6105    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6106                                            Spacing * 2));
6107    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6108                                            Spacing * 3));
6109    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6110    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6111    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6112    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6113                                            Spacing));
6114    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6115                                            Spacing * 2));
6116    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6117                                            Spacing * 3));
6118    TmpInst.addOperand(Inst.getOperand(1)); // lane
6119    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6120    TmpInst.addOperand(Inst.getOperand(5));
6121    Inst = TmpInst;
6122    return true;
6123  }
6124
6125  // VLD3DUP single 3-element structure to all lanes instructions.
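  // Illustrative source form: "vld3.8 {d0[], d1[], d2[]}, [r1]", i.e. load
  // one 3-element structure and replicate it to all lanes of d0-d2.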
6126  case ARM::VLD3DUPdAsm_8:
6127  case ARM::VLD3DUPdAsm_16:
6128  case ARM::VLD3DUPdAsm_32:
6129  case ARM::VLD3DUPqAsm_8:
6130  case ARM::VLD3DUPqAsm_16:
6131  case ARM::VLD3DUPqAsm_32: {
6132    MCInst TmpInst;
6133    unsigned Spacing;
6134    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6135    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6136    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6137                                            Spacing));
6138    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6139                                            Spacing * 2));
6140    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6141    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6142    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6143    TmpInst.addOperand(Inst.getOperand(4));
6144    Inst = TmpInst;
6145    return true;
6146  }
6147
6148  case ARM::VLD3DUPdWB_fixed_Asm_8:
6149  case ARM::VLD3DUPdWB_fixed_Asm_16:
6150  case ARM::VLD3DUPdWB_fixed_Asm_32:
6151  case ARM::VLD3DUPqWB_fixed_Asm_8:
6152  case ARM::VLD3DUPqWB_fixed_Asm_16:
6153  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6154    MCInst TmpInst;
6155    unsigned Spacing;
6156    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6157    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6158    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6159                                            Spacing));
6160    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6161                                            Spacing * 2));
6162    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6163    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6164    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6165    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6166    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6167    TmpInst.addOperand(Inst.getOperand(4));
6168    Inst = TmpInst;
6169    return true;
6170  }
6171
6172  case ARM::VLD3DUPdWB_register_Asm_8:
6173  case ARM::VLD3DUPdWB_register_Asm_16:
6174  case ARM::VLD3DUPdWB_register_Asm_32:
6175  case ARM::VLD3DUPqWB_register_Asm_8:
6176  case ARM::VLD3DUPqWB_register_Asm_16:
6177  case ARM::VLD3DUPqWB_register_Asm_32: {
6178    MCInst TmpInst;
6179    unsigned Spacing;
6180    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6181    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6182    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6183                                            Spacing));
6184    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6185                                            Spacing * 2));
6186    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6187    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6188    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6189    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6190    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6191    TmpInst.addOperand(Inst.getOperand(5));
6192    Inst = TmpInst;
6193    return true;
6194  }
6195
6196  // VLD3 multiple 3-element structure instructions.
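  // Illustrative source forms: "vld3.8 {d0, d1, d2}, [r1]" for the d
  // variants (Spacing == 1) and "vld3.16 {d0, d2, d4}, [r1]" for the
  // double-spaced q variants (Spacing == 2).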
6197  case ARM::VLD3dAsm_8:
6198  case ARM::VLD3dAsm_16:
6199  case ARM::VLD3dAsm_32:
6200  case ARM::VLD3qAsm_8:
6201  case ARM::VLD3qAsm_16:
6202  case ARM::VLD3qAsm_32: {
6203    MCInst TmpInst;
6204    unsigned Spacing;
6205    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6206    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6207    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6208                                            Spacing));
6209    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6210                                            Spacing * 2));
6211    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6212    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6213    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6214    TmpInst.addOperand(Inst.getOperand(4));
6215    Inst = TmpInst;
6216    return true;
6217  }
6218
6219  case ARM::VLD3dWB_fixed_Asm_8:
6220  case ARM::VLD3dWB_fixed_Asm_16:
6221  case ARM::VLD3dWB_fixed_Asm_32:
6222  case ARM::VLD3qWB_fixed_Asm_8:
6223  case ARM::VLD3qWB_fixed_Asm_16:
6224  case ARM::VLD3qWB_fixed_Asm_32: {
6225    MCInst TmpInst;
6226    unsigned Spacing;
6227    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6228    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6229    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6230                                            Spacing));
6231    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6232                                            Spacing * 2));
6233    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6234    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6235    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6236    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6237    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6238    TmpInst.addOperand(Inst.getOperand(4));
6239    Inst = TmpInst;
6240    return true;
6241  }
6242
6243  case ARM::VLD3dWB_register_Asm_8:
6244  case ARM::VLD3dWB_register_Asm_16:
6245  case ARM::VLD3dWB_register_Asm_32:
6246  case ARM::VLD3qWB_register_Asm_8:
6247  case ARM::VLD3qWB_register_Asm_16:
6248  case ARM::VLD3qWB_register_Asm_32: {
6249    MCInst TmpInst;
6250    unsigned Spacing;
6251    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6252    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6253    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6254                                            Spacing));
6255    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6256                                            Spacing * 2));
6257    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6258    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6259    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6260    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6261    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6262    TmpInst.addOperand(Inst.getOperand(5));
6263    Inst = TmpInst;
6264    return true;
6265  }
6266
6267  // VLD4DUP single 4-element structure to all lanes instructions.
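  // Illustrative source form: "vld4.8 {d0[], d1[], d2[], d3[]}, [r1]".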
6268  case ARM::VLD4DUPdAsm_8:
6269  case ARM::VLD4DUPdAsm_16:
6270  case ARM::VLD4DUPdAsm_32:
6271  case ARM::VLD4DUPqAsm_8:
6272  case ARM::VLD4DUPqAsm_16:
6273  case ARM::VLD4DUPqAsm_32: {
6274    MCInst TmpInst;
6275    unsigned Spacing;
6276    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6277    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6278    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6279                                            Spacing));
6280    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6281                                            Spacing * 2));
6282    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6283                                            Spacing * 3));
6284    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6285    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6286    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6287    TmpInst.addOperand(Inst.getOperand(4));
6288    Inst = TmpInst;
6289    return true;
6290  }
6291
6292  case ARM::VLD4DUPdWB_fixed_Asm_8:
6293  case ARM::VLD4DUPdWB_fixed_Asm_16:
6294  case ARM::VLD4DUPdWB_fixed_Asm_32:
6295  case ARM::VLD4DUPqWB_fixed_Asm_8:
6296  case ARM::VLD4DUPqWB_fixed_Asm_16:
6297  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6298    MCInst TmpInst;
6299    unsigned Spacing;
6300    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6301    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6302    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6303                                            Spacing));
6304    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6305                                            Spacing * 2));
6306    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6307                                            Spacing * 3));
6308    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6309    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6310    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6311    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6312    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6313    TmpInst.addOperand(Inst.getOperand(4));
6314    Inst = TmpInst;
6315    return true;
6316  }
6317
6318  case ARM::VLD4DUPdWB_register_Asm_8:
6319  case ARM::VLD4DUPdWB_register_Asm_16:
6320  case ARM::VLD4DUPdWB_register_Asm_32:
6321  case ARM::VLD4DUPqWB_register_Asm_8:
6322  case ARM::VLD4DUPqWB_register_Asm_16:
6323  case ARM::VLD4DUPqWB_register_Asm_32: {
6324    MCInst TmpInst;
6325    unsigned Spacing;
6326    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6327    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6328    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6329                                            Spacing));
6330    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6331                                            Spacing * 2));
6332    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6333                                            Spacing * 3));
6334    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6335    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6336    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6337    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6338    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6339    TmpInst.addOperand(Inst.getOperand(5));
6340    Inst = TmpInst;
6341    return true;
6342  }
6343
6344  // VLD4 multiple 4-element structure instructions.
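  // Illustrative source forms: "vld4.8 {d0, d1, d2, d3}, [r1]" and, for the
  // writeback pseudos, "vld4.8 {d0, d1, d2, d3}, [r1]!" (fixed increment;
  // Rm becomes the reg-0 sentinel below) or "..., [r1], r2" (register
  // increment).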
6345  case ARM::VLD4dAsm_8:
6346  case ARM::VLD4dAsm_16:
6347  case ARM::VLD4dAsm_32:
6348  case ARM::VLD4qAsm_8:
6349  case ARM::VLD4qAsm_16:
6350  case ARM::VLD4qAsm_32: {
6351    MCInst TmpInst;
6352    unsigned Spacing;
6353    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6354    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6355    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6356                                            Spacing));
6357    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6358                                            Spacing * 2));
6359    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6360                                            Spacing * 3));
6361    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6362    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6363    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6364    TmpInst.addOperand(Inst.getOperand(4));
6365    Inst = TmpInst;
6366    return true;
6367  }
6368
6369  case ARM::VLD4dWB_fixed_Asm_8:
6370  case ARM::VLD4dWB_fixed_Asm_16:
6371  case ARM::VLD4dWB_fixed_Asm_32:
6372  case ARM::VLD4qWB_fixed_Asm_8:
6373  case ARM::VLD4qWB_fixed_Asm_16:
6374  case ARM::VLD4qWB_fixed_Asm_32: {
6375    MCInst TmpInst;
6376    unsigned Spacing;
6377    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6378    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6379    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6380                                            Spacing));
6381    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6382                                            Spacing * 2));
6383    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6384                                            Spacing * 3));
6385    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6386    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6387    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6388    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6389    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6390    TmpInst.addOperand(Inst.getOperand(4));
6391    Inst = TmpInst;
6392    return true;
6393  }
6394
6395  case ARM::VLD4dWB_register_Asm_8:
6396  case ARM::VLD4dWB_register_Asm_16:
6397  case ARM::VLD4dWB_register_Asm_32:
6398  case ARM::VLD4qWB_register_Asm_8:
6399  case ARM::VLD4qWB_register_Asm_16:
6400  case ARM::VLD4qWB_register_Asm_32: {
6401    MCInst TmpInst;
6402    unsigned Spacing;
6403    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6404    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6405    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6406                                            Spacing));
6407    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6408                                            Spacing * 2));
6409    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6410                                            Spacing * 3));
6411    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6412    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6413    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6414    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6415    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6416    TmpInst.addOperand(Inst.getOperand(5));
6417    Inst = TmpInst;
6418    return true;
6419  }
6420
6421  // VST3 multiple 3-element structure instructions.
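  // Illustrative source forms: "vst3.8 {d0, d1, d2}, [r1]",
  // "vst3.8 {d0, d1, d2}, [r1]!" (fixed writeback) and
  // "vst3.8 {d0, d1, d2}, [r1], r2" (register writeback).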
6422  case ARM::VST3dAsm_8:
6423  case ARM::VST3dAsm_16:
6424  case ARM::VST3dAsm_32:
6425  case ARM::VST3qAsm_8:
6426  case ARM::VST3qAsm_16:
6427  case ARM::VST3qAsm_32: {
6428    MCInst TmpInst;
6429    unsigned Spacing;
6430    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6431    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6432    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6433    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6434    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6435                                            Spacing));
6436    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6437                                            Spacing * 2));
6438    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6439    TmpInst.addOperand(Inst.getOperand(4));
6440    Inst = TmpInst;
6441    return true;
6442  }
6443
6444  case ARM::VST3dWB_fixed_Asm_8:
6445  case ARM::VST3dWB_fixed_Asm_16:
6446  case ARM::VST3dWB_fixed_Asm_32:
6447  case ARM::VST3qWB_fixed_Asm_8:
6448  case ARM::VST3qWB_fixed_Asm_16:
6449  case ARM::VST3qWB_fixed_Asm_32: {
6450    MCInst TmpInst;
6451    unsigned Spacing;
6452    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6453    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6454    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6455    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6456    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6457    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6458    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6459                                            Spacing));
6460    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6461                                            Spacing * 2));
6462    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6463    TmpInst.addOperand(Inst.getOperand(4));
6464    Inst = TmpInst;
6465    return true;
6466  }
6467
6468  case ARM::VST3dWB_register_Asm_8:
6469  case ARM::VST3dWB_register_Asm_16:
6470  case ARM::VST3dWB_register_Asm_32:
6471  case ARM::VST3qWB_register_Asm_8:
6472  case ARM::VST3qWB_register_Asm_16:
6473  case ARM::VST3qWB_register_Asm_32: {
6474    MCInst TmpInst;
6475    unsigned Spacing;
6476    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6477    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6478    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6479    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6480    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6481    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6482    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6483                                            Spacing));
6484    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6485                                            Spacing * 2));
6486    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6487    TmpInst.addOperand(Inst.getOperand(5));
6488    Inst = TmpInst;
6489    return true;
6490  }
6491
6492  // VST4 multiple 4-element structure instructions.
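  // Illustrative source form: "vst4.8 {d0, d1, d2, d3}, [r1]".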
6493  case ARM::VST4dAsm_8:
6494  case ARM::VST4dAsm_16:
6495  case ARM::VST4dAsm_32:
6496  case ARM::VST4qAsm_8:
6497  case ARM::VST4qAsm_16:
6498  case ARM::VST4qAsm_32: {
6499    MCInst TmpInst;
6500    unsigned Spacing;
6501    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6502    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6503    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6504    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6505    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6506                                            Spacing));
6507    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6508                                            Spacing * 2));
6509    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6510                                            Spacing * 3));
6511    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6512    TmpInst.addOperand(Inst.getOperand(4));
6513    Inst = TmpInst;
6514    return true;
6515  }
6516
6517  case ARM::VST4dWB_fixed_Asm_8:
6518  case ARM::VST4dWB_fixed_Asm_16:
6519  case ARM::VST4dWB_fixed_Asm_32:
6520  case ARM::VST4qWB_fixed_Asm_8:
6521  case ARM::VST4qWB_fixed_Asm_16:
6522  case ARM::VST4qWB_fixed_Asm_32: {
6523    MCInst TmpInst;
6524    unsigned Spacing;
6525    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6526    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6527    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6528    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6529    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6530    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6531    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6532                                            Spacing));
6533    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6534                                            Spacing * 2));
6535    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6536                                            Spacing * 3));
6537    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6538    TmpInst.addOperand(Inst.getOperand(4));
6539    Inst = TmpInst;
6540    return true;
6541  }
6542
6543  case ARM::VST4dWB_register_Asm_8:
6544  case ARM::VST4dWB_register_Asm_16:
6545  case ARM::VST4dWB_register_Asm_32:
6546  case ARM::VST4qWB_register_Asm_8:
6547  case ARM::VST4qWB_register_Asm_16:
6548  case ARM::VST4qWB_register_Asm_32: {
6549    MCInst TmpInst;
6550    unsigned Spacing;
6551    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6552    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6553    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6554    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6555    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6556    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6557    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6558                                            Spacing));
6559    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6560                                            Spacing * 2));
6561    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6562                                            Spacing * 3));
6563    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6564    TmpInst.addOperand(Inst.getOperand(5));
6565    Inst = TmpInst;
6566    return true;
6567  }
6568
6569  // Handle the Thumb2 mode MOV complex aliases.
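  // For example (illustrative only), "mov r0, r0, lsl r1" inside an IT block
  // can use the narrow tLSLrr encoding, and "movs r0, r0, lsl r1" outside an
  // IT block can use the narrow flag-setting form; everything else expands
  // to the wide t2LSLrr/t2ASRrr/etc. encodings.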
6570  case ARM::t2MOVsr:
6571  case ARM::t2MOVSsr: {
6572    // Which instruction to expand to depends on the CCOut operand and,
6573    // when the register operands are low registers, on whether we're in
6574    // an IT block.
6575    bool isNarrow = false;
6576    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6577        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6578        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6579        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6580        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6581      isNarrow = true;
6582    MCInst TmpInst;
6583    unsigned newOpc;
6584    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6585    default: llvm_unreachable("unexpected opcode!");
6586    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6587    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6588    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6589    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6590    }
6591    TmpInst.setOpcode(newOpc);
6592    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6593    if (isNarrow)
6594      TmpInst.addOperand(MCOperand::CreateReg(
6595          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6596    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6597    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6598    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6599    TmpInst.addOperand(Inst.getOperand(5));
6600    if (!isNarrow)
6601      TmpInst.addOperand(MCOperand::CreateReg(
6602          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6603    Inst = TmpInst;
6604    return true;
6605  }
6606  case ARM::t2MOVsi:
6607  case ARM::t2MOVSsi: {
6608    // Which instruction to expand to depends on the CCOut operand and,
6609    // when the register operands are low registers, on whether we're in
6610    // an IT block.
6611    bool isNarrow = false;
6612    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6613        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6614        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6615      isNarrow = true;
6616    MCInst TmpInst;
6617    unsigned newOpc;
6618    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6619    default: llvm_unreachable("unexpected opcode!");
6620    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6621    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6622    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6623    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6624    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6625    }
6626    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6627    if (Amount == 32) Amount = 0;
6628    TmpInst.setOpcode(newOpc);
6629    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6630    if (isNarrow)
6631      TmpInst.addOperand(MCOperand::CreateReg(
6632          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6633    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6634    if (newOpc != ARM::t2RRX)
6635      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6636    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6637    TmpInst.addOperand(Inst.getOperand(4));
6638    if (!isNarrow)
6639      TmpInst.addOperand(MCOperand::CreateReg(
6640          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6641    Inst = TmpInst;
6642    return true;
6643  }
6644  // Handle the ARM mode MOV complex aliases.
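  // For example (illustrative only), "asr r0, r1, r2" becomes MOVsr with an
  // 'asr' shifter operand, and "lsl r0, r1, #0" degenerates to a plain MOVr
  // in the immediate cases below.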
6645  case ARM::ASRr:
6646  case ARM::LSRr:
6647  case ARM::LSLr:
6648  case ARM::RORr: {
6649    ARM_AM::ShiftOpc ShiftTy;
6650    switch(Inst.getOpcode()) {
6651    default: llvm_unreachable("unexpected opcode!");
6652    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6653    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6654    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6655    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6656    }
6657    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6658    MCInst TmpInst;
6659    TmpInst.setOpcode(ARM::MOVsr);
6660    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6661    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6662    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6663    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6664    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6665    TmpInst.addOperand(Inst.getOperand(4));
6666    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6667    Inst = TmpInst;
6668    return true;
6669  }
6670  case ARM::ASRi:
6671  case ARM::LSRi:
6672  case ARM::LSLi:
6673  case ARM::RORi: {
6674    ARM_AM::ShiftOpc ShiftTy;
6675    switch(Inst.getOpcode()) {
6676    default: llvm_unreachable("unexpected opcode!");
6677    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6678    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6679    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6680    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6681    }
6682    // A shift by zero is a plain MOVr, not a MOVsi.
6683    unsigned Amt = Inst.getOperand(2).getImm();
6684    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6685    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6686    MCInst TmpInst;
6687    TmpInst.setOpcode(Opc);
6688    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6689    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6690    if (Opc == ARM::MOVsi)
6691      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6692    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6693    TmpInst.addOperand(Inst.getOperand(4));
6694    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6695    Inst = TmpInst;
6696    return true;
6697  }
6698  case ARM::RRXi: {
6699    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6700    MCInst TmpInst;
6701    TmpInst.setOpcode(ARM::MOVsi);
6702    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6703    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6704    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6705    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6706    TmpInst.addOperand(Inst.getOperand(3));
6707    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6708    Inst = TmpInst;
6709    return true;
6710  }
6711  case ARM::t2LDMIA_UPD: {
6712    // If this is a load of a single register, then we should use
6713    // a post-indexed LDR instruction instead, per the ARM ARM.
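    // For example (illustrative): 'ldmia.w r3!, {r5}' becomes
    // 'ldr r5, [r3], #4'.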
6714    if (Inst.getNumOperands() != 5)
6715      return false;
6716    MCInst TmpInst;
6717    TmpInst.setOpcode(ARM::t2LDR_POST);
6718    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6719    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6720    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6721    TmpInst.addOperand(MCOperand::CreateImm(4));
6722    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6723    TmpInst.addOperand(Inst.getOperand(3));
6724    Inst = TmpInst;
6725    return true;
6726  }
6727  case ARM::t2STMDB_UPD: {
6728    // If this is a store of a single register, then we should use
6729    // a pre-indexed STR instruction instead, per the ARM ARM.
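    // For example (illustrative): 'stmdb.w r3!, {r5}' becomes
    // 'str r5, [r3, #-4]!'.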
6730    if (Inst.getNumOperands() != 5)
6731      return false;
6732    MCInst TmpInst;
6733    TmpInst.setOpcode(ARM::t2STR_PRE);
6734    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6735    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6736    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6737    TmpInst.addOperand(MCOperand::CreateImm(-4));
6738    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6739    TmpInst.addOperand(Inst.getOperand(3));
6740    Inst = TmpInst;
6741    return true;
6742  }
6743  case ARM::LDMIA_UPD:
6744    // If this is a load of a single register via a 'pop', then we should use
6745    // a post-indexed LDR instruction instead, per the ARM ARM.
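    // For example (illustrative): 'pop {r7}' in ARM mode becomes
    // 'ldr r7, [sp], #4'.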
6746    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6747        Inst.getNumOperands() == 5) {
6748      MCInst TmpInst;
6749      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6750      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6751      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6752      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6753      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6754      TmpInst.addOperand(MCOperand::CreateImm(4));
6755      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6756      TmpInst.addOperand(Inst.getOperand(3));
6757      Inst = TmpInst;
6758      return true;
6759    }
6760    break;
6761  case ARM::STMDB_UPD:
6762    // If this is a store of a single register via a 'push', then we should use
6763    // a pre-indexed STR instruction instead, per the ARM ARM.
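    // For example (illustrative): 'push {r7}' in ARM mode becomes
    // 'str r7, [sp, #-4]!'.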
6764    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6765        Inst.getNumOperands() == 5) {
6766      MCInst TmpInst;
6767      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6768      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6769      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6770      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6771      TmpInst.addOperand(MCOperand::CreateImm(-4));
6772      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6773      TmpInst.addOperand(Inst.getOperand(3));
6774      Inst = TmpInst;
6775    }
6776    break;
6777  case ARM::t2ADDri12:
6778    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6779    // mnemonic was used (not "addw"), encoding T3 is preferred.
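    // For example (illustrative): 'add r0, r1, #16' can use encoding T3,
    // while 'addw r0, r1, #16' keeps encoding T4.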
6780    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6781        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6782      break;
6783    Inst.setOpcode(ARM::t2ADDri);
6784    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6785    break;
6786  case ARM::t2SUBri12:
6787    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6788    // mnemonic was used (not "subw"), encoding T3 is preferred.
6789    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6790        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6791      break;
6792    Inst.setOpcode(ARM::t2SUBri);
6793    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6794    break;
6795  case ARM::tADDi8:
6796    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6797    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6798    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6799    // to encoding T1 if <Rd> is omitted."
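    // For example (illustrative): 'adds r1, r1, #3' prefers the 16-bit
    // encoding T1, while 'adds r1, #3' keeps encoding T2.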
6800    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6801      Inst.setOpcode(ARM::tADDi3);
6802      return true;
6803    }
6804    break;
6805  case ARM::tSUBi8:
6806    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6807    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6808    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6809    // to encoding T1 if <Rd> is omitted."
6810    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6811      Inst.setOpcode(ARM::tSUBi3);
6812      return true;
6813    }
6814    break;
6815  case ARM::t2ADDrr: {
6816    // If the destination and first source operand are the same, and
6817    // there's no setting of the flags, use encoding T2 instead of T3.
6818    // Note that this is only for ADD, not SUB. This mirrors the system
6819    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
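    // For example (illustrative): 'add r1, r1, r2' with no '.w' suffix and
    // no flag setting can use the 16-bit tADDhirr form.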
6820    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6821        Inst.getOperand(5).getReg() != 0 ||
6822        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6823         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6824      break;
6825    MCInst TmpInst;
6826    TmpInst.setOpcode(ARM::tADDhirr);
6827    TmpInst.addOperand(Inst.getOperand(0));
6828    TmpInst.addOperand(Inst.getOperand(0));
6829    TmpInst.addOperand(Inst.getOperand(2));
6830    TmpInst.addOperand(Inst.getOperand(3));
6831    TmpInst.addOperand(Inst.getOperand(4));
6832    Inst = TmpInst;
6833    return true;
6834  }
6835  case ARM::tB:
6836    // A Thumb conditional branch outside of an IT block is a tBcc.
6837    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6838      Inst.setOpcode(ARM::tBcc);
6839      return true;
6840    }
6841    break;
6842  case ARM::t2B:
6843    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6844    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6845      Inst.setOpcode(ARM::t2Bcc);
6846      return true;
6847    }
6848    break;
6849  case ARM::t2Bcc:
6850    // If the conditional is AL or we're in an IT block, we really want t2B.
6851    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6852      Inst.setOpcode(ARM::t2B);
6853      return true;
6854    }
6855    break;
6856  case ARM::tBcc:
6857    // If the conditional is AL, we really want tB.
6858    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6859      Inst.setOpcode(ARM::tB);
6860      return true;
6861    }
6862    break;
6863  case ARM::tLDMIA: {
6864    // If the register list contains any high registers, or if the writeback
6865    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6866    // instead if we're in Thumb2. Otherwise, this should have generated
6867    // an error in validateInstruction().
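    // For example (illustrative): 'ldmia r0!, {r1, r8}' needs the 32-bit
    // t2LDMIA_UPD, since r8 is not encodable in the 16-bit register list.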
6868    unsigned Rn = Inst.getOperand(0).getReg();
6869    bool hasWritebackToken =
6870      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6871       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6872    bool listContainsBase;
6873    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6874        (!listContainsBase && !hasWritebackToken) ||
6875        (listContainsBase && hasWritebackToken)) {
6876      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6877      assert (isThumbTwo());
6878      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6879      // If we're switching to the updating version, we need to insert
6880      // the writeback tied operand.
6881      if (hasWritebackToken)
6882        Inst.insert(Inst.begin(),
6883                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6884      return true;
6885    }
6886    break;
6887  }
6888  case ARM::tSTMIA_UPD: {
6889    // If the register list contains any high registers, we need to use
6890    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6891    // should have generated an error in validateInstruction().
6892    unsigned Rn = Inst.getOperand(0).getReg();
6893    bool listContainsBase;
6894    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6895      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6896      assert (isThumbTwo());
6897      Inst.setOpcode(ARM::t2STMIA_UPD);
6898      return true;
6899    }
6900    break;
6901  }
6902  case ARM::tPOP: {
6903    bool listContainsBase;
6904    // If the register list contains any high registers, we need to use
6905    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6906    // should have generated an error in validateInstruction().
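    // For example (illustrative): 'pop {r0, r8}' becomes
    // 'ldmia.w sp!, {r0, r8}'.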
6907    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6908      return false;
6909    assert (isThumbTwo());
6910    Inst.setOpcode(ARM::t2LDMIA_UPD);
6911    // Add the base register and writeback operands.
6912    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6913    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6914    return true;
6915  }
6916  case ARM::tPUSH: {
6917    bool listContainsBase;
6918    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6919      return false;
6920    assert (isThumbTwo());
6921    Inst.setOpcode(ARM::t2STMDB_UPD);
6922    // Add the base register and writeback operands.
6923    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6924    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6925    return true;
6926  }
6927  case ARM::t2MOVi: {
6928    // If we can use the 16-bit encoding and the user didn't explicitly
6929    // request the 32-bit variant, transform it here.
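    // For example (illustrative): 'movs r1, #4' outside an IT block, or
    // 'moveq r1, #4' inside one, can use the 16-bit tMOVi8 encoding.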
6930    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6931        Inst.getOperand(1).getImm() <= 255 &&
6932        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6933         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6934        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6935        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6936         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6937      // The operands aren't in the same order for tMOVi8...
6938      MCInst TmpInst;
6939      TmpInst.setOpcode(ARM::tMOVi8);
6940      TmpInst.addOperand(Inst.getOperand(0));
6941      TmpInst.addOperand(Inst.getOperand(4));
6942      TmpInst.addOperand(Inst.getOperand(1));
6943      TmpInst.addOperand(Inst.getOperand(2));
6944      TmpInst.addOperand(Inst.getOperand(3));
6945      Inst = TmpInst;
6946      return true;
6947    }
6948    break;
6949  }
6950  case ARM::t2MOVr: {
6951    // If we can use the 16-bit encoding and the user didn't explicitly
6952    // request the 32-bit variant, transform it here.
6953    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6954        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6955        Inst.getOperand(2).getImm() == ARMCC::AL &&
6956        Inst.getOperand(4).getReg() == ARM::CPSR &&
6957        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6958         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6959      // The operands aren't the same for tMOV[S]r... (no cc_out)
6960      MCInst TmpInst;
6961      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6962      TmpInst.addOperand(Inst.getOperand(0));
6963      TmpInst.addOperand(Inst.getOperand(1));
6964      TmpInst.addOperand(Inst.getOperand(2));
6965      TmpInst.addOperand(Inst.getOperand(3));
6966      Inst = TmpInst;
6967      return true;
6968    }
6969    break;
6970  }
6971  case ARM::t2SXTH:
6972  case ARM::t2SXTB:
6973  case ARM::t2UXTH:
6974  case ARM::t2UXTB: {
6975    // If we can use the 16-bit encoding and the user didn't explicitly
6976    // request the 32-bit variant, transform it here.
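    // For example (illustrative): 'sxth r1, r2' with no rotation and no '.w'
    // suffix can use the 16-bit tSXTH encoding.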
6977    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6978        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6979        Inst.getOperand(2).getImm() == 0 &&
6980        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6981         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6982      unsigned NewOpc;
6983      switch (Inst.getOpcode()) {
6984      default: llvm_unreachable("Illegal opcode!");
6985      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6986      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6987      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6988      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6989      }
6990      // The operands aren't the same for thumb1 (no rotate operand).
6991      MCInst TmpInst;
6992      TmpInst.setOpcode(NewOpc);
6993      TmpInst.addOperand(Inst.getOperand(0));
6994      TmpInst.addOperand(Inst.getOperand(1));
6995      TmpInst.addOperand(Inst.getOperand(3));
6996      TmpInst.addOperand(Inst.getOperand(4));
6997      Inst = TmpInst;
6998      return true;
6999    }
7000    break;
7001  }
7002  case ARM::MOVsi: {
7003    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7004    if (SOpc == ARM_AM::rrx) return false;
7005    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7006      // Shifting by zero is accepted as a vanilla 'MOVr'
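      // For example (illustrative): 'mov r1, r2, lsl #0' is emitted as
      // 'mov r1, r2'.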
7007      MCInst TmpInst;
7008      TmpInst.setOpcode(ARM::MOVr);
7009      TmpInst.addOperand(Inst.getOperand(0));
7010      TmpInst.addOperand(Inst.getOperand(1));
7011      TmpInst.addOperand(Inst.getOperand(3));
7012      TmpInst.addOperand(Inst.getOperand(4));
7013      TmpInst.addOperand(Inst.getOperand(5));
7014      Inst = TmpInst;
7015      return true;
7016    }
7017    return false;
7018  }
7019  case ARM::ANDrsi:
7020  case ARM::ORRrsi:
7021  case ARM::EORrsi:
7022  case ARM::BICrsi:
7023  case ARM::SUBrsi:
7024  case ARM::ADDrsi: {
7025    unsigned newOpc;
7026    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7027    if (SOpc == ARM_AM::rrx) return false;
7028    switch (Inst.getOpcode()) {
7029    default: llvm_unreachable("unexpected opcode!");
7030    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7031    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7032    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7033    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7034    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7035    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7036    }
7037    // If the shift is by zero, use the non-shifted instruction definition.
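    // For example (illustrative): 'and r0, r1, r2, lsl #0' is emitted as
    // 'and r0, r1, r2'.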
7038    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7039      MCInst TmpInst;
7040      TmpInst.setOpcode(newOpc);
7041      TmpInst.addOperand(Inst.getOperand(0));
7042      TmpInst.addOperand(Inst.getOperand(1));
7043      TmpInst.addOperand(Inst.getOperand(2));
7044      TmpInst.addOperand(Inst.getOperand(4));
7045      TmpInst.addOperand(Inst.getOperand(5));
7046      TmpInst.addOperand(Inst.getOperand(6));
7047      Inst = TmpInst;
7048      return true;
7049    }
7050    return false;
7051  }
7052  case ARM::ITasm:
7053  case ARM::t2IT: {
7054    // The mask bits for all but the first condition are represented such
7055    // that a mask bit equal to the low bit of the condition code means
7056    // 't'. We currently always use 1 to mean 't', so XOR toggle the bits
7057    // if the low bit of the condition code is zero. The encoding also
7058    // expects the low bit of the condition to be encoded as bit 4 of the
7059    // mask operand, so mask that in if needed.
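    // For example (illustrative): for 'ite eq' the parsed mask 0b0100 becomes
    // 0b1100, since eq has a zero low bit.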
7060    MCOperand &MO = Inst.getOperand(1);
7061    unsigned Mask = MO.getImm();
7062    unsigned OrigMask = Mask;
7063    unsigned TZ = CountTrailingZeros_32(Mask);
7064    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7065      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7066      for (unsigned i = 3; i != TZ; --i)
7067        Mask ^= 1 << i;
7068    } else
7069      Mask |= 0x10;
7070    MO.setImm(Mask);
7071
7072    // Set up the IT block state according to the IT instruction we just
7073    // matched.
7074    assert(!inITBlock() && "nested IT blocks?!");
7075    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7076    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7077    ITState.CurPosition = 0;
7078    ITState.FirstCond = true;
7079    break;
7080  }
7081  }
7082  return false;
7083}
7084
7085unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7086  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7087  // suffix depending on whether they're in an IT block or not.
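  // For example (illustrative): 'adds r0, r1, r2' is the valid 16-bit form
  // outside an IT block, while 'addeq r0, r1, r2' is the valid form inside
  // one.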
7088  unsigned Opc = Inst.getOpcode();
7089  const MCInstrDesc &MCID = getInstDesc(Opc);
7090  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7091    assert(MCID.hasOptionalDef() &&
7092           "optionally flag setting instruction missing optional def operand");
7093    assert(MCID.NumOperands == Inst.getNumOperands() &&
7094           "operand count mismatch!");
7095    // Find the optional-def operand (cc_out).
7096    unsigned OpNo;
7097    for (OpNo = 0;
7098         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7099         ++OpNo)
7100      ;
7101    // If we're parsing Thumb1, reject it completely.
7102    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7103      return Match_MnemonicFail;
7104    // If we're parsing Thumb2, which form is legal depends on whether we're
7105    // in an IT block.
7106    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7107        !inITBlock())
7108      return Match_RequiresITBlock;
7109    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7110        inITBlock())
7111      return Match_RequiresNotITBlock;
7112  }
7113  // Some Thumb1 encodings that support high registers (e.g. tADDhirr) only
7114  // allow both registers to be from r0-r7 when Thumb2 is available.
7115  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7116           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7117           isARMLowRegister(Inst.getOperand(2).getReg()))
7118    return Match_RequiresThumb2;
7119  // Others only require ARMv6 or later.
7120  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7121           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7122           isARMLowRegister(Inst.getOperand(1).getReg()))
7123    return Match_RequiresV6;
7124  return Match_Success;
7125}
7126
7127bool ARMAsmParser::
7128MatchAndEmitInstruction(SMLoc IDLoc,
7129                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7130                        MCStreamer &Out) {
7131  MCInst Inst;
7132  unsigned ErrorInfo;
7133  unsigned MatchResult;
7134  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7135  switch (MatchResult) {
7136  default: break;
7137  case Match_Success:
7138    // Context sensitive operand constraints aren't handled by the matcher,
7139    // so check them here.
7140    if (validateInstruction(Inst, Operands)) {
7141      // Still progress the IT block, otherwise one wrong condition causes
7142      // nasty cascading errors.
7143      forwardITPosition();
7144      return true;
7145    }
7146
7147    // Some instructions need post-processing to, for example, tweak which
7148    // encoding is selected. Loop on it while changes happen so the
7149    // individual transformations can chain off each other. E.g.,
7150    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(r8, sp)
7151    while (processInstruction(Inst, Operands))
7152      ;
7153
7154    // Only move forward at the very end so that everything in validate
7155    // and process gets a consistent answer about whether we're in an IT
7156    // block.
7157    forwardITPosition();
7158
7159    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7160    // doesn't actually encode.
7161    if (Inst.getOpcode() == ARM::ITasm)
7162      return false;
7163
7164    Out.EmitInstruction(Inst);
7165    return false;
7166  case Match_MissingFeature:
7167    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7168    return true;
7169  case Match_InvalidOperand: {
7170    SMLoc ErrorLoc = IDLoc;
7171    if (ErrorInfo != ~0U) {
7172      if (ErrorInfo >= Operands.size())
7173        return Error(IDLoc, "too few operands for instruction");
7174
7175      ErrorLoc = static_cast<ARMOperand*>(Operands[ErrorInfo])->getStartLoc();
7176      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7177    }
7178
7179    return Error(ErrorLoc, "invalid operand for instruction");
7180  }
7181  case Match_MnemonicFail:
7182    return Error(IDLoc, "invalid instruction");
7183  case Match_ConversionFail:
7184    // The converter function will have already emitted a diagnostic.
7185    return true;
7186  case Match_RequiresNotITBlock:
7187    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7188  case Match_RequiresITBlock:
7189    return Error(IDLoc, "instruction only valid inside IT block");
7190  case Match_RequiresV6:
7191    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7192  case Match_RequiresThumb2:
7193    return Error(IDLoc, "instruction variant requires Thumb2");
7194  }
7195
7196  llvm_unreachable("Implement any new match types added!");
7197}
7198
7199/// ParseDirective parses the ARM-specific directives.
7200bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7201  StringRef IDVal = DirectiveID.getIdentifier();
7202  if (IDVal == ".word")
7203    return parseDirectiveWord(4, DirectiveID.getLoc());
7204  else if (IDVal == ".thumb")
7205    return parseDirectiveThumb(DirectiveID.getLoc());
7206  else if (IDVal == ".arm")
7207    return parseDirectiveARM(DirectiveID.getLoc());
7208  else if (IDVal == ".thumb_func")
7209    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7210  else if (IDVal == ".code")
7211    return parseDirectiveCode(DirectiveID.getLoc());
7212  else if (IDVal == ".syntax")
7213    return parseDirectiveSyntax(DirectiveID.getLoc());
7214  else if (IDVal == ".unreq")
7215    return parseDirectiveUnreq(DirectiveID.getLoc());
7216  else if (IDVal == ".arch")
7217    return parseDirectiveArch(DirectiveID.getLoc());
7218  else if (IDVal == ".eabi_attribute")
7219    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7220  return true;
7221}
7222
7223/// parseDirectiveWord
7224///  ::= .word [ expression (, expression)* ]
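/// For example (illustrative): '.word 0x11223344, someLabel+4' emits two
/// 4-byte values.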
7225bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7226  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7227    for (;;) {
7228      const MCExpr *Value;
7229      if (getParser().ParseExpression(Value))
7230        return true;
7231
7232      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7233
7234      if (getLexer().is(AsmToken::EndOfStatement))
7235        break;
7236
7237      // FIXME: Improve diagnostic.
7238      if (getLexer().isNot(AsmToken::Comma))
7239        return Error(L, "unexpected token in directive");
7240      Parser.Lex();
7241    }
7242  }
7243
7244  Parser.Lex();
7245  return false;
7246}
7247
7248/// parseDirectiveThumb
7249///  ::= .thumb
7250bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7251  if (getLexer().isNot(AsmToken::EndOfStatement))
7252    return Error(L, "unexpected token in directive");
7253  Parser.Lex();
7254
7255  if (!isThumb())
7256    SwitchMode();
7257  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7258  return false;
7259}
7260
7261/// parseDirectiveARM
7262///  ::= .arm
7263bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7264  if (getLexer().isNot(AsmToken::EndOfStatement))
7265    return Error(L, "unexpected token in directive");
7266  Parser.Lex();
7267
7268  if (isThumb())
7269    SwitchMode();
7270  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7271  return false;
7272}
7273
7274/// parseDirectiveThumbFunc
7275///  ::= .thumb_func symbol_name
7276bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7277  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7278  bool isMachO = MAI.hasSubsectionsViaSymbols();
7279  StringRef Name;
7280  bool needFuncName = true;
7281
7282  // Darwin asm has an optional function name after the .thumb_func directive;
7283  // ELF doesn't.
7284  if (isMachO) {
7285    const AsmToken &Tok = Parser.getTok();
7286    if (Tok.isNot(AsmToken::EndOfStatement)) {
7287      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7288        return Error(L, "unexpected token in .thumb_func directive");
7289      Name = Tok.getIdentifier();
7290      Parser.Lex(); // Consume the identifier token.
7291      needFuncName = false;
7292    }
7293  }
7294
7295  if (getLexer().isNot(AsmToken::EndOfStatement))
7296    return Error(L, "unexpected token in directive");
7297
7298  // Eat the end of statement and any blank lines that follow.
7299  while (getLexer().is(AsmToken::EndOfStatement))
7300    Parser.Lex();
7301
7302  // FIXME: assuming function name will be the line following .thumb_func
7303  // We really should be checking the next symbol definition even if there's
7304  // stuff in between.
7305  if (needFuncName) {
7306    Name = Parser.getTok().getIdentifier();
7307  }
7308
7309  // Mark symbol as a thumb symbol.
7310  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7311  getParser().getStreamer().EmitThumbFunc(Func);
7312  return false;
7313}
7314
7315/// parseDirectiveSyntax
7316///  ::= .syntax unified | divided
7317bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7318  const AsmToken &Tok = Parser.getTok();
7319  if (Tok.isNot(AsmToken::Identifier))
7320    return Error(L, "unexpected token in .syntax directive");
7321  StringRef Mode = Tok.getString();
7322  if (Mode == "unified" || Mode == "UNIFIED")
7323    Parser.Lex();
7324  else if (Mode == "divided" || Mode == "DIVIDED")
7325    return Error(L, "'.syntax divided' arm asssembly not supported");
7326  else
7327    return Error(L, "unrecognized syntax mode in .syntax directive");
7328
7329  if (getLexer().isNot(AsmToken::EndOfStatement))
7330    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7331  Parser.Lex();
7332
7333  // TODO tell the MC streamer the mode
7334  // getParser().getStreamer().Emit???();
7335  return false;
7336}
7337
7338/// parseDirectiveCode
7339///  ::= .code 16 | 32
7340bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7341  const AsmToken &Tok = Parser.getTok();
7342  if (Tok.isNot(AsmToken::Integer))
7343    return Error(L, "unexpected token in .code directive");
7344  int64_t Val = Parser.getTok().getIntVal();
7345  if (Val == 16)
7346    Parser.Lex();
7347  else if (Val == 32)
7348    Parser.Lex();
7349  else
7350    return Error(L, "invalid operand to .code directive");
7351
7352  if (getLexer().isNot(AsmToken::EndOfStatement))
7353    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7354  Parser.Lex();
7355
7356  if (Val == 16) {
7357    if (!isThumb())
7358      SwitchMode();
7359    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7360  } else {
7361    if (isThumb())
7362      SwitchMode();
7363    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7364  }
7365
7366  return false;
7367}
7368
7369/// parseDirectiveReq
7370///  ::= name .req registername
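/// For example (illustrative): 'fp .req r11' lets 'fp' be used wherever a
/// register name is expected.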
7371bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7372  Parser.Lex(); // Eat the '.req' token.
7373  unsigned Reg;
7374  SMLoc SRegLoc, ERegLoc;
7375  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7376    Parser.EatToEndOfStatement();
7377    return Error(SRegLoc, "register name expected");
7378  }
7379
7380  // Shouldn't be anything else.
7381  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7382    Parser.EatToEndOfStatement();
7383    return Error(Parser.getTok().getLoc(),
7384                 "unexpected input in .req directive.");
7385  }
7386
7387  Parser.Lex(); // Consume the EndOfStatement
7388
7389  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7390    return Error(SRegLoc, "redefinition of '" + Name +
7391                          "' does not match original.");
7392
7393  return false;
7394}
7395
7396/// parseDirectiveUnreq
7397///  ::= .unreq registername
7398bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7399  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7400    Parser.EatToEndOfStatement();
7401    return Error(L, "unexpected input in .unreq directive.");
7402  }
7403  RegisterReqs.erase(Parser.getTok().getIdentifier());
7404  Parser.Lex(); // Eat the identifier.
7405  return false;
7406}
7407
7408/// parseDirectiveArch
7409///  ::= .arch token
7410bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7411  return true;
7412}
7413
7414/// parseDirectiveEabiAttr
7415///  ::= .eabi_attribute int, int
7416bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7417  return true;
7418}
7419
7420extern "C" void LLVMInitializeARMAsmLexer();
7421
7422/// Force static initialization.
7423extern "C" void LLVMInitializeARMAsmParser() {
7424  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7425  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7426  LLVMInitializeARMAsmLexer();
7427}
7428
7429#define GET_REGISTER_MATCHER
7430#define GET_MATCHER_IMPLEMENTATION
7431#include "ARMGenAsmMatcher.inc"
7432