ARMAsmParser.cpp revision b657a90929867716ca1c7c12d442bb5d32281bd4
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases defined via the .req directive.
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
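                              // e.g. Mask = 0b0100 has two trailing zeros,
                              // so the block spans 4 - 2 = 2 instructions.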
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // first instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
86  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
87
88  int tryParseRegister();
89  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
90  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
93  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
94  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
95  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
96                              unsigned &ShiftAmount);
97  bool parseDirectiveWord(unsigned Size, SMLoc L);
98  bool parseDirectiveThumb(SMLoc L);
99  bool parseDirectiveARM(SMLoc L);
100  bool parseDirectiveThumbFunc(SMLoc L);
101  bool parseDirectiveCode(SMLoc L);
102  bool parseDirectiveSyntax(SMLoc L);
103  bool parseDirectiveReq(StringRef Name, SMLoc L);
104  bool parseDirectiveUnreq(SMLoc L);
105  bool parseDirectiveArch(SMLoc L);
106  bool parseDirectiveEabiAttr(SMLoc L);
107
108  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
109                          bool &CarrySetting, unsigned &ProcessorIMod,
110                          StringRef &ITMask);
111  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
112                             bool &CanAcceptPredicationCode);
113
114  bool isThumb() const {
115    // FIXME: Can tablegen auto-generate this?
116    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
117  }
118  bool isThumbOne() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
120  }
121  bool isThumbTwo() const {
122    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
123  }
124  bool hasV6Ops() const {
125    return STI.getFeatureBits() & ARM::HasV6Ops;
126  }
127  bool hasV7Ops() const {
128    return STI.getFeatureBits() & ARM::HasV7Ops;
129  }
130  void SwitchMode() {
131    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
132    setAvailableFeatures(FB);
133  }
134  bool isMClass() const {
135    return STI.getFeatureBits() & ARM::FeatureMClass;
136  }
137
138  /// @name Auto-generated Match Functions
139  /// {
140
141#define GET_ASSEMBLER_HEADER
142#include "ARMGenAsmMatcher.inc"
143
144  /// }
145
146  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
147  OperandMatchResultTy parseCoprocNumOperand(
148    SmallVectorImpl<MCParsedAsmOperand*>&);
149  OperandMatchResultTy parseCoprocRegOperand(
150    SmallVectorImpl<MCParsedAsmOperand*>&);
151  OperandMatchResultTy parseCoprocOptionOperand(
152    SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseMemBarrierOptOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseProcIFlagsOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseMSRMaskOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
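  // parsePKHImm handles the shift operand of the PKH instructions, e.g.
  // "pkhbt r0, r1, r2, lsl #8" (lsl in 0-31) and
  // "pkhtb r0, r1, r2, asr #16" (asr in 1-32).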
159  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
160                                   StringRef Op, int Low, int High);
161  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "lsl", 0, 31);
163  }
164  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
165    return parsePKHImm(O, "asr", 1, 32);
166  }
167  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
176
177  // Asm Match Converter Methods
178  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
179                    const SmallVectorImpl<MCParsedAsmOperand*> &);
180  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
181                    const SmallVectorImpl<MCParsedAsmOperand*> &);
182  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
183                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
184  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
185                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
187                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
197                             const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
199                             const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
201                             const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
205                  const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
207                  const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
209                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
211                        const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
213                     const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
215                        const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
217                     const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
219                        const SmallVectorImpl<MCParsedAsmOperand*> &);
220
221  bool validateInstruction(MCInst &Inst,
222                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
223  bool processInstruction(MCInst &Inst,
224                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
225  bool shouldOmitCCOutOperand(StringRef Mnemonic,
226                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
227
228public:
229  enum ARMMatchResultTy {
230    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
231    Match_RequiresNotITBlock,
232    Match_RequiresV6,
233    Match_RequiresThumb2
234  };
235
236  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
237    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
238    MCAsmParserExtension::Initialize(_Parser);
239
240    // Cache the MCRegisterInfo.
241    MRI = &getContext().getRegisterInfo();
242
243    // Initialize the set of available features.
244    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
245
246    // Not in an ITBlock to start with.
247    ITState.CurPosition = ~0U;
248  }
249
250  // Implementation of the MCTargetAsmParser interface:
251  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
252  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
253                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
254  bool ParseDirective(AsmToken DirectiveID);
255
256  unsigned checkTargetMatchPredicate(MCInst &Inst);
257
258  bool MatchAndEmitInstruction(SMLoc IDLoc,
259                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
260                               MCStreamer &Out);
261};
262} // end anonymous namespace
263
264namespace {
265
266/// ARMOperand - Instances of this class represent a parsed ARM machine
267/// instruction.
268class ARMOperand : public MCParsedAsmOperand {
269  enum KindTy {
270    k_CondCode,
271    k_CCOut,
272    k_ITCondMask,
273    k_CoprocNum,
274    k_CoprocReg,
275    k_CoprocOption,
276    k_Immediate,
277    k_MemBarrierOpt,
278    k_Memory,
279    k_PostIndexRegister,
280    k_MSRMask,
281    k_ProcIFlags,
282    k_VectorIndex,
283    k_Register,
284    k_RegisterList,
285    k_DPRRegisterList,
286    k_SPRRegisterList,
287    k_VectorList,
288    k_VectorListAllLanes,
289    k_VectorListIndexed,
290    k_ShiftedRegister,
291    k_ShiftedImmediate,
292    k_ShifterImmediate,
293    k_RotateImmediate,
294    k_BitfieldDescriptor,
295    k_Token
296  } Kind;
297
298  SMLoc StartLoc, EndLoc;
299  SmallVector<unsigned, 8> Registers;
300
301  union {
302    struct {
303      ARMCC::CondCodes Val;
304    } CC;
305
306    struct {
307      unsigned Val;
308    } Cop;
309
310    struct {
311      unsigned Val;
312    } CoprocOption;
313
314    struct {
315      unsigned Mask:4;
316    } ITMask;
317
318    struct {
319      ARM_MB::MemBOpt Val;
320    } MBOpt;
321
322    struct {
323      ARM_PROC::IFlags Val;
324    } IFlags;
325
326    struct {
327      unsigned Val;
328    } MMask;
329
330    struct {
331      const char *Data;
332      unsigned Length;
333    } Tok;
334
335    struct {
336      unsigned RegNum;
337    } Reg;
338
339    // A vector register list is a sequential list of 1 to 4 registers.
340    struct {
341      unsigned RegNum;
342      unsigned Count;
343      unsigned LaneIndex;
344      bool isDoubleSpaced;
345    } VectorList;
346
347    struct {
348      unsigned Val;
349    } VectorIndex;
350
351    struct {
352      const MCExpr *Val;
353    } Imm;
354
355    /// Combined record for all forms of ARM address expressions.
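    /// For illustration, an operand written "[r0, r1, lsl #2]" is stored with
    /// BaseRegNum = R0, OffsetRegNum = R1, OffsetImm = 0, ShiftType = lsl,
    /// ShiftImm = 2, Alignment = 0 and isNegative = 0.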
356    struct {
357      unsigned BaseRegNum;
358      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
359      // was specified.
360      const MCConstantExpr *OffsetImm;  // Offset immediate value
361      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
362      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
363      unsigned ShiftImm;        // shift for OffsetReg.
364      unsigned Alignment;       // 0 = no alignment specified
365                                // n = alignment in bytes (2, 4, 8, 16, or 32)
366      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
367    } Memory;
368
369    struct {
370      unsigned RegNum;
371      bool isAdd;
372      ARM_AM::ShiftOpc ShiftTy;
373      unsigned ShiftImm;
374    } PostIdxReg;
375
376    struct {
377      bool isASR;
378      unsigned Imm;
379    } ShifterImm;
380    struct {
381      ARM_AM::ShiftOpc ShiftTy;
382      unsigned SrcReg;
383      unsigned ShiftReg;
384      unsigned ShiftImm;
385    } RegShiftedReg;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftImm;
390    } RegShiftedImm;
391    struct {
392      unsigned Imm;
393    } RotImm;
394    struct {
395      unsigned LSB;
396      unsigned Width;
397    } Bitfield;
398  };
399
400  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
401public:
402  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
403    Kind = o.Kind;
404    StartLoc = o.StartLoc;
405    EndLoc = o.EndLoc;
406    switch (Kind) {
407    case k_CondCode:
408      CC = o.CC;
409      break;
410    case k_ITCondMask:
411      ITMask = o.ITMask;
412      break;
413    case k_Token:
414      Tok = o.Tok;
415      break;
416    case k_CCOut:
417    case k_Register:
418      Reg = o.Reg;
419      break;
420    case k_RegisterList:
421    case k_DPRRegisterList:
422    case k_SPRRegisterList:
423      Registers = o.Registers;
424      break;
425    case k_VectorList:
426    case k_VectorListAllLanes:
427    case k_VectorListIndexed:
428      VectorList = o.VectorList;
429      break;
430    case k_CoprocNum:
431    case k_CoprocReg:
432      Cop = o.Cop;
433      break;
434    case k_CoprocOption:
435      CoprocOption = o.CoprocOption;
436      break;
437    case k_Immediate:
438      Imm = o.Imm;
439      break;
440    case k_MemBarrierOpt:
441      MBOpt = o.MBOpt;
442      break;
443    case k_Memory:
444      Memory = o.Memory;
445      break;
446    case k_PostIndexRegister:
447      PostIdxReg = o.PostIdxReg;
448      break;
449    case k_MSRMask:
450      MMask = o.MMask;
451      break;
452    case k_ProcIFlags:
453      IFlags = o.IFlags;
454      break;
455    case k_ShifterImmediate:
456      ShifterImm = o.ShifterImm;
457      break;
458    case k_ShiftedRegister:
459      RegShiftedReg = o.RegShiftedReg;
460      break;
461    case k_ShiftedImmediate:
462      RegShiftedImm = o.RegShiftedImm;
463      break;
464    case k_RotateImmediate:
465      RotImm = o.RotImm;
466      break;
467    case k_BitfieldDescriptor:
468      Bitfield = o.Bitfield;
469      break;
470    case k_VectorIndex:
471      VectorIndex = o.VectorIndex;
472      break;
473    }
474  }
475
476  /// getStartLoc - Get the location of the first token of this operand.
477  SMLoc getStartLoc() const { return StartLoc; }
478  /// getEndLoc - Get the location of the last token of this operand.
479  SMLoc getEndLoc() const { return EndLoc; }
480
481  ARMCC::CondCodes getCondCode() const {
482    assert(Kind == k_CondCode && "Invalid access!");
483    return CC.Val;
484  }
485
486  unsigned getCoproc() const {
487    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
488    return Cop.Val;
489  }
490
491  StringRef getToken() const {
492    assert(Kind == k_Token && "Invalid access!");
493    return StringRef(Tok.Data, Tok.Length);
494  }
495
496  unsigned getReg() const {
497    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
498    return Reg.RegNum;
499  }
500
501  const SmallVectorImpl<unsigned> &getRegList() const {
502    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
503            Kind == k_SPRRegisterList) && "Invalid access!");
504    return Registers;
505  }
506
507  const MCExpr *getImm() const {
508    assert(isImm() && "Invalid access!");
509    return Imm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const {
541    if (!isImm()) return false;
542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
543    if (!CE) return false;
544    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
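    // getFP32Imm returns -1 unless the bit pattern is one of the VFP
    // 8-bit-encodable constants, e.g. 1.0 or 0.5 qualify while 0.3 does not.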
545    return Val != -1;
546  }
547  bool isFBits16() const {
548    if (!isImm()) return false;
549    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
550    if (!CE) return false;
551    int64_t Value = CE->getValue();
552    return Value >= 0 && Value <= 16;
553  }
554  bool isFBits32() const {
555    if (!isImm()) return false;
556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
557    if (!CE) return false;
558    int64_t Value = CE->getValue();
559    return Value >= 1 && Value <= 32;
560  }
561  bool isImm8s4() const {
562    if (!isImm()) return false;
563    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
564    if (!CE) return false;
565    int64_t Value = CE->getValue();
566    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
567  }
568  bool isImm0_1020s4() const {
569    if (!isImm()) return false;
570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
571    if (!CE) return false;
572    int64_t Value = CE->getValue();
573    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
574  }
575  bool isImm0_508s4() const {
576    if (!isImm()) return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
581  }
582  bool isImm0_255() const {
583    if (!isImm()) return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return Value >= 0 && Value < 256;
588  }
589  bool isImm0_1() const {
590    if (!isImm()) return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 2;
595  }
596  bool isImm0_3() const {
597    if (!isImm()) return false;
598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
599    if (!CE) return false;
600    int64_t Value = CE->getValue();
601    return Value >= 0 && Value < 4;
602  }
603  bool isImm0_7() const {
604    if (!isImm()) return false;
605    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
606    if (!CE) return false;
607    int64_t Value = CE->getValue();
608    return Value >= 0 && Value < 8;
609  }
610  bool isImm0_15() const {
611    if (!isImm()) return false;
612    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
613    if (!CE) return false;
614    int64_t Value = CE->getValue();
615    return Value >= 0 && Value < 16;
616  }
617  bool isImm0_31() const {
618    if (!isImm()) return false;
619    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
620    if (!CE) return false;
621    int64_t Value = CE->getValue();
622    return Value >= 0 && Value < 32;
623  }
624  bool isImm0_63() const {
625    if (!isImm()) return false;
626    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
627    if (!CE) return false;
628    int64_t Value = CE->getValue();
629    return Value >= 0 && Value < 64;
630  }
631  bool isImm8() const {
632    if (!isImm()) return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (!isImm()) return false;
640    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
641    if (!CE) return false;
642    int64_t Value = CE->getValue();
643    return Value == 16;
644  }
645  bool isImm32() const {
646    if (!isImm()) return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (!isImm()) return false;
654    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
655    if (!CE) return false;
656    int64_t Value = CE->getValue();
657    return Value > 0 && Value <= 8;
658  }
659  bool isShrImm16() const {
660    if (!isImm()) return false;
661    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
662    if (!CE) return false;
663    int64_t Value = CE->getValue();
664    return Value > 0 && Value <= 16;
665  }
666  bool isShrImm32() const {
667    if (!isImm()) return false;
668    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
669    if (!CE) return false;
670    int64_t Value = CE->getValue();
671    return Value > 0 && Value <= 32;
672  }
673  bool isShrImm64() const {
674    if (!isImm()) return false;
675    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
676    if (!CE) return false;
677    int64_t Value = CE->getValue();
678    return Value > 0 && Value <= 64;
679  }
680  bool isImm1_7() const {
681    if (!isImm()) return false;
682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683    if (!CE) return false;
684    int64_t Value = CE->getValue();
685    return Value > 0 && Value < 8;
686  }
687  bool isImm1_15() const {
688    if (!isImm()) return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 16;
693  }
694  bool isImm1_31() const {
695    if (!isImm()) return false;
696    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
697    if (!CE) return false;
698    int64_t Value = CE->getValue();
699    return Value > 0 && Value < 32;
700  }
701  bool isImm1_16() const {
702    if (!isImm()) return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 17;
707  }
708  bool isImm1_32() const {
709    if (!isImm()) return false;
710    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
711    if (!CE) return false;
712    int64_t Value = CE->getValue();
713    return Value > 0 && Value < 33;
714  }
715  bool isImm0_32() const {
716    if (!isImm()) return false;
717    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
718    if (!CE) return false;
719    int64_t Value = CE->getValue();
720    return Value >= 0 && Value < 33;
721  }
722  bool isImm0_65535() const {
723    if (!isImm()) return false;
724    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
725    if (!CE) return false;
726    int64_t Value = CE->getValue();
727    return Value >= 0 && Value < 65536;
728  }
729  bool isImm0_65535Expr() const {
730    if (!isImm()) return false;
731    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
732    // If it's not a constant expression, it'll generate a fixup and be
733    // handled later.
734    if (!CE) return true;
735    int64_t Value = CE->getValue();
736    return Value >= 0 && Value < 65536;
737  }
738  bool isImm24bit() const {
739    if (!isImm()) return false;
740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741    if (!CE) return false;
742    int64_t Value = CE->getValue();
743    return Value >= 0 && Value <= 0xffffff;
744  }
745  bool isImmThumbSR() const {
746    if (!isImm()) return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return Value > 0 && Value < 33;
751  }
752  bool isPKHLSLImm() const {
753    if (!isImm()) return false;
754    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755    if (!CE) return false;
756    int64_t Value = CE->getValue();
757    return Value >= 0 && Value < 32;
758  }
759  bool isPKHASRImm() const {
760    if (!isImm()) return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value <= 32;
765  }
766  bool isARMSOImm() const {
767    if (!isImm()) return false;
768    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
769    if (!CE) return false;
770    int64_t Value = CE->getValue();
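    // A so_imm is an 8-bit value rotated right by an even amount, e.g.
    // 0xff0 (0xff rotated right by 28) is accepted while 0x101 is not.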
771    return ARM_AM::getSOImmVal(Value) != -1;
772  }
773  bool isARMSOImmNot() const {
774    if (!isImm()) return false;
775    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
776    if (!CE) return false;
777    int64_t Value = CE->getValue();
778    return ARM_AM::getSOImmVal(~Value) != -1;
779  }
780  bool isARMSOImmNeg() const {
781    if (!isImm()) return false;
782    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783    if (!CE) return false;
784    int64_t Value = CE->getValue();
785    // Only use this when not representable as a plain so_imm.
786    return ARM_AM::getSOImmVal(Value) == -1 &&
787      ARM_AM::getSOImmVal(-Value) != -1;
788  }
789  bool isT2SOImm() const {
790    if (!isImm()) return false;
791    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
792    if (!CE) return false;
793    int64_t Value = CE->getValue();
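    // Thumb2 modified immediates also allow the replicated-byte patterns
    // 0x00XY00XY, 0xXY00XY00 and 0xXYXYXYXY, so e.g. 0x00ab00ab is accepted
    // here even though it is not a valid ARM so_imm.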
794    return ARM_AM::getT2SOImmVal(Value) != -1;
795  }
796  bool isT2SOImmNot() const {
797    if (!isImm()) return false;
798    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
799    if (!CE) return false;
800    int64_t Value = CE->getValue();
801    return ARM_AM::getT2SOImmVal(~Value) != -1;
802  }
803  bool isT2SOImmNeg() const {
804    if (!isImm()) return false;
805    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
806    if (!CE) return false;
807    int64_t Value = CE->getValue();
808    // Only use this when not representable as a plain t2_so_imm.
809    return ARM_AM::getT2SOImmVal(Value) == -1 &&
810      ARM_AM::getT2SOImmVal(-Value) != -1;
811  }
812  bool isSetEndImm() const {
813    if (!isImm()) return false;
814    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815    if (!CE) return false;
816    int64_t Value = CE->getValue();
817    return Value == 1 || Value == 0;
818  }
819  bool isReg() const { return Kind == k_Register; }
820  bool isRegList() const { return Kind == k_RegisterList; }
821  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
822  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
823  bool isToken() const { return Kind == k_Token; }
824  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
825  bool isMemory() const { return Kind == k_Memory; }
826  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
827  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
828  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
829  bool isRotImm() const { return Kind == k_RotateImmediate; }
830  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
831  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
832  bool isPostIdxReg() const {
832    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
834  }
835  bool isMemNoOffset(bool alignOK = false) const {
836    if (!isMemory())
837      return false;
838    // No offset of any kind.
839    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
840     (alignOK || Memory.Alignment == 0);
841  }
842  bool isMemPCRelImm12() const {
843    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
844      return false;
845    // Base register must be PC.
846    if (Memory.BaseRegNum != ARM::PC)
847      return false;
848    // Immediate offset in range [-4095, 4095].
849    if (!Memory.OffsetImm) return true;
850    int64_t Val = Memory.OffsetImm->getValue();
851    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
852  }
853  bool isAlignedMemory() const {
854    return isMemNoOffset(true);
855  }
856  bool isAddrMode2() const {
857    if (!isMemory() || Memory.Alignment != 0) return false;
858    // Check for register offset.
859    if (Memory.OffsetRegNum) return true;
860    // Immediate offset in range [-4095, 4095].
861    if (!Memory.OffsetImm) return true;
862    int64_t Val = Memory.OffsetImm->getValue();
863    return Val > -4096 && Val < 4096;
864  }
865  bool isAM2OffsetImm() const {
866    if (!isImm()) return false;
867    // Immediate offset in range [-4095, 4095].
868    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
869    if (!CE) return false;
870    int64_t Val = CE->getValue();
871    return Val > -4096 && Val < 4096;
872  }
873  bool isAddrMode3() const {
874    // If we have an immediate that's not a constant, treat it as a label
875    // reference needing a fixup. If it is a constant, it's something else
876    // and we reject it.
877    if (isImm() && !isa<MCConstantExpr>(getImm()))
878      return true;
879    if (!isMemory() || Memory.Alignment != 0) return false;
880    // No shifts are legal for AM3.
881    if (Memory.ShiftType != ARM_AM::no_shift) return false;
882    // Check for register offset.
883    if (Memory.OffsetRegNum) return true;
884    // Immediate offset in range [-255, 255].
885    if (!Memory.OffsetImm) return true;
886    int64_t Val = Memory.OffsetImm->getValue();
887    return Val > -256 && Val < 256;
888  }
889  bool isAM3Offset() const {
890    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
891      return false;
892    if (Kind == k_PostIndexRegister)
893      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
894    // Immediate offset in range [-255, 255].
895    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
896    if (!CE) return false;
897    int64_t Val = CE->getValue();
898    // Special case, #-0 is INT32_MIN.
899    return (Val > -256 && Val < 256) || Val == INT32_MIN;
900  }
901  bool isAddrMode5() const {
902    // If we have an immediate that's not a constant, treat it as a label
903    // reference needing a fixup. If it is a constant, it's something else
904    // and we reject it.
905    if (isImm() && !isa<MCConstantExpr>(getImm()))
906      return true;
907    if (!isMemory() || Memory.Alignment != 0) return false;
908    // Check for register offset.
909    if (Memory.OffsetRegNum) return false;
910    // Immediate offset in range [-1020, 1020] and a multiple of 4.
911    if (!Memory.OffsetImm) return true;
912    int64_t Val = Memory.OffsetImm->getValue();
913    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
914      Val == INT32_MIN;
915  }
916  bool isMemTBB() const {
917    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
918        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
919      return false;
920    return true;
921  }
922  bool isMemTBH() const {
923    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
924        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
925        Memory.Alignment != 0)
926      return false;
927    return true;
928  }
929  bool isMemRegOffset() const {
930    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
931      return false;
932    return true;
933  }
934  bool isT2MemRegOffset() const {
935    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
936        Memory.Alignment != 0)
937      return false;
938    // Only lsl #{0, 1, 2, 3} allowed.
939    if (Memory.ShiftType == ARM_AM::no_shift)
940      return true;
941    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
942      return false;
943    return true;
944  }
945  bool isMemThumbRR() const {
946    // Thumb reg+reg addressing is simple. Just two registers, a base and
947    // an offset. No shifts, negations or any other complicating factors.
948    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
949        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
950      return false;
951    return isARMLowRegister(Memory.BaseRegNum) &&
952      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
953  }
954  bool isMemThumbRIs4() const {
955    if (!isMemory() || Memory.OffsetRegNum != 0 ||
956        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
957      return false;
958    // Immediate offset, multiple of 4 in range [0, 124].
959    if (!Memory.OffsetImm) return true;
960    int64_t Val = Memory.OffsetImm->getValue();
961    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
962  }
963  bool isMemThumbRIs2() const {
964    if (!isMemory() || Memory.OffsetRegNum != 0 ||
965        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
966      return false;
967    // Immediate offset, multiple of 2 in range [0, 62].
968    if (!Memory.OffsetImm) return true;
969    int64_t Val = Memory.OffsetImm->getValue();
970    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
971  }
972  bool isMemThumbRIs1() const {
973    if (!isMemory() || Memory.OffsetRegNum != 0 ||
974        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
975      return false;
976    // Immediate offset in range [0, 31].
977    if (!Memory.OffsetImm) return true;
978    int64_t Val = Memory.OffsetImm->getValue();
979    return Val >= 0 && Val <= 31;
980  }
981  bool isMemThumbSPI() const {
982    if (!isMemory() || Memory.OffsetRegNum != 0 ||
983        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
984      return false;
985    // Immediate offset, multiple of 4 in range [0, 1020].
986    if (!Memory.OffsetImm) return true;
987    int64_t Val = Memory.OffsetImm->getValue();
988    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
989  }
990  bool isMemImm8s4Offset() const {
991    // If we have an immediate that's not a constant, treat it as a label
992    // reference needing a fixup. If it is a constant, it's something else
993    // and we reject it.
994    if (isImm() && !isa<MCConstantExpr>(getImm()))
995      return true;
996    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
997      return false;
998    // Immediate offset a multiple of 4 in range [-1020, 1020].
999    if (!Memory.OffsetImm) return true;
1000    int64_t Val = Memory.OffsetImm->getValue();
1001    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1002  }
1003  bool isMemImm0_1020s4Offset() const {
1004    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1005      return false;
1006    // Immediate offset a multiple of 4 in range [0, 1020].
1007    if (!Memory.OffsetImm) return true;
1008    int64_t Val = Memory.OffsetImm->getValue();
1009    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1010  }
1011  bool isMemImm8Offset() const {
1012    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1013      return false;
1014    // Base reg of PC isn't allowed for these encodings.
1015    if (Memory.BaseRegNum == ARM::PC) return false;
1016    // Immediate offset in range [-255, 255].
1017    if (!Memory.OffsetImm) return true;
1018    int64_t Val = Memory.OffsetImm->getValue();
1019    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1020  }
1021  bool isMemPosImm8Offset() const {
1022    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1023      return false;
1024    // Immediate offset in range [0, 255].
1025    if (!Memory.OffsetImm) return true;
1026    int64_t Val = Memory.OffsetImm->getValue();
1027    return Val >= 0 && Val < 256;
1028  }
1029  bool isMemNegImm8Offset() const {
1030    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1031      return false;
1032    // Base reg of PC isn't allowed for these encodings.
1033    if (Memory.BaseRegNum == ARM::PC) return false;
1034    // Immediate offset in range [-255, -1].
1035    if (!Memory.OffsetImm) return false;
1036    int64_t Val = Memory.OffsetImm->getValue();
1037    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1038  }
1039  bool isMemUImm12Offset() const {
1040    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1041      return false;
1042    // Immediate offset in range [0, 4095].
1043    if (!Memory.OffsetImm) return true;
1044    int64_t Val = Memory.OffsetImm->getValue();
1045    return (Val >= 0 && Val < 4096);
1046  }
1047  bool isMemImm12Offset() const {
1048    // If we have an immediate that's not a constant, treat it as a label
1049    // reference needing a fixup. If it is a constant, it's something else
1050    // and we reject it.
1051    if (isImm() && !isa<MCConstantExpr>(getImm()))
1052      return true;
1053
1054    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1055      return false;
1056    // Immediate offset in range [-4095, 4095].
1057    if (!Memory.OffsetImm) return true;
1058    int64_t Val = Memory.OffsetImm->getValue();
1059    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1060  }
1061  bool isPostIdxImm8() const {
1062    if (!isImm()) return false;
1063    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1064    if (!CE) return false;
1065    int64_t Val = CE->getValue();
1066    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1067  }
1068  bool isPostIdxImm8s4() const {
1069    if (!isImm()) return false;
1070    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1071    if (!CE) return false;
1072    int64_t Val = CE->getValue();
1073    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1074      (Val == INT32_MIN);
1075  }
1076
1077  bool isMSRMask() const { return Kind == k_MSRMask; }
1078  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1079
1080  // NEON operands.
1081  bool isSingleSpacedVectorList() const {
1082    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1083  }
1084  bool isDoubleSpacedVectorList() const {
1085    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1086  }
1087  bool isVecListOneD() const {
1088    if (!isSingleSpacedVectorList()) return false;
1089    return VectorList.Count == 1;
1090  }
1091
1092  bool isVecListDPair() const {
1093    if (!isSingleSpacedVectorList()) return false;
1094    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1095              .contains(VectorList.RegNum));
1096  }
1097
1098  bool isVecListThreeD() const {
1099    if (!isSingleSpacedVectorList()) return false;
1100    return VectorList.Count == 3;
1101  }
1102
1103  bool isVecListFourD() const {
1104    if (!isSingleSpacedVectorList()) return false;
1105    return VectorList.Count == 4;
1106  }
1107
1108  bool isVecListDPairSpaced() const {
1109    if (isSingleSpacedVectorList()) return false;
1110    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1111              .contains(VectorList.RegNum));
1112  }
1113
1114  bool isVecListThreeQ() const {
1115    if (!isDoubleSpacedVectorList()) return false;
1116    return VectorList.Count == 3;
1117  }
1118
1119  bool isVecListFourQ() const {
1120    if (!isDoubleSpacedVectorList()) return false;
1121    return VectorList.Count == 4;
1122  }
1123
1124  bool isSingleSpacedVectorAllLanes() const {
1125    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1126  }
1127  bool isDoubleSpacedVectorAllLanes() const {
1128    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1129  }
1130  bool isVecListOneDAllLanes() const {
1131    if (!isSingleSpacedVectorAllLanes()) return false;
1132    return VectorList.Count == 1;
1133  }
1134
1135  bool isVecListDPairAllLanes() const {
1136    if (!isSingleSpacedVectorAllLanes()) return false;
1137    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1138              .contains(VectorList.RegNum));
1139  }
1140
1141  bool isVecListDPairSpacedAllLanes() const {
1142    if (!isDoubleSpacedVectorAllLanes()) return false;
1143    return VectorList.Count == 2;
1144  }
1145
1146  bool isVecListThreeDAllLanes() const {
1147    if (!isSingleSpacedVectorAllLanes()) return false;
1148    return VectorList.Count == 3;
1149  }
1150
1151  bool isVecListThreeQAllLanes() const {
1152    if (!isDoubleSpacedVectorAllLanes()) return false;
1153    return VectorList.Count == 3;
1154  }
1155
1156  bool isVecListFourDAllLanes() const {
1157    if (!isSingleSpacedVectorAllLanes()) return false;
1158    return VectorList.Count == 4;
1159  }
1160
1161  bool isVecListFourQAllLanes() const {
1162    if (!isDoubleSpacedVectorAllLanes()) return false;
1163    return VectorList.Count == 4;
1164  }
1165
1166  bool isSingleSpacedVectorIndexed() const {
1167    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1168  }
1169  bool isDoubleSpacedVectorIndexed() const {
1170    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1171  }
1172  bool isVecListOneDByteIndexed() const {
1173    if (!isSingleSpacedVectorIndexed()) return false;
1174    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1175  }
1176
1177  bool isVecListOneDHWordIndexed() const {
1178    if (!isSingleSpacedVectorIndexed()) return false;
1179    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1180  }
1181
1182  bool isVecListOneDWordIndexed() const {
1183    if (!isSingleSpacedVectorIndexed()) return false;
1184    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1185  }
1186
1187  bool isVecListTwoDByteIndexed() const {
1188    if (!isSingleSpacedVectorIndexed()) return false;
1189    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1190  }
1191
1192  bool isVecListTwoDHWordIndexed() const {
1193    if (!isSingleSpacedVectorIndexed()) return false;
1194    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1195  }
1196
1197  bool isVecListTwoQWordIndexed() const {
1198    if (!isDoubleSpacedVectorIndexed()) return false;
1199    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1200  }
1201
1202  bool isVecListTwoQHWordIndexed() const {
1203    if (!isDoubleSpacedVectorIndexed()) return false;
1204    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1205  }
1206
1207  bool isVecListTwoDWordIndexed() const {
1208    if (!isSingleSpacedVectorIndexed()) return false;
1209    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1210  }
1211
1212  bool isVecListThreeDByteIndexed() const {
1213    if (!isSingleSpacedVectorIndexed()) return false;
1214    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1215  }
1216
1217  bool isVecListThreeDHWordIndexed() const {
1218    if (!isSingleSpacedVectorIndexed()) return false;
1219    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1220  }
1221
1222  bool isVecListThreeQWordIndexed() const {
1223    if (!isDoubleSpacedVectorIndexed()) return false;
1224    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1225  }
1226
1227  bool isVecListThreeQHWordIndexed() const {
1228    if (!isDoubleSpacedVectorIndexed()) return false;
1229    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1230  }
1231
1232  bool isVecListThreeDWordIndexed() const {
1233    if (!isSingleSpacedVectorIndexed()) return false;
1234    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1235  }
1236
1237  bool isVecListFourDByteIndexed() const {
1238    if (!isSingleSpacedVectorIndexed()) return false;
1239    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1240  }
1241
1242  bool isVecListFourDHWordIndexed() const {
1243    if (!isSingleSpacedVectorIndexed()) return false;
1244    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1245  }
1246
1247  bool isVecListFourQWordIndexed() const {
1248    if (!isDoubleSpacedVectorIndexed()) return false;
1249    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1250  }
1251
1252  bool isVecListFourQHWordIndexed() const {
1253    if (!isDoubleSpacedVectorIndexed()) return false;
1254    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1255  }
1256
1257  bool isVecListFourDWordIndexed() const {
1258    if (!isSingleSpacedVectorIndexed()) return false;
1259    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1260  }
1261
1262  bool isVectorIndex8() const {
1263    if (Kind != k_VectorIndex) return false;
1264    return VectorIndex.Val < 8;
1265  }
1266  bool isVectorIndex16() const {
1267    if (Kind != k_VectorIndex) return false;
1268    return VectorIndex.Val < 4;
1269  }
1270  bool isVectorIndex32() const {
1271    if (Kind != k_VectorIndex) return false;
1272    return VectorIndex.Val < 2;
1273  }
1274
1275  bool isNEONi8splat() const {
1276    if (!isImm()) return false;
1277    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1278    // Must be a constant.
1279    if (!CE) return false;
1280    int64_t Value = CE->getValue();
1281    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1282    // value.
1283    return Value >= 0 && Value < 256;
1284  }
1285
1286  bool isNEONi16splat() const {
1287    if (!isImm()) return false;
1288    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1289    // Must be a constant.
1290    if (!CE) return false;
1291    int64_t Value = CE->getValue();
1292    // i16 value in the range [0,255] or [0x0100, 0xff00]
1293    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1294  }
1295
1296  bool isNEONi32splat() const {
1297    if (!isImm()) return false;
1298    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1299    // Must be a constant.
1300    if (!CE) return false;
1301    int64_t Value = CE->getValue();
1302    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
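    // e.g. 0x00ff0000 is accepted, 0x00ffff00 is not.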
1303    return (Value >= 0 && Value < 256) ||
1304      (Value >= 0x0100 && Value <= 0xff00) ||
1305      (Value >= 0x010000 && Value <= 0xff0000) ||
1306      (Value >= 0x01000000 && Value <= 0xff000000);
1307  }
1308
1309  bool isNEONi32vmov() const {
1310    if (!isImm()) return false;
1311    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1312    // Must be a constant.
1313    if (!CE) return false;
1314    int64_t Value = CE->getValue();
1315    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1316    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
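    // e.g. 0xffff and 0xffffff are accepted here but not by isNEONi32splat.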
1317    return (Value >= 0 && Value < 256) ||
1318      (Value >= 0x0100 && Value <= 0xff00) ||
1319      (Value >= 0x010000 && Value <= 0xff0000) ||
1320      (Value >= 0x01000000 && Value <= 0xff000000) ||
1321      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1322      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1323  }
1324  bool isNEONi32vmovNeg() const {
1325    if (!isImm()) return false;
1326    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1327    // Must be a constant.
1328    if (!CE) return false;
1329    int64_t Value = ~CE->getValue();
1330    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1331    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1332    return (Value >= 0 && Value < 256) ||
1333      (Value >= 0x0100 && Value <= 0xff00) ||
1334      (Value >= 0x010000 && Value <= 0xff0000) ||
1335      (Value >= 0x01000000 && Value <= 0xff000000) ||
1336      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1337      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1338  }
1339
1340  bool isNEONi64splat() const {
1341    if (!isImm()) return false;
1342    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1343    // Must be a constant.
1344    if (!CE) return false;
1345    uint64_t Value = CE->getValue();
1346    // i64 value with each byte being either 0 or 0xff.
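    // e.g. 0x00ff00ff00ff00ff is accepted, 0x0012000000000000 is not.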
1347    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1348      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1349    return true;
1350  }
1351
1352  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1353    // Add as immediates when possible.  Null MCExpr = 0.
1354    if (Expr == 0)
1355      Inst.addOperand(MCOperand::CreateImm(0));
1356    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1357      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1358    else
1359      Inst.addOperand(MCOperand::CreateExpr(Expr));
1360  }
1361
1362  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1363    assert(N == 2 && "Invalid number of operands!");
1364    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1365    unsigned RegNum = getCondCode() == ARMCC::AL ? 0 : ARM::CPSR;
1366    Inst.addOperand(MCOperand::CreateReg(RegNum));
1367  }
1368
1369  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1370    assert(N == 1 && "Invalid number of operands!");
1371    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1372  }
1373
1374  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1375    assert(N == 1 && "Invalid number of operands!");
1376    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1377  }
1378
1379  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1380    assert(N == 1 && "Invalid number of operands!");
1381    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1382  }
1383
1384  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1385    assert(N == 1 && "Invalid number of operands!");
1386    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1387  }
1388
1389  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1390    assert(N == 1 && "Invalid number of operands!");
1391    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1392  }
1393
1394  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1395    assert(N == 1 && "Invalid number of operands!");
1396    Inst.addOperand(MCOperand::CreateReg(getReg()));
1397  }
1398
1399  void addRegOperands(MCInst &Inst, unsigned N) const {
1400    assert(N == 1 && "Invalid number of operands!");
1401    Inst.addOperand(MCOperand::CreateReg(getReg()));
1402  }
1403
1404  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1405    assert(N == 3 && "Invalid number of operands!");
1406    assert(isRegShiftedReg() &&
1407           "addRegShiftedRegOperands() on non RegShiftedReg!");
1408    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1409    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1410    Inst.addOperand(MCOperand::CreateImm(
1411      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1412  }
1413
1414  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1415    assert(N == 2 && "Invalid number of operands!");
1416    assert(isRegShiftedImm() &&
1417           "addRegShiftedImmOperands() on non RegShiftedImm!");
1418    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1419    Inst.addOperand(MCOperand::CreateImm(
1420      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1421  }
1422
1423  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1424    assert(N == 1 && "Invalid number of operands!");
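    // Bit 5 carries the asr flag, so e.g. "asr #17" packs as
    // (1 << 5) | 17 = 49.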
1425    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1426                                         ShifterImm.Imm));
1427  }
1428
1429  void addRegListOperands(MCInst &Inst, unsigned N) const {
1430    assert(N == 1 && "Invalid number of operands!");
1431    const SmallVectorImpl<unsigned> &RegList = getRegList();
1432    for (SmallVectorImpl<unsigned>::const_iterator
1433           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1434      Inst.addOperand(MCOperand::CreateReg(*I));
1435  }
1436
1437  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1438    addRegListOperands(Inst, N);
1439  }
1440
1441  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1442    addRegListOperands(Inst, N);
1443  }
1444
1445  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1446    assert(N == 1 && "Invalid number of operands!");
1447    // Encoded as val>>3. The printer handles display as 8, 16, 24.
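    // e.g. "ror #16" is stored as 16 >> 3 = 2.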
1448    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1449  }
1450
1451  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1452    assert(N == 1 && "Invalid number of operands!");
1453    // Munge the lsb/width into a bitfield mask.
1454    unsigned lsb = Bitfield.LSB;
1455    unsigned width = Bitfield.Width;
1456    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
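    // e.g. lsb = 8 and width = 8 produce the mask 0xffff00ff.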
1457    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1458                      (32 - (lsb + width)));
1459    Inst.addOperand(MCOperand::CreateImm(Mask));
1460  }
1461
1462  void addImmOperands(MCInst &Inst, unsigned N) const {
1463    assert(N == 1 && "Invalid number of operands!");
1464    addExpr(Inst, getImm());
1465  }
1466
1467  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1468    assert(N == 1 && "Invalid number of operands!");
1469    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1470    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1471  }
1472
1473  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1474    assert(N == 1 && "Invalid number of operands!");
1475    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1476    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1477  }
1478
1479  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1480    assert(N == 1 && "Invalid number of operands!");
1481    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1482    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1483    Inst.addOperand(MCOperand::CreateImm(Val));
1484  }
1485
1486  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1487    assert(N == 1 && "Invalid number of operands!");
1488    // FIXME: We really want to scale the value here, but the LDRD/STRD
1489    // instructions don't encode operands that way yet.
1490    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1491    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1492  }
1493
1494  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1495    assert(N == 1 && "Invalid number of operands!");
1496    // The immediate is scaled by four in the encoding and is stored
1497    // in the MCInst as such. Lop off the low two bits here.
1498    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1499    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1500  }
1501
1502  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1503    assert(N == 1 && "Invalid number of operands!");
1504    // The immediate is scaled by four in the encoding and is stored
1505    // in the MCInst as such. Lop off the low two bits here.
1506    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1507    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1508  }
1509
1510  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1511    assert(N == 1 && "Invalid number of operands!");
1512    // The constant encodes as the immediate-1, and we store in the instruction
1513    // the bits as encoded, so subtract off one here.
1514    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1515    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1516  }
1517
1518  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1519    assert(N == 1 && "Invalid number of operands!");
1520    // The constant encodes as the immediate-1, and we store in the instruction
1521    // the bits as encoded, so subtract off one here.
1522    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1523    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1524  }
1525
1526  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1527    assert(N == 1 && "Invalid number of operands!");
1528    // The constant encodes as the immediate, except for 32, which encodes as
1529    // zero.
1530    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1531    unsigned Imm = CE->getValue();
1532    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1533  }
1534
1535  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1536    assert(N == 1 && "Invalid number of operands!");
1537    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1538    // the instruction as well.
1539    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1540    int Val = CE->getValue();
1541    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1542  }
1543
1544  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1545    assert(N == 1 && "Invalid number of operands!");
1546    // The operand is actually a t2_so_imm, but we have its bitwise
1547    // negation in the assembly source, so twiddle it here.
1548    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1549    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1550  }
1551
1552  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1553    assert(N == 1 && "Invalid number of operands!");
1554    // The operand is actually a t2_so_imm, but we have its
1555    // negation in the assembly source, so twiddle it here.
1556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1557    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1558  }
1559
1560  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1561    assert(N == 1 && "Invalid number of operands!");
1562    // The operand is actually a so_imm, but we have its bitwise
1563    // negation in the assembly source, so twiddle it here.
1564    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1565    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1566  }
1567
1568  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1569    assert(N == 1 && "Invalid number of operands!");
1570    // The operand is actually a so_imm, but we have its
1571    // negation in the assembly source, so twiddle it here.
1572    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1573    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1574  }
1575
1576  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1577    assert(N == 1 && "Invalid number of operands!");
1578    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1579  }
1580
1581  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1582    assert(N == 1 && "Invalid number of operands!");
1583    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1584  }
1585
1586  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1587    assert(N == 1 && "Invalid number of operands!");
1588    int32_t Imm = Memory.OffsetImm->getValue();
1589    // FIXME: Handle #-0
1590    if (Imm == INT32_MIN) Imm = 0;
1591    Inst.addOperand(MCOperand::CreateImm(Imm));
1592  }
1593
1594  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1595    assert(N == 2 && "Invalid number of operands!");
1596    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1597    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1598  }
1599
1600  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1601    assert(N == 3 && "Invalid number of operands!");
1602    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1603    if (!Memory.OffsetRegNum) {
1604      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1605      // Special case for #-0
1606      if (Val == INT32_MIN) Val = 0;
1607      if (Val < 0) Val = -Val;
1608      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1609    } else {
1610      // For register offset, we encode the shift type and negation flag
1611      // here.
1612      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1613                              Memory.ShiftImm, Memory.ShiftType);
1614    }
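    // Informal walk-through (not a description of the exact AM2 bit layout):
    // a parsed "[r1, #-8]" has no offset register, so the branch above packs
    // (sub, 8, no_shift) into Val, and the operands added below are the r1
    // base, a zero register, and that packed value.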
1615    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1616    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1617    Inst.addOperand(MCOperand::CreateImm(Val));
1618  }
1619
1620  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1621    assert(N == 2 && "Invalid number of operands!");
1622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1623    assert(CE && "non-constant AM2OffsetImm operand!");
1624    int32_t Val = CE->getValue();
1625    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1626    // Special case for #-0
1627    if (Val == INT32_MIN) Val = 0;
1628    if (Val < 0) Val = -Val;
1629    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1630    Inst.addOperand(MCOperand::CreateReg(0));
1631    Inst.addOperand(MCOperand::CreateImm(Val));
1632  }
1633
1634  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1635    assert(N == 3 && "Invalid number of operands!");
1636    // If we have an immediate that's not a constant, treat it as a label
1637    // reference needing a fixup. If it is a constant, it's something else
1638    // and we reject it.
1639    if (isImm()) {
1640      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1641      Inst.addOperand(MCOperand::CreateReg(0));
1642      Inst.addOperand(MCOperand::CreateImm(0));
1643      return;
1644    }
1645
1646    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1647    if (!Memory.OffsetRegNum) {
1648      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1649      // Special case for #-0
1650      if (Val == INT32_MIN) Val = 0;
1651      if (Val < 0) Val = -Val;
1652      Val = ARM_AM::getAM3Opc(AddSub, Val);
1653    } else {
1654      // For register offset, we encode the shift type and negation flag
1655      // here.
1656      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1657    }
1658    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1659    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1660    Inst.addOperand(MCOperand::CreateImm(Val));
1661  }
1662
1663  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1664    assert(N == 2 && "Invalid number of operands!");
1665    if (Kind == k_PostIndexRegister) {
1666      int32_t Val =
1667        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1668      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1669      Inst.addOperand(MCOperand::CreateImm(Val));
1670      return;
1671    }
1672
1673    // Constant offset.
1674    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1675    int32_t Val = CE->getValue();
1676    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1677    // Special case for #-0
1678    if (Val == INT32_MIN) Val = 0;
1679    if (Val < 0) Val = -Val;
1680    Val = ARM_AM::getAM3Opc(AddSub, Val);
1681    Inst.addOperand(MCOperand::CreateReg(0));
1682    Inst.addOperand(MCOperand::CreateImm(Val));
1683  }
1684
1685  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1686    assert(N == 2 && "Invalid number of operands!");
1687    // If we have an immediate that's not a constant, treat it as a label
1688    // reference needing a fixup. If it is a constant, it's something else
1689    // and we reject it.
1690    if (isImm()) {
1691      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1692      Inst.addOperand(MCOperand::CreateImm(0));
1693      return;
1694    }
1695
1696    // The lower two bits are always zero and as such are not encoded.
1697    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1698    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1699    // Special case for #-0
1700    if (Val == INT32_MIN) Val = 0;
1701    if (Val < 0) Val = -Val;
1702    Val = ARM_AM::getAM5Opc(AddSub, Val);
1703    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1704    Inst.addOperand(MCOperand::CreateImm(Val));
1705  }
1706
1707  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1708    assert(N == 2 && "Invalid number of operands!");
1709    // If we have an immediate that's not a constant, treat it as a label
1710    // reference needing a fixup. If it is a constant, it's something else
1711    // and we reject it.
1712    if (isImm()) {
1713      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1714      Inst.addOperand(MCOperand::CreateImm(0));
1715      return;
1716    }
1717
1718    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1719    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1720    Inst.addOperand(MCOperand::CreateImm(Val));
1721  }
1722
1723  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1724    assert(N == 2 && "Invalid number of operands!");
1725    // The lower two bits are always zero and as such are not encoded.
1726    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1727    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1728    Inst.addOperand(MCOperand::CreateImm(Val));
1729  }
1730
1731  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1732    assert(N == 2 && "Invalid number of operands!");
1733    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1734    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1735    Inst.addOperand(MCOperand::CreateImm(Val));
1736  }
1737
1738  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1739    addMemImm8OffsetOperands(Inst, N);
1740  }
1741
1742  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1743    addMemImm8OffsetOperands(Inst, N);
1744  }
1745
1746  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1747    assert(N == 2 && "Invalid number of operands!");
1748    // If this is an immediate, it's a label reference.
1749    if (isImm()) {
1750      addExpr(Inst, getImm());
1751      Inst.addOperand(MCOperand::CreateImm(0));
1752      return;
1753    }
1754
1755    // Otherwise, it's a normal memory reg+offset.
1756    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1757    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1758    Inst.addOperand(MCOperand::CreateImm(Val));
1759  }
1760
1761  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1762    assert(N == 2 && "Invalid number of operands!");
1763    // If this is an immediate, it's a label reference.
1764    if (isImm()) {
1765      addExpr(Inst, getImm());
1766      Inst.addOperand(MCOperand::CreateImm(0));
1767      return;
1768    }
1769
1770    // Otherwise, it's a normal memory reg+offset.
1771    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1772    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1773    Inst.addOperand(MCOperand::CreateImm(Val));
1774  }
1775
1776  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1777    assert(N == 2 && "Invalid number of operands!");
1778    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1779    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1780  }
1781
1782  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1783    assert(N == 2 && "Invalid number of operands!");
1784    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1785    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1786  }
1787
1788  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1789    assert(N == 3 && "Invalid number of operands!");
1790    unsigned Val =
1791      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1792                        Memory.ShiftImm, Memory.ShiftType);
1793    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1794    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1795    Inst.addOperand(MCOperand::CreateImm(Val));
1796  }
1797
1798  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1799    assert(N == 3 && "Invalid number of operands!");
1800    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1801    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1802    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1803  }
1804
1805  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1806    assert(N == 2 && "Invalid number of operands!");
1807    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1808    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1809  }
1810
1811  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1812    assert(N == 2 && "Invalid number of operands!");
1813    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1814    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1815    Inst.addOperand(MCOperand::CreateImm(Val));
1816  }
1817
1818  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1819    assert(N == 2 && "Invalid number of operands!");
1820    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1821    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1822    Inst.addOperand(MCOperand::CreateImm(Val));
1823  }
1824
1825  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1826    assert(N == 2 && "Invalid number of operands!");
1827    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1828    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1829    Inst.addOperand(MCOperand::CreateImm(Val));
1830  }
1831
1832  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1833    assert(N == 2 && "Invalid number of operands!");
1834    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1835    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1836    Inst.addOperand(MCOperand::CreateImm(Val));
1837  }
1838
1839  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1840    assert(N == 1 && "Invalid number of operands!");
1841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1842    assert(CE && "non-constant post-idx-imm8 operand!");
1843    int Imm = CE->getValue();
1844    bool isAdd = Imm >= 0;
1845    if (Imm == INT32_MIN) Imm = 0;
1846    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
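    // For example (informally): #4 becomes 4 | (1 << 8) == 0x104, while #-4
    // becomes plain 4 with the add bit clear.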
1847    Inst.addOperand(MCOperand::CreateImm(Imm));
1848  }
1849
1850  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1851    assert(N == 1 && "Invalid number of operands!");
1852    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1853    assert(CE && "non-constant post-idx-imm8s4 operand!");
1854    int Imm = CE->getValue();
1855    bool isAdd = Imm >= 0;
1856    if (Imm == INT32_MIN) Imm = 0;
1857    // Immediate is scaled by 4.
1858    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1859    Inst.addOperand(MCOperand::CreateImm(Imm));
1860  }
1861
1862  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1863    assert(N == 2 && "Invalid number of operands!");
1864    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1865    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1866  }
1867
1868  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1869    assert(N == 2 && "Invalid number of operands!");
1870    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1871    // The sign, shift type, and shift amount are encoded in a single operand
1872    // using the AM2 encoding helpers.
1873    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1874    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1875                                     PostIdxReg.ShiftTy);
1876    Inst.addOperand(MCOperand::CreateImm(Imm));
1877  }
1878
1879  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1880    assert(N == 1 && "Invalid number of operands!");
1881    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1882  }
1883
1884  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1885    assert(N == 1 && "Invalid number of operands!");
1886    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1887  }
1888
1889  void addVecListOperands(MCInst &Inst, unsigned N) const {
1890    assert(N == 1 && "Invalid number of operands!");
1891    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1892  }
1893
1894  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1895    assert(N == 2 && "Invalid number of operands!");
1896    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1897    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1898  }
1899
1900  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1901    assert(N == 1 && "Invalid number of operands!");
1902    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1903  }
1904
1905  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1906    assert(N == 1 && "Invalid number of operands!");
1907    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1908  }
1909
1910  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1911    assert(N == 1 && "Invalid number of operands!");
1912    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1913  }
1914
1915  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1916    assert(N == 1 && "Invalid number of operands!");
1917    // The immediate encodes the type of constant as well as the value.
1918    // Mask in that this is an i8 splat.
1919    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1920    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1921  }
1922
1923  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1924    assert(N == 1 && "Invalid number of operands!");
1925    // The immediate encodes the type of constant as well as the value.
1926    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1927    unsigned Value = CE->getValue();
1928    if (Value >= 256)
1929      Value = (Value >> 8) | 0xa00;
1930    else
1931      Value |= 0x800;
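    // Illustrating the two branches above: a Value of 0x34 takes the else
    // path and becomes 0x34 | 0x800 == 0x834; a Value of 0x3400 takes the
    // first path and becomes (0x3400 >> 8) | 0xa00 == 0xa34.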
1932    Inst.addOperand(MCOperand::CreateImm(Value));
1933  }
1934
1935  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1936    assert(N == 1 && "Invalid number of operands!");
1937    // The immediate encodes the type of constant as well as the value.
1938    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939    unsigned Value = CE->getValue();
1940    if (Value >= 256 && Value <= 0xff00)
1941      Value = (Value >> 8) | 0x200;
1942    else if (Value > 0xffff && Value <= 0xff0000)
1943      Value = (Value >> 16) | 0x400;
1944    else if (Value > 0xffffff)
1945      Value = (Value >> 24) | 0x600;
1946    Inst.addOperand(MCOperand::CreateImm(Value));
1947  }
1948
1949  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1950    assert(N == 1 && "Invalid number of operands!");
1951    // The immediate encodes the type of constant as well as the value.
1952    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1953    unsigned Value = CE->getValue();
1954    if (Value >= 256 && Value <= 0xffff)
1955      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1956    else if (Value > 0xffff && Value <= 0xffffff)
1957      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1958    else if (Value > 0xffffff)
1959      Value = (Value >> 24) | 0x600;
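    // Illustrating the cases above: 0x5600 falls in the first range and, with
    // its low byte zero, becomes (0x5600 >> 8) | 0x200 == 0x256; 0x5612 has a
    // nonzero low byte and becomes (0x5612 >> 8) | 0xc00 == 0xc56.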
1960    Inst.addOperand(MCOperand::CreateImm(Value));
1961  }
1962
1963  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1964    assert(N == 1 && "Invalid number of operands!");
1965    // The immediate encodes the type of constant as well as the value.
1966    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1967    unsigned Value = ~CE->getValue();
1968    if (Value >= 256 && Value <= 0xffff)
1969      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1970    else if (Value > 0xffff && Value <= 0xffffff)
1971      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1972    else if (Value > 0xffffff)
1973      Value = (Value >> 24) | 0x600;
1974    Inst.addOperand(MCOperand::CreateImm(Value));
1975  }
1976
1977  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1978    assert(N == 1 && "Invalid number of operands!");
1979    // The immediate encodes the type of constant as well as the value.
1980    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1981    uint64_t Value = CE->getValue();
1982    unsigned Imm = 0;
1983    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1984      Imm |= (Value & 1) << i;
1985    }
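    // The loop keeps one bit per byte of the constant. As an informal
    // example, 0x00ff00ff00ff00ff contributes LSBs 1,0,1,0,1,0,1,0 from its
    // bytes (low byte first), so Imm == 0x55 and the final value is 0x1e55.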
1986    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1987  }
1988
1989  virtual void print(raw_ostream &OS) const;
1990
1991  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1992    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1993    Op->ITMask.Mask = Mask;
1994    Op->StartLoc = S;
1995    Op->EndLoc = S;
1996    return Op;
1997  }
1998
1999  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2000    ARMOperand *Op = new ARMOperand(k_CondCode);
2001    Op->CC.Val = CC;
2002    Op->StartLoc = S;
2003    Op->EndLoc = S;
2004    return Op;
2005  }
2006
2007  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2008    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2009    Op->Cop.Val = CopVal;
2010    Op->StartLoc = S;
2011    Op->EndLoc = S;
2012    return Op;
2013  }
2014
2015  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2016    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2017    Op->Cop.Val = CopVal;
2018    Op->StartLoc = S;
2019    Op->EndLoc = S;
2020    return Op;
2021  }
2022
2023  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2024    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2025    Op->Cop.Val = Val;
2026    Op->StartLoc = S;
2027    Op->EndLoc = E;
2028    return Op;
2029  }
2030
2031  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2032    ARMOperand *Op = new ARMOperand(k_CCOut);
2033    Op->Reg.RegNum = RegNum;
2034    Op->StartLoc = S;
2035    Op->EndLoc = S;
2036    return Op;
2037  }
2038
2039  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2040    ARMOperand *Op = new ARMOperand(k_Token);
2041    Op->Tok.Data = Str.data();
2042    Op->Tok.Length = Str.size();
2043    Op->StartLoc = S;
2044    Op->EndLoc = S;
2045    return Op;
2046  }
2047
2048  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2049    ARMOperand *Op = new ARMOperand(k_Register);
2050    Op->Reg.RegNum = RegNum;
2051    Op->StartLoc = S;
2052    Op->EndLoc = E;
2053    return Op;
2054  }
2055
2056  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2057                                           unsigned SrcReg,
2058                                           unsigned ShiftReg,
2059                                           unsigned ShiftImm,
2060                                           SMLoc S, SMLoc E) {
2061    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2062    Op->RegShiftedReg.ShiftTy = ShTy;
2063    Op->RegShiftedReg.SrcReg = SrcReg;
2064    Op->RegShiftedReg.ShiftReg = ShiftReg;
2065    Op->RegShiftedReg.ShiftImm = ShiftImm;
2066    Op->StartLoc = S;
2067    Op->EndLoc = E;
2068    return Op;
2069  }
2070
2071  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2072                                            unsigned SrcReg,
2073                                            unsigned ShiftImm,
2074                                            SMLoc S, SMLoc E) {
2075    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2076    Op->RegShiftedImm.ShiftTy = ShTy;
2077    Op->RegShiftedImm.SrcReg = SrcReg;
2078    Op->RegShiftedImm.ShiftImm = ShiftImm;
2079    Op->StartLoc = S;
2080    Op->EndLoc = E;
2081    return Op;
2082  }
2083
2084  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2085                                   SMLoc S, SMLoc E) {
2086    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2087    Op->ShifterImm.isASR = isASR;
2088    Op->ShifterImm.Imm = Imm;
2089    Op->StartLoc = S;
2090    Op->EndLoc = E;
2091    return Op;
2092  }
2093
2094  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2095    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2096    Op->RotImm.Imm = Imm;
2097    Op->StartLoc = S;
2098    Op->EndLoc = E;
2099    return Op;
2100  }
2101
2102  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2103                                    SMLoc S, SMLoc E) {
2104    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2105    Op->Bitfield.LSB = LSB;
2106    Op->Bitfield.Width = Width;
2107    Op->StartLoc = S;
2108    Op->EndLoc = E;
2109    return Op;
2110  }
2111
2112  static ARMOperand *
2113  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2114                SMLoc StartLoc, SMLoc EndLoc) {
2115    KindTy Kind = k_RegisterList;
2116
2117    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2118      Kind = k_DPRRegisterList;
2119    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2120             contains(Regs.front().first))
2121      Kind = k_SPRRegisterList;
2122
2123    ARMOperand *Op = new ARMOperand(Kind);
2124    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2125           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2126      Op->Registers.push_back(I->first);
2127    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2128    Op->StartLoc = StartLoc;
2129    Op->EndLoc = EndLoc;
2130    return Op;
2131  }
2132
2133  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2134                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2135    ARMOperand *Op = new ARMOperand(k_VectorList);
2136    Op->VectorList.RegNum = RegNum;
2137    Op->VectorList.Count = Count;
2138    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2139    Op->StartLoc = S;
2140    Op->EndLoc = E;
2141    return Op;
2142  }
2143
2144  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2145                                              bool isDoubleSpaced,
2146                                              SMLoc S, SMLoc E) {
2147    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2148    Op->VectorList.RegNum = RegNum;
2149    Op->VectorList.Count = Count;
2150    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2151    Op->StartLoc = S;
2152    Op->EndLoc = E;
2153    return Op;
2154  }
2155
2156  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2157                                             unsigned Index,
2158                                             bool isDoubleSpaced,
2159                                             SMLoc S, SMLoc E) {
2160    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2161    Op->VectorList.RegNum = RegNum;
2162    Op->VectorList.Count = Count;
2163    Op->VectorList.LaneIndex = Index;
2164    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2165    Op->StartLoc = S;
2166    Op->EndLoc = E;
2167    return Op;
2168  }
2169
2170  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2171                                       MCContext &Ctx) {
2172    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2173    Op->VectorIndex.Val = Idx;
2174    Op->StartLoc = S;
2175    Op->EndLoc = E;
2176    return Op;
2177  }
2178
2179  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2180    ARMOperand *Op = new ARMOperand(k_Immediate);
2181    Op->Imm.Val = Val;
2182    Op->StartLoc = S;
2183    Op->EndLoc = E;
2184    return Op;
2185  }
2186
2187  static ARMOperand *CreateMem(unsigned BaseRegNum,
2188                               const MCConstantExpr *OffsetImm,
2189                               unsigned OffsetRegNum,
2190                               ARM_AM::ShiftOpc ShiftType,
2191                               unsigned ShiftImm,
2192                               unsigned Alignment,
2193                               bool isNegative,
2194                               SMLoc S, SMLoc E) {
2195    ARMOperand *Op = new ARMOperand(k_Memory);
2196    Op->Memory.BaseRegNum = BaseRegNum;
2197    Op->Memory.OffsetImm = OffsetImm;
2198    Op->Memory.OffsetRegNum = OffsetRegNum;
2199    Op->Memory.ShiftType = ShiftType;
2200    Op->Memory.ShiftImm = ShiftImm;
2201    Op->Memory.Alignment = Alignment;
2202    Op->Memory.isNegative = isNegative;
2203    Op->StartLoc = S;
2204    Op->EndLoc = E;
2205    return Op;
2206  }
2207
2208  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2209                                      ARM_AM::ShiftOpc ShiftTy,
2210                                      unsigned ShiftImm,
2211                                      SMLoc S, SMLoc E) {
2212    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2213    Op->PostIdxReg.RegNum = RegNum;
2214    Op->PostIdxReg.isAdd = isAdd;
2215    Op->PostIdxReg.ShiftTy = ShiftTy;
2216    Op->PostIdxReg.ShiftImm = ShiftImm;
2217    Op->StartLoc = S;
2218    Op->EndLoc = E;
2219    return Op;
2220  }
2221
2222  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2223    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2224    Op->MBOpt.Val = Opt;
2225    Op->StartLoc = S;
2226    Op->EndLoc = S;
2227    return Op;
2228  }
2229
2230  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2231    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2232    Op->IFlags.Val = IFlags;
2233    Op->StartLoc = S;
2234    Op->EndLoc = S;
2235    return Op;
2236  }
2237
2238  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2239    ARMOperand *Op = new ARMOperand(k_MSRMask);
2240    Op->MMask.Val = MMask;
2241    Op->StartLoc = S;
2242    Op->EndLoc = S;
2243    return Op;
2244  }
2245};
2246
2247} // end anonymous namespace.
2248
2249void ARMOperand::print(raw_ostream &OS) const {
2250  switch (Kind) {
2251  case k_CondCode:
2252    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2253    break;
2254  case k_CCOut:
2255    OS << "<ccout " << getReg() << ">";
2256    break;
2257  case k_ITCondMask: {
2258    static const char *MaskStr[] = {
2259      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2260      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2261    };
2262    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2263    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2264    break;
2265  }
2266  case k_CoprocNum:
2267    OS << "<coprocessor number: " << getCoproc() << ">";
2268    break;
2269  case k_CoprocReg:
2270    OS << "<coprocessor register: " << getCoproc() << ">";
2271    break;
2272  case k_CoprocOption:
2273    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2274    break;
2275  case k_MSRMask:
2276    OS << "<mask: " << getMSRMask() << ">";
2277    break;
2278  case k_Immediate:
2279    getImm()->print(OS);
2280    break;
2281  case k_MemBarrierOpt:
2282    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2283    break;
2284  case k_Memory:
2285    OS << "<memory "
2286       << " base:" << Memory.BaseRegNum;
2287    OS << ">";
2288    break;
2289  case k_PostIndexRegister:
2290    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2291       << PostIdxReg.RegNum;
2292    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2293      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2294         << PostIdxReg.ShiftImm;
2295    OS << ">";
2296    break;
2297  case k_ProcIFlags: {
2298    OS << "<ARM_PROC::";
2299    unsigned IFlags = getProcIFlags();
2300    for (int i=2; i >= 0; --i)
2301      if (IFlags & (1 << i))
2302        OS << ARM_PROC::IFlagsToString(1 << i);
2303    OS << ">";
2304    break;
2305  }
2306  case k_Register:
2307    OS << "<register " << getReg() << ">";
2308    break;
2309  case k_ShifterImmediate:
2310    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2311       << " #" << ShifterImm.Imm << ">";
2312    break;
2313  case k_ShiftedRegister:
2314    OS << "<so_reg_reg "
2315       << RegShiftedReg.SrcReg << " "
2316       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2317       << " " << RegShiftedReg.ShiftReg << ">";
2318    break;
2319  case k_ShiftedImmediate:
2320    OS << "<so_reg_imm "
2321       << RegShiftedImm.SrcReg << " "
2322       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2323       << " #" << RegShiftedImm.ShiftImm << ">";
2324    break;
2325  case k_RotateImmediate:
2326    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2327    break;
2328  case k_BitfieldDescriptor:
2329    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2330       << ", width: " << Bitfield.Width << ">";
2331    break;
2332  case k_RegisterList:
2333  case k_DPRRegisterList:
2334  case k_SPRRegisterList: {
2335    OS << "<register_list ";
2336
2337    const SmallVectorImpl<unsigned> &RegList = getRegList();
2338    for (SmallVectorImpl<unsigned>::const_iterator
2339           I = RegList.begin(), E = RegList.end(); I != E; ) {
2340      OS << *I;
2341      if (++I < E) OS << ", ";
2342    }
2343
2344    OS << ">";
2345    break;
2346  }
2347  case k_VectorList:
2348    OS << "<vector_list " << VectorList.Count << " * "
2349       << VectorList.RegNum << ">";
2350    break;
2351  case k_VectorListAllLanes:
2352    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2353       << VectorList.RegNum << ">";
2354    break;
2355  case k_VectorListIndexed:
2356    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2357       << VectorList.Count << " * " << VectorList.RegNum << ">";
2358    break;
2359  case k_Token:
2360    OS << "'" << getToken() << "'";
2361    break;
2362  case k_VectorIndex:
2363    OS << "<vectorindex " << getVectorIndex() << ">";
2364    break;
2365  }
2366}
2367
2368/// @name Auto-generated Match Functions
2369/// {
2370
2371static unsigned MatchRegisterName(StringRef Name);
2372
2373/// }
2374
2375bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2376                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2377  StartLoc = Parser.getTok().getLoc();
2378  RegNo = tryParseRegister();
2379  EndLoc = Parser.getTok().getLoc();
2380
2381  return (RegNo == (unsigned)-1);
2382}
2383
2384/// Try to parse a register name.  The token must be an Identifier when called,
2385/// and if it is a register name the token is eaten and the register number is
2386/// returned.  Otherwise return -1.
2387///
2388int ARMAsmParser::tryParseRegister() {
2389  const AsmToken &Tok = Parser.getTok();
2390  if (Tok.isNot(AsmToken::Identifier)) return -1;
2391
2392  std::string lowerCase = Tok.getString().lower();
2393  unsigned RegNum = MatchRegisterName(lowerCase);
2394  if (!RegNum) {
2395    RegNum = StringSwitch<unsigned>(lowerCase)
2396      .Case("r13", ARM::SP)
2397      .Case("r14", ARM::LR)
2398      .Case("r15", ARM::PC)
2399      .Case("ip", ARM::R12)
2400      // Additional register name aliases for 'gas' compatibility.
2401      .Case("a1", ARM::R0)
2402      .Case("a2", ARM::R1)
2403      .Case("a3", ARM::R2)
2404      .Case("a4", ARM::R3)
2405      .Case("v1", ARM::R4)
2406      .Case("v2", ARM::R5)
2407      .Case("v3", ARM::R6)
2408      .Case("v4", ARM::R7)
2409      .Case("v5", ARM::R8)
2410      .Case("v6", ARM::R9)
2411      .Case("v7", ARM::R10)
2412      .Case("v8", ARM::R11)
2413      .Case("sb", ARM::R9)
2414      .Case("sl", ARM::R10)
2415      .Case("fp", ARM::R11)
2416      .Default(0);
2417  }
2418  if (!RegNum) {
2419    // Check for aliases registered via .req. Canonicalize to lower case.
2420    // That's more consistent since register names are case insensitive, and
2421    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2422    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2423    // If no match, return failure.
2424    if (Entry == RegisterReqs.end())
2425      return -1;
2426    Parser.Lex(); // Eat identifier token.
2427    return Entry->getValue();
2428  }
2429
2430  Parser.Lex(); // Eat identifier token.
2431
2432  return RegNum;
2433}
2434
2435// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2436// If a recoverable error occurs, return 1. If an irrecoverable error
2437// occurs, return -1. An irrecoverable error is one where tokens have been
2438// consumed in the process of trying to parse the shifter (i.e., when it is
2439// indeed a shifter operand, but malformed).
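// As a rough example of the contract (hypothetical input): for
// "mov r0, r1, lsl #2", by the time this is called for "lsl" the r1 operand
// is already on the Operands list; on success it is popped and replaced with
// a single shifted-register operand.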
2440int ARMAsmParser::tryParseShiftRegister(
2441                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2442  SMLoc S = Parser.getTok().getLoc();
2443  const AsmToken &Tok = Parser.getTok();
2444  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2445
2446  std::string lowerCase = Tok.getString().lower();
2447  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2448      .Case("asl", ARM_AM::lsl)
2449      .Case("lsl", ARM_AM::lsl)
2450      .Case("lsr", ARM_AM::lsr)
2451      .Case("asr", ARM_AM::asr)
2452      .Case("ror", ARM_AM::ror)
2453      .Case("rrx", ARM_AM::rrx)
2454      .Default(ARM_AM::no_shift);
2455
2456  if (ShiftTy == ARM_AM::no_shift)
2457    return 1;
2458
2459  Parser.Lex(); // Eat the operator.
2460
2461  // The source register for the shift has already been added to the
2462  // operand list, so we need to pop it off and combine it into the shifted
2463  // register operand instead.
2464  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2465  if (!PrevOp->isReg())
2466    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2467  int SrcReg = PrevOp->getReg();
2468  int64_t Imm = 0;
2469  int ShiftReg = 0;
2470  if (ShiftTy == ARM_AM::rrx) {
2471    // RRX doesn't have an explicit shift amount. The encoder expects
2472    // the shift register to be the same as the source register. Seems odd,
2473    // but OK.
2474    ShiftReg = SrcReg;
2475  } else {
2476    // Figure out if this is shifted by a constant or a register (for non-RRX).
2477    if (Parser.getTok().is(AsmToken::Hash) ||
2478        Parser.getTok().is(AsmToken::Dollar)) {
2479      Parser.Lex(); // Eat hash.
2480      SMLoc ImmLoc = Parser.getTok().getLoc();
2481      const MCExpr *ShiftExpr = 0;
2482      if (getParser().ParseExpression(ShiftExpr)) {
2483        Error(ImmLoc, "invalid immediate shift value");
2484        return -1;
2485      }
2486      // The expression must be evaluatable as an immediate.
2487      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2488      if (!CE) {
2489        Error(ImmLoc, "invalid immediate shift value");
2490        return -1;
2491      }
2492      // Range check the immediate.
2493      // lsl, ror: 0 <= imm <= 31
2494      // lsr, asr: 0 <= imm <= 32
2495      Imm = CE->getValue();
2496      if (Imm < 0 ||
2497          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2498          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2499        Error(ImmLoc, "immediate shift value out of range");
2500        return -1;
2501      }
2502      // Shift by zero is a nop. Always send it through as lsl.
2503      // ('as' compatibility)
2504      if (Imm == 0)
2505        ShiftTy = ARM_AM::lsl;
2506    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2507      ShiftReg = tryParseRegister();
2508      SMLoc L = Parser.getTok().getLoc();
2509      if (ShiftReg == -1) {
2510        Error (L, "expected immediate or register in shift operand");
2511        return -1;
2512      }
2513    } else {
2514      Error (Parser.getTok().getLoc(),
2515                    "expected immediate or register in shift operand");
2516      return -1;
2517    }
2518  }
2519
2520  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2521    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2522                                                         ShiftReg, Imm,
2523                                               S, Parser.getTok().getLoc()));
2524  else
2525    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2526                                               S, Parser.getTok().getLoc()));
2527
2528  return 0;
2529}
2530
2531
2532/// Try to parse a register name.  The token must be an Identifier when called.
2533/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2534/// if there is a "writeback" ('!'). Returns 'true' if it's not a register.
2535///
2536/// TODO this is likely to change to allow different register types and or to
2537/// parse for a specific register type.
2538bool ARMAsmParser::
2539tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2540  SMLoc S = Parser.getTok().getLoc();
2541  int RegNo = tryParseRegister();
2542  if (RegNo == -1)
2543    return true;
2544
2545  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2546
2547  const AsmToken &ExclaimTok = Parser.getTok();
2548  if (ExclaimTok.is(AsmToken::Exclaim)) {
2549    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2550                                               ExclaimTok.getLoc()));
2551    Parser.Lex(); // Eat exclaim token
2552    return false;
2553  }
2554
2555  // Also check for an index operand. This is only legal for vector registers,
2556  // but that'll get caught OK in operand matching, so we don't need to
2557  // explicitly filter everything else out here.
2558  if (Parser.getTok().is(AsmToken::LBrac)) {
2559    SMLoc SIdx = Parser.getTok().getLoc();
2560    Parser.Lex(); // Eat left bracket token.
2561
2562    const MCExpr *ImmVal;
2563    if (getParser().ParseExpression(ImmVal))
2564      return true;
2565    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2566    if (!MCE)
2567      return TokError("immediate value expected for vector index");
2568
2569    SMLoc E = Parser.getTok().getLoc();
2570    if (Parser.getTok().isNot(AsmToken::RBrac))
2571      return Error(E, "']' expected");
2572
2573    Parser.Lex(); // Eat right bracket token.
2574
2575    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2576                                                     SIdx, E,
2577                                                     getContext()));
2578  }
2579
2580  return false;
2581}
2582
2583/// MatchCoprocessorOperandName - Try to parse a coprocessor related
2584/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2585/// "c5", ...
2586static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2587  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2588  // but efficient.
2589  switch (Name.size()) {
2590  default: return -1;
2591  case 2:
2592    if (Name[0] != CoprocOp)
2593      return -1;
2594    switch (Name[1]) {
2595    default:  return -1;
2596    case '0': return 0;
2597    case '1': return 1;
2598    case '2': return 2;
2599    case '3': return 3;
2600    case '4': return 4;
2601    case '5': return 5;
2602    case '6': return 6;
2603    case '7': return 7;
2604    case '8': return 8;
2605    case '9': return 9;
2606    }
2607  case 3:
2608    if (Name[0] != CoprocOp || Name[1] != '1')
2609      return -1;
2610    switch (Name[2]) {
2611    default:  return -1;
2612    case '0': return 10;
2613    case '1': return 11;
2614    case '2': return 12;
2615    case '3': return 13;
2616    case '4': return 14;
2617    case '5': return 15;
2618    }
2619  }
2620}
2621
2622/// parseITCondCode - Try to parse a condition code for an IT instruction.
2623ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2624parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2625  SMLoc S = Parser.getTok().getLoc();
2626  const AsmToken &Tok = Parser.getTok();
2627  if (!Tok.is(AsmToken::Identifier))
2628    return MatchOperand_NoMatch;
2629  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2630    .Case("eq", ARMCC::EQ)
2631    .Case("ne", ARMCC::NE)
2632    .Case("hs", ARMCC::HS)
2633    .Case("cs", ARMCC::HS)
2634    .Case("lo", ARMCC::LO)
2635    .Case("cc", ARMCC::LO)
2636    .Case("mi", ARMCC::MI)
2637    .Case("pl", ARMCC::PL)
2638    .Case("vs", ARMCC::VS)
2639    .Case("vc", ARMCC::VC)
2640    .Case("hi", ARMCC::HI)
2641    .Case("ls", ARMCC::LS)
2642    .Case("ge", ARMCC::GE)
2643    .Case("lt", ARMCC::LT)
2644    .Case("gt", ARMCC::GT)
2645    .Case("le", ARMCC::LE)
2646    .Case("al", ARMCC::AL)
2647    .Default(~0U);
2648  if (CC == ~0U)
2649    return MatchOperand_NoMatch;
2650  Parser.Lex(); // Eat the token.
2651
2652  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2653
2654  return MatchOperand_Success;
2655}
2656
2657/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2658/// token must be an Identifier when called, and if it is a coprocessor
2659/// number, the token is eaten and the operand is added to the operand list.
2660ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2661parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2662  SMLoc S = Parser.getTok().getLoc();
2663  const AsmToken &Tok = Parser.getTok();
2664  if (Tok.isNot(AsmToken::Identifier))
2665    return MatchOperand_NoMatch;
2666
2667  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2668  if (Num == -1)
2669    return MatchOperand_NoMatch;
2670
2671  Parser.Lex(); // Eat identifier token.
2672  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2673  return MatchOperand_Success;
2674}
2675
2676/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2677/// token must be an Identifier when called, and if it is a coprocessor
2678/// register, the token is eaten and the operand is added to the operand list.
2679ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2680parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2681  SMLoc S = Parser.getTok().getLoc();
2682  const AsmToken &Tok = Parser.getTok();
2683  if (Tok.isNot(AsmToken::Identifier))
2684    return MatchOperand_NoMatch;
2685
2686  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2687  if (Reg == -1)
2688    return MatchOperand_NoMatch;
2689
2690  Parser.Lex(); // Eat identifier token.
2691  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2692  return MatchOperand_Success;
2693}
2694
2695/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2696/// coproc_option : '{' imm0_255 '}'
2697ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2698parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2699  SMLoc S = Parser.getTok().getLoc();
2700
2701  // If this isn't a '{', this isn't a coprocessor immediate operand.
2702  if (Parser.getTok().isNot(AsmToken::LCurly))
2703    return MatchOperand_NoMatch;
2704  Parser.Lex(); // Eat the '{'
2705
2706  const MCExpr *Expr;
2707  SMLoc Loc = Parser.getTok().getLoc();
2708  if (getParser().ParseExpression(Expr)) {
2709    Error(Loc, "illegal expression");
2710    return MatchOperand_ParseFail;
2711  }
2712  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2713  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2714    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2715    return MatchOperand_ParseFail;
2716  }
2717  int Val = CE->getValue();
2718
2719  // Check for and consume the closing '}'
2720  if (Parser.getTok().isNot(AsmToken::RCurly))
2721    return MatchOperand_ParseFail;
2722  SMLoc E = Parser.getTok().getLoc();
2723  Parser.Lex(); // Eat the '}'
2724
2725  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2726  return MatchOperand_Success;
2727}
2728
2729// For register list parsing, we need to map from raw GPR register numbering
2730// to the enumeration values. The enumeration values aren't sorted by
2731// register number due to our using "sp", "lr" and "pc" as canonical names.
2732static unsigned getNextRegister(unsigned Reg) {
2733  // If this is a GPR, we need to do it manually, otherwise we can rely
2734  // on the sort ordering of the enumeration since the other reg-classes
2735  // are sane.
2736  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2737    return Reg + 1;
2738  switch(Reg) {
2739  default: llvm_unreachable("Invalid GPR number!");
2740  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2741  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2742  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2743  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2744  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2745  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2746  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2747  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2748  }
2749}
2750
2751// Return the low-subreg of a given Q register.
2752static unsigned getDRegFromQReg(unsigned QReg) {
2753  switch (QReg) {
2754  default: llvm_unreachable("expected a Q register!");
2755  case ARM::Q0:  return ARM::D0;
2756  case ARM::Q1:  return ARM::D2;
2757  case ARM::Q2:  return ARM::D4;
2758  case ARM::Q3:  return ARM::D6;
2759  case ARM::Q4:  return ARM::D8;
2760  case ARM::Q5:  return ARM::D10;
2761  case ARM::Q6:  return ARM::D12;
2762  case ARM::Q7:  return ARM::D14;
2763  case ARM::Q8:  return ARM::D16;
2764  case ARM::Q9:  return ARM::D18;
2765  case ARM::Q10: return ARM::D20;
2766  case ARM::Q11: return ARM::D22;
2767  case ARM::Q12: return ARM::D24;
2768  case ARM::Q13: return ARM::D26;
2769  case ARM::Q14: return ARM::D28;
2770  case ARM::Q15: return ARM::D30;
2771  }
2772}
2773
2774/// Parse a register list.
2775bool ARMAsmParser::
2776parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2777  assert(Parser.getTok().is(AsmToken::LCurly) &&
2778         "Token is not a Left Curly Brace");
2779  SMLoc S = Parser.getTok().getLoc();
2780  Parser.Lex(); // Eat '{' token.
2781  SMLoc RegLoc = Parser.getTok().getLoc();
2782
2783  // Check the first register in the list to see what register class
2784  // this is a list of.
2785  int Reg = tryParseRegister();
2786  if (Reg == -1)
2787    return Error(RegLoc, "register expected");
2788
2789  // The reglist instructions have at most 16 registers, so reserve
2790  // space for that many.
2791  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2792
2793  // Allow Q regs and just interpret them as the two D sub-registers.
2794  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2795    Reg = getDRegFromQReg(Reg);
2796    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2797    ++Reg;
2798  }
2799  const MCRegisterClass *RC;
2800  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2801    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2802  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2803    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2804  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2805    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2806  else
2807    return Error(RegLoc, "invalid register in register list");
2808
2809  // Store the register.
2810  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2811
2812  // This starts immediately after the first register token in the list,
2813  // so we can see either a comma or a minus (range separator) as a legal
2814  // next token.
2815  while (Parser.getTok().is(AsmToken::Comma) ||
2816         Parser.getTok().is(AsmToken::Minus)) {
2817    if (Parser.getTok().is(AsmToken::Minus)) {
2818      Parser.Lex(); // Eat the minus.
2819      SMLoc EndLoc = Parser.getTok().getLoc();
2820      int EndReg = tryParseRegister();
2821      if (EndReg == -1)
2822        return Error(EndLoc, "register expected");
2823      // Allow Q regs and just interpret them as the two D sub-registers.
2824      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2825        EndReg = getDRegFromQReg(EndReg) + 1;
2826      // If the register is the same as the start reg, there's nothing
2827      // more to do.
2828      if (Reg == EndReg)
2829        continue;
2830      // The register must be in the same register class as the first.
2831      if (!RC->contains(EndReg))
2832        return Error(EndLoc, "invalid register in register list");
2833      // Ranges must go from low to high.
2834      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2835        return Error(EndLoc, "bad range in register list");
2836
2837      // Add all the registers in the range to the register list.
2838      while (Reg != EndReg) {
2839        Reg = getNextRegister(Reg);
2840        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2841      }
2842      continue;
2843    }
2844    Parser.Lex(); // Eat the comma.
2845    RegLoc = Parser.getTok().getLoc();
2846    int OldReg = Reg;
2847    const AsmToken RegTok = Parser.getTok();
2848    Reg = tryParseRegister();
2849    if (Reg == -1)
2850      return Error(RegLoc, "register expected");
2851    // Allow Q regs and just interpret them as the two D sub-registers.
2852    bool isQReg = false;
2853    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2854      Reg = getDRegFromQReg(Reg);
2855      isQReg = true;
2856    }
2857    // The register must be in the same register class as the first.
2858    if (!RC->contains(Reg))
2859      return Error(RegLoc, "invalid register in register list");
2860    // List must be monotonically increasing.
2861    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2862      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2863        Warning(RegLoc, "register list not in ascending order");
2864      else
2865        return Error(RegLoc, "register list not in ascending order");
2866    }
2867    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2868      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2869              ") in register list");
2870      continue;
2871    }
2872    // VFP register lists must also be contiguous.
2873    // It's OK to use the enumeration values directly here, as the
2874    // VFP register classes have the enum sorted properly.
2875    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2876        Reg != OldReg + 1)
2877      return Error(RegLoc, "non-contiguous register range");
2878    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2879    if (isQReg)
2880      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2881  }
2882
2883  SMLoc E = Parser.getTok().getLoc();
2884  if (Parser.getTok().isNot(AsmToken::RCurly))
2885    return Error(E, "'}' expected");
2886  Parser.Lex(); // Eat '}' token.
2887
2888  // Push the register list operand.
2889  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2890
2891  // The ARM system instruction variants for LDM/STM have a '^' token here.
2892  if (Parser.getTok().is(AsmToken::Caret)) {
2893    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2894    Parser.Lex(); // Eat '^' token.
2895  }
2896
2897  return false;
2898}
2899
2900// Helper function to parse the lane index for vector lists.
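// e.g. the "[2]" in "d3[2]" (IndexedLane), the "[]" in "d0[]" (AllLanes), or
// no bracket at all (NoLanes).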
2901ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2902parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2903  Index = 0; // Always return a defined index value.
2904  if (Parser.getTok().is(AsmToken::LBrac)) {
2905    Parser.Lex(); // Eat the '['.
2906    if (Parser.getTok().is(AsmToken::RBrac)) {
2907      // "Dn[]" is the 'all lanes' syntax.
2908      LaneKind = AllLanes;
2909      Parser.Lex(); // Eat the ']'.
2910      return MatchOperand_Success;
2911    }
2912
2913    // There's an optional '#' token here. Normally there wouldn't be, but
2914    // inline assembly puts one in, and it's friendly to accept that.
2915    if (Parser.getTok().is(AsmToken::Hash))
2916      Parser.Lex(); // Eat the '#'
2917
2918    const MCExpr *LaneIndex;
2919    SMLoc Loc = Parser.getTok().getLoc();
2920    if (getParser().ParseExpression(LaneIndex)) {
2921      Error(Loc, "illegal expression");
2922      return MatchOperand_ParseFail;
2923    }
2924    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2925    if (!CE) {
2926      Error(Loc, "lane index must be empty or an integer");
2927      return MatchOperand_ParseFail;
2928    }
2929    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2930      Error(Parser.getTok().getLoc(), "']' expected");
2931      return MatchOperand_ParseFail;
2932    }
2933    Parser.Lex(); // Eat the ']'.
2934    int64_t Val = CE->getValue();
2935
2936    // FIXME: Make this range check context sensitive for .8, .16, .32.
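    // (An 8-bit element has lanes [0,7], the loosest bound; .16 and .32 would
    // only allow [0,3] and [0,1] respectively.)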
2937    if (Val < 0 || Val > 7) {
2938      Error(Parser.getTok().getLoc(), "lane index out of range");
2939      return MatchOperand_ParseFail;
2940    }
2941    Index = Val;
2942    LaneKind = IndexedLane;
2943    return MatchOperand_Success;
2944  }
2945  LaneKind = NoLanes;
2946  return MatchOperand_Success;
2947}
2948
2949/// parseVectorList - Parse a vector register list.
2950ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2951parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2952  VectorLaneTy LaneKind;
2953  unsigned LaneIndex;
2954  SMLoc S = Parser.getTok().getLoc();
2955  // As an extension (to match gas), support a plain D register or Q register
2956  // (without enclosing curly braces) as a single or double entry list,
2957  // respectively.
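  // e.g. "d0" is accepted as "{d0}", and "q1" as "{d2, d3}".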
2958  if (Parser.getTok().is(AsmToken::Identifier)) {
2959    int Reg = tryParseRegister();
2960    if (Reg == -1)
2961      return MatchOperand_NoMatch;
2962    SMLoc E = Parser.getTok().getLoc();
2963    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2964      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2965      if (Res != MatchOperand_Success)
2966        return Res;
2967      switch (LaneKind) {
2968      case NoLanes:
2969        E = Parser.getTok().getLoc();
2970        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2971        break;
2972      case AllLanes:
2973        E = Parser.getTok().getLoc();
2974        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2975                                                                S, E));
2976        break;
2977      case IndexedLane:
2978        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2979                                                               LaneIndex,
2980                                                               false, S, E));
2981        break;
2982      }
2983      return MatchOperand_Success;
2984    }
2985    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2986      Reg = getDRegFromQReg(Reg);
2987      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2988      if (Res != MatchOperand_Success)
2989        return Res;
2990      switch (LaneKind) {
2991      case NoLanes:
2992        E = Parser.getTok().getLoc();
2993        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
2994                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
2995        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2996        break;
2997      case AllLanes:
2998        E = Parser.getTok().getLoc();
2999        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3000                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3001        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3002                                                                S, E));
3003        break;
3004      case IndexedLane:
3005        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3006                                                               LaneIndex,
3007                                                               false, S, E));
3008        break;
3009      }
3010      return MatchOperand_Success;
3011    }
3012    Error(S, "vector register expected");
3013    return MatchOperand_ParseFail;
3014  }
3015
3016  if (Parser.getTok().isNot(AsmToken::LCurly))
3017    return MatchOperand_NoMatch;
3018
3019  Parser.Lex(); // Eat '{' token.
3020  SMLoc RegLoc = Parser.getTok().getLoc();
3021
3022  int Reg = tryParseRegister();
3023  if (Reg == -1) {
3024    Error(RegLoc, "register expected");
3025    return MatchOperand_ParseFail;
3026  }
3027  unsigned Count = 1;
3028  int Spacing = 0;
3029  unsigned FirstReg = Reg;
3030  // The list is of D registers, but we also allow Q regs and just interpret
3031  // them as the two D sub-registers.
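  // e.g. "{q1, q2}" is handled as if it were written "{d2, d3, d4, d5}".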
3032  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3033    FirstReg = Reg = getDRegFromQReg(Reg);
3034    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3035                 // it's ambiguous with four-register single spaced.
3036    ++Reg;
3037    ++Count;
3038  }
3039  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3040    return MatchOperand_ParseFail;
3041
3042  while (Parser.getTok().is(AsmToken::Comma) ||
3043         Parser.getTok().is(AsmToken::Minus)) {
3044    if (Parser.getTok().is(AsmToken::Minus)) {
3045      if (!Spacing)
3046        Spacing = 1; // Register range implies a single spaced list.
3047      else if (Spacing == 2) {
3048        Error(Parser.getTok().getLoc(),
3049              "sequential registers in double spaced list");
3050        return MatchOperand_ParseFail;
3051      }
3052      Parser.Lex(); // Eat the minus.
3053      SMLoc EndLoc = Parser.getTok().getLoc();
3054      int EndReg = tryParseRegister();
3055      if (EndReg == -1) {
3056        Error(EndLoc, "register expected");
3057        return MatchOperand_ParseFail;
3058      }
3059      // Allow Q regs and just interpret them as the two D sub-registers.
3060      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3061        EndReg = getDRegFromQReg(EndReg) + 1;
3062      // If the register is the same as the start reg, there's nothing
3063      // more to do.
3064      if (Reg == EndReg)
3065        continue;
3066      // The register must be in the same register class as the first.
3067      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3068        Error(EndLoc, "invalid register in register list");
3069        return MatchOperand_ParseFail;
3070      }
3071      // Ranges must go from low to high.
3072      if (Reg > EndReg) {
3073        Error(EndLoc, "bad range in register list");
3074        return MatchOperand_ParseFail;
3075      }
3076      // Parse the lane specifier if present.
3077      VectorLaneTy NextLaneKind;
3078      unsigned NextLaneIndex;
3079      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3080        return MatchOperand_ParseFail;
3081      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3082        Error(EndLoc, "mismatched lane index in register list");
3083        return MatchOperand_ParseFail;
3084      }
3085      EndLoc = Parser.getTok().getLoc();
3086
3087      // Add all the registers in the range to the register list.
3088      Count += EndReg - Reg;
3089      Reg = EndReg;
3090      continue;
3091    }
3092    Parser.Lex(); // Eat the comma.
3093    RegLoc = Parser.getTok().getLoc();
3094    int OldReg = Reg;
3095    Reg = tryParseRegister();
3096    if (Reg == -1) {
3097      Error(RegLoc, "register expected");
3098      return MatchOperand_ParseFail;
3099    }
3100    // Vector register lists must be contiguous.
3101    // It's OK to use the enumeration values directly here, as the
3102    // VFP register classes have the enum sorted properly.
3103    //
3104    // The list is of D registers, but we also allow Q regs and just interpret
3105    // them as the two D sub-registers.
3106    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3107      if (!Spacing)
3108        Spacing = 1; // Register range implies a single spaced list.
3109      else if (Spacing == 2) {
3110        Error(RegLoc,
3111              "invalid register in double-spaced list (must be 'D' register)");
3112        return MatchOperand_ParseFail;
3113      }
3114      Reg = getDRegFromQReg(Reg);
3115      if (Reg != OldReg + 1) {
3116        Error(RegLoc, "non-contiguous register range");
3117        return MatchOperand_ParseFail;
3118      }
3119      ++Reg;
3120      Count += 2;
3121      // Parse the lane specifier if present.
3122      VectorLaneTy NextLaneKind;
3123      unsigned NextLaneIndex;
3124      SMLoc EndLoc = Parser.getTok().getLoc();
3125      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3126        return MatchOperand_ParseFail;
3127      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3128        Error(EndLoc, "mismatched lane index in register list");
3129        return MatchOperand_ParseFail;
3130      }
3131      continue;
3132    }
3133    // Normal D register.
3134    // Figure out the register spacing (single or double) of the list if
3135    // we don't know it already.
3136    if (!Spacing)
3137      Spacing = 1 + (Reg == OldReg + 2);
3138
3139    // Just check that it's contiguous and keep going.
3140    if (Reg != OldReg + Spacing) {
3141      Error(RegLoc, "non-contiguous register range");
3142      return MatchOperand_ParseFail;
3143    }
3144    ++Count;
3145    // Parse the lane specifier if present.
3146    VectorLaneTy NextLaneKind;
3147    unsigned NextLaneIndex;
3148    SMLoc EndLoc = Parser.getTok().getLoc();
3149    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3150      return MatchOperand_ParseFail;
3151    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3152      Error(EndLoc, "mismatched lane index in register list");
3153      return MatchOperand_ParseFail;
3154    }
3155  }
3156
3157  SMLoc E = Parser.getTok().getLoc();
3158  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3159    Error(E, "'}' expected");
3160    return MatchOperand_ParseFail;
3161  }
3162  Parser.Lex(); // Eat '}' token.
3163
3164  switch (LaneKind) {
3165  case NoLanes:
3166    // Two-register operands have been converted to the
3167    // composite register classes.
3168    if (Count == 2) {
3169      const MCRegisterClass *RC = (Spacing == 1) ?
3170        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3171        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3172      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3173    }
3174
3175    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3176                                                    (Spacing == 2), S, E));
3177    break;
3178  case AllLanes:
3179    // Two-register operands have been converted to the
3180    // composite register classes.
3181    if (Count == 2) {
3182      const MCRegisterClass *RC = (Spacing == 1) ?
3183        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3184        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3185      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3186    }
3187    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3188                                                            (Spacing == 2),
3189                                                            S, E));
3190    break;
3191  case IndexedLane:
3192    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3193                                                           LaneIndex,
3194                                                           (Spacing == 2),
3195                                                           S, E));
3196    break;
3197  }
3198  return MatchOperand_Success;
3199}
3200
3201/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
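/// For example, the "ish" in "dmb ish" or the "sy" in "dsb sy".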
3202ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3203parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3204  SMLoc S = Parser.getTok().getLoc();
3205  const AsmToken &Tok = Parser.getTok();
3206  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3207  StringRef OptStr = Tok.getString();
3208
3209  unsigned Opt = StringSwitch<unsigned>(OptStr)
3210    .Case("sy",    ARM_MB::SY)
3211    .Case("st",    ARM_MB::ST)
3212    .Case("sh",    ARM_MB::ISH)
3213    .Case("ish",   ARM_MB::ISH)
3214    .Case("shst",  ARM_MB::ISHST)
3215    .Case("ishst", ARM_MB::ISHST)
3216    .Case("nsh",   ARM_MB::NSH)
3217    .Case("un",    ARM_MB::NSH)
3218    .Case("nshst", ARM_MB::NSHST)
3219    .Case("unst",  ARM_MB::NSHST)
3220    .Case("osh",   ARM_MB::OSH)
3221    .Case("oshst", ARM_MB::OSHST)
3222    .Default(~0U);
3223
3224  if (Opt == ~0U)
3225    return MatchOperand_NoMatch;
3226
3227  Parser.Lex(); // Eat identifier token.
3228  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3229  return MatchOperand_Success;
3230}
3231
3232/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
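/// For example, the "if" in "cpsid if" (mask IRQ and FIQ), or "none".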
3233ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3234parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3235  SMLoc S = Parser.getTok().getLoc();
3236  const AsmToken &Tok = Parser.getTok();
3237  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3238  StringRef IFlagsStr = Tok.getString();
3239
3240  // An iflags string of "none" is interpreted to mean that none of the AIF
3241  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3242  unsigned IFlags = 0;
3243  if (IFlagsStr != "none") {
3244    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3245      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3246        .Case("a", ARM_PROC::A)
3247        .Case("i", ARM_PROC::I)
3248        .Case("f", ARM_PROC::F)
3249        .Default(~0U);
3250
3251      // If some specific iflag is already set, it means that some letter is
3252      // present more than once, which is not acceptable.
3253      if (Flag == ~0U || (IFlags & Flag))
3254        return MatchOperand_NoMatch;
3255
3256      IFlags |= Flag;
3257    }
3258  }
3259
3260  Parser.Lex(); // Eat identifier token.
3261  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3262  return MatchOperand_Success;
3263}
3264
3265/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
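/// For example, the "cpsr_fc" in "msr cpsr_fc, r0" or the "apsr_nzcvq" in
/// "msr apsr_nzcvq, r0".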
3266ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3267parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3268  SMLoc S = Parser.getTok().getLoc();
3269  const AsmToken &Tok = Parser.getTok();
3270  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3271  StringRef Mask = Tok.getString();
3272
3273  if (isMClass()) {
3274    // See ARMv6-M 10.1.1
3275    std::string Name = Mask.lower();
3276    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3277      .Case("apsr", 0)
3278      .Case("iapsr", 1)
3279      .Case("eapsr", 2)
3280      .Case("xpsr", 3)
3281      .Case("ipsr", 5)
3282      .Case("epsr", 6)
3283      .Case("iepsr", 7)
3284      .Case("msp", 8)
3285      .Case("psp", 9)
3286      .Case("primask", 16)
3287      .Case("basepri", 17)
3288      .Case("basepri_max", 18)
3289      .Case("faultmask", 19)
3290      .Case("control", 20)
3291      .Default(~0U);
3292
3293    if (FlagsVal == ~0U)
3294      return MatchOperand_NoMatch;
3295
3296    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3297      // basepri, basepri_max and faultmask are only valid for ARMv7-M.
3298      return MatchOperand_NoMatch;
3299
3300    Parser.Lex(); // Eat identifier token.
3301    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3302    return MatchOperand_Success;
3303  }
3304
3305  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3306  size_t Start = 0, Next = Mask.find('_');
3307  StringRef Flags = "";
3308  std::string SpecReg = Mask.slice(Start, Next).lower();
3309  if (Next != StringRef::npos)
3310    Flags = Mask.slice(Next+1, Mask.size());
3311
3312  // FlagsVal contains the complete mask:
3313  // 3-0: Mask
3314  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3315  unsigned FlagsVal = 0;
3316
3317  if (SpecReg == "apsr") {
3318    FlagsVal = StringSwitch<unsigned>(Flags)
3319    .Case("nzcvq",  0x8) // same as CPSR_f
3320    .Case("g",      0x4) // same as CPSR_s
3321    .Case("nzcvqg", 0xc) // same as CPSR_fs
3322    .Default(~0U);
3323
3324    if (FlagsVal == ~0U) {
3325      if (!Flags.empty())
3326        return MatchOperand_NoMatch;
3327      else
3328        FlagsVal = 8; // No flag
3329    }
3330  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3331    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3332    if (Flags == "all" || Flags == "")
3333      Flags = "fc";
3334    for (int i = 0, e = Flags.size(); i != e; ++i) {
3335      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3336      .Case("c", 1)
3337      .Case("x", 2)
3338      .Case("s", 4)
3339      .Case("f", 8)
3340      .Default(~0U);
3341
3342      // If some specific flag is already set, it means that some letter is
3343      // present more than once, which is not acceptable.
3344      if (Flag == ~0U || (FlagsVal & Flag))
3345        return MatchOperand_NoMatch;
3346      FlagsVal |= Flag;
3347    }
3348  } else // No match for special register.
3349    return MatchOperand_NoMatch;
3350
3351  // Special register without flags is NOT equivalent to "fc" flags.
3352  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3353  // two lines would enable gas compatibility at the expense of breaking
3354  // round-tripping.
3355  //
3356  // if (!FlagsVal)
3357  //  FlagsVal = 0x9;
3358
3359  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3360  if (SpecReg == "spsr")
3361    FlagsVal |= 16;
3362
3363  Parser.Lex(); // Eat identifier token.
3364  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3365  return MatchOperand_Success;
3366}
3367
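/// parsePKHImm - Parse the shift operand of the PKHBT/PKHTB instructions,
/// e.g. the "lsl #8" in "pkhbt r0, r1, r2, lsl #8". Only shift amounts in the
/// range [Low, High] are accepted.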
3368ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3369parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3370            int Low, int High) {
3371  const AsmToken &Tok = Parser.getTok();
3372  if (Tok.isNot(AsmToken::Identifier)) {
3373    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3374    return MatchOperand_ParseFail;
3375  }
3376  StringRef ShiftName = Tok.getString();
3377  std::string LowerOp = Op.lower();
3378  std::string UpperOp = Op.upper();
3379  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3380    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3381    return MatchOperand_ParseFail;
3382  }
3383  Parser.Lex(); // Eat shift type token.
3384
3385  // There must be a '#' and a shift amount.
3386  if (Parser.getTok().isNot(AsmToken::Hash) &&
3387      Parser.getTok().isNot(AsmToken::Dollar)) {
3388    Error(Parser.getTok().getLoc(), "'#' expected");
3389    return MatchOperand_ParseFail;
3390  }
3391  Parser.Lex(); // Eat hash token.
3392
3393  const MCExpr *ShiftAmount;
3394  SMLoc Loc = Parser.getTok().getLoc();
3395  if (getParser().ParseExpression(ShiftAmount)) {
3396    Error(Loc, "illegal expression");
3397    return MatchOperand_ParseFail;
3398  }
3399  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3400  if (!CE) {
3401    Error(Loc, "constant expression expected");
3402    return MatchOperand_ParseFail;
3403  }
3404  int Val = CE->getValue();
3405  if (Val < Low || Val > High) {
3406    Error(Loc, "immediate value out of range");
3407    return MatchOperand_ParseFail;
3408  }
3409
3410  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3411
3412  return MatchOperand_Success;
3413}
3414
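/// parseSetEndImm - Parse the endianness specifier for the SETEND instruction,
/// e.g. the "be" in "setend be".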
3415ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3416parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3417  const AsmToken &Tok = Parser.getTok();
3418  SMLoc S = Tok.getLoc();
3419  if (Tok.isNot(AsmToken::Identifier)) {
3420    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3421    return MatchOperand_ParseFail;
3422  }
3423  int Val = StringSwitch<int>(Tok.getString())
3424    .Case("be", 1)
3425    .Case("le", 0)
3426    .Default(-1);
3427  Parser.Lex(); // Eat the token.
3428
3429  if (Val == -1) {
3430    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3431    return MatchOperand_ParseFail;
3432  }
3433  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3434                                                                  getContext()),
3435                                           S, Parser.getTok().getLoc()));
3436  return MatchOperand_Success;
3437}
3438
3439/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3440/// instructions. Legal values are:
3441///     lsl #n  'n' in [0,31]
3442///     asr #n  'n' in [1,32]
3443///             n == 32 encoded as n == 0.
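/// For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4".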
3444ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3445parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3446  const AsmToken &Tok = Parser.getTok();
3447  SMLoc S = Tok.getLoc();
3448  if (Tok.isNot(AsmToken::Identifier)) {
3449    Error(S, "shift operator 'asr' or 'lsl' expected");
3450    return MatchOperand_ParseFail;
3451  }
3452  StringRef ShiftName = Tok.getString();
3453  bool isASR;
3454  if (ShiftName == "lsl" || ShiftName == "LSL")
3455    isASR = false;
3456  else if (ShiftName == "asr" || ShiftName == "ASR")
3457    isASR = true;
3458  else {
3459    Error(S, "shift operator 'asr' or 'lsl' expected");
3460    return MatchOperand_ParseFail;
3461  }
3462  Parser.Lex(); // Eat the operator.
3463
3464  // A '#' and a shift amount.
3465  if (Parser.getTok().isNot(AsmToken::Hash) &&
3466      Parser.getTok().isNot(AsmToken::Dollar)) {
3467    Error(Parser.getTok().getLoc(), "'#' expected");
3468    return MatchOperand_ParseFail;
3469  }
3470  Parser.Lex(); // Eat hash token.
3471
3472  const MCExpr *ShiftAmount;
3473  SMLoc E = Parser.getTok().getLoc();
3474  if (getParser().ParseExpression(ShiftAmount)) {
3475    Error(E, "malformed shift expression");
3476    return MatchOperand_ParseFail;
3477  }
3478  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3479  if (!CE) {
3480    Error(E, "shift amount must be an immediate");
3481    return MatchOperand_ParseFail;
3482  }
3483
3484  int64_t Val = CE->getValue();
3485  if (isASR) {
3486    // Shift amount must be in [1,32]
3487    if (Val < 1 || Val > 32) {
3488      Error(E, "'asr' shift amount must be in range [1,32]");
3489      return MatchOperand_ParseFail;
3490    }
3491    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3492    if (isThumb() && Val == 32) {
3493      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3494      return MatchOperand_ParseFail;
3495    }
3496    if (Val == 32) Val = 0;
3497  } else {
3498    // Shift amount must be in [0,31]
3499    if (Val < 0 || Val > 31) {
3500      Error(E, "'lsl' shift amount must be in range [0,31]");
3501      return MatchOperand_ParseFail;
3502    }
3503  }
3504
3505  E = Parser.getTok().getLoc();
3506  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3507
3508  return MatchOperand_Success;
3509}
3510
3511/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3512/// of instructions. Legal values are:
3513///     ror #n  'n' in {0, 8, 16, 24}
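/// For example, the "ror #8" in "sxtb r0, r1, ror #8".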
3514ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3515parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3516  const AsmToken &Tok = Parser.getTok();
3517  SMLoc S = Tok.getLoc();
3518  if (Tok.isNot(AsmToken::Identifier))
3519    return MatchOperand_NoMatch;
3520  StringRef ShiftName = Tok.getString();
3521  if (ShiftName != "ror" && ShiftName != "ROR")
3522    return MatchOperand_NoMatch;
3523  Parser.Lex(); // Eat the operator.
3524
3525  // A '#' and a rotate amount.
3526  if (Parser.getTok().isNot(AsmToken::Hash) &&
3527      Parser.getTok().isNot(AsmToken::Dollar)) {
3528    Error(Parser.getTok().getLoc(), "'#' expected");
3529    return MatchOperand_ParseFail;
3530  }
3531  Parser.Lex(); // Eat hash token.
3532
3533  const MCExpr *ShiftAmount;
3534  SMLoc E = Parser.getTok().getLoc();
3535  if (getParser().ParseExpression(ShiftAmount)) {
3536    Error(E, "malformed rotate expression");
3537    return MatchOperand_ParseFail;
3538  }
3539  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3540  if (!CE) {
3541    Error(E, "rotate amount must be an immediate");
3542    return MatchOperand_ParseFail;
3543  }
3544
3545  int64_t Val = CE->getValue();
3546  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension;
3547  // normally, zero is represented in asm by omitting the rotate operand
3548  // entirely).
3549  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3550    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3551    return MatchOperand_ParseFail;
3552  }
3553
3554  E = Parser.getTok().getLoc();
3555  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3556
3557  return MatchOperand_Success;
3558}
3559
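/// parseBitfield - Parse a bitfield descriptor (lsb and width) operand pair,
/// e.g. the "#8, #4" in "bfi r0, r1, #8, #4".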
3560ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3561parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3562  SMLoc S = Parser.getTok().getLoc();
3563  // The bitfield descriptor is really two operands, the LSB and the width.
3564  if (Parser.getTok().isNot(AsmToken::Hash) &&
3565      Parser.getTok().isNot(AsmToken::Dollar)) {
3566    Error(Parser.getTok().getLoc(), "'#' expected");
3567    return MatchOperand_ParseFail;
3568  }
3569  Parser.Lex(); // Eat hash token.
3570
3571  const MCExpr *LSBExpr;
3572  SMLoc E = Parser.getTok().getLoc();
3573  if (getParser().ParseExpression(LSBExpr)) {
3574    Error(E, "malformed immediate expression");
3575    return MatchOperand_ParseFail;
3576  }
3577  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3578  if (!CE) {
3579    Error(E, "'lsb' operand must be an immediate");
3580    return MatchOperand_ParseFail;
3581  }
3582
3583  int64_t LSB = CE->getValue();
3584  // The LSB must be in the range [0,31]
3585  if (LSB < 0 || LSB > 31) {
3586    Error(E, "'lsb' operand must be in the range [0,31]");
3587    return MatchOperand_ParseFail;
3588  }
3589  E = Parser.getTok().getLoc();
3590
3591  // Expect another immediate operand.
3592  if (Parser.getTok().isNot(AsmToken::Comma)) {
3593    Error(Parser.getTok().getLoc(), "too few operands");
3594    return MatchOperand_ParseFail;
3595  }
3596  Parser.Lex(); // Eat comma token.
3597  if (Parser.getTok().isNot(AsmToken::Hash) &&
3598      Parser.getTok().isNot(AsmToken::Dollar)) {
3599    Error(Parser.getTok().getLoc(), "'#' expected");
3600    return MatchOperand_ParseFail;
3601  }
3602  Parser.Lex(); // Eat hash token.
3603
3604  const MCExpr *WidthExpr;
3605  if (getParser().ParseExpression(WidthExpr)) {
3606    Error(E, "malformed immediate expression");
3607    return MatchOperand_ParseFail;
3608  }
3609  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3610  if (!CE) {
3611    Error(E, "'width' operand must be an immediate");
3612    return MatchOperand_ParseFail;
3613  }
3614
3615  int64_t Width = CE->getValue();
3616  // The width must be in the range [1,32-lsb]
3617  if (Width < 1 || Width > 32 - LSB) {
3618    Error(E, "'width' operand must be in the range [1,32-lsb]");
3619    return MatchOperand_ParseFail;
3620  }
3621  E = Parser.getTok().getLoc();
3622
3623  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3624
3625  return MatchOperand_Success;
3626}
3627
3628ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3629parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3630  // Check for a post-index addressing register operand. Specifically:
3631  // postidx_reg := '+' register {, shift}
3632  //              | '-' register {, shift}
3633  //              | register {, shift}
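  // e.g. the "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2".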
3634
3635  // This method must return MatchOperand_NoMatch without consuming any tokens
3636  // in the case where there is no match, as the other alternatives are
3637  // handled by other parse methods.
3638  AsmToken Tok = Parser.getTok();
3639  SMLoc S = Tok.getLoc();
3640  bool haveEaten = false;
3641  bool isAdd = true;
3642  int Reg = -1;
3643  if (Tok.is(AsmToken::Plus)) {
3644    Parser.Lex(); // Eat the '+' token.
3645    haveEaten = true;
3646  } else if (Tok.is(AsmToken::Minus)) {
3647    Parser.Lex(); // Eat the '-' token.
3648    isAdd = false;
3649    haveEaten = true;
3650  }
3651  if (Parser.getTok().is(AsmToken::Identifier))
3652    Reg = tryParseRegister();
3653  if (Reg == -1) {
3654    if (!haveEaten)
3655      return MatchOperand_NoMatch;
3656    Error(Parser.getTok().getLoc(), "register expected");
3657    return MatchOperand_ParseFail;
3658  }
3659  SMLoc E = Parser.getTok().getLoc();
3660
3661  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3662  unsigned ShiftImm = 0;
3663  if (Parser.getTok().is(AsmToken::Comma)) {
3664    Parser.Lex(); // Eat the ','.
3665    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3666      return MatchOperand_ParseFail;
3667  }
3668
3669  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3670                                                  ShiftImm, S, E));
3671
3672  return MatchOperand_Success;
3673}
3674
3675ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3676parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3677  // Check for a post-index addressing register operand. Specifically:
3678  // am3offset := '+' register
3679  //              | '-' register
3680  //              | register
3681  //              | # imm
3682  //              | # + imm
3683  //              | # - imm
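  // e.g. the "r2" in "ldrd r0, r1, [r3], r2" or the "#-4" in
  // "ldrh r0, [r1], #-4".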
3684
3685  // This method must return MatchOperand_NoMatch without consuming any tokens
3686  // in the case where there is no match, as the other alternatives are
3687  // handled by other parse methods.
3688  AsmToken Tok = Parser.getTok();
3689  SMLoc S = Tok.getLoc();
3690
3691  // Do immediates first, as we always parse those if we have a '#'.
3692  if (Parser.getTok().is(AsmToken::Hash) ||
3693      Parser.getTok().is(AsmToken::Dollar)) {
3694    Parser.Lex(); // Eat the '#'.
3695    // Explicitly look for a '-', as we need to encode negative zero
3696    // differently.
3697    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3698    const MCExpr *Offset;
3699    if (getParser().ParseExpression(Offset))
3700      return MatchOperand_ParseFail;
3701    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3702    if (!CE) {
3703      Error(S, "constant expression expected");
3704      return MatchOperand_ParseFail;
3705    }
3706    SMLoc E = Tok.getLoc();
3707    // Negative zero is encoded as the flag value INT32_MIN.
3708    int32_t Val = CE->getValue();
3709    if (isNegative && Val == 0)
3710      Val = INT32_MIN;
3711
3712    Operands.push_back(
3713      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3714
3715    return MatchOperand_Success;
3716  }
3717
3718
3719  bool haveEaten = false;
3720  bool isAdd = true;
3721  int Reg = -1;
3722  if (Tok.is(AsmToken::Plus)) {
3723    Parser.Lex(); // Eat the '+' token.
3724    haveEaten = true;
3725  } else if (Tok.is(AsmToken::Minus)) {
3726    Parser.Lex(); // Eat the '-' token.
3727    isAdd = false;
3728    haveEaten = true;
3729  }
3730  if (Parser.getTok().is(AsmToken::Identifier))
3731    Reg = tryParseRegister();
3732  if (Reg == -1) {
3733    if (!haveEaten)
3734      return MatchOperand_NoMatch;
3735    Error(Parser.getTok().getLoc(), "register expected");
3736    return MatchOperand_ParseFail;
3737  }
3738  SMLoc E = Parser.getTok().getLoc();
3739
3740  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3741                                                  0, S, E));
3742
3743  return MatchOperand_Success;
3744}
3745
3746/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3747/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3748/// when they refer to multiple MIOperands inside a single one.
3749bool ARMAsmParser::
3750cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3751             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3752  // Rt, Rt2
3753  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3754  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3755  // Create a writeback register dummy placeholder.
3756  Inst.addOperand(MCOperand::CreateReg(0));
3757  // addr
3758  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3759  // pred
3760  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3761  return true;
3762}
3763
3764/// cvtT2StrdPre - Convert parsed operands to MCInst.
3765/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3766/// when they refer to multiple MIOperands inside a single one.
3767bool ARMAsmParser::
3768cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3769             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3770  // Create a writeback register dummy placeholder.
3771  Inst.addOperand(MCOperand::CreateReg(0));
3772  // Rt, Rt2
3773  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3774  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3775  // addr
3776  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3777  // pred
3778  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3779  return true;
3780}
3781
3782/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3783/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3784/// when they refer to multiple MIOperands inside a single one.
3785bool ARMAsmParser::
3786cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3787                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3788  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3789
3790  // Create a writeback register dummy placeholder.
3791  Inst.addOperand(MCOperand::CreateImm(0));
3792
3793  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3794  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3795  return true;
3796}
3797
3798/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3799/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3800/// when they refer to multiple MIOperands inside a single one.
3801bool ARMAsmParser::
3802cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3803                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3804  // Create a writeback register dummy placeholder.
3805  Inst.addOperand(MCOperand::CreateImm(0));
3806  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3807  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3808  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3809  return true;
3810}
3811
3812/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3813/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3814/// when they refer to multiple MIOperands inside a single one.
3815bool ARMAsmParser::
3816cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3817                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3818  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3819
3820  // Create a writeback register dummy placeholder.
3821  Inst.addOperand(MCOperand::CreateImm(0));
3822
3823  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3824  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3825  return true;
3826}
3827
3828/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3829/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3830/// when they refer to multiple MIOperands inside a single one.
3831bool ARMAsmParser::
3832cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3833                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3834  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3835
3836  // Create a writeback register dummy placeholder.
3837  Inst.addOperand(MCOperand::CreateImm(0));
3838
3839  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3840  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3841  return true;
3842}
3843
3844
3845/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3846/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3847/// when they refer to multiple MIOperands inside a single one.
3848bool ARMAsmParser::
3849cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3850                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3851  // Create a writeback register dummy placeholder.
3852  Inst.addOperand(MCOperand::CreateImm(0));
3853  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3854  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3855  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3856  return true;
3857}
3858
3859/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3860/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3861/// when they refer to multiple MIOperands inside a single one.
3862bool ARMAsmParser::
3863cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3864                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3865  // Create a writeback register dummy placeholder.
3866  Inst.addOperand(MCOperand::CreateImm(0));
3867  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3868  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3869  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3870  return true;
3871}
3872
3873/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3874/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3875/// when they refer to multiple MIOperands inside a single one.
3876bool ARMAsmParser::
3877cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3878                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3879  // Create a writeback register dummy placeholder.
3880  Inst.addOperand(MCOperand::CreateImm(0));
3881  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3882  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3883  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3884  return true;
3885}
3886
3887/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3888/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3889/// when they refer to multiple MIOperands inside a single one.
3890bool ARMAsmParser::
3891cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3892                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3893  // Rt
3894  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3895  // Create a writeback register dummy placeholder.
3896  Inst.addOperand(MCOperand::CreateImm(0));
3897  // addr
3898  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3899  // offset
3900  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3901  // pred
3902  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3903  return true;
3904}
3905
3906/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3907/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3908/// when they refer to multiple MIOperands inside a single one.
3909bool ARMAsmParser::
3910cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3911                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3912  // Rt
3913  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3914  // Create a writeback register dummy placeholder.
3915  Inst.addOperand(MCOperand::CreateImm(0));
3916  // addr
3917  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3918  // offset
3919  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3920  // pred
3921  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3922  return true;
3923}
3924
3925/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3926/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3927/// when they refer to multiple MIOperands inside a single one.
3928bool ARMAsmParser::
3929cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3930                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3931  // Create a writeback register dummy placeholder.
3932  Inst.addOperand(MCOperand::CreateImm(0));
3933  // Rt
3934  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3935  // addr
3936  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3937  // offset
3938  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3939  // pred
3940  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3941  return true;
3942}
3943
3944/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3945/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3946/// when they refer to multiple MIOperands inside a single one.
3947bool ARMAsmParser::
3948cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3949                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3950  // Create a writeback register dummy placeholder.
3951  Inst.addOperand(MCOperand::CreateImm(0));
3952  // Rt
3953  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3954  // addr
3955  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3956  // offset
3957  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3958  // pred
3959  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3960  return true;
3961}
3962
3963/// cvtLdrdPre - Convert parsed operands to MCInst.
3964/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3965/// when they refer to multiple MIOperands inside a single one.
3966bool ARMAsmParser::
3967cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3968           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3969  // Rt, Rt2
3970  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3971  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3972  // Create a writeback register dummy placeholder.
3973  Inst.addOperand(MCOperand::CreateImm(0));
3974  // addr
3975  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3976  // pred
3977  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3978  return true;
3979}
3980
3981/// cvtStrdPre - Convert parsed operands to MCInst.
3982/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3983/// when they refer to multiple MIOperands inside a single one.
3984bool ARMAsmParser::
3985cvtStrdPre(MCInst &Inst, unsigned Opcode,
3986           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3987  // Create a writeback register dummy placeholder.
3988  Inst.addOperand(MCOperand::CreateImm(0));
3989  // Rt, Rt2
3990  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3991  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3992  // addr
3993  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3994  // pred
3995  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3996  return true;
3997}
3998
3999/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4000/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4001/// when they refer to multiple MIOperands inside a single one.
4002bool ARMAsmParser::
4003cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4004                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4005  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4006  // Create a writeback register dummy placeholder.
4007  Inst.addOperand(MCOperand::CreateImm(0));
4008  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4009  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4010  return true;
4011}
4012
4013/// cvtThumbMultiply - Convert parsed operands to MCInst.
4014/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4015/// when they refer to multiple MIOperands inside a single one.
4016bool ARMAsmParser::
4017cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4018           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4019  // The second source operand must be the same register as the destination
4020  // operand.
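  // e.g. in 16-bit Thumb, "muls r0, r1, r0" is accepted but "muls r0, r1, r2"
  // is not.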
4021  if (Operands.size() == 6 &&
4022      (((ARMOperand*)Operands[3])->getReg() !=
4023       ((ARMOperand*)Operands[5])->getReg()) &&
4024      (((ARMOperand*)Operands[3])->getReg() !=
4025       ((ARMOperand*)Operands[4])->getReg())) {
4026    Error(Operands[3]->getStartLoc(),
4027          "destination register must match source register");
4028    return false;
4029  }
4030  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4031  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4032  // If we have a three-operand form, make sure to set Rn to be the operand
4033  // that isn't the same as Rd.
4034  unsigned RegOp = 4;
4035  if (Operands.size() == 6 &&
4036      ((ARMOperand*)Operands[4])->getReg() ==
4037        ((ARMOperand*)Operands[3])->getReg())
4038    RegOp = 5;
4039  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4040  Inst.addOperand(Inst.getOperand(0));
4041  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4042
4043  return true;
4044}
4045
4046bool ARMAsmParser::
4047cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4048              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4049  // Vd
4050  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4051  // Create a writeback register dummy placeholder.
4052  Inst.addOperand(MCOperand::CreateImm(0));
4053  // Vn
4054  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4055  // pred
4056  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4057  return true;
4058}
4059
4060bool ARMAsmParser::
4061cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4062                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4063  // Vd
4064  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4065  // Create a writeback register dummy placeholder.
4066  Inst.addOperand(MCOperand::CreateImm(0));
4067  // Vn
4068  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4069  // Vm
4070  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4071  // pred
4072  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4073  return true;
4074}
4075
4076bool ARMAsmParser::
4077cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4078              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4079  // Create a writeback register dummy placeholder.
4080  Inst.addOperand(MCOperand::CreateImm(0));
4081  // Vn
4082  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4083  // Vt
4084  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4085  // pred
4086  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4087  return true;
4088}
4089
4090bool ARMAsmParser::
4091cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4092                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4093  // Create a writeback register dummy placeholder.
4094  Inst.addOperand(MCOperand::CreateImm(0));
4095  // Vn
4096  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4097  // Vm
4098  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4099  // Vt
4100  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4101  // pred
4102  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4103  return true;
4104}
4105
4106/// Parse an ARM memory expression. Return false if successful; otherwise issue
4107/// an error and return true.  The first token must be a '[' when called.
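/// e.g. "[r0]", "[r0, #4]", "[r0, r1, lsl #2]", or "[r0, :128]" (alignment).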
4108bool ARMAsmParser::
4109parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4110  SMLoc S, E;
4111  assert(Parser.getTok().is(AsmToken::LBrac) &&
4112         "Token is not a Left Bracket");
4113  S = Parser.getTok().getLoc();
4114  Parser.Lex(); // Eat left bracket token.
4115
4116  const AsmToken &BaseRegTok = Parser.getTok();
4117  int BaseRegNum = tryParseRegister();
4118  if (BaseRegNum == -1)
4119    return Error(BaseRegTok.getLoc(), "register expected");
4120
4121  // The next token must either be a comma or a closing bracket.
4122  const AsmToken &Tok = Parser.getTok();
4123  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4124    return Error(Tok.getLoc(), "malformed memory operand");
4125
4126  if (Tok.is(AsmToken::RBrac)) {
4127    E = Tok.getLoc();
4128    Parser.Lex(); // Eat right bracket token.
4129
4130    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4131                                             0, 0, false, S, E));
4132
4133    // If there's a pre-indexing writeback marker, '!', just add it as a token
4134    // operand. It's rather odd, but syntactically valid.
4135    if (Parser.getTok().is(AsmToken::Exclaim)) {
4136      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4137      Parser.Lex(); // Eat the '!'.
4138    }
4139
4140    return false;
4141  }
4142
4143  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4144  Parser.Lex(); // Eat the comma.
4145
4146  // If we have a ':', it's an alignment specifier.
4147  if (Parser.getTok().is(AsmToken::Colon)) {
4148    Parser.Lex(); // Eat the ':'.
4149    E = Parser.getTok().getLoc();
4150
4151    const MCExpr *Expr;
4152    if (getParser().ParseExpression(Expr))
4153     return true;
4154
4155    // The expression has to be a constant. Memory references with relocations
4156    // don't come through here, as they use the <label> forms of the relevant
4157    // instructions.
4158    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4159    if (!CE)
4160      return Error(E, "constant expression expected");
4161
4162    unsigned Align = 0;
4163    switch (CE->getValue()) {
4164    default:
4165      return Error(E,
4166                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4167    case 16:  Align = 2; break;
4168    case 32:  Align = 4; break;
4169    case 64:  Align = 8; break;
4170    case 128: Align = 16; break;
4171    case 256: Align = 32; break;
4172    }
4173
4174    // Now we should have the closing ']'
4175    E = Parser.getTok().getLoc();
4176    if (Parser.getTok().isNot(AsmToken::RBrac))
4177      return Error(E, "']' expected");
4178    Parser.Lex(); // Eat right bracket token.
4179
4180    // Don't worry about range checking the value here. That's handled by
4181    // the is*() predicates.
4182    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4183                                             ARM_AM::no_shift, 0, Align,
4184                                             false, S, E));
4185
4186    // If there's a pre-indexing writeback marker, '!', just add it as a token
4187    // operand.
4188    if (Parser.getTok().is(AsmToken::Exclaim)) {
4189      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4190      Parser.Lex(); // Eat the '!'.
4191    }
4192
4193    return false;
4194  }
4195
4196  // If we have a '#', it's an immediate offset, else assume it's a register
4197  // offset. Be friendly and also accept a plain integer (without a leading
4198  // hash) for gas compatibility.
4199  if (Parser.getTok().is(AsmToken::Hash) ||
4200      Parser.getTok().is(AsmToken::Dollar) ||
4201      Parser.getTok().is(AsmToken::Integer)) {
4202    if (Parser.getTok().isNot(AsmToken::Integer))
4203      Parser.Lex(); // Eat the '#'.
4204    E = Parser.getTok().getLoc();
4205
4206    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4207    const MCExpr *Offset;
4208    if (getParser().ParseExpression(Offset))
4209     return true;
4210
4211    // The expression has to be a constant. Memory references with relocations
4212    // don't come through here, as they use the <label> forms of the relevant
4213    // instructions.
4214    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4215    if (!CE)
4216      return Error(E, "constant expression expected");
4217
4218    // If the constant was #-0, represent it as INT32_MIN.
4219    int32_t Val = CE->getValue();
4220    if (isNegative && Val == 0)
4221      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4222
4223    // Now we should have the closing ']'
4224    E = Parser.getTok().getLoc();
4225    if (Parser.getTok().isNot(AsmToken::RBrac))
4226      return Error(E, "']' expected");
4227    Parser.Lex(); // Eat right bracket token.
4228
4229    // Don't worry about range checking the value here. That's handled by
4230    // the is*() predicates.
4231    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4232                                             ARM_AM::no_shift, 0, 0,
4233                                             false, S, E));
4234
4235    // If there's a pre-indexing writeback marker, '!', just add it as a token
4236    // operand.
4237    if (Parser.getTok().is(AsmToken::Exclaim)) {
4238      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4239      Parser.Lex(); // Eat the '!'.
4240    }
4241
4242    return false;
4243  }
4244
4245  // The register offset is optionally preceded by a '+' or '-'
4246  bool isNegative = false;
4247  if (Parser.getTok().is(AsmToken::Minus)) {
4248    isNegative = true;
4249    Parser.Lex(); // Eat the '-'.
4250  } else if (Parser.getTok().is(AsmToken::Plus)) {
4251    // Nothing to do.
4252    Parser.Lex(); // Eat the '+'.
4253  }
4254
4255  E = Parser.getTok().getLoc();
4256  int OffsetRegNum = tryParseRegister();
4257  if (OffsetRegNum == -1)
4258    return Error(E, "register expected");
4259
4260  // If there's a shift operator, handle it.
4261  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4262  unsigned ShiftImm = 0;
4263  if (Parser.getTok().is(AsmToken::Comma)) {
4264    Parser.Lex(); // Eat the ','.
4265    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4266      return true;
4267  }
4268
4269  // Now we should have the closing ']'
4270  E = Parser.getTok().getLoc();
4271  if (Parser.getTok().isNot(AsmToken::RBrac))
4272    return Error(E, "']' expected");
4273  Parser.Lex(); // Eat right bracket token.
4274
4275  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4276                                           ShiftType, ShiftImm, 0, isNegative,
4277                                           S, E));
4278
4279  // If there's a pre-indexing writeback marker, '!', just add it as a token
4280  // operand.
4281  if (Parser.getTok().is(AsmToken::Exclaim)) {
4282    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4283    Parser.Lex(); // Eat the '!'.
4284  }
4285
4286  return false;
4287}
4288
4289/// parseMemRegOffsetShift - one of these two:
4290///   ( lsl | lsr | asr | ror ) , # shift_amount
4291///   rrx
4292/// return false if it parses a shift, otherwise it returns true.
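/// e.g. the "lsl #2" in "[r0, r1, lsl #2]"; "rrx" takes no shift amount.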
4293bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4294                                          unsigned &Amount) {
4295  SMLoc Loc = Parser.getTok().getLoc();
4296  const AsmToken &Tok = Parser.getTok();
4297  if (Tok.isNot(AsmToken::Identifier))
4298    return true;
4299  StringRef ShiftName = Tok.getString();
4300  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4301      ShiftName == "asl" || ShiftName == "ASL")
4302    St = ARM_AM::lsl;
4303  else if (ShiftName == "lsr" || ShiftName == "LSR")
4304    St = ARM_AM::lsr;
4305  else if (ShiftName == "asr" || ShiftName == "ASR")
4306    St = ARM_AM::asr;
4307  else if (ShiftName == "ror" || ShiftName == "ROR")
4308    St = ARM_AM::ror;
4309  else if (ShiftName == "rrx" || ShiftName == "RRX")
4310    St = ARM_AM::rrx;
4311  else
4312    return Error(Loc, "illegal shift operator");
4313  Parser.Lex(); // Eat shift type token.
4314
4315  // rrx stands alone.
4316  Amount = 0;
4317  if (St != ARM_AM::rrx) {
4318    Loc = Parser.getTok().getLoc();
4319    // A '#' and a shift amount.
4320    const AsmToken &HashTok = Parser.getTok();
4321    if (HashTok.isNot(AsmToken::Hash) &&
4322        HashTok.isNot(AsmToken::Dollar))
4323      return Error(HashTok.getLoc(), "'#' expected");
4324    Parser.Lex(); // Eat hash token.
4325
4326    const MCExpr *Expr;
4327    if (getParser().ParseExpression(Expr))
4328      return true;
4329    // Range check the immediate.
4330    // lsl, ror: 0 <= imm <= 31
4331    // lsr, asr: 0 <= imm <= 32
4332    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4333    if (!CE)
4334      return Error(Loc, "shift amount must be an immediate");
4335    int64_t Imm = CE->getValue();
4336    if (Imm < 0 ||
4337        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4338        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4339      return Error(Loc, "immediate shift value out of range");
4340    Amount = Imm;
4341  }
4342
4343  return false;
4344}
4345
4346/// parseFPImm - A floating point immediate expression operand.
4347ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4348parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4349  // Anything that can accept a floating point constant as an operand
4350  // needs to go through here, as the regular ParseExpression is
4351  // integer only.
4352  //
4353  // This routine still creates a generic Immediate operand, containing
4354  // a bitcast of the 64-bit floating point value. The various operands
4355  // that accept floats can check whether the value is valid for them
4356  // via the standard is*() predicates.
4357
4358  SMLoc S = Parser.getTok().getLoc();
4359
4360  if (Parser.getTok().isNot(AsmToken::Hash) &&
4361      Parser.getTok().isNot(AsmToken::Dollar))
4362    return MatchOperand_NoMatch;
4363
4364  // Disambiguate the VMOV forms that can accept an FP immediate.
4365  // vmov.f32 <sreg>, #imm
4366  // vmov.f64 <dreg>, #imm
4367  // vmov.f32 <dreg>, #imm  @ vector f32x2
4368  // vmov.f32 <qreg>, #imm  @ vector f32x4
4369  //
4370  // There are also the NEON VMOV instructions which expect an
4371  // integer constant. Make sure we don't try to parse an FPImm
4372  // for these:
4373  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4374  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4375  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4376                           TyOp->getToken() != ".f64"))
4377    return MatchOperand_NoMatch;
4378
4379  Parser.Lex(); // Eat the '#'.
4380
4381  // Handle negation, as that still comes through as a separate token.
4382  bool isNegative = false;
4383  if (Parser.getTok().is(AsmToken::Minus)) {
4384    isNegative = true;
4385    Parser.Lex();
4386  }
4387  const AsmToken &Tok = Parser.getTok();
4388  SMLoc Loc = Tok.getLoc();
4389  if (Tok.is(AsmToken::Real)) {
4390    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4391    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4392    // If we had a '-' in front, toggle the sign bit.
4393    IntVal ^= (uint64_t)isNegative << 31;
4394    Parser.Lex(); // Eat the token.
4395    Operands.push_back(ARMOperand::CreateImm(
4396          MCConstantExpr::Create(IntVal, getContext()),
4397          S, Parser.getTok().getLoc()));
4398    return MatchOperand_Success;
4399  }
4400  // Also handle plain integers. Instructions which allow floating point
4401  // immediates also allow a raw encoded 8-bit value.
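      // For example, "vmov.f32 s0, #112" passes the raw imm8 encoding 0x70,
      // which corresponds to the value 1.0.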
4402  if (Tok.is(AsmToken::Integer)) {
4403    int64_t Val = Tok.getIntVal();
4404    Parser.Lex(); // Eat the token.
4405    if (Val > 255 || Val < 0) {
4406      Error(Loc, "encoded floating point value out of range");
4407      return MatchOperand_ParseFail;
4408    }
4409    double RealVal = ARM_AM::getFPImmFloat(Val);
4410    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4411    Operands.push_back(ARMOperand::CreateImm(
4412        MCConstantExpr::Create(Val, getContext()), S,
4413        Parser.getTok().getLoc()));
4414    return MatchOperand_Success;
4415  }
4416
4417  Error(Loc, "invalid floating point immediate");
4418  return MatchOperand_ParseFail;
4419}
4420
4421/// Parse an ARM instruction operand.  For now this parses the operand regardless
4422/// of the mnemonic.
4423bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4424                                StringRef Mnemonic) {
4425  SMLoc S, E;
4426
4427  // Check if the current operand has a custom associated parser; if so, try to
4428  // custom parse the operand, or fall back to the general approach.
4429  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4430  if (ResTy == MatchOperand_Success)
4431    return false;
4432  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4433  // there was a match, but an error occurred, in which case, just return that
4434  // the operand parsing failed.
4435  if (ResTy == MatchOperand_ParseFail)
4436    return true;
4437
4438  switch (getLexer().getKind()) {
4439  default:
4440    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4441    return true;
4442  case AsmToken::Identifier: {
4443    if (!tryParseRegisterWithWriteBack(Operands))
4444      return false;
4445    int Res = tryParseShiftRegister(Operands);
4446    if (Res == 0) // success
4447      return false;
4448    else if (Res == -1) // irrecoverable error
4449      return true;
4450    // If this is VMRS, check for the apsr_nzcv operand.
4451    if (Mnemonic == "vmrs" &&
4452        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4453      S = Parser.getTok().getLoc();
4454      Parser.Lex();
4455      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4456      return false;
4457    }
4458
4459    // Fall through for the Identifier case that is not a register or a
4460    // special name.
4461  }
4462  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4463  case AsmToken::Integer: // things like 1f and 2b as branch targets
4464  case AsmToken::String:  // quoted label names.
4465  case AsmToken::Dot: {   // . as a branch target
4466    // This was not a register so parse other operands that start with an
4467    // identifier (like labels) as expressions and create them as immediates.
4468    const MCExpr *IdVal;
4469    S = Parser.getTok().getLoc();
4470    if (getParser().ParseExpression(IdVal))
4471      return true;
4472    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4473    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4474    return false;
4475  }
4476  case AsmToken::LBrac:
4477    return parseMemory(Operands);
4478  case AsmToken::LCurly:
4479    return parseRegisterList(Operands);
4480  case AsmToken::Dollar:
4481  case AsmToken::Hash: {
4482    // #42 -> immediate.
4483    // TODO: ":lower16:" and ":upper16:" modifiers after '#' and before the immediate
4484    S = Parser.getTok().getLoc();
4485    Parser.Lex();
4486    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4487    const MCExpr *ImmVal;
4488    if (getParser().ParseExpression(ImmVal))
4489      return true;
4490    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4491    if (CE) {
4492      int32_t Val = CE->getValue();
4493      if (isNegative && Val == 0)
4494        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4495    }
4496    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4497    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4498    return false;
4499  }
4500  case AsmToken::Colon: {
4501    // ":lower16:" and ":upper16:" expression prefixes
4502    // FIXME: Check it's an expression prefix,
4503    // e.g. (FOO - :lower16:BAR) isn't legal.
4504    ARMMCExpr::VariantKind RefKind;
4505    if (parsePrefix(RefKind))
4506      return true;
4507
4508    const MCExpr *SubExprVal;
4509    if (getParser().ParseExpression(SubExprVal))
4510      return true;
4511
4512    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4513                                                   getContext());
4514    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4515    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4516    return false;
4517  }
4518  }
4519}
4520
4521// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4522//  :lower16: and :upper16:.
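    // For example, "movw r0, :lower16:sym" and "movt r0, :upper16:sym"
    // (where 'sym' is an arbitrary label) use these prefixes.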
4523bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4524  RefKind = ARMMCExpr::VK_ARM_None;
4525
4526  // :lower16: and :upper16: modifiers
4527  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4528  Parser.Lex(); // Eat ':'
4529
4530  if (getLexer().isNot(AsmToken::Identifier)) {
4531    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4532    return true;
4533  }
4534
4535  StringRef IDVal = Parser.getTok().getIdentifier();
4536  if (IDVal == "lower16") {
4537    RefKind = ARMMCExpr::VK_ARM_LO16;
4538  } else if (IDVal == "upper16") {
4539    RefKind = ARMMCExpr::VK_ARM_HI16;
4540  } else {
4541    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4542    return true;
4543  }
4544  Parser.Lex();
4545
4546  if (getLexer().isNot(AsmToken::Colon)) {
4547    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4548    return true;
4549  }
4550  Parser.Lex(); // Eat the last ':'
4551  return false;
4552}
4553
4554/// \brief Given a mnemonic, split out possible predication code and carry
4555/// setting letters to form a canonical mnemonic and flags.
4556//
4557// FIXME: Would be nice to autogen this.
4558// FIXME: This is a bit of a maze of special cases.
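    //
    // For example, "addseq" splits into the canonical mnemonic "add" with
    // PredicationCode == ARMCC::EQ and CarrySetting == true, and "cpsie"
    // splits into "cps" with ProcessorIMod == ARM_PROC::IE.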
4559StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4560                                      unsigned &PredicationCode,
4561                                      bool &CarrySetting,
4562                                      unsigned &ProcessorIMod,
4563                                      StringRef &ITMask) {
4564  PredicationCode = ARMCC::AL;
4565  CarrySetting = false;
4566  ProcessorIMod = 0;
4567
4568  // Ignore some mnemonics we know aren't predicated forms.
4569  //
4570  // FIXME: Would be nice to autogen this.
4571  if ((Mnemonic == "movs" && isThumb()) ||
4572      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4573      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4574      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4575      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4576      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4577      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4578      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4579      Mnemonic == "fmuls")
4580    return Mnemonic;
4581
4582  // First, split out any predication code. Ignore mnemonics we know aren't
4583  // predicated but do have a carry-setting 's' and so weren't caught above.
4584  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4585      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4586      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4587      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4588    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4589      .Case("eq", ARMCC::EQ)
4590      .Case("ne", ARMCC::NE)
4591      .Case("hs", ARMCC::HS)
4592      .Case("cs", ARMCC::HS)
4593      .Case("lo", ARMCC::LO)
4594      .Case("cc", ARMCC::LO)
4595      .Case("mi", ARMCC::MI)
4596      .Case("pl", ARMCC::PL)
4597      .Case("vs", ARMCC::VS)
4598      .Case("vc", ARMCC::VC)
4599      .Case("hi", ARMCC::HI)
4600      .Case("ls", ARMCC::LS)
4601      .Case("ge", ARMCC::GE)
4602      .Case("lt", ARMCC::LT)
4603      .Case("gt", ARMCC::GT)
4604      .Case("le", ARMCC::LE)
4605      .Case("al", ARMCC::AL)
4606      .Default(~0U);
4607    if (CC != ~0U) {
4608      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4609      PredicationCode = CC;
4610    }
4611  }
4612
4613  // Next, determine if we have a carry setting bit. We explicitly ignore all
4614  // the instructions we know end in 's'.
4615  if (Mnemonic.endswith("s") &&
4616      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4617        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4618        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4619        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4620        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4621        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4622        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4623        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4624        (Mnemonic == "movs" && isThumb()))) {
4625    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4626    CarrySetting = true;
4627  }
4628
4629  // The "cps" instruction can have an interrupt mode operand which is glued into
4630  // the mnemonic. Check if this is the case, split it out, and parse the imod operand.
4631  if (Mnemonic.startswith("cps")) {
4632    // Split out any imod code.
4633    unsigned IMod =
4634      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4635      .Case("ie", ARM_PROC::IE)
4636      .Case("id", ARM_PROC::ID)
4637      .Default(~0U);
4638    if (IMod != ~0U) {
4639      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4640      ProcessorIMod = IMod;
4641    }
4642  }
4643
4644  // The "it" instruction has the condition mask on the end of the mnemonic.
4645  if (Mnemonic.startswith("it")) {
4646    ITMask = Mnemonic.slice(2, Mnemonic.size());
4647    Mnemonic = Mnemonic.slice(0, 2);
4648  }
4649
4650  return Mnemonic;
4651}
4652
4653/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4654/// inclusion of carry set or predication code operands.
4655//
4656// FIXME: It would be nice to autogen this.
4657void ARMAsmParser::
4658getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4659                      bool &CanAcceptPredicationCode) {
4660  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4661      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4662      Mnemonic == "add" || Mnemonic == "adc" ||
4663      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4664      Mnemonic == "orr" || Mnemonic == "mvn" ||
4665      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4666      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4667      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4668                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4669                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4670    CanAcceptCarrySet = true;
4671  } else
4672    CanAcceptCarrySet = false;
4673
4674  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4675      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4676      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4677      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4678      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4679      (Mnemonic == "clrex" && !isThumb()) ||
4680      (Mnemonic == "nop" && isThumbOne()) ||
4681      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4682        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4683        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4684      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4685       !isThumb()) ||
4686      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4687    CanAcceptPredicationCode = false;
4688  } else
4689    CanAcceptPredicationCode = true;
4690
4691  if (isThumb()) {
4692    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4693        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4694      CanAcceptPredicationCode = false;
4695  }
4696}
4697
4698bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4699                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4700  // FIXME: This is all horribly hacky. We really need a better way to deal
4701  // with optional operands like this in the matcher table.
4702
4703  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4704  // another does not. Specifically, the MOVW instruction does not. So we
4705  // special case it here and remove the defaulted (non-setting) cc_out
4706  // operand if that's the instruction we're trying to match.
4707  //
4708  // We do this as post-processing of the explicit operands rather than just
4709  // conditionally adding the cc_out in the first place because we need
4710  // to check the type of the parsed immediate operand.
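      // For example, "mov r0, #0x1234" can only be MOVW (0x1234 is not a
      // valid modified immediate), so the cc_out operand is dropped.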
4711  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4712      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4713      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4714      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4715    return true;
4716
4717  // Register-register 'add' for thumb does not have a cc_out operand
4718  // when there are only two register operands.
4719  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4720      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4721      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4722      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4723    return true;
4724  // Register-register 'add' for thumb does not have a cc_out operand
4725  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4726  // have to check the immediate range here since Thumb2 has a variant
4727  // that can handle a different range and has a cc_out operand.
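      // For example, "add r1, sp, #32" matches this form, so its cc_out
      // operand is dropped here.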
4728  if (((isThumb() && Mnemonic == "add") ||
4729       (isThumbTwo() && Mnemonic == "sub")) &&
4730      Operands.size() == 6 &&
4731      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4732      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4733      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4734      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4735      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4736       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4737    return true;
4738  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4739  // imm0_4095 variant. That's the least-preferred variant when
4740  // selecting via the generic "add" mnemonic, so to know that we
4741  // should remove the cc_out operand, we have to explicitly check that
4742  // it's not one of the other variants. Ugh.
4743  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4744      Operands.size() == 6 &&
4745      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4746      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4747      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4748    // Nest conditions rather than one big 'if' statement for readability.
4749    //
4750    // If either register is a high reg, it's either one of the SP
4751    // variants (handled above) or a 32-bit encoding, so we just
4752    // check against T3. If the second register is the PC, this is an
4753    // alternate form of ADR, which uses encoding T4, so check for that too.
4754    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4755         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4756        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4757        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4758      return false;
4759    // If both registers are low, we're in an IT block, and the immediate is
4760    // in range, we should use encoding T1 instead, which has a cc_out.
4761    if (inITBlock() &&
4762        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4763        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4764        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4765      return false;
4766
4767    // Otherwise, we use encoding T4, which does not have a cc_out
4768    // operand.
4769    return true;
4770  }
4771
4772  // The thumb2 multiply instruction doesn't have a CCOut register, so
4773  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4774  // use the 16-bit encoding or not.
4775  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4776      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4777      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4778      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4779      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4780      // If the registers aren't low regs, the destination reg isn't the
4781      // same as one of the source regs, or the cc_out operand is zero
4782      // outside of an IT block, we have to use the 32-bit encoding, so
4783      // remove the cc_out operand.
4784      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4785       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4786       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4787       !inITBlock() ||
4788       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4789        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4790        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4791        static_cast<ARMOperand*>(Operands[4])->getReg())))
4792    return true;
4793
4794  // Also check the 'mul' syntax variant that doesn't specify an explicit
4795  // destination register.
4796  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4797      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4798      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4799      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4800      // If the registers aren't low regs  or the cc_out operand is zero
4801      // outside of an IT block, we have to use the 32-bit encoding, so
4802      // remove the cc_out operand.
4803      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4804       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4805       !inITBlock()))
4806    return true;
4807
4808
4809
4810  // Register-register 'add/sub' for thumb does not have a cc_out operand
4811  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4812  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4813  // right, this will result in better diagnostics (which operand is off)
4814  // anyway.
4815  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4816      (Operands.size() == 5 || Operands.size() == 6) &&
4817      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4818      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4819      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4820    return true;
4821
4822  return false;
4823}
4824
4825static bool isDataTypeToken(StringRef Tok) {
4826  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4827    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4828    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4829    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4830    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4831    Tok == ".f" || Tok == ".d";
4832}
4833
4834// FIXME: This bit should probably be handled via an explicit match class
4835// in the .td files that matches the suffix instead of having it be
4836// a literal string token the way it is now.
4837static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4838  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4839}
4840
4841static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4842/// Parse an ARM instruction mnemonic followed by its operands.
4843bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4844                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4845  // Apply mnemonic aliases before doing anything else, as the destination
4846  // mnemonic may include suffixes and we want to handle them normally.
4847  // The generic tblgen'erated code does this later, at the start of
4848  // MatchInstructionImpl(), but that's too late for aliases that include
4849  // any sort of suffix.
4850  unsigned AvailableFeatures = getAvailableFeatures();
4851  applyMnemonicAliases(Name, AvailableFeatures);
4852
4853  // First check for the ARM-specific .req directive.
4854  if (Parser.getTok().is(AsmToken::Identifier) &&
4855      Parser.getTok().getIdentifier() == ".req") {
4856    parseDirectiveReq(Name, NameLoc);
4857    // We always return 'error' for this, as we're done with this
4858    // statement and don't need to match the instruction.
4859    return true;
4860  }
4861
4862  // Create the leading tokens for the mnemonic, split by '.' characters.
4863  size_t Start = 0, Next = Name.find('.');
4864  StringRef Mnemonic = Name.slice(Start, Next);
4865
4866  // Split out the predication code and carry setting flag from the mnemonic.
4867  unsigned PredicationCode;
4868  unsigned ProcessorIMod;
4869  bool CarrySetting;
4870  StringRef ITMask;
4871  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4872                           ProcessorIMod, ITMask);
4873
4874  // In Thumb1, only the branch (B) instruction can be predicated.
4875  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4876    Parser.EatToEndOfStatement();
4877    return Error(NameLoc, "conditional execution not supported in Thumb1");
4878  }
4879
4880  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4881
4882  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4883  // is the mask as it will be for the IT encoding if the conditional
4884  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4885  // where the conditional bit0 is zero, the instruction post-processing
4886  // will adjust the mask accordingly.
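      // For example, "itte" leaves ITMask == "te" and produces Mask == 0b1010
      // here: bit 3 for the 't', bit 2 for the 'e', and bit 1 as the
      // end-of-mask marker.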
4887  if (Mnemonic == "it") {
4888    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4889    if (ITMask.size() > 3) {
4890      Parser.EatToEndOfStatement();
4891      return Error(Loc, "too many conditions on IT instruction");
4892    }
4893    unsigned Mask = 8;
4894    for (unsigned i = ITMask.size(); i != 0; --i) {
4895      char pos = ITMask[i - 1];
4896      if (pos != 't' && pos != 'e') {
4897        Parser.EatToEndOfStatement();
4898        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4899      }
4900      Mask >>= 1;
4901      if (ITMask[i - 1] == 't')
4902        Mask |= 8;
4903    }
4904    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4905  }
4906
4907  // FIXME: This is all a pretty gross hack. We should automatically handle
4908  // optional operands like this via tblgen.
4909
4910  // Next, add the CCOut and ConditionCode operands, if needed.
4911  //
4912  // For mnemonics which can ever incorporate a carry setting bit or predication
4913  // code, our matching model involves us always generating CCOut and
4914  // ConditionCode operands to match the mnemonic "as written" and then we let
4915  // the matcher deal with finding the right instruction or generating an
4916  // appropriate error.
4917  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4918  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4919
4920  // If we had a carry-set on an instruction that can't do that, issue an
4921  // error.
4922  if (!CanAcceptCarrySet && CarrySetting) {
4923    Parser.EatToEndOfStatement();
4924    return Error(NameLoc, "instruction '" + Mnemonic +
4925                 "' can not set flags, but 's' suffix specified");
4926  }
4927  // If we had a predication code on an instruction that can't do that, issue an
4928  // error.
4929  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4930    Parser.EatToEndOfStatement();
4931    return Error(NameLoc, "instruction '" + Mnemonic +
4932                 "' is not predicable, but condition code specified");
4933  }
4934
4935  // Add the carry setting operand, if necessary.
4936  if (CanAcceptCarrySet) {
4937    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4938    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4939                                               Loc));
4940  }
4941
4942  // Add the predication code operand, if necessary.
4943  if (CanAcceptPredicationCode) {
4944    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4945                                      CarrySetting);
4946    Operands.push_back(ARMOperand::CreateCondCode(
4947                         ARMCC::CondCodes(PredicationCode), Loc));
4948  }
4949
4950  // Add the processor imod operand, if necessary.
4951  if (ProcessorIMod) {
4952    Operands.push_back(ARMOperand::CreateImm(
4953          MCConstantExpr::Create(ProcessorIMod, getContext()),
4954                                 NameLoc, NameLoc));
4955  }
4956
4957  // Add the remaining tokens in the mnemonic.
4958  while (Next != StringRef::npos) {
4959    Start = Next;
4960    Next = Name.find('.', Start + 1);
4961    StringRef ExtraToken = Name.slice(Start, Next);
4962
4963    // Some NEON instructions have an optional datatype suffix that is
4964    // completely ignored. Check for that.
4965    if (isDataTypeToken(ExtraToken) &&
4966        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4967      continue;
4968
4969    if (ExtraToken != ".n") {
4970      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4971      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4972    }
4973  }
4974
4975  // Read the remaining operands.
4976  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4977    // Read the first operand.
4978    if (parseOperand(Operands, Mnemonic)) {
4979      Parser.EatToEndOfStatement();
4980      return true;
4981    }
4982
4983    while (getLexer().is(AsmToken::Comma)) {
4984      Parser.Lex();  // Eat the comma.
4985
4986      // Parse and remember the operand.
4987      if (parseOperand(Operands, Mnemonic)) {
4988        Parser.EatToEndOfStatement();
4989        return true;
4990      }
4991    }
4992  }
4993
4994  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4995    SMLoc Loc = getLexer().getLoc();
4996    Parser.EatToEndOfStatement();
4997    return Error(Loc, "unexpected token in argument list");
4998  }
4999
5000  Parser.Lex(); // Consume the EndOfStatement
5001
5002  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5003  // do and don't have a cc_out optional-def operand. With some spot-checks
5004  // of the operand list, we can figure out which variant we're trying to
5005  // parse and adjust accordingly before actually matching. We shouldn't ever
5006  // try to remove a cc_out operand that was explicitly set on the
5007  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5008  // table-driven matcher doesn't fit well with the ARM instruction set.
5009  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5010    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5011    Operands.erase(Operands.begin() + 1);
5012    delete Op;
5013  }
5014
5015  // ARM mode 'blx' needs special handling, as the register operand version
5016  // is predicable, but the label operand version is not. So, we can't rely
5017  // on the Mnemonic based checking to correctly figure out when to put
5018  // a k_CondCode operand in the list. If we're trying to match the label
5019  // version, remove the k_CondCode operand here.
5020  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5021      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5022    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5023    Operands.erase(Operands.begin() + 1);
5024    delete Op;
5025  }
5026
5027  // The vector-compare-to-zero instructions have a literal token "#0" at
5028  // the end that comes through here as an immediate operand. Convert it to a
5029  // token to play nicely with the matcher.
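      // For example, the "#0" in "vceq.i32 d0, d1, #0" becomes a "#0" token.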
5030  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5031      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5032      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5033    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5034    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5035    if (CE && CE->getValue() == 0) {
5036      Operands.erase(Operands.begin() + 5);
5037      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5038      delete Op;
5039    }
5040  }
5041  // VCMP{E} does the same thing, but with a different operand count.
5042  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5043      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5044    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5045    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5046    if (CE && CE->getValue() == 0) {
5047      Operands.erase(Operands.begin() + 4);
5048      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5049      delete Op;
5050    }
5051  }
5052  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5053  // end. Convert it to a token here. Take care not to convert those
5054  // that should hit the Thumb2 encoding.
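      // For example, in Thumb1 "rsbs r2, r3, #0" has its "#0" converted to a
      // token here.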
5055  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5056      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5057      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5058      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5059    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5060    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5061    if (CE && CE->getValue() == 0 &&
5062        (isThumbOne() ||
5063         // The cc_out operand matches the IT block.
5064         ((inITBlock() != CarrySetting) &&
5065         // Neither register operand is a high register.
5066         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5067          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5068      Operands.erase(Operands.begin() + 5);
5069      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5070      delete Op;
5071    }
5072  }
5073
5074  return false;
5075}
5076
5077// Validate context-sensitive operand constraints.
5078
5079// return 'true' if register list contains non-low GPR registers,
5080// 'false' otherwise, treating HiReg (if nonzero) as also permitted. If Reg
5081// is in the register list, set 'containsReg' to true.
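    // For tPOP, for example, HiReg is ARM::PC, so "pop {r0-r3, pc}" is
    // accepted even though PC is not a low register.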
5082static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5083                                 unsigned HiReg, bool &containsReg) {
5084  containsReg = false;
5085  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5086    unsigned OpReg = Inst.getOperand(i).getReg();
5087    if (OpReg == Reg)
5088      containsReg = true;
5089    // Anything other than a low register isn't legal here.
5090    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5091      return true;
5092  }
5093  return false;
5094}
5095
5096// Check if the specified register is in the register list of the inst,
5097// starting at the indicated operand number.
5098static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5099  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5100    unsigned OpReg = Inst.getOperand(i).getReg();
5101    if (OpReg == Reg)
5102      return true;
5103  }
5104  return false;
5105}
5106
5107// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5108// the ARMInsts array) instead. Getting that here requires awkward
5109// API changes, though. Better way?
5110namespace llvm {
5111extern const MCInstrDesc ARMInsts[];
5112}
5113static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5114  return ARMInsts[Opcode];
5115}
5116
5117// FIXME: We would really like to be able to tablegen'erate this.
5118bool ARMAsmParser::
5119validateInstruction(MCInst &Inst,
5120                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5121  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5122  SMLoc Loc = Operands[0]->getStartLoc();
5123  // Check the IT block state first.
5124  // NOTE: BKPT instruction has the interesting property of being
5125  // allowed in IT blocks, but not being predicable.  It just always
5126  // executes.
5127  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5128      Inst.getOpcode() != ARM::BKPT) {
5129    unsigned bit = 1;
5130    if (ITState.FirstCond)
5131      ITState.FirstCond = false;
5132    else
5133      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5134    // The instruction must be predicable.
5135    if (!MCID.isPredicable())
5136      return Error(Loc, "instructions in IT block must be predicable");
5137    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5138    unsigned ITCond = bit ? ITState.Cond :
5139      ARMCC::getOppositeCondition(ITState.Cond);
5140    if (Cond != ITCond) {
5141      // Find the condition code Operand to get its SMLoc information.
5142      SMLoc CondLoc;
5143      for (unsigned i = 1; i < Operands.size(); ++i)
5144        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5145          CondLoc = Operands[i]->getStartLoc();
5146      return Error(CondLoc, "incorrect condition in IT block; got '" +
5147                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5148                   "', but expected '" +
5149                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5150    }
5151  // Check for non-'al' condition codes outside of the IT block.
5152  } else if (isThumbTwo() && MCID.isPredicable() &&
5153             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5154             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5155             Inst.getOpcode() != ARM::t2B)
5156    return Error(Loc, "predicated instructions must be in IT block");
5157
5158  switch (Inst.getOpcode()) {
5159  case ARM::LDRD:
5160  case ARM::LDRD_PRE:
5161  case ARM::LDRD_POST:
5162  case ARM::LDREXD: {
5163    // Rt2 must be Rt + 1.
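        // For example, "ldrd r0, r1, [r2]" is accepted, while
        // "ldrd r0, r2, [r3]" is rejected here.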
5164    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5165    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5166    if (Rt2 != Rt + 1)
5167      return Error(Operands[3]->getStartLoc(),
5168                   "destination operands must be sequential");
5169    return false;
5170  }
5171  case ARM::STRD: {
5172    // Rt2 must be Rt + 1.
5173    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5174    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5175    if (Rt2 != Rt + 1)
5176      return Error(Operands[3]->getStartLoc(),
5177                   "source operands must be sequential");
5178    return false;
5179  }
5180  case ARM::STRD_PRE:
5181  case ARM::STRD_POST:
5182  case ARM::STREXD: {
5183    // Rt2 must be Rt + 1.
5184    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5185    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5186    if (Rt2 != Rt + 1)
5187      return Error(Operands[3]->getStartLoc(),
5188                   "source operands must be sequential");
5189    return false;
5190  }
5191  case ARM::SBFX:
5192  case ARM::UBFX: {
5193    // width must be in range [1, 32-lsb]
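        // For example, "sbfx r0, r1, #8, #24" is fine (8 + 24 == 32), but a
        // width of 25 with lsb 8 is rejected.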
5194    unsigned lsb = Inst.getOperand(2).getImm();
5195    unsigned widthm1 = Inst.getOperand(3).getImm();
5196    if (widthm1 >= 32 - lsb)
5197      return Error(Operands[5]->getStartLoc(),
5198                   "bitfield width must be in range [1,32-lsb]");
5199    return false;
5200  }
5201  case ARM::tLDMIA: {
5202    // If we're parsing Thumb2, the .w variant is available and handles
5203    // most cases that are normally illegal for a Thumb1 LDM
5204    // instruction. We'll make the transformation in processInstruction()
5205    // if necessary.
5206    //
5207    // Thumb LDM instructions are writeback iff the base register is not
5208    // in the register list.
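        // For example, "ldmia r4!, {r0-r3}" requires the '!' since r4 is not
        // in the list, while "ldmia r4, {r3, r4}" must not have one.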
5209    unsigned Rn = Inst.getOperand(0).getReg();
5210    bool hasWritebackToken =
5211      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5212       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5213    bool listContainsBase;
5214    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5215      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5216                   "registers must be in range r0-r7");
5217    // If we should have writeback, then there should be a '!' token.
5218    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5219      return Error(Operands[2]->getStartLoc(),
5220                   "writeback operator '!' expected");
5221    // If we should not have writeback, there must not be a '!'. This is
5222    // true even for the 32-bit wide encodings.
5223    if (listContainsBase && hasWritebackToken)
5224      return Error(Operands[3]->getStartLoc(),
5225                   "writeback operator '!' not allowed when base register "
5226                   "in register list");
5227
5228    break;
5229  }
5230  case ARM::t2LDMIA_UPD: {
5231    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5232      return Error(Operands[4]->getStartLoc(),
5233                   "writeback operator '!' not allowed when base register "
5234                   "in register list");
5235    break;
5236  }
5237  // As with ldm/stm, push and pop have hi-reg-handling versions in Thumb2,
5238  // so only issue a diagnostic for Thumb1. The instructions will be
5239  // switched to the t2 encodings in processInstruction() if necessary.
5240  case ARM::tPOP: {
5241    bool listContainsBase;
5242    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5243        !isThumbTwo())
5244      return Error(Operands[2]->getStartLoc(),
5245                   "registers must be in range r0-r7 or pc");
5246    break;
5247  }
5248  case ARM::tPUSH: {
5249    bool listContainsBase;
5250    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5251        !isThumbTwo())
5252      return Error(Operands[2]->getStartLoc(),
5253                   "registers must be in range r0-r7 or lr");
5254    break;
5255  }
5256  case ARM::tSTMIA_UPD: {
5257    bool listContainsBase;
5258    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5259      return Error(Operands[4]->getStartLoc(),
5260                   "registers must be in range r0-r7");
5261    break;
5262  }
5263  }
5264
5265  return false;
5266}
5267
5268static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5269  switch(Opc) {
5270  default: llvm_unreachable("unexpected opcode!");
5271  // VST1LN
5272  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5273  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5274  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5275  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5276  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5277  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5278  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5279  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5280  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5281
5282  // VST2LN
5283  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5284  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5285  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5286  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5287  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5288
5289  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5290  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5291  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5292  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5293  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5294
5295  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5296  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5297  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5298  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5299  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5300
5301  // VST3LN
5302  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5303  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5304  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5305  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5306  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5307  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5308  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5309  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5310  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5311  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5312  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5313  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5314  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5315  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5316  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5317
5318  // VST3
5319  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5320  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5321  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5322  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5323  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5324  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5325  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5326  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5327  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5328  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5329  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5330  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5331  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5332  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5333  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5334  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5335  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5336  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5337
5338  // VST4LN
5339  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5340  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5341  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5342  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5343  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5344  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5345  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5346  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5347  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5348  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5349  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5350  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5351  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5352  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5353  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5354
5355  // VST4
5356  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5357  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5358  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5359  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5360  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5361  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5362  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5363  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5364  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5365  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5366  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5367  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5368  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5369  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5370  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5371  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5372  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5373  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5374  }
5375}
5376
5377static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5378  switch(Opc) {
5379  default: llvm_unreachable("unexpected opcode!");
5380  // VLD1LN
5381  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5382  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5383  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5384  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5385  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5386  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5387  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5388  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5389  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5390
5391  // VLD2LN
5392  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5393  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5394  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5395  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5396  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5397  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5398  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5399  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5400  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5401  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5402  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5403  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5404  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5405  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5406  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5407
5408  // VLD3DUP
5409  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5410  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5411  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5412  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5413  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5414  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5415  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5416  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5417  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5418  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5419  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5420  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5421  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5422  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5423  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5424  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5425  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5426  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5427
5428  // VLD3LN
5429  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5430  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5431  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5432  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5433  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5434  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5435  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5436  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5437  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5438  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5439  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5440  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5441  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5442  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5443  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5444
5445  // VLD3
5446  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5447  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5448  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5449  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5450  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5451  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5452  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5453  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5454  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5455  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5456  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5457  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5458  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5459  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5460  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5461  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5462  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5463  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5464
5465  // VLD4LN
5466  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5467  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5468  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5469  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5470  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5471  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5472  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5473  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5474  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5475  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5476  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5477  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5478  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5479  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5480  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5481
5482  // VLD4DUP
5483  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5484  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5485  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5486  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5487  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5488  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5489  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5490  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5491  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5492  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5493  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5494  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5495  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5496  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5497  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5498  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5499  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5500  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5501
5502  // VLD4
5503  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5504  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5505  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5506  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5507  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5508  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5509  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5510  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5511  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5512  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5513  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5514  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5515  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5516  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5517  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5518  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5519  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5520  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5521  }
5522}
5523
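// processInstruction - Post-process a successfully matched instruction,
// expanding pseudo-opcodes and complex assembly aliases into the real
// MCInst forms. Returns true if the instruction was modified and false if
// it was left untouched.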
5524bool ARMAsmParser::
5525processInstruction(MCInst &Inst,
5526                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5527  switch (Inst.getOpcode()) {
5528  // Aliases for alternate PC+imm syntax of LDR instructions.
5529  case ARM::t2LDRpcrel:
5530    Inst.setOpcode(ARM::t2LDRpci);
5531    return true;
5532  case ARM::t2LDRBpcrel:
5533    Inst.setOpcode(ARM::t2LDRBpci);
5534    return true;
5535  case ARM::t2LDRHpcrel:
5536    Inst.setOpcode(ARM::t2LDRHpci);
5537    return true;
5538  case ARM::t2LDRSBpcrel:
5539    Inst.setOpcode(ARM::t2LDRSBpci);
5540    return true;
5541  case ARM::t2LDRSHpcrel:
5542    Inst.setOpcode(ARM::t2LDRSHpci);
5543    return true;
5544  // Handle NEON VST complex aliases.
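  // The VSTnLN*_Asm_* lane-store pseudos that follow keep their operands in
  // source order: Vd, lane, Rn, alignment, [Rm,] predicate. The real
  // lane-store encodings expect the memory operands first and the register
  // list spelled out explicitly (Vd, Vd+Spacing, ...), so each case below
  // rebuilds the MCInst in that order. Spacing comes from getRealVSTOpcode:
  // 1 for a list of consecutive D registers, 2 for the even/odd-spaced "q"
  // pseudo variants.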
5545  case ARM::VST1LNdWB_register_Asm_8:
5546  case ARM::VST1LNdWB_register_Asm_16:
5547  case ARM::VST1LNdWB_register_Asm_32: {
5548    MCInst TmpInst;
5549    // Shuffle the operands around so the lane index operand is in the
5550    // right place.
5551    unsigned Spacing;
5552    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5553    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5554    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5555    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5556    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5557    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5558    TmpInst.addOperand(Inst.getOperand(1)); // lane
5559    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5560    TmpInst.addOperand(Inst.getOperand(6));
5561    Inst = TmpInst;
5562    return true;
5563  }
5564
5565  case ARM::VST2LNdWB_register_Asm_8:
5566  case ARM::VST2LNdWB_register_Asm_16:
5567  case ARM::VST2LNdWB_register_Asm_32:
5568  case ARM::VST2LNqWB_register_Asm_16:
5569  case ARM::VST2LNqWB_register_Asm_32: {
5570    MCInst TmpInst;
5571    // Shuffle the operands around so the lane index operand is in the
5572    // right place.
5573    unsigned Spacing;
5574    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5575    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5576    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5577    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5578    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5579    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5580    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5581                                            Spacing));
5582    TmpInst.addOperand(Inst.getOperand(1)); // lane
5583    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5584    TmpInst.addOperand(Inst.getOperand(6));
5585    Inst = TmpInst;
5586    return true;
5587  }
5588
5589  case ARM::VST3LNdWB_register_Asm_8:
5590  case ARM::VST3LNdWB_register_Asm_16:
5591  case ARM::VST3LNdWB_register_Asm_32:
5592  case ARM::VST3LNqWB_register_Asm_16:
5593  case ARM::VST3LNqWB_register_Asm_32: {
5594    MCInst TmpInst;
5595    // Shuffle the operands around so the lane index operand is in the
5596    // right place.
5597    unsigned Spacing;
5598    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5599    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5600    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5601    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5602    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5603    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5604    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5605                                            Spacing));
5606    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5607                                            Spacing * 2));
5608    TmpInst.addOperand(Inst.getOperand(1)); // lane
5609    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5610    TmpInst.addOperand(Inst.getOperand(6));
5611    Inst = TmpInst;
5612    return true;
5613  }
5614
5615  case ARM::VST4LNdWB_register_Asm_8:
5616  case ARM::VST4LNdWB_register_Asm_16:
5617  case ARM::VST4LNdWB_register_Asm_32:
5618  case ARM::VST4LNqWB_register_Asm_16:
5619  case ARM::VST4LNqWB_register_Asm_32: {
5620    MCInst TmpInst;
5621    // Shuffle the operands around so the lane index operand is in the
5622    // right place.
5623    unsigned Spacing;
5624    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5625    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5626    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5627    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5628    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5629    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5630    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5631                                            Spacing));
5632    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5633                                            Spacing * 2));
5634    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5635                                            Spacing * 3));
5636    TmpInst.addOperand(Inst.getOperand(1)); // lane
5637    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5638    TmpInst.addOperand(Inst.getOperand(6));
5639    Inst = TmpInst;
5640    return true;
5641  }
5642
5643  case ARM::VST1LNdWB_fixed_Asm_8:
5644  case ARM::VST1LNdWB_fixed_Asm_16:
5645  case ARM::VST1LNdWB_fixed_Asm_32: {
5646    MCInst TmpInst;
5647    // Shuffle the operands around so the lane index operand is in the
5648    // right place.
5649    unsigned Spacing;
5650    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5651    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5652    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5653    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5654    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5655    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5656    TmpInst.addOperand(Inst.getOperand(1)); // lane
5657    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5658    TmpInst.addOperand(Inst.getOperand(5));
5659    Inst = TmpInst;
5660    return true;
5661  }
5662
5663  case ARM::VST2LNdWB_fixed_Asm_8:
5664  case ARM::VST2LNdWB_fixed_Asm_16:
5665  case ARM::VST2LNdWB_fixed_Asm_32:
5666  case ARM::VST2LNqWB_fixed_Asm_16:
5667  case ARM::VST2LNqWB_fixed_Asm_32: {
5668    MCInst TmpInst;
5669    // Shuffle the operands around so the lane index operand is in the
5670    // right place.
5671    unsigned Spacing;
5672    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5673    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5674    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5675    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5676    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5677    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5678    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5679                                            Spacing));
5680    TmpInst.addOperand(Inst.getOperand(1)); // lane
5681    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5682    TmpInst.addOperand(Inst.getOperand(5));
5683    Inst = TmpInst;
5684    return true;
5685  }
5686
5687  case ARM::VST3LNdWB_fixed_Asm_8:
5688  case ARM::VST3LNdWB_fixed_Asm_16:
5689  case ARM::VST3LNdWB_fixed_Asm_32:
5690  case ARM::VST3LNqWB_fixed_Asm_16:
5691  case ARM::VST3LNqWB_fixed_Asm_32: {
5692    MCInst TmpInst;
5693    // Shuffle the operands around so the lane index operand is in the
5694    // right place.
5695    unsigned Spacing;
5696    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5697    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5698    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5699    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5700    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5701    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5702    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5703                                            Spacing));
5704    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5705                                            Spacing * 2));
5706    TmpInst.addOperand(Inst.getOperand(1)); // lane
5707    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5708    TmpInst.addOperand(Inst.getOperand(5));
5709    Inst = TmpInst;
5710    return true;
5711  }
5712
5713  case ARM::VST4LNdWB_fixed_Asm_8:
5714  case ARM::VST4LNdWB_fixed_Asm_16:
5715  case ARM::VST4LNdWB_fixed_Asm_32:
5716  case ARM::VST4LNqWB_fixed_Asm_16:
5717  case ARM::VST4LNqWB_fixed_Asm_32: {
5718    MCInst TmpInst;
5719    // Shuffle the operands around so the lane index operand is in the
5720    // right place.
5721    unsigned Spacing;
5722    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5723    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5724    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5725    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5726    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5727    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5728    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5729                                            Spacing));
5730    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5731                                            Spacing * 2));
5732    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5733                                            Spacing * 3));
5734    TmpInst.addOperand(Inst.getOperand(1)); // lane
5735    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5736    TmpInst.addOperand(Inst.getOperand(5));
5737    Inst = TmpInst;
5738    return true;
5739  }
5740
5741  case ARM::VST1LNdAsm_8:
5742  case ARM::VST1LNdAsm_16:
5743  case ARM::VST1LNdAsm_32: {
5744    MCInst TmpInst;
5745    // Shuffle the operands around so the lane index operand is in the
5746    // right place.
5747    unsigned Spacing;
5748    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5749    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5750    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5751    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5752    TmpInst.addOperand(Inst.getOperand(1)); // lane
5753    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5754    TmpInst.addOperand(Inst.getOperand(5));
5755    Inst = TmpInst;
5756    return true;
5757  }
5758
5759  case ARM::VST2LNdAsm_8:
5760  case ARM::VST2LNdAsm_16:
5761  case ARM::VST2LNdAsm_32:
5762  case ARM::VST2LNqAsm_16:
5763  case ARM::VST2LNqAsm_32: {
5764    MCInst TmpInst;
5765    // Shuffle the operands around so the lane index operand is in the
5766    // right place.
5767    unsigned Spacing;
5768    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5769    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5770    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5771    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5772    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5773                                            Spacing));
5774    TmpInst.addOperand(Inst.getOperand(1)); // lane
5775    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5776    TmpInst.addOperand(Inst.getOperand(5));
5777    Inst = TmpInst;
5778    return true;
5779  }
5780
5781  case ARM::VST3LNdAsm_8:
5782  case ARM::VST3LNdAsm_16:
5783  case ARM::VST3LNdAsm_32:
5784  case ARM::VST3LNqAsm_16:
5785  case ARM::VST3LNqAsm_32: {
5786    MCInst TmpInst;
5787    // Shuffle the operands around so the lane index operand is in the
5788    // right place.
5789    unsigned Spacing;
5790    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5791    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5792    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5793    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5794    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5795                                            Spacing));
5796    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5797                                            Spacing * 2));
5798    TmpInst.addOperand(Inst.getOperand(1)); // lane
5799    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5800    TmpInst.addOperand(Inst.getOperand(5));
5801    Inst = TmpInst;
5802    return true;
5803  }
5804
5805  case ARM::VST4LNdAsm_8:
5806  case ARM::VST4LNdAsm_16:
5807  case ARM::VST4LNdAsm_32:
5808  case ARM::VST4LNqAsm_16:
5809  case ARM::VST4LNqAsm_32: {
5810    MCInst TmpInst;
5811    // Shuffle the operands around so the lane index operand is in the
5812    // right place.
5813    unsigned Spacing;
5814    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5815    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5816    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5817    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5818    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5819                                            Spacing));
5820    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5821                                            Spacing * 2));
5822    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5823                                            Spacing * 3));
5824    TmpInst.addOperand(Inst.getOperand(1)); // lane
5825    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5826    TmpInst.addOperand(Inst.getOperand(5));
5827    Inst = TmpInst;
5828    return true;
5829  }
5830
5831  // Handle NEON VLD complex aliases.
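  // The lane loads mirror the stores above, with two differences: the
  // destination register list (Vd, Vd+Spacing, ...) is added first as the
  // defs, and the original Vd list is repeated after the memory operands as
  // the tied source, since the lanes that aren't loaded keep their old
  // contents.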
5832  case ARM::VLD1LNdWB_register_Asm_8:
5833  case ARM::VLD1LNdWB_register_Asm_16:
5834  case ARM::VLD1LNdWB_register_Asm_32: {
5835    MCInst TmpInst;
5836    // Shuffle the operands around so the lane index operand is in the
5837    // right place.
5838    unsigned Spacing;
5839    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5840    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5841    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5842    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5843    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5844    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5845    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5846    TmpInst.addOperand(Inst.getOperand(1)); // lane
5847    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5848    TmpInst.addOperand(Inst.getOperand(6));
5849    Inst = TmpInst;
5850    return true;
5851  }
5852
5853  case ARM::VLD2LNdWB_register_Asm_8:
5854  case ARM::VLD2LNdWB_register_Asm_16:
5855  case ARM::VLD2LNdWB_register_Asm_32:
5856  case ARM::VLD2LNqWB_register_Asm_16:
5857  case ARM::VLD2LNqWB_register_Asm_32: {
5858    MCInst TmpInst;
5859    // Shuffle the operands around so the lane index operand is in the
5860    // right place.
5861    unsigned Spacing;
5862    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5863    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5864    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5865                                            Spacing));
5866    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5867    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5868    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5869    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5870    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5871    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5872                                            Spacing));
5873    TmpInst.addOperand(Inst.getOperand(1)); // lane
5874    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5875    TmpInst.addOperand(Inst.getOperand(6));
5876    Inst = TmpInst;
5877    return true;
5878  }
5879
5880  case ARM::VLD3LNdWB_register_Asm_8:
5881  case ARM::VLD3LNdWB_register_Asm_16:
5882  case ARM::VLD3LNdWB_register_Asm_32:
5883  case ARM::VLD3LNqWB_register_Asm_16:
5884  case ARM::VLD3LNqWB_register_Asm_32: {
5885    MCInst TmpInst;
5886    // Shuffle the operands around so the lane index operand is in the
5887    // right place.
5888    unsigned Spacing;
5889    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5890    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5891    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5892                                            Spacing));
5893    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5894                                            Spacing * 2));
5895    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5896    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5897    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5898    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5899    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5900    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5901                                            Spacing));
5902    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5903                                            Spacing * 2));
5904    TmpInst.addOperand(Inst.getOperand(1)); // lane
5905    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5906    TmpInst.addOperand(Inst.getOperand(6));
5907    Inst = TmpInst;
5908    return true;
5909  }
5910
5911  case ARM::VLD4LNdWB_register_Asm_8:
5912  case ARM::VLD4LNdWB_register_Asm_16:
5913  case ARM::VLD4LNdWB_register_Asm_32:
5914  case ARM::VLD4LNqWB_register_Asm_16:
5915  case ARM::VLD4LNqWB_register_Asm_32: {
5916    MCInst TmpInst;
5917    // Shuffle the operands around so the lane index operand is in the
5918    // right place.
5919    unsigned Spacing;
5920    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5921    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5922    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5923                                            Spacing));
5924    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5925                                            Spacing * 2));
5926    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5927                                            Spacing * 3));
5928    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5929    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5930    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5931    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5932    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5933    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5934                                            Spacing));
5935    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5936                                            Spacing * 2));
5937    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5938                                            Spacing * 3));
5939    TmpInst.addOperand(Inst.getOperand(1)); // lane
5940    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5941    TmpInst.addOperand(Inst.getOperand(6));
5942    Inst = TmpInst;
5943    return true;
5944  }
5945
5946  case ARM::VLD1LNdWB_fixed_Asm_8:
5947  case ARM::VLD1LNdWB_fixed_Asm_16:
5948  case ARM::VLD1LNdWB_fixed_Asm_32: {
5949    MCInst TmpInst;
5950    // Shuffle the operands around so the lane index operand is in the
5951    // right place.
5952    unsigned Spacing;
5953    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5954    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5955    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5956    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5957    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5958    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5959    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5960    TmpInst.addOperand(Inst.getOperand(1)); // lane
5961    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5962    TmpInst.addOperand(Inst.getOperand(5));
5963    Inst = TmpInst;
5964    return true;
5965  }
5966
5967  case ARM::VLD2LNdWB_fixed_Asm_8:
5968  case ARM::VLD2LNdWB_fixed_Asm_16:
5969  case ARM::VLD2LNdWB_fixed_Asm_32:
5970  case ARM::VLD2LNqWB_fixed_Asm_16:
5971  case ARM::VLD2LNqWB_fixed_Asm_32: {
5972    MCInst TmpInst;
5973    // Shuffle the operands around so the lane index operand is in the
5974    // right place.
5975    unsigned Spacing;
5976    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5977    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5978    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5979                                            Spacing));
5980    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5981    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5982    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5983    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5984    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5985    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5986                                            Spacing));
5987    TmpInst.addOperand(Inst.getOperand(1)); // lane
5988    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5989    TmpInst.addOperand(Inst.getOperand(5));
5990    Inst = TmpInst;
5991    return true;
5992  }
5993
5994  case ARM::VLD3LNdWB_fixed_Asm_8:
5995  case ARM::VLD3LNdWB_fixed_Asm_16:
5996  case ARM::VLD3LNdWB_fixed_Asm_32:
5997  case ARM::VLD3LNqWB_fixed_Asm_16:
5998  case ARM::VLD3LNqWB_fixed_Asm_32: {
5999    MCInst TmpInst;
6000    // Shuffle the operands around so the lane index operand is in the
6001    // right place.
6002    unsigned Spacing;
6003    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6004    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6005    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6006                                            Spacing));
6007    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6008                                            Spacing * 2));
6009    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6010    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6011    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6012    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6013    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6014    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6015                                            Spacing));
6016    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6017                                            Spacing * 2));
6018    TmpInst.addOperand(Inst.getOperand(1)); // lane
6019    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6020    TmpInst.addOperand(Inst.getOperand(5));
6021    Inst = TmpInst;
6022    return true;
6023  }
6024
6025  case ARM::VLD4LNdWB_fixed_Asm_8:
6026  case ARM::VLD4LNdWB_fixed_Asm_16:
6027  case ARM::VLD4LNdWB_fixed_Asm_32:
6028  case ARM::VLD4LNqWB_fixed_Asm_16:
6029  case ARM::VLD4LNqWB_fixed_Asm_32: {
6030    MCInst TmpInst;
6031    // Shuffle the operands around so the lane index operand is in the
6032    // right place.
6033    unsigned Spacing;
6034    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6035    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6036    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6037                                            Spacing));
6038    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6039                                            Spacing * 2));
6040    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6041                                            Spacing * 3));
6042    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6043    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6044    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6045    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6046    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6047    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6048                                            Spacing));
6049    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6050                                            Spacing * 2));
6051    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6052                                            Spacing * 3));
6053    TmpInst.addOperand(Inst.getOperand(1)); // lane
6054    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6055    TmpInst.addOperand(Inst.getOperand(5));
6056    Inst = TmpInst;
6057    return true;
6058  }
6059
6060  case ARM::VLD1LNdAsm_8:
6061  case ARM::VLD1LNdAsm_16:
6062  case ARM::VLD1LNdAsm_32: {
6063    MCInst TmpInst;
6064    // Shuffle the operands around so the lane index operand is in the
6065    // right place.
6066    unsigned Spacing;
6067    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6068    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6069    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6070    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6071    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6072    TmpInst.addOperand(Inst.getOperand(1)); // lane
6073    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6074    TmpInst.addOperand(Inst.getOperand(5));
6075    Inst = TmpInst;
6076    return true;
6077  }
6078
6079  case ARM::VLD2LNdAsm_8:
6080  case ARM::VLD2LNdAsm_16:
6081  case ARM::VLD2LNdAsm_32:
6082  case ARM::VLD2LNqAsm_16:
6083  case ARM::VLD2LNqAsm_32: {
6084    MCInst TmpInst;
6085    // Shuffle the operands around so the lane index operand is in the
6086    // right place.
6087    unsigned Spacing;
6088    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6089    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6090    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6091                                            Spacing));
6092    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6093    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6094    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6095    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6096                                            Spacing));
6097    TmpInst.addOperand(Inst.getOperand(1)); // lane
6098    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6099    TmpInst.addOperand(Inst.getOperand(5));
6100    Inst = TmpInst;
6101    return true;
6102  }
6103
6104  case ARM::VLD3LNdAsm_8:
6105  case ARM::VLD3LNdAsm_16:
6106  case ARM::VLD3LNdAsm_32:
6107  case ARM::VLD3LNqAsm_16:
6108  case ARM::VLD3LNqAsm_32: {
6109    MCInst TmpInst;
6110    // Shuffle the operands around so the lane index operand is in the
6111    // right place.
6112    unsigned Spacing;
6113    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6114    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6115    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6116                                            Spacing));
6117    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6118                                            Spacing * 2));
6119    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6120    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6121    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6122    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6123                                            Spacing));
6124    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6125                                            Spacing * 2));
6126    TmpInst.addOperand(Inst.getOperand(1)); // lane
6127    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6128    TmpInst.addOperand(Inst.getOperand(5));
6129    Inst = TmpInst;
6130    return true;
6131  }
6132
6133  case ARM::VLD4LNdAsm_8:
6134  case ARM::VLD4LNdAsm_16:
6135  case ARM::VLD4LNdAsm_32:
6136  case ARM::VLD4LNqAsm_16:
6137  case ARM::VLD4LNqAsm_32: {
6138    MCInst TmpInst;
6139    // Shuffle the operands around so the lane index operand is in the
6140    // right place.
6141    unsigned Spacing;
6142    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6143    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6144    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6145                                            Spacing));
6146    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6147                                            Spacing * 2));
6148    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6149                                            Spacing * 3));
6150    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6151    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6152    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6153    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6154                                            Spacing));
6155    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6156                                            Spacing * 2));
6157    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6158                                            Spacing * 3));
6159    TmpInst.addOperand(Inst.getOperand(1)); // lane
6160    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6161    TmpInst.addOperand(Inst.getOperand(5));
6162    Inst = TmpInst;
6163    return true;
6164  }
6165
6166  // VLD3DUP single 3-element structure to all lanes instructions.
6167  case ARM::VLD3DUPdAsm_8:
6168  case ARM::VLD3DUPdAsm_16:
6169  case ARM::VLD3DUPdAsm_32:
6170  case ARM::VLD3DUPqAsm_8:
6171  case ARM::VLD3DUPqAsm_16:
6172  case ARM::VLD3DUPqAsm_32: {
6173    MCInst TmpInst;
6174    unsigned Spacing;
6175    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6176    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6177    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6178                                            Spacing));
6179    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6180                                            Spacing * 2));
6181    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6182    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6183    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6184    TmpInst.addOperand(Inst.getOperand(4));
6185    Inst = TmpInst;
6186    return true;
6187  }
6188
6189  case ARM::VLD3DUPdWB_fixed_Asm_8:
6190  case ARM::VLD3DUPdWB_fixed_Asm_16:
6191  case ARM::VLD3DUPdWB_fixed_Asm_32:
6192  case ARM::VLD3DUPqWB_fixed_Asm_8:
6193  case ARM::VLD3DUPqWB_fixed_Asm_16:
6194  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6195    MCInst TmpInst;
6196    unsigned Spacing;
6197    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6198    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6199    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6200                                            Spacing));
6201    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6202                                            Spacing * 2));
6203    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6204    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6205    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6206    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6207    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6208    TmpInst.addOperand(Inst.getOperand(4));
6209    Inst = TmpInst;
6210    return true;
6211  }
6212
6213  case ARM::VLD3DUPdWB_register_Asm_8:
6214  case ARM::VLD3DUPdWB_register_Asm_16:
6215  case ARM::VLD3DUPdWB_register_Asm_32:
6216  case ARM::VLD3DUPqWB_register_Asm_8:
6217  case ARM::VLD3DUPqWB_register_Asm_16:
6218  case ARM::VLD3DUPqWB_register_Asm_32: {
6219    MCInst TmpInst;
6220    unsigned Spacing;
6221    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6222    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6223    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6224                                            Spacing));
6225    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6226                                            Spacing * 2));
6227    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6228    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6229    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6230    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6231    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6232    TmpInst.addOperand(Inst.getOperand(5));
6233    Inst = TmpInst;
6234    return true;
6235  }
6236
6237  // VLD3 multiple 3-element structure instructions.
6238  case ARM::VLD3dAsm_8:
6239  case ARM::VLD3dAsm_16:
6240  case ARM::VLD3dAsm_32:
6241  case ARM::VLD3qAsm_8:
6242  case ARM::VLD3qAsm_16:
6243  case ARM::VLD3qAsm_32: {
6244    MCInst TmpInst;
6245    unsigned Spacing;
6246    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6247    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6248    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6249                                            Spacing));
6250    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6251                                            Spacing * 2));
6252    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6253    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6254    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6255    TmpInst.addOperand(Inst.getOperand(4));
6256    Inst = TmpInst;
6257    return true;
6258  }
6259
6260  case ARM::VLD3dWB_fixed_Asm_8:
6261  case ARM::VLD3dWB_fixed_Asm_16:
6262  case ARM::VLD3dWB_fixed_Asm_32:
6263  case ARM::VLD3qWB_fixed_Asm_8:
6264  case ARM::VLD3qWB_fixed_Asm_16:
6265  case ARM::VLD3qWB_fixed_Asm_32: {
6266    MCInst TmpInst;
6267    unsigned Spacing;
6268    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6269    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6270    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6271                                            Spacing));
6272    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6273                                            Spacing * 2));
6274    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6275    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6276    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6277    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6278    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6279    TmpInst.addOperand(Inst.getOperand(4));
6280    Inst = TmpInst;
6281    return true;
6282  }
6283
6284  case ARM::VLD3dWB_register_Asm_8:
6285  case ARM::VLD3dWB_register_Asm_16:
6286  case ARM::VLD3dWB_register_Asm_32:
6287  case ARM::VLD3qWB_register_Asm_8:
6288  case ARM::VLD3qWB_register_Asm_16:
6289  case ARM::VLD3qWB_register_Asm_32: {
6290    MCInst TmpInst;
6291    unsigned Spacing;
6292    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6293    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6294    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6295                                            Spacing));
6296    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6297                                            Spacing * 2));
6298    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6299    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6300    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6301    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6302    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6303    TmpInst.addOperand(Inst.getOperand(5));
6304    Inst = TmpInst;
6305    return true;
6306  }
6307
6308  // VLD4DUP single 4-element structure to all lanes instructions.
6309  case ARM::VLD4DUPdAsm_8:
6310  case ARM::VLD4DUPdAsm_16:
6311  case ARM::VLD4DUPdAsm_32:
6312  case ARM::VLD4DUPqAsm_8:
6313  case ARM::VLD4DUPqAsm_16:
6314  case ARM::VLD4DUPqAsm_32: {
6315    MCInst TmpInst;
6316    unsigned Spacing;
6317    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6318    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6319    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6320                                            Spacing));
6321    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6322                                            Spacing * 2));
6323    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6324                                            Spacing * 3));
6325    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6326    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6327    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6328    TmpInst.addOperand(Inst.getOperand(4));
6329    Inst = TmpInst;
6330    return true;
6331  }
6332
6333  case ARM::VLD4DUPdWB_fixed_Asm_8:
6334  case ARM::VLD4DUPdWB_fixed_Asm_16:
6335  case ARM::VLD4DUPdWB_fixed_Asm_32:
6336  case ARM::VLD4DUPqWB_fixed_Asm_8:
6337  case ARM::VLD4DUPqWB_fixed_Asm_16:
6338  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6339    MCInst TmpInst;
6340    unsigned Spacing;
6341    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6342    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6343    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6344                                            Spacing));
6345    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6346                                            Spacing * 2));
6347    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6348                                            Spacing * 3));
6349    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6350    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6351    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6352    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6353    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6354    TmpInst.addOperand(Inst.getOperand(4));
6355    Inst = TmpInst;
6356    return true;
6357  }
6358
6359  case ARM::VLD4DUPdWB_register_Asm_8:
6360  case ARM::VLD4DUPdWB_register_Asm_16:
6361  case ARM::VLD4DUPdWB_register_Asm_32:
6362  case ARM::VLD4DUPqWB_register_Asm_8:
6363  case ARM::VLD4DUPqWB_register_Asm_16:
6364  case ARM::VLD4DUPqWB_register_Asm_32: {
6365    MCInst TmpInst;
6366    unsigned Spacing;
6367    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6368    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6369    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6370                                            Spacing));
6371    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6372                                            Spacing * 2));
6373    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6374                                            Spacing * 3));
6375    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6376    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6377    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6378    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6379    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6380    TmpInst.addOperand(Inst.getOperand(5));
6381    Inst = TmpInst;
6382    return true;
6383  }
6384
6385  // VLD4 multiple 4-element structure instructions.
6386  case ARM::VLD4dAsm_8:
6387  case ARM::VLD4dAsm_16:
6388  case ARM::VLD4dAsm_32:
6389  case ARM::VLD4qAsm_8:
6390  case ARM::VLD4qAsm_16:
6391  case ARM::VLD4qAsm_32: {
6392    MCInst TmpInst;
6393    unsigned Spacing;
6394    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6395    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6396    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6397                                            Spacing));
6398    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6399                                            Spacing * 2));
6400    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6401                                            Spacing * 3));
6402    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6403    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6404    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6405    TmpInst.addOperand(Inst.getOperand(4));
6406    Inst = TmpInst;
6407    return true;
6408  }
6409
6410  case ARM::VLD4dWB_fixed_Asm_8:
6411  case ARM::VLD4dWB_fixed_Asm_16:
6412  case ARM::VLD4dWB_fixed_Asm_32:
6413  case ARM::VLD4qWB_fixed_Asm_8:
6414  case ARM::VLD4qWB_fixed_Asm_16:
6415  case ARM::VLD4qWB_fixed_Asm_32: {
6416    MCInst TmpInst;
6417    unsigned Spacing;
6418    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6419    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6420    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6421                                            Spacing));
6422    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6423                                            Spacing * 2));
6424    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6425                                            Spacing * 3));
6426    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6427    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6428    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6429    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6430    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6431    TmpInst.addOperand(Inst.getOperand(4));
6432    Inst = TmpInst;
6433    return true;
6434  }
6435
6436  case ARM::VLD4dWB_register_Asm_8:
6437  case ARM::VLD4dWB_register_Asm_16:
6438  case ARM::VLD4dWB_register_Asm_32:
6439  case ARM::VLD4qWB_register_Asm_8:
6440  case ARM::VLD4qWB_register_Asm_16:
6441  case ARM::VLD4qWB_register_Asm_32: {
6442    MCInst TmpInst;
6443    unsigned Spacing;
6444    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6445    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6446    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6447                                            Spacing));
6448    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6449                                            Spacing * 2));
6450    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6451                                            Spacing * 3));
6452    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6453    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6454    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6455    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6456    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6457    TmpInst.addOperand(Inst.getOperand(5));
6458    Inst = TmpInst;
6459    return true;
6460  }
6461
6462  // VST3 multiple 3-element structure instructions.
6463  case ARM::VST3dAsm_8:
6464  case ARM::VST3dAsm_16:
6465  case ARM::VST3dAsm_32:
6466  case ARM::VST3qAsm_8:
6467  case ARM::VST3qAsm_16:
6468  case ARM::VST3qAsm_32: {
6469    MCInst TmpInst;
6470    unsigned Spacing;
6471    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6472    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6473    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6474    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6475    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6476                                            Spacing));
6477    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6478                                            Spacing * 2));
6479    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6480    TmpInst.addOperand(Inst.getOperand(4));
6481    Inst = TmpInst;
6482    return true;
6483  }
6484
6485  case ARM::VST3dWB_fixed_Asm_8:
6486  case ARM::VST3dWB_fixed_Asm_16:
6487  case ARM::VST3dWB_fixed_Asm_32:
6488  case ARM::VST3qWB_fixed_Asm_8:
6489  case ARM::VST3qWB_fixed_Asm_16:
6490  case ARM::VST3qWB_fixed_Asm_32: {
6491    MCInst TmpInst;
6492    unsigned Spacing;
6493    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6494    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6495    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6496    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6497    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6498    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6499    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6500                                            Spacing));
6501    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6502                                            Spacing * 2));
6503    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6504    TmpInst.addOperand(Inst.getOperand(4));
6505    Inst = TmpInst;
6506    return true;
6507  }
6508
6509  case ARM::VST3dWB_register_Asm_8:
6510  case ARM::VST3dWB_register_Asm_16:
6511  case ARM::VST3dWB_register_Asm_32:
6512  case ARM::VST3qWB_register_Asm_8:
6513  case ARM::VST3qWB_register_Asm_16:
6514  case ARM::VST3qWB_register_Asm_32: {
6515    MCInst TmpInst;
6516    unsigned Spacing;
6517    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6518    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6519    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6520    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6521    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6522    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6523    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6524                                            Spacing));
6525    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6526                                            Spacing * 2));
6527    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6528    TmpInst.addOperand(Inst.getOperand(5));
6529    Inst = TmpInst;
6530    return true;
6531  }
6532
6533  // VST4 multiple 4-element structure instructions.
6534  case ARM::VST4dAsm_8:
6535  case ARM::VST4dAsm_16:
6536  case ARM::VST4dAsm_32:
6537  case ARM::VST4qAsm_8:
6538  case ARM::VST4qAsm_16:
6539  case ARM::VST4qAsm_32: {
6540    MCInst TmpInst;
6541    unsigned Spacing;
6542    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6543    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6544    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6545    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6546    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6547                                            Spacing));
6548    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6549                                            Spacing * 2));
6550    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6551                                            Spacing * 3));
6552    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6553    TmpInst.addOperand(Inst.getOperand(4));
6554    Inst = TmpInst;
6555    return true;
6556  }
6557
6558  case ARM::VST4dWB_fixed_Asm_8:
6559  case ARM::VST4dWB_fixed_Asm_16:
6560  case ARM::VST4dWB_fixed_Asm_32:
6561  case ARM::VST4qWB_fixed_Asm_8:
6562  case ARM::VST4qWB_fixed_Asm_16:
6563  case ARM::VST4qWB_fixed_Asm_32: {
6564    MCInst TmpInst;
6565    unsigned Spacing;
6566    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6567    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6568    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6569    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6570    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6571    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6572    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6573                                            Spacing));
6574    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6575                                            Spacing * 2));
6576    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6577                                            Spacing * 3));
6578    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6579    TmpInst.addOperand(Inst.getOperand(4));
6580    Inst = TmpInst;
6581    return true;
6582  }
6583
6584  case ARM::VST4dWB_register_Asm_8:
6585  case ARM::VST4dWB_register_Asm_16:
6586  case ARM::VST4dWB_register_Asm_32:
6587  case ARM::VST4qWB_register_Asm_8:
6588  case ARM::VST4qWB_register_Asm_16:
6589  case ARM::VST4qWB_register_Asm_32: {
6590    MCInst TmpInst;
6591    unsigned Spacing;
6592    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6593    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6594    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6595    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6596    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6597    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6598    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6599                                            Spacing));
6600    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6601                                            Spacing * 2));
6602    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6603                                            Spacing * 3));
6604    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6605    TmpInst.addOperand(Inst.getOperand(5));
6606    Inst = TmpInst;
6607    return true;
6608  }
6609
6610  // Handle the Thumb2 mode MOV complex aliases.
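  // The shifted-MOV pseudos below can use either the 16-bit or the 32-bit
  // Thumb2 encoding. The 16-bit shift instructions are only available for
  // low registers (and require Rd == Rn for the register-shifted forms) and
  // set the flags exactly when used outside an IT block, so the narrow
  // encoding is picked only when that matches the flag-setting behaviour of
  // the matched pseudo (t2MOVSs* sets flags, t2MOVs* does not).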
6611  case ARM::t2MOVsr:
6612  case ARM::t2MOVSsr: {
6613    // Which instruction to expand to depends on the CCOut operand, on
6614    // whether we're in an IT block, and on whether the register operands
6615    // are low registers.
6616    bool isNarrow = false;
6617    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6618        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6619        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6620        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6621        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6622      isNarrow = true;
6623    MCInst TmpInst;
6624    unsigned newOpc;
6625    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6626    default: llvm_unreachable("unexpected opcode!");
6627    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6628    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6629    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6630    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6631    }
6632    TmpInst.setOpcode(newOpc);
6633    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6634    if (isNarrow)
6635      TmpInst.addOperand(MCOperand::CreateReg(
6636          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6637    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6638    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6639    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6640    TmpInst.addOperand(Inst.getOperand(5));
6641    if (!isNarrow)
6642      TmpInst.addOperand(MCOperand::CreateReg(
6643          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6644    Inst = TmpInst;
6645    return true;
6646  }
6647  case ARM::t2MOVsi:
6648  case ARM::t2MOVSsi: {
6649    // Which instruction to expand to depends on the CCOut operand, on
6650    // whether we're in an IT block, and on whether the register operands
6651    // are low registers.
6652    bool isNarrow = false;
6653    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6654        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6655        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6656      isNarrow = true;
6657    MCInst TmpInst;
6658    unsigned newOpc;
6659    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6660    default: llvm_unreachable("unexpected opcode!");
6661    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6662    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6663    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6664    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6665    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6666    }
6667    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6668    if (Amount == 32) Amount = 0;
6669    TmpInst.setOpcode(newOpc);
6670    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6671    if (isNarrow)
6672      TmpInst.addOperand(MCOperand::CreateReg(
6673          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6674    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6675    if (newOpc != ARM::t2RRX)
6676      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6677    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6678    TmpInst.addOperand(Inst.getOperand(4));
6679    if (!isNarrow)
6680      TmpInst.addOperand(MCOperand::CreateReg(
6681          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6682    Inst = TmpInst;
6683    return true;
6684  }
6685  // Handle the ARM mode MOV complex aliases.
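  // For example, "asr r0, r1, r2" assembles as "mov r0, r1, asr r2" (MOVsr),
  // and "lsr r0, r1, #2" as "mov r0, r1, lsr #2" (MOVsi).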
6686  case ARM::ASRr:
6687  case ARM::LSRr:
6688  case ARM::LSLr:
6689  case ARM::RORr: {
6690    ARM_AM::ShiftOpc ShiftTy;
6691    switch(Inst.getOpcode()) {
6692    default: llvm_unreachable("unexpected opcode!");
6693    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6694    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6695    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6696    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6697    }
6698    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6699    MCInst TmpInst;
6700    TmpInst.setOpcode(ARM::MOVsr);
6701    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6702    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6703    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6704    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6705    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6706    TmpInst.addOperand(Inst.getOperand(4));
6707    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6708    Inst = TmpInst;
6709    return true;
6710  }
6711  case ARM::ASRi:
6712  case ARM::LSRi:
6713  case ARM::LSLi:
6714  case ARM::RORi: {
6715    ARM_AM::ShiftOpc ShiftTy;
6716    switch(Inst.getOpcode()) {
6717    default: llvm_unreachable("unexpected opcode!");
6718    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6719    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6720    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6721    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6722    }
6723    // A shift by zero is a plain MOVr, not a MOVsi.
6724    unsigned Amt = Inst.getOperand(2).getImm();
6725    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6726    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6727    MCInst TmpInst;
6728    TmpInst.setOpcode(Opc);
6729    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6730    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6731    if (Opc == ARM::MOVsi)
6732      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6733    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6734    TmpInst.addOperand(Inst.getOperand(4));
6735    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6736    Inst = TmpInst;
6737    return true;
6738  }
6739  case ARM::RRXi: {
6740    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6741    MCInst TmpInst;
6742    TmpInst.setOpcode(ARM::MOVsi);
6743    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6744    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6745    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6746    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6747    TmpInst.addOperand(Inst.getOperand(3));
6748    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6749    Inst = TmpInst;
6750    return true;
6751  }
6752  case ARM::t2LDMIA_UPD: {
6753    // If this is a load of a single register, then we should use
6754    // a post-indexed LDR instruction instead, per the ARM ARM.
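    // For example, "ldmia r2!, {r3}" is equivalent to "ldr r3, [r2], #4".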
6755    if (Inst.getNumOperands() != 5)
6756      return false;
6757    MCInst TmpInst;
6758    TmpInst.setOpcode(ARM::t2LDR_POST);
6759    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6760    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6761    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6762    TmpInst.addOperand(MCOperand::CreateImm(4));
6763    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6764    TmpInst.addOperand(Inst.getOperand(3));
6765    Inst = TmpInst;
6766    return true;
6767  }
6768  case ARM::t2STMDB_UPD: {
6769    // If this is a store of a single register, then we should use
6770    // a pre-indexed STR instruction instead, per the ARM ARM.
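    // For example, "stmdb r2!, {r3}" is equivalent to "str r3, [r2, #-4]!".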
6771    if (Inst.getNumOperands() != 5)
6772      return false;
6773    MCInst TmpInst;
6774    TmpInst.setOpcode(ARM::t2STR_PRE);
6775    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6776    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6777    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6778    TmpInst.addOperand(MCOperand::CreateImm(-4));
6779    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6780    TmpInst.addOperand(Inst.getOperand(3));
6781    Inst = TmpInst;
6782    return true;
6783  }
6784  case ARM::LDMIA_UPD:
6785    // If this is a load of a single register via a 'pop', then we should use
6786    // a post-indexed LDR instruction instead, per the ARM ARM.
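    // For example, "pop {r3}" is equivalent to "ldr r3, [sp], #4".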
6787    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6788        Inst.getNumOperands() == 5) {
6789      MCInst TmpInst;
6790      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6791      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6792      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6793      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6794      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6795      TmpInst.addOperand(MCOperand::CreateImm(4));
6796      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6797      TmpInst.addOperand(Inst.getOperand(3));
6798      Inst = TmpInst;
6799      return true;
6800    }
6801    break;
6802  case ARM::STMDB_UPD:
6803    // If this is a store of a single register via a 'push', then we should use
6804    // a pre-indexed STR instruction instead, per the ARM ARM.
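    // For example, "push {r3}" is equivalent to "str r3, [sp, #-4]!".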
6805    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6806        Inst.getNumOperands() == 5) {
6807      MCInst TmpInst;
6808      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6809      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6810      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6811      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6812      TmpInst.addOperand(MCOperand::CreateImm(-4));
6813      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6814      TmpInst.addOperand(Inst.getOperand(3));
6815      Inst = TmpInst;
6816    }
6817    break;
6818  case ARM::t2ADDri12:
6819    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6820    // mnemonic was used (not "addw"), encoding T3 is preferred.
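    // For example, "add r0, r1, #255" can use T3 (255 is a valid modified
    // immediate), while "add r0, r1, #4095" and anything written as "addw"
    // keep the 12-bit immediate encoding.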
6821    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6822        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6823      break;
6824    Inst.setOpcode(ARM::t2ADDri);
6825    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6826    break;
6827  case ARM::t2SUBri12:
6828    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6829    // mnemonic was used (not "subw"), encoding T3 is preferred.
6830    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6831        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6832      break;
6833    Inst.setOpcode(ARM::t2SUBri);
6834    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6835    break;
6836  case ARM::tADDi8:
6837    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6838    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6839    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6840    // to encoding T1 if <Rd> is omitted."
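    // For example, "adds r1, r1, #3" (Rd written out) prefers tADDi3, while
    // "adds r1, #3" keeps the 8-bit immediate encoding.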
6841    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6842      Inst.setOpcode(ARM::tADDi3);
6843      return true;
6844    }
6845    break;
6846  case ARM::tSUBi8:
6847    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6848    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6849    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6850    // to encoding T1 if <Rd> is omitted."
6851    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6852      Inst.setOpcode(ARM::tSUBi3);
6853      return true;
6854    }
6855    break;
6856  case ARM::t2ADDri:
6857  case ARM::t2SUBri: {
6858    // If the destination and first source operand are the same, and
6859    // the flags are compatible with the current IT status, use encoding T2
6860    // instead of T3. For compatibility with the system 'as'. Make sure the
6861    // wide encoding wasn't explicit.
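    // For example, outside an IT block "adds r0, r0, #1" narrows to the
    // 16-bit tADDi8 encoding unless ".w" was written.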
6862    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6863        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
6864        (unsigned)Inst.getOperand(2).getImm() > 255 ||
6865        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
6866        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
6867        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6868         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6869      break;
6870    MCInst TmpInst;
6871    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
6872                      ARM::tADDi8 : ARM::tSUBi8);
6873    TmpInst.addOperand(Inst.getOperand(0));
6874    TmpInst.addOperand(Inst.getOperand(5));
6875    TmpInst.addOperand(Inst.getOperand(0));
6876    TmpInst.addOperand(Inst.getOperand(2));
6877    TmpInst.addOperand(Inst.getOperand(3));
6878    TmpInst.addOperand(Inst.getOperand(4));
6879    Inst = TmpInst;
6880    return true;
6881  }
6882  case ARM::t2ADDrr: {
6883    // If the destination and first source operand are the same, and
6884    // there's no setting of the flags, use encoding T2 instead of T3.
6885    // Note that this is only for ADD, not SUB. This mirrors the system
6886    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
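    // For example, "add r0, r0, r8" (no flag setting) narrows to the 16-bit
    // tADDhirr encoding.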
6887    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6888        Inst.getOperand(5).getReg() != 0 ||
6889        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6890         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6891      break;
6892    MCInst TmpInst;
6893    TmpInst.setOpcode(ARM::tADDhirr);
6894    TmpInst.addOperand(Inst.getOperand(0));
6895    TmpInst.addOperand(Inst.getOperand(0));
6896    TmpInst.addOperand(Inst.getOperand(2));
6897    TmpInst.addOperand(Inst.getOperand(3));
6898    TmpInst.addOperand(Inst.getOperand(4));
6899    Inst = TmpInst;
6900    return true;
6901  }
6902  case ARM::tB:
6903    // A Thumb conditional branch outside of an IT block is a tBcc.
6904    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6905      Inst.setOpcode(ARM::tBcc);
6906      return true;
6907    }
6908    break;
6909  case ARM::t2B:
6910    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6911    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6912      Inst.setOpcode(ARM::t2Bcc);
6913      return true;
6914    }
6915    break;
6916  case ARM::t2Bcc:
6917    // If the conditional is AL or we're in an IT block, we really want t2B.
6918    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6919      Inst.setOpcode(ARM::t2B);
6920      return true;
6921    }
6922    break;
6923  case ARM::tBcc:
6924    // If the conditional is AL, we really want tB.
6925    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6926      Inst.setOpcode(ARM::tB);
6927      return true;
6928    }
6929    break;
6930  case ARM::tLDMIA: {
6931    // If the register list contains any high registers, or if the writeback
6932    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6933    // instead if we're in Thumb2. Otherwise, this should have generated
6934    // an error in validateInstruction().
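    // For example, "ldmia r0, {r1, r8}" cannot use the 16-bit encoding
    // because r8 is a high register.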
6935    unsigned Rn = Inst.getOperand(0).getReg();
6936    bool hasWritebackToken =
6937      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6938       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6939    bool listContainsBase;
6940    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6941        (!listContainsBase && !hasWritebackToken) ||
6942        (listContainsBase && hasWritebackToken)) {
6943      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6944      assert(isThumbTwo());
6945      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6946      // If we're switching to the updating version, we need to insert
6947      // the writeback tied operand.
6948      if (hasWritebackToken)
6949        Inst.insert(Inst.begin(),
6950                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6951      return true;
6952    }
6953    break;
6954  }
6955  case ARM::tSTMIA_UPD: {
6956    // If the register list contains any high registers, we need to use
6957    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6958    // should have generated an error in validateInstruction().
6959    unsigned Rn = Inst.getOperand(0).getReg();
6960    bool listContainsBase;
6961    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6962      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6963      assert(isThumbTwo());
6964      Inst.setOpcode(ARM::t2STMIA_UPD);
6965      return true;
6966    }
6967    break;
6968  }
6969  case ARM::tPOP: {
6970    bool listContainsBase;
6971    // If the register list contains any high registers, we need to use
6972    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6973    // should have generated an error in validateInstruction().
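    // For example, "pop {r0, r8}" becomes "ldmia.w sp!, {r0, r8}" since the
    // 16-bit POP encoding only covers r0-r7 and pc.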
6974    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6975      return false;
6976    assert(isThumbTwo());
6977    Inst.setOpcode(ARM::t2LDMIA_UPD);
6978    // Add the base register and writeback operands.
6979    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6980    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6981    return true;
6982  }
6983  case ARM::tPUSH: {
6984    bool listContainsBase;
6985    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6986      return false;
6987    assert(isThumbTwo());
6988    Inst.setOpcode(ARM::t2STMDB_UPD);
6989    // Add the base register and writeback operands.
6990    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6991    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6992    return true;
6993  }
6994  case ARM::t2MOVi: {
6995    // If we can use the 16-bit encoding and the user didn't explicitly
6996    // request the 32-bit variant, transform it here.
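    // For example, "movs r0, #42" outside an IT block becomes the 16-bit
    // flag-setting tMOVi8.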
6997    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6998        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
6999        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7000         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7001        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7002        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7003         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7004      // The operands aren't in the same order for tMOVi8...
7005      MCInst TmpInst;
7006      TmpInst.setOpcode(ARM::tMOVi8);
7007      TmpInst.addOperand(Inst.getOperand(0));
7008      TmpInst.addOperand(Inst.getOperand(4));
7009      TmpInst.addOperand(Inst.getOperand(1));
7010      TmpInst.addOperand(Inst.getOperand(2));
7011      TmpInst.addOperand(Inst.getOperand(3));
7012      Inst = TmpInst;
7013      return true;
7014    }
7015    break;
7016  }
7017  case ARM::t2MOVr: {
7018    // If we can use the 16-bit encoding and the user didn't explicitly
7019    // request the 32-bit variant, transform it here.
7020    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7021        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7022        Inst.getOperand(2).getImm() == ARMCC::AL &&
7023        Inst.getOperand(4).getReg() == ARM::CPSR &&
7024        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7025         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7026      // The operands aren't the same for tMOV[S]r... (no cc_out)
7027      MCInst TmpInst;
7028      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7029      TmpInst.addOperand(Inst.getOperand(0));
7030      TmpInst.addOperand(Inst.getOperand(1));
7031      TmpInst.addOperand(Inst.getOperand(2));
7032      TmpInst.addOperand(Inst.getOperand(3));
7033      Inst = TmpInst;
7034      return true;
7035    }
7036    break;
7037  }
7038  case ARM::t2SXTH:
7039  case ARM::t2SXTB:
7040  case ARM::t2UXTH:
7041  case ARM::t2UXTB: {
7042    // If we can use the 16-bit encoding and the user didn't explicitly
7043    // request the 32-bit variant, transform it here.
7044    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7045        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7046        Inst.getOperand(2).getImm() == 0 &&
7047        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7048         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7049      unsigned NewOpc;
7050      switch (Inst.getOpcode()) {
7051      default: llvm_unreachable("Illegal opcode!");
7052      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7053      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7054      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7055      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7056      }
7057      // The operands aren't the same for thumb1 (no rotate operand).
7058      MCInst TmpInst;
7059      TmpInst.setOpcode(NewOpc);
7060      TmpInst.addOperand(Inst.getOperand(0));
7061      TmpInst.addOperand(Inst.getOperand(1));
7062      TmpInst.addOperand(Inst.getOperand(3));
7063      TmpInst.addOperand(Inst.getOperand(4));
7064      Inst = TmpInst;
7065      return true;
7066    }
7067    break;
7068  }
7069  case ARM::MOVsi: {
7070    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7071    if (SOpc == ARM_AM::rrx) return false;
7072    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7073      // Shifting by zero is accepted as a vanilla 'MOVr'
7074      MCInst TmpInst;
7075      TmpInst.setOpcode(ARM::MOVr);
7076      TmpInst.addOperand(Inst.getOperand(0));
7077      TmpInst.addOperand(Inst.getOperand(1));
7078      TmpInst.addOperand(Inst.getOperand(3));
7079      TmpInst.addOperand(Inst.getOperand(4));
7080      TmpInst.addOperand(Inst.getOperand(5));
7081      Inst = TmpInst;
7082      return true;
7083    }
7084    return false;
7085  }
7086  case ARM::ANDrsi:
7087  case ARM::ORRrsi:
7088  case ARM::EORrsi:
7089  case ARM::BICrsi:
7090  case ARM::SUBrsi:
7091  case ARM::ADDrsi: {
7092    unsigned newOpc;
7093    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7094    if (SOpc == ARM_AM::rrx) return false;
7095    switch (Inst.getOpcode()) {
7096    default: llvm_unreachable("unexpected opcode!");
7097    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7098    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7099    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7100    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7101    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7102    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7103    }
7104    // If the shift is by zero, use the non-shifted instruction definition.
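    // For example, "and r0, r1, r2, lsl #0" is just "and r0, r1, r2" (ANDrr).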
7105    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7106      MCInst TmpInst;
7107      TmpInst.setOpcode(newOpc);
7108      TmpInst.addOperand(Inst.getOperand(0));
7109      TmpInst.addOperand(Inst.getOperand(1));
7110      TmpInst.addOperand(Inst.getOperand(2));
7111      TmpInst.addOperand(Inst.getOperand(4));
7112      TmpInst.addOperand(Inst.getOperand(5));
7113      TmpInst.addOperand(Inst.getOperand(6));
7114      Inst = TmpInst;
7115      return true;
7116    }
7117    return false;
7118  }
7119  case ARM::ITasm:
7120  case ARM::t2IT: {
7121    // In the parsed mask, the bits for all but the first condition are
7122    // relative to the low bit of the condition code: we currently always
7123    // use '1' to mean 't', so XOR-toggle the bits if the low bit of the
7124    // condition code is zero. The encoding also expects the low bit of the
7125    // condition to be encoded as bit 4 of the mask operand, so mask that
7126    // in if needed.
7127    MCOperand &MO = Inst.getOperand(1);
7128    unsigned Mask = MO.getImm();
7129    unsigned OrigMask = Mask;
7130    unsigned TZ = CountTrailingZeros_32(Mask);
7131    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7132      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7133      for (unsigned i = 3; i != TZ; --i)
7134        Mask ^= 1 << i;
7135    } else
7136      Mask |= 0x10;
7137    MO.setImm(Mask);
7138
7139    // Set up the IT block state according to the IT instruction we just
7140    // matched.
7141    assert(!inITBlock() && "nested IT blocks?!");
7142    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7143    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7144    ITState.CurPosition = 0;
7145    ITState.FirstCond = true;
7146    break;
7147  }
7148  }
7149  return false;
7150}
7151
7152unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7153  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7154  // suffix depending on whether they're in an IT block or not.
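  // For example, in Thumb2 the 16-bit "adds r0, r1, #1" is only valid outside
  // an IT block; inside one, the non-flag-setting "add" form must be used.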
7155  unsigned Opc = Inst.getOpcode();
7156  const MCInstrDesc &MCID = getInstDesc(Opc);
7157  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7158    assert(MCID.hasOptionalDef() &&
7159           "optionally flag setting instruction missing optional def operand");
7160    assert(MCID.NumOperands == Inst.getNumOperands() &&
7161           "operand count mismatch!");
7162    // Find the optional-def operand (cc_out).
7163    unsigned OpNo;
7164    for (OpNo = 0;
7165         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7166         ++OpNo)
7167      ;
7168    // If we're parsing Thumb1, reject it completely.
7169    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7170      return Match_MnemonicFail;
7171    // If we're parsing Thumb2, which form is legal depends on whether we're
7172    // in an IT block.
7173    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7174        !inITBlock())
7175      return Match_RequiresITBlock;
7176    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7177        inITBlock())
7178      return Match_RequiresNotITBlock;
7179  }
7180  // Some high-register supporting Thumb1 encodings only allow both registers
7181  // to be from r0-r7 when in Thumb2.
7182  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7183           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7184           isARMLowRegister(Inst.getOperand(2).getReg()))
7185    return Match_RequiresThumb2;
7186  // Others only require ARMv6 or later.
7187  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7188           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7189           isARMLowRegister(Inst.getOperand(1).getReg()))
7190    return Match_RequiresV6;
7191  return Match_Success;
7192}
7193
7194bool ARMAsmParser::
7195MatchAndEmitInstruction(SMLoc IDLoc,
7196                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7197                        MCStreamer &Out) {
7198  MCInst Inst;
7199  unsigned ErrorInfo;
7200  unsigned MatchResult;
7201  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7202  switch (MatchResult) {
7203  default: break;
7204  case Match_Success:
7205    // Context-sensitive operand constraints aren't handled by the matcher,
7206    // so check them here.
7207    if (validateInstruction(Inst, Operands)) {
7208      // Still progress the IT block, otherwise one wrong condition causes
7209      // nasty cascading errors.
7210      forwardITPosition();
7211      return true;
7212    }
7213
7214    // Some instructions need post-processing to, for example, tweak which
7215    // encoding is selected. Loop on it while changes happen so the
7216    // individual transformations can chain off each other. E.g.,
7217    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7218    while (processInstruction(Inst, Operands))
7219      ;
7220
7221    // Only move forward at the very end so that everything in validate
7222    // and process gets a consistent answer about whether we're in an IT
7223    // block.
7224    forwardITPosition();
7225
7226    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7227    // doesn't actually encode.
7228    if (Inst.getOpcode() == ARM::ITasm)
7229      return false;
7230
7231    Inst.setLoc(IDLoc);
7232    Out.EmitInstruction(Inst);
7233    return false;
7234  case Match_MissingFeature:
7235    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7236    return true;
7237  case Match_InvalidOperand: {
7238    SMLoc ErrorLoc = IDLoc;
7239    if (ErrorInfo != ~0U) {
7240      if (ErrorInfo >= Operands.size())
7241        return Error(IDLoc, "too few operands for instruction");
7242
7243      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7244      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7245    }
7246
7247    return Error(ErrorLoc, "invalid operand for instruction");
7248  }
7249  case Match_MnemonicFail:
7250    return Error(IDLoc, "invalid instruction");
7251  case Match_ConversionFail:
7252    // The converter function will have already emitted a diagnostic.
7253    return true;
7254  case Match_RequiresNotITBlock:
7255    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7256  case Match_RequiresITBlock:
7257    return Error(IDLoc, "instruction only valid inside IT block");
7258  case Match_RequiresV6:
7259    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7260  case Match_RequiresThumb2:
7261    return Error(IDLoc, "instruction variant requires Thumb2");
7262  }
7263
7264  llvm_unreachable("Implement any new match types added!");
7265}
7266
7267/// ParseDirective parses the ARM-specific directives.
7268bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7269  StringRef IDVal = DirectiveID.getIdentifier();
7270  if (IDVal == ".word")
7271    return parseDirectiveWord(4, DirectiveID.getLoc());
7272  else if (IDVal == ".thumb")
7273    return parseDirectiveThumb(DirectiveID.getLoc());
7274  else if (IDVal == ".arm")
7275    return parseDirectiveARM(DirectiveID.getLoc());
7276  else if (IDVal == ".thumb_func")
7277    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7278  else if (IDVal == ".code")
7279    return parseDirectiveCode(DirectiveID.getLoc());
7280  else if (IDVal == ".syntax")
7281    return parseDirectiveSyntax(DirectiveID.getLoc());
7282  else if (IDVal == ".unreq")
7283    return parseDirectiveUnreq(DirectiveID.getLoc());
7284  else if (IDVal == ".arch")
7285    return parseDirectiveArch(DirectiveID.getLoc());
7286  else if (IDVal == ".eabi_attribute")
7287    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7288  return true;
7289}
7290
7291/// parseDirectiveWord
7292///  ::= .word [ expression (, expression)* ]
7293bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7294  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7295    for (;;) {
7296      const MCExpr *Value;
7297      if (getParser().ParseExpression(Value))
7298        return true;
7299
7300      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7301
7302      if (getLexer().is(AsmToken::EndOfStatement))
7303        break;
7304
7305      // FIXME: Improve diagnostic.
7306      if (getLexer().isNot(AsmToken::Comma))
7307        return Error(L, "unexpected token in directive");
7308      Parser.Lex();
7309    }
7310  }
7311
7312  Parser.Lex();
7313  return false;
7314}
7315
7316/// parseDirectiveThumb
7317///  ::= .thumb
7318bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7319  if (getLexer().isNot(AsmToken::EndOfStatement))
7320    return Error(L, "unexpected token in directive");
7321  Parser.Lex();
7322
7323  if (!isThumb())
7324    SwitchMode();
7325  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7326  return false;
7327}
7328
7329/// parseDirectiveARM
7330///  ::= .arm
7331bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7332  if (getLexer().isNot(AsmToken::EndOfStatement))
7333    return Error(L, "unexpected token in directive");
7334  Parser.Lex();
7335
7336  if (isThumb())
7337    SwitchMode();
7338  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7339  return false;
7340}
7341
7342/// parseDirectiveThumbFunc
7343///  ::= .thumb_func symbol_name
7344bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7345  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7346  bool isMachO = MAI.hasSubsectionsViaSymbols();
7347  StringRef Name;
7348  bool needFuncName = true;
7349
7350  // Darwin asm has an optional function name after the .thumb_func directive;
7351  // ELF doesn't.
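  // For example, Darwin accepts ".thumb_func _foo", while for ELF the
  // directive stands alone and applies to the label that follows.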
7352  if (isMachO) {
7353    const AsmToken &Tok = Parser.getTok();
7354    if (Tok.isNot(AsmToken::EndOfStatement)) {
7355      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7356        return Error(L, "unexpected token in .thumb_func directive");
7357      Name = Tok.getIdentifier();
7358      Parser.Lex(); // Consume the identifier token.
7359      needFuncName = false;
7360    }
7361  }
7362
7363  if (getLexer().isNot(AsmToken::EndOfStatement))
7364    return Error(L, "unexpected token in directive");
7365
7366  // Eat the end of statement and any blank lines that follow.
7367  while (getLexer().is(AsmToken::EndOfStatement))
7368    Parser.Lex();
7369
7370  // FIXME: assuming the function name is on the line following .thumb_func.
7371  // We really should be checking the next symbol definition even if there's
7372  // stuff in between.
7373  if (needFuncName) {
7374    Name = Parser.getTok().getIdentifier();
7375  }
7376
7377  // Mark symbol as a thumb symbol.
7378  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7379  getParser().getStreamer().EmitThumbFunc(Func);
7380  return false;
7381}
7382
7383/// parseDirectiveSyntax
7384///  ::= .syntax unified | divided
7385bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7386  const AsmToken &Tok = Parser.getTok();
7387  if (Tok.isNot(AsmToken::Identifier))
7388    return Error(L, "unexpected token in .syntax directive");
7389  StringRef Mode = Tok.getString();
7390  if (Mode == "unified" || Mode == "UNIFIED")
7391    Parser.Lex();
7392  else if (Mode == "divided" || Mode == "DIVIDED")
7393    return Error(L, "'.syntax divided' arm assembly not supported");
7394  else
7395    return Error(L, "unrecognized syntax mode in .syntax directive");
7396
7397  if (getLexer().isNot(AsmToken::EndOfStatement))
7398    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7399  Parser.Lex();
7400
7401  // TODO tell the MC streamer the mode
7402  // getParser().getStreamer().Emit???();
7403  return false;
7404}
7405
7406/// parseDirectiveCode
7407///  ::= .code 16 | 32
7408bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7409  const AsmToken &Tok = Parser.getTok();
7410  if (Tok.isNot(AsmToken::Integer))
7411    return Error(L, "unexpected token in .code directive");
7412  int64_t Val = Parser.getTok().getIntVal();
7413  if (Val == 16)
7414    Parser.Lex();
7415  else if (Val == 32)
7416    Parser.Lex();
7417  else
7418    return Error(L, "invalid operand to .code directive");
7419
7420  if (getLexer().isNot(AsmToken::EndOfStatement))
7421    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7422  Parser.Lex();
7423
7424  if (Val == 16) {
7425    if (!isThumb())
7426      SwitchMode();
7427    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7428  } else {
7429    if (isThumb())
7430      SwitchMode();
7431    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7432  }
7433
7434  return false;
7435}
7436
7437/// parseDirectiveReq
7438///  ::= name .req registername
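/// For example, "fp .req r11" makes "fp" usable as an alias for r11 until a
/// matching ".unreq fp".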
7439bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7440  Parser.Lex(); // Eat the '.req' token.
7441  unsigned Reg;
7442  SMLoc SRegLoc, ERegLoc;
7443  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7444    Parser.EatToEndOfStatement();
7445    return Error(SRegLoc, "register name expected");
7446  }
7447
7448  // Shouldn't be anything else.
7449  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7450    Parser.EatToEndOfStatement();
7451    return Error(Parser.getTok().getLoc(),
7452                 "unexpected input in .req directive.");
7453  }
7454
7455  Parser.Lex(); // Consume the EndOfStatement
7456
7457  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7458    return Error(SRegLoc, "redefinition of '" + Name +
7459                          "' does not match original.");
7460
7461  return false;
7462}
7463
7464/// parseDirectiveUneq
7465///  ::= .unreq registername
7466bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7467  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7468    Parser.EatToEndOfStatement();
7469    return Error(L, "unexpected input in .unreq directive.");
7470  }
7471  RegisterReqs.erase(Parser.getTok().getIdentifier());
7472  Parser.Lex(); // Eat the identifier.
7473  return false;
7474}
7475
7476/// parseDirectiveArch
7477///  ::= .arch token
7478bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7479  return true;
7480}
7481
7482/// parseDirectiveEabiAttr
7483///  ::= .eabi_attribute int, int
7484bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7485  return true;
7486}
7487
7488extern "C" void LLVMInitializeARMAsmLexer();
7489
7490/// Force static initialization.
7491extern "C" void LLVMInitializeARMAsmParser() {
7492  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7493  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7494  LLVMInitializeARMAsmLexer();
7495}
7496
7497#define GET_REGISTER_MATCHER
7498#define GET_MATCHER_IMPLEMENTATION
7499#include "ARMGenAsmMatcher.inc"
7500