ARMAsmParser.cpp revision b22e70d835a88753d3ec6d5ee5e85b23fa6834b1
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registered via the .req directive.
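  // (For illustration: after "fp .req r11", "fp" can be written wherever r11
  // would be accepted, until a matching ".unreq fp".)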
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
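                              // (e.g. an "itte" block covers 3 instructions,
                              // so the lowest set bit of the mask is bit 1
                              // and trailingzeroes(mask) == 1.)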
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // first instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
86  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
87
88  int tryParseRegister();
89  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
90  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
93  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
94  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
95  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
96                              unsigned &ShiftAmount);
97  bool parseDirectiveWord(unsigned Size, SMLoc L);
98  bool parseDirectiveThumb(SMLoc L);
99  bool parseDirectiveARM(SMLoc L);
100  bool parseDirectiveThumbFunc(SMLoc L);
101  bool parseDirectiveCode(SMLoc L);
102  bool parseDirectiveSyntax(SMLoc L);
103  bool parseDirectiveReq(StringRef Name, SMLoc L);
104  bool parseDirectiveUnreq(SMLoc L);
105  bool parseDirectiveArch(SMLoc L);
106  bool parseDirectiveEabiAttr(SMLoc L);
107
108  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
109                          bool &CarrySetting, unsigned &ProcessorIMod,
110                          StringRef &ITMask);
111  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
112                             bool &CanAcceptPredicationCode);
113
114  bool isThumb() const {
115    // FIXME: Can tablegen auto-generate this?
116    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
117  }
118  bool isThumbOne() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
120  }
121  bool isThumbTwo() const {
122    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
123  }
124  bool hasV6Ops() const {
125    return STI.getFeatureBits() & ARM::HasV6Ops;
126  }
127  bool hasV7Ops() const {
128    return STI.getFeatureBits() & ARM::HasV7Ops;
129  }
130  void SwitchMode() {
131    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
132    setAvailableFeatures(FB);
133  }
134  bool isMClass() const {
135    return STI.getFeatureBits() & ARM::FeatureMClass;
136  }
137
138  /// @name Auto-generated Match Functions
139  /// {
140
141#define GET_ASSEMBLER_HEADER
142#include "ARMGenAsmMatcher.inc"
143
144  /// }
145
146  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
147  OperandMatchResultTy parseCoprocNumOperand(
148    SmallVectorImpl<MCParsedAsmOperand*>&);
149  OperandMatchResultTy parseCoprocRegOperand(
150    SmallVectorImpl<MCParsedAsmOperand*>&);
151  OperandMatchResultTy parseCoprocOptionOperand(
152    SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseMemBarrierOptOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseProcIFlagsOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseMSRMaskOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
160                                   StringRef Op, int Low, int High);
161  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "lsl", 0, 31);
163  }
164  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
165    return parsePKHImm(O, "asr", 1, 32);
166  }
167  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
176
177  // Asm Match Converter Methods
178  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
179                    const SmallVectorImpl<MCParsedAsmOperand*> &);
180  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
181                    const SmallVectorImpl<MCParsedAsmOperand*> &);
182  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
183                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
184  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
185                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
187                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
197                             const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
199                             const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
201                             const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
205                  const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
207                  const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
209                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
211                        const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
213                     const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
215                        const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
217                     const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
219                        const SmallVectorImpl<MCParsedAsmOperand*> &);
220
221  bool validateInstruction(MCInst &Inst,
222                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
223  bool processInstruction(MCInst &Inst,
224                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
225  bool shouldOmitCCOutOperand(StringRef Mnemonic,
226                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
227
228public:
229  enum ARMMatchResultTy {
230    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
231    Match_RequiresNotITBlock,
232    Match_RequiresV6,
233    Match_RequiresThumb2
234  };
235
236  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
237    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
238    MCAsmParserExtension::Initialize(_Parser);
239
240    // Cache the MCRegisterInfo.
241    MRI = &getContext().getRegisterInfo();
242
243    // Initialize the set of available features.
244    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
245
246    // Not in an ITBlock to start with.
247    ITState.CurPosition = ~0U;
248  }
249
250  // Implementation of the MCTargetAsmParser interface:
251  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
252  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
253                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
254  bool ParseDirective(AsmToken DirectiveID);
255
256  unsigned checkTargetMatchPredicate(MCInst &Inst);
257
258  bool MatchAndEmitInstruction(SMLoc IDLoc,
259                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
260                               MCStreamer &Out);
261};
262} // end anonymous namespace
263
264namespace {
265
266/// ARMOperand - Instances of this class represent a parsed ARM machine
267/// instruction.
268class ARMOperand : public MCParsedAsmOperand {
269  enum KindTy {
270    k_CondCode,
271    k_CCOut,
272    k_ITCondMask,
273    k_CoprocNum,
274    k_CoprocReg,
275    k_CoprocOption,
276    k_Immediate,
277    k_MemBarrierOpt,
278    k_Memory,
279    k_PostIndexRegister,
280    k_MSRMask,
281    k_ProcIFlags,
282    k_VectorIndex,
283    k_Register,
284    k_RegisterList,
285    k_DPRRegisterList,
286    k_SPRRegisterList,
287    k_VectorList,
288    k_VectorListAllLanes,
289    k_VectorListIndexed,
290    k_ShiftedRegister,
291    k_ShiftedImmediate,
292    k_ShifterImmediate,
293    k_RotateImmediate,
294    k_BitfieldDescriptor,
295    k_Token
296  } Kind;
297
298  SMLoc StartLoc, EndLoc;
299  SmallVector<unsigned, 8> Registers;
300
301  union {
302    struct {
303      ARMCC::CondCodes Val;
304    } CC;
305
306    struct {
307      unsigned Val;
308    } Cop;
309
310    struct {
311      unsigned Val;
312    } CoprocOption;
313
314    struct {
315      unsigned Mask:4;
316    } ITMask;
317
318    struct {
319      ARM_MB::MemBOpt Val;
320    } MBOpt;
321
322    struct {
323      ARM_PROC::IFlags Val;
324    } IFlags;
325
326    struct {
327      unsigned Val;
328    } MMask;
329
330    struct {
331      const char *Data;
332      unsigned Length;
333    } Tok;
334
335    struct {
336      unsigned RegNum;
337    } Reg;
338
339    // A vector register list is a sequential list of 1 to 4 registers.
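    // (e.g. "{d0, d1, d2}" is a list with Count == 3 starting at d0; a list
    // written "{d0, d2, d4}" is the double-spaced form.)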
340    struct {
341      unsigned RegNum;
342      unsigned Count;
343      unsigned LaneIndex;
344      bool isDoubleSpaced;
345    } VectorList;
346
347    struct {
348      unsigned Val;
349    } VectorIndex;
350
351    struct {
352      const MCExpr *Val;
353    } Imm;
354
355    /// Combined record for all forms of ARM address expressions.
356    struct {
357      unsigned BaseRegNum;
358      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
359      // was specified.
360      const MCConstantExpr *OffsetImm;  // Offset immediate value
361      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
362      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
363      unsigned ShiftImm;        // shift for OffsetReg.
364      unsigned Alignment;       // 0 = no alignment specified
365                                // n = alignment in bytes (2, 4, 8, 16, or 32)
366      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
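      // (For illustration: "[r0, r1, lsl #2]" would roughly populate
      // BaseRegNum = R0, OffsetRegNum = R1, ShiftType = lsl, ShiftImm = 2,
      // with OffsetImm null and isNegative clear.)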
367    } Memory;
368
369    struct {
370      unsigned RegNum;
371      bool isAdd;
372      ARM_AM::ShiftOpc ShiftTy;
373      unsigned ShiftImm;
374    } PostIdxReg;
375
376    struct {
377      bool isASR;
378      unsigned Imm;
379    } ShifterImm;
380    struct {
381      ARM_AM::ShiftOpc ShiftTy;
382      unsigned SrcReg;
383      unsigned ShiftReg;
384      unsigned ShiftImm;
385    } RegShiftedReg;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftImm;
390    } RegShiftedImm;
391    struct {
392      unsigned Imm;
393    } RotImm;
394    struct {
395      unsigned LSB;
396      unsigned Width;
397    } Bitfield;
398  };
399
400  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
401public:
402  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
403    Kind = o.Kind;
404    StartLoc = o.StartLoc;
405    EndLoc = o.EndLoc;
406    switch (Kind) {
407    case k_CondCode:
408      CC = o.CC;
409      break;
410    case k_ITCondMask:
411      ITMask = o.ITMask;
412      break;
413    case k_Token:
414      Tok = o.Tok;
415      break;
416    case k_CCOut:
417    case k_Register:
418      Reg = o.Reg;
419      break;
420    case k_RegisterList:
421    case k_DPRRegisterList:
422    case k_SPRRegisterList:
423      Registers = o.Registers;
424      break;
425    case k_VectorList:
426    case k_VectorListAllLanes:
427    case k_VectorListIndexed:
428      VectorList = o.VectorList;
429      break;
430    case k_CoprocNum:
431    case k_CoprocReg:
432      Cop = o.Cop;
433      break;
434    case k_CoprocOption:
435      CoprocOption = o.CoprocOption;
436      break;
437    case k_Immediate:
438      Imm = o.Imm;
439      break;
440    case k_MemBarrierOpt:
441      MBOpt = o.MBOpt;
442      break;
443    case k_Memory:
444      Memory = o.Memory;
445      break;
446    case k_PostIndexRegister:
447      PostIdxReg = o.PostIdxReg;
448      break;
449    case k_MSRMask:
450      MMask = o.MMask;
451      break;
452    case k_ProcIFlags:
453      IFlags = o.IFlags;
454      break;
455    case k_ShifterImmediate:
456      ShifterImm = o.ShifterImm;
457      break;
458    case k_ShiftedRegister:
459      RegShiftedReg = o.RegShiftedReg;
460      break;
461    case k_ShiftedImmediate:
462      RegShiftedImm = o.RegShiftedImm;
463      break;
464    case k_RotateImmediate:
465      RotImm = o.RotImm;
466      break;
467    case k_BitfieldDescriptor:
468      Bitfield = o.Bitfield;
469      break;
470    case k_VectorIndex:
471      VectorIndex = o.VectorIndex;
472      break;
473    }
474  }
475
476  /// getStartLoc - Get the location of the first token of this operand.
477  SMLoc getStartLoc() const { return StartLoc; }
478  /// getEndLoc - Get the location of the last token of this operand.
479  SMLoc getEndLoc() const { return EndLoc; }
480
481  ARMCC::CondCodes getCondCode() const {
482    assert(Kind == k_CondCode && "Invalid access!");
483    return CC.Val;
484  }
485
486  unsigned getCoproc() const {
487    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
488    return Cop.Val;
489  }
490
491  StringRef getToken() const {
492    assert(Kind == k_Token && "Invalid access!");
493    return StringRef(Tok.Data, Tok.Length);
494  }
495
496  unsigned getReg() const {
497    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
498    return Reg.RegNum;
499  }
500
501  const SmallVectorImpl<unsigned> &getRegList() const {
502    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
503            Kind == k_SPRRegisterList) && "Invalid access!");
504    return Registers;
505  }
506
507  const MCExpr *getImm() const {
508    assert(isImm() && "Invalid access!");
509    return Imm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const {
541    if (!isImm()) return false;
542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
543    if (!CE) return false;
544    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
545    return Val != -1;
546  }
547  bool isFBits16() const {
548    if (!isImm()) return false;
549    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
550    if (!CE) return false;
551    int64_t Value = CE->getValue();
552    return Value >= 0 && Value <= 16;
553  }
554  bool isFBits32() const {
555    if (!isImm()) return false;
556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
557    if (!CE) return false;
558    int64_t Value = CE->getValue();
559    return Value >= 1 && Value <= 32;
560  }
561  bool isImm8s4() const {
562    if (!isImm()) return false;
563    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
564    if (!CE) return false;
565    int64_t Value = CE->getValue();
566    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
567  }
568  bool isImm0_1020s4() const {
569    if (!isImm()) return false;
570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
571    if (!CE) return false;
572    int64_t Value = CE->getValue();
573    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
574  }
575  bool isImm0_508s4() const {
576    if (!isImm()) return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
581  }
582  bool isImm0_255() const {
583    if (!isImm()) return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return Value >= 0 && Value < 256;
588  }
589  bool isImm0_1() const {
590    if (!isImm()) return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 2;
595  }
596  bool isImm0_3() const {
597    if (!isImm()) return false;
598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
599    if (!CE) return false;
600    int64_t Value = CE->getValue();
601    return Value >= 0 && Value < 4;
602  }
603  bool isImm0_7() const {
604    if (!isImm()) return false;
605    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
606    if (!CE) return false;
607    int64_t Value = CE->getValue();
608    return Value >= 0 && Value < 8;
609  }
610  bool isImm0_15() const {
611    if (!isImm()) return false;
612    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
613    if (!CE) return false;
614    int64_t Value = CE->getValue();
615    return Value >= 0 && Value < 16;
616  }
617  bool isImm0_31() const {
618    if (!isImm()) return false;
619    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
620    if (!CE) return false;
621    int64_t Value = CE->getValue();
622    return Value >= 0 && Value < 32;
623  }
624  bool isImm0_63() const {
625    if (!isImm()) return false;
626    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
627    if (!CE) return false;
628    int64_t Value = CE->getValue();
629    return Value >= 0 && Value < 64;
630  }
631  bool isImm8() const {
632    if (!isImm()) return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (!isImm()) return false;
640    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
641    if (!CE) return false;
642    int64_t Value = CE->getValue();
643    return Value == 16;
644  }
645  bool isImm32() const {
646    if (!isImm()) return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (!isImm()) return false;
654    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
655    if (!CE) return false;
656    int64_t Value = CE->getValue();
657    return Value > 0 && Value <= 8;
658  }
659  bool isShrImm16() const {
660    if (!isImm()) return false;
661    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
662    if (!CE) return false;
663    int64_t Value = CE->getValue();
664    return Value > 0 && Value <= 16;
665  }
666  bool isShrImm32() const {
667    if (!isImm()) return false;
668    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
669    if (!CE) return false;
670    int64_t Value = CE->getValue();
671    return Value > 0 && Value <= 32;
672  }
673  bool isShrImm64() const {
674    if (!isImm()) return false;
675    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
676    if (!CE) return false;
677    int64_t Value = CE->getValue();
678    return Value > 0 && Value <= 64;
679  }
680  bool isImm1_7() const {
681    if (!isImm()) return false;
682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683    if (!CE) return false;
684    int64_t Value = CE->getValue();
685    return Value > 0 && Value < 8;
686  }
687  bool isImm1_15() const {
688    if (!isImm()) return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 16;
693  }
694  bool isImm1_31() const {
695    if (!isImm()) return false;
696    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
697    if (!CE) return false;
698    int64_t Value = CE->getValue();
699    return Value > 0 && Value < 32;
700  }
701  bool isImm1_16() const {
702    if (!isImm()) return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 17;
707  }
708  bool isImm1_32() const {
709    if (!isImm()) return false;
710    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
711    if (!CE) return false;
712    int64_t Value = CE->getValue();
713    return Value > 0 && Value < 33;
714  }
715  bool isImm0_32() const {
716    if (!isImm()) return false;
717    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
718    if (!CE) return false;
719    int64_t Value = CE->getValue();
720    return Value >= 0 && Value < 33;
721  }
722  bool isImm0_65535() const {
723    if (!isImm()) return false;
724    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
725    if (!CE) return false;
726    int64_t Value = CE->getValue();
727    return Value >= 0 && Value < 65536;
728  }
729  bool isImm0_65535Expr() const {
730    if (!isImm()) return false;
731    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
732    // If it's not a constant expression, it'll generate a fixup and be
733    // handled later.
734    if (!CE) return true;
735    int64_t Value = CE->getValue();
736    return Value >= 0 && Value < 65536;
737  }
738  bool isImm24bit() const {
739    if (!isImm()) return false;
740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741    if (!CE) return false;
742    int64_t Value = CE->getValue();
743    return Value >= 0 && Value <= 0xffffff;
744  }
745  bool isImmThumbSR() const {
746    if (!isImm()) return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return Value > 0 && Value < 33;
751  }
752  bool isPKHLSLImm() const {
753    if (!isImm()) return false;
754    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755    if (!CE) return false;
756    int64_t Value = CE->getValue();
757    return Value >= 0 && Value < 32;
758  }
759  bool isPKHASRImm() const {
760    if (!isImm()) return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value <= 32;
765  }
766  bool isARMSOImm() const {
767    if (!isImm()) return false;
768    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
769    if (!CE) return false;
770    int64_t Value = CE->getValue();
771    return ARM_AM::getSOImmVal(Value) != -1;
772  }
773  bool isARMSOImmNot() const {
774    if (!isImm()) return false;
775    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
776    if (!CE) return false;
777    int64_t Value = CE->getValue();
778    return ARM_AM::getSOImmVal(~Value) != -1;
779  }
780  bool isARMSOImmNeg() const {
781    if (!isImm()) return false;
782    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783    if (!CE) return false;
784    int64_t Value = CE->getValue();
785    // Negation must be representable as an so_imm and be non-zero.
786    return Value && ARM_AM::getSOImmVal(-Value) != -1;
787  }
788  bool isT2SOImm() const {
789    if (!isImm()) return false;
790    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
791    if (!CE) return false;
792    int64_t Value = CE->getValue();
793    return ARM_AM::getT2SOImmVal(Value) != -1;
794  }
795  bool isT2SOImmNot() const {
796    if (!isImm()) return false;
797    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
798    if (!CE) return false;
799    int64_t Value = CE->getValue();
800    return ARM_AM::getT2SOImmVal(~Value) != -1;
801  }
802  bool isT2SOImmNeg() const {
803    if (!isImm()) return false;
804    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
805    if (!CE) return false;
806    int64_t Value = CE->getValue();
807    // Negation must be representable as a t2_so_imm and be non-zero.
808    return Value && ARM_AM::getT2SOImmVal(-Value) != -1;
809  }
810  bool isSetEndImm() const {
811    if (!isImm()) return false;
812    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
813    if (!CE) return false;
814    int64_t Value = CE->getValue();
815    return Value == 1 || Value == 0;
816  }
817  bool isReg() const { return Kind == k_Register; }
818  bool isRegList() const { return Kind == k_RegisterList; }
819  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
820  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
821  bool isToken() const { return Kind == k_Token; }
822  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
823  bool isMemory() const { return Kind == k_Memory; }
824  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
825  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
826  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
827  bool isRotImm() const { return Kind == k_RotateImmediate; }
828  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
829  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
830  bool isPostIdxReg() const {
831    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
832  }
833  bool isMemNoOffset(bool alignOK = false) const {
834    if (!isMemory())
835      return false;
836    // No offset of any kind.
837    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
838     (alignOK || Memory.Alignment == 0);
839  }
840  bool isMemPCRelImm12() const {
841    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
842      return false;
843    // Base register must be PC.
844    if (Memory.BaseRegNum != ARM::PC)
845      return false;
846    // Immediate offset in range [-4095, 4095].
847    if (!Memory.OffsetImm) return true;
848    int64_t Val = Memory.OffsetImm->getValue();
849    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
850  }
851  bool isAlignedMemory() const {
852    return isMemNoOffset(true);
853  }
854  bool isAddrMode2() const {
855    if (!isMemory() || Memory.Alignment != 0) return false;
856    // Check for register offset.
857    if (Memory.OffsetRegNum) return true;
858    // Immediate offset in range [-4095, 4095].
859    if (!Memory.OffsetImm) return true;
860    int64_t Val = Memory.OffsetImm->getValue();
861    return Val > -4096 && Val < 4096;
862  }
863  bool isAM2OffsetImm() const {
864    if (!isImm()) return false;
865    // Immediate offset in range [-4095, 4095].
866    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
867    if (!CE) return false;
868    int64_t Val = CE->getValue();
869    return Val > -4096 && Val < 4096;
870  }
871  bool isAddrMode3() const {
872    // If we have an immediate that's not a constant, treat it as a label
873    // reference needing a fixup. If it is a constant, it's something else
874    // and we reject it.
875    if (isImm() && !isa<MCConstantExpr>(getImm()))
876      return true;
877    if (!isMemory() || Memory.Alignment != 0) return false;
878    // No shifts are legal for AM3.
879    if (Memory.ShiftType != ARM_AM::no_shift) return false;
880    // Check for register offset.
881    if (Memory.OffsetRegNum) return true;
882    // Immediate offset in range [-255, 255].
883    if (!Memory.OffsetImm) return true;
884    int64_t Val = Memory.OffsetImm->getValue();
885    return Val > -256 && Val < 256;
886  }
887  bool isAM3Offset() const {
888    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
889      return false;
890    if (Kind == k_PostIndexRegister)
891      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
892    // Immediate offset in range [-255, 255].
893    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
894    if (!CE) return false;
895    int64_t Val = CE->getValue();
896    // Special case, #-0 is INT32_MIN.
897    return (Val > -256 && Val < 256) || Val == INT32_MIN;
898  }
899  bool isAddrMode5() const {
900    // If we have an immediate that's not a constant, treat it as a label
901    // reference needing a fixup. If it is a constant, it's something else
902    // and we reject it.
903    if (isImm() && !isa<MCConstantExpr>(getImm()))
904      return true;
905    if (!isMemory() || Memory.Alignment != 0) return false;
906    // Check for register offset.
907    if (Memory.OffsetRegNum) return false;
908    // Immediate offset in range [-1020, 1020] and a multiple of 4.
909    if (!Memory.OffsetImm) return true;
910    int64_t Val = Memory.OffsetImm->getValue();
911    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
912      Val == INT32_MIN;
913  }
914  bool isMemTBB() const {
915    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
916        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
917      return false;
918    return true;
919  }
920  bool isMemTBH() const {
921    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
922        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
923        Memory.Alignment != 0 )
924      return false;
925    return true;
926  }
927  bool isMemRegOffset() const {
928    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
929      return false;
930    return true;
931  }
932  bool isT2MemRegOffset() const {
933    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
934        Memory.Alignment != 0)
935      return false;
936    // Only lsl #{0, 1, 2, 3} allowed.
937    if (Memory.ShiftType == ARM_AM::no_shift)
938      return true;
939    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
940      return false;
941    return true;
942  }
943  bool isMemThumbRR() const {
944    // Thumb reg+reg addressing is simple. Just two registers, a base and
945    // an offset. No shifts, negations or any other complicating factors.
946    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
947        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
948      return false;
949    return isARMLowRegister(Memory.BaseRegNum) &&
950      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
951  }
952  bool isMemThumbRIs4() const {
953    if (!isMemory() || Memory.OffsetRegNum != 0 ||
954        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
955      return false;
956    // Immediate offset, multiple of 4 in range [0, 124].
957    if (!Memory.OffsetImm) return true;
958    int64_t Val = Memory.OffsetImm->getValue();
959    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
960  }
961  bool isMemThumbRIs2() const {
962    if (!isMemory() || Memory.OffsetRegNum != 0 ||
963        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
964      return false;
965    // Immediate offset, multiple of 2 in range [0, 62].
966    if (!Memory.OffsetImm) return true;
967    int64_t Val = Memory.OffsetImm->getValue();
968    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
969  }
970  bool isMemThumbRIs1() const {
971    if (!isMemory() || Memory.OffsetRegNum != 0 ||
972        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
973      return false;
974    // Immediate offset in range [0, 31].
975    if (!Memory.OffsetImm) return true;
976    int64_t Val = Memory.OffsetImm->getValue();
977    return Val >= 0 && Val <= 31;
978  }
979  bool isMemThumbSPI() const {
980    if (!isMemory() || Memory.OffsetRegNum != 0 ||
981        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
982      return false;
983    // Immediate offset, multiple of 4 in range [0, 1020].
984    if (!Memory.OffsetImm) return true;
985    int64_t Val = Memory.OffsetImm->getValue();
986    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
987  }
988  bool isMemImm8s4Offset() const {
989    // If we have an immediate that's not a constant, treat it as a label
990    // reference needing a fixup. If it is a constant, it's something else
991    // and we reject it.
992    if (isImm() && !isa<MCConstantExpr>(getImm()))
993      return true;
994    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
995      return false;
996    // Immediate offset a multiple of 4 in range [-1020, 1020].
997    if (!Memory.OffsetImm) return true;
998    int64_t Val = Memory.OffsetImm->getValue();
999    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1000  }
1001  bool isMemImm0_1020s4Offset() const {
1002    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1003      return false;
1004    // Immediate offset a multiple of 4 in range [0, 1020].
1005    if (!Memory.OffsetImm) return true;
1006    int64_t Val = Memory.OffsetImm->getValue();
1007    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1008  }
1009  bool isMemImm8Offset() const {
1010    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1011      return false;
1012    // Base reg of PC isn't allowed for these encodings.
1013    if (Memory.BaseRegNum == ARM::PC) return false;
1014    // Immediate offset in range [-255, 255].
1015    if (!Memory.OffsetImm) return true;
1016    int64_t Val = Memory.OffsetImm->getValue();
1017    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1018  }
1019  bool isMemPosImm8Offset() const {
1020    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1021      return false;
1022    // Immediate offset in range [0, 255].
1023    if (!Memory.OffsetImm) return true;
1024    int64_t Val = Memory.OffsetImm->getValue();
1025    return Val >= 0 && Val < 256;
1026  }
1027  bool isMemNegImm8Offset() const {
1028    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1029      return false;
1030    // Base reg of PC isn't allowed for these encodings.
1031    if (Memory.BaseRegNum == ARM::PC) return false;
1032    // Immediate offset in range [-255, -1].
1033    if (!Memory.OffsetImm) return false;
1034    int64_t Val = Memory.OffsetImm->getValue();
1035    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1036  }
1037  bool isMemUImm12Offset() const {
1038    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1039      return false;
1040    // Immediate offset in range [0, 4095].
1041    if (!Memory.OffsetImm) return true;
1042    int64_t Val = Memory.OffsetImm->getValue();
1043    return (Val >= 0 && Val < 4096);
1044  }
1045  bool isMemImm12Offset() const {
1046    // If we have an immediate that's not a constant, treat it as a label
1047    // reference needing a fixup. If it is a constant, it's something else
1048    // and we reject it.
1049    if (isImm() && !isa<MCConstantExpr>(getImm()))
1050      return true;
1051
1052    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1053      return false;
1054    // Immediate offset in range [-4095, 4095].
1055    if (!Memory.OffsetImm) return true;
1056    int64_t Val = Memory.OffsetImm->getValue();
1057    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1058  }
1059  bool isPostIdxImm8() const {
1060    if (!isImm()) return false;
1061    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1062    if (!CE) return false;
1063    int64_t Val = CE->getValue();
1064    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1065  }
1066  bool isPostIdxImm8s4() const {
1067    if (!isImm()) return false;
1068    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1069    if (!CE) return false;
1070    int64_t Val = CE->getValue();
1071    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1072      (Val == INT32_MIN);
1073  }
1074
1075  bool isMSRMask() const { return Kind == k_MSRMask; }
1076  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1077
1078  // NEON operands.
1079  bool isSingleSpacedVectorList() const {
1080    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1081  }
1082  bool isDoubleSpacedVectorList() const {
1083    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1084  }
1085  bool isVecListOneD() const {
1086    if (!isSingleSpacedVectorList()) return false;
1087    return VectorList.Count == 1;
1088  }
1089
1090  bool isVecListDPair() const {
1091    if (!isSingleSpacedVectorList()) return false;
1092    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1093              .contains(VectorList.RegNum));
1094  }
1095
1096  bool isVecListThreeD() const {
1097    if (!isSingleSpacedVectorList()) return false;
1098    return VectorList.Count == 3;
1099  }
1100
1101  bool isVecListFourD() const {
1102    if (!isSingleSpacedVectorList()) return false;
1103    return VectorList.Count == 4;
1104  }
1105
1106  bool isVecListDPairSpaced() const {
1107    if (isSingleSpacedVectorList()) return false;
1108    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1109              .contains(VectorList.RegNum));
1110  }
1111
1112  bool isVecListThreeQ() const {
1113    if (!isDoubleSpacedVectorList()) return false;
1114    return VectorList.Count == 3;
1115  }
1116
1117  bool isVecListFourQ() const {
1118    if (!isDoubleSpacedVectorList()) return false;
1119    return VectorList.Count == 4;
1120  }
1121
1122  bool isSingleSpacedVectorAllLanes() const {
1123    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1124  }
1125  bool isDoubleSpacedVectorAllLanes() const {
1126    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1127  }
1128  bool isVecListOneDAllLanes() const {
1129    if (!isSingleSpacedVectorAllLanes()) return false;
1130    return VectorList.Count == 1;
1131  }
1132
1133  bool isVecListDPairAllLanes() const {
1134    if (!isSingleSpacedVectorAllLanes()) return false;
1135    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1136              .contains(VectorList.RegNum));
1137  }
1138
1139  bool isVecListDPairSpacedAllLanes() const {
1140    if (!isDoubleSpacedVectorAllLanes()) return false;
1141    return VectorList.Count == 2;
1142  }
1143
1144  bool isVecListThreeDAllLanes() const {
1145    if (!isSingleSpacedVectorAllLanes()) return false;
1146    return VectorList.Count == 3;
1147  }
1148
1149  bool isVecListThreeQAllLanes() const {
1150    if (!isDoubleSpacedVectorAllLanes()) return false;
1151    return VectorList.Count == 3;
1152  }
1153
1154  bool isVecListFourDAllLanes() const {
1155    if (!isSingleSpacedVectorAllLanes()) return false;
1156    return VectorList.Count == 4;
1157  }
1158
1159  bool isVecListFourQAllLanes() const {
1160    if (!isDoubleSpacedVectorAllLanes()) return false;
1161    return VectorList.Count == 4;
1162  }
1163
1164  bool isSingleSpacedVectorIndexed() const {
1165    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1166  }
1167  bool isDoubleSpacedVectorIndexed() const {
1168    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1169  }
1170  bool isVecListOneDByteIndexed() const {
1171    if (!isSingleSpacedVectorIndexed()) return false;
1172    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1173  }
1174
1175  bool isVecListOneDHWordIndexed() const {
1176    if (!isSingleSpacedVectorIndexed()) return false;
1177    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1178  }
1179
1180  bool isVecListOneDWordIndexed() const {
1181    if (!isSingleSpacedVectorIndexed()) return false;
1182    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1183  }
1184
1185  bool isVecListTwoDByteIndexed() const {
1186    if (!isSingleSpacedVectorIndexed()) return false;
1187    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1188  }
1189
1190  bool isVecListTwoDHWordIndexed() const {
1191    if (!isSingleSpacedVectorIndexed()) return false;
1192    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1193  }
1194
1195  bool isVecListTwoQWordIndexed() const {
1196    if (!isDoubleSpacedVectorIndexed()) return false;
1197    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1198  }
1199
1200  bool isVecListTwoQHWordIndexed() const {
1201    if (!isDoubleSpacedVectorIndexed()) return false;
1202    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1203  }
1204
1205  bool isVecListTwoDWordIndexed() const {
1206    if (!isSingleSpacedVectorIndexed()) return false;
1207    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1208  }
1209
1210  bool isVecListThreeDByteIndexed() const {
1211    if (!isSingleSpacedVectorIndexed()) return false;
1212    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1213  }
1214
1215  bool isVecListThreeDHWordIndexed() const {
1216    if (!isSingleSpacedVectorIndexed()) return false;
1217    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1218  }
1219
1220  bool isVecListThreeQWordIndexed() const {
1221    if (!isDoubleSpacedVectorIndexed()) return false;
1222    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1223  }
1224
1225  bool isVecListThreeQHWordIndexed() const {
1226    if (!isDoubleSpacedVectorIndexed()) return false;
1227    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1228  }
1229
1230  bool isVecListThreeDWordIndexed() const {
1231    if (!isSingleSpacedVectorIndexed()) return false;
1232    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1233  }
1234
1235  bool isVecListFourDByteIndexed() const {
1236    if (!isSingleSpacedVectorIndexed()) return false;
1237    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1238  }
1239
1240  bool isVecListFourDHWordIndexed() const {
1241    if (!isSingleSpacedVectorIndexed()) return false;
1242    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1243  }
1244
1245  bool isVecListFourQWordIndexed() const {
1246    if (!isDoubleSpacedVectorIndexed()) return false;
1247    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1248  }
1249
1250  bool isVecListFourQHWordIndexed() const {
1251    if (!isDoubleSpacedVectorIndexed()) return false;
1252    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1253  }
1254
1255  bool isVecListFourDWordIndexed() const {
1256    if (!isSingleSpacedVectorIndexed()) return false;
1257    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1258  }
1259
1260  bool isVectorIndex8() const {
1261    if (Kind != k_VectorIndex) return false;
1262    return VectorIndex.Val < 8;
1263  }
1264  bool isVectorIndex16() const {
1265    if (Kind != k_VectorIndex) return false;
1266    return VectorIndex.Val < 4;
1267  }
1268  bool isVectorIndex32() const {
1269    if (Kind != k_VectorIndex) return false;
1270    return VectorIndex.Val < 2;
1271  }
1272
1273  bool isNEONi8splat() const {
1274    if (!isImm()) return false;
1275    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1276    // Must be a constant.
1277    if (!CE) return false;
1278    int64_t Value = CE->getValue();
1279    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1280    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1281    return Value >= 0 && Value < 256;
1282  }
1283
1284  bool isNEONi16splat() const {
1285    if (!isImm()) return false;
1286    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1287    // Must be a constant.
1288    if (!CE) return false;
1289    int64_t Value = CE->getValue();
1290    // i16 value in the range [0,255] or [0x0100, 0xff00]
1291    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1292  }
1293
1294  bool isNEONi32splat() const {
1295    if (!isImm()) return false;
1296    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1297    // Must be a constant.
1298    if (!CE) return false;
1299    int64_t Value = CE->getValue();
1300    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1301    return (Value >= 0 && Value < 256) ||
1302      (Value >= 0x0100 && Value <= 0xff00) ||
1303      (Value >= 0x010000 && Value <= 0xff0000) ||
1304      (Value >= 0x01000000 && Value <= 0xff000000);
1305  }
1306
1307  bool isNEONi32vmov() const {
1308    if (!isImm()) return false;
1309    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1310    // Must be a constant.
1311    if (!CE) return false;
1312    int64_t Value = CE->getValue();
1313    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1314    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1315    return (Value >= 0 && Value < 256) ||
1316      (Value >= 0x0100 && Value <= 0xff00) ||
1317      (Value >= 0x010000 && Value <= 0xff0000) ||
1318      (Value >= 0x01000000 && Value <= 0xff000000) ||
1319      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1320      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1321  }
1322  bool isNEONi32vmovNeg() const {
1323    if (!isImm()) return false;
1324    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1325    // Must be a constant.
1326    if (!CE) return false;
1327    int64_t Value = ~CE->getValue();
1328    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1329    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1330    return (Value >= 0 && Value < 256) ||
1331      (Value >= 0x0100 && Value <= 0xff00) ||
1332      (Value >= 0x010000 && Value <= 0xff0000) ||
1333      (Value >= 0x01000000 && Value <= 0xff000000) ||
1334      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1335      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1336  }
1337
1338  bool isNEONi64splat() const {
1339    if (!isImm()) return false;
1340    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1341    // Must be a constant.
1342    if (!CE) return false;
1343    uint64_t Value = CE->getValue();
1344    // i64 value with each byte being either 0 or 0xff.
1345    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1346      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1347    return true;
1348  }
1349
1350  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1351    // Add as immediates when possible.  Null MCExpr = 0.
1352    if (Expr == 0)
1353      Inst.addOperand(MCOperand::CreateImm(0));
1354    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1355      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1356    else
1357      Inst.addOperand(MCOperand::CreateExpr(Expr));
1358  }
1359
1360  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1361    assert(N == 2 && "Invalid number of operands!");
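    // Predication is two operands: the condition code as an immediate, then
    // the predicate register (CPSR, or register 0 when the condition is AL).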
1362    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1363    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1364    Inst.addOperand(MCOperand::CreateReg(RegNum));
1365  }
1366
1367  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1368    assert(N == 1 && "Invalid number of operands!");
1369    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1370  }
1371
1372  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1373    assert(N == 1 && "Invalid number of operands!");
1374    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1375  }
1376
1377  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1378    assert(N == 1 && "Invalid number of operands!");
1379    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1380  }
1381
1382  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1383    assert(N == 1 && "Invalid number of operands!");
1384    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1385  }
1386
1387  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1388    assert(N == 1 && "Invalid number of operands!");
1389    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1390  }
1391
1392  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1393    assert(N == 1 && "Invalid number of operands!");
1394    Inst.addOperand(MCOperand::CreateReg(getReg()));
1395  }
1396
1397  void addRegOperands(MCInst &Inst, unsigned N) const {
1398    assert(N == 1 && "Invalid number of operands!");
1399    Inst.addOperand(MCOperand::CreateReg(getReg()));
1400  }
1401
1402  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1403    assert(N == 3 && "Invalid number of operands!");
1404    assert(isRegShiftedReg() &&
1405           "addRegShiftedRegOperands() on non RegShiftedReg!");
1406    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1407    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1408    Inst.addOperand(MCOperand::CreateImm(
1409      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1410  }
1411
1412  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1413    assert(N == 2 && "Invalid number of operands!");
1414    assert(isRegShiftedImm() &&
1415           "addRegShiftedImmOperands() on non RegShiftedImm!");
1416    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1417    Inst.addOperand(MCOperand::CreateImm(
1418      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1419  }
1420
1421  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1422    assert(N == 1 && "Invalid number of operands!");
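    // (e.g. an "asr #3" operand, isASR true and Imm 3, is emitted below as
    // (1 << 5) | 3 == 35.)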
1423    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1424                                         ShifterImm.Imm));
1425  }
1426
1427  void addRegListOperands(MCInst &Inst, unsigned N) const {
1428    assert(N == 1 && "Invalid number of operands!");
1429    const SmallVectorImpl<unsigned> &RegList = getRegList();
1430    for (SmallVectorImpl<unsigned>::const_iterator
1431           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1432      Inst.addOperand(MCOperand::CreateReg(*I));
1433  }
1434
1435  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1436    addRegListOperands(Inst, N);
1437  }
1438
1439  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1440    addRegListOperands(Inst, N);
1441  }
1442
1443  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1444    assert(N == 1 && "Invalid number of operands!");
1445    // Encoded as val>>3. The printer handles display as 8, 16, 24.
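    // (e.g. "ror #16" is stored with Imm = 16 and emitted as 16 >> 3 == 2.)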
1446    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1447  }
1448
1449  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1450    assert(N == 1 && "Invalid number of operands!");
1451    // Munge the lsb/width into a bitfield mask.
1452    unsigned lsb = Bitfield.LSB;
1453    unsigned width = Bitfield.Width;
1454    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
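    // (e.g. lsb = 8, width = 4 gives Mask = 0xfffff0ff, i.e. bits [11:8] clear.)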
1455    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1456                      (32 - (lsb + width)));
1457    Inst.addOperand(MCOperand::CreateImm(Mask));
1458  }
1459
1460  void addImmOperands(MCInst &Inst, unsigned N) const {
1461    assert(N == 1 && "Invalid number of operands!");
1462    addExpr(Inst, getImm());
1463  }
1464
1465  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1466    assert(N == 1 && "Invalid number of operands!");
1467    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1468    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1469  }
1470
1471  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1472    assert(N == 1 && "Invalid number of operands!");
1473    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1474    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1475  }
1476
1477  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1478    assert(N == 1 && "Invalid number of operands!");
1479    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1480    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1481    Inst.addOperand(MCOperand::CreateImm(Val));
1482  }
1483
1484  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1485    assert(N == 1 && "Invalid number of operands!");
1486    // FIXME: We really want to scale the value here, but the LDRD/STRD
1487    // instructions don't encode operands that way yet.
1488    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1489    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1490  }
1491
1492  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1493    assert(N == 1 && "Invalid number of operands!");
1494    // The immediate is scaled by four in the encoding and is stored
1495    // in the MCInst as such. Lop off the low two bits here.
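    // e.g., an assembly operand of #16 is stored as 4.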
1496    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1497    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1498  }
1499
1500  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1501    assert(N == 1 && "Invalid number of operands!");
1502    // The immediate is scaled by four in the encoding and is stored
1503    // in the MCInst as such. Lop off the low two bits here.
1504    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1505    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1506  }
1507
1508  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1509    assert(N == 1 && "Invalid number of operands!");
1510    // The constant encodes as the immediate-1, and we store in the instruction
1511    // the bits as encoded, so subtract off one here.
1512    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1513    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1514  }
1515
1516  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1517    assert(N == 1 && "Invalid number of operands!");
1518    // The constant encodes as the immediate-1, and we store in the instruction
1519    // the bits as encoded, so subtract off one here.
1520    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1521    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1522  }
1523
1524  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1525    assert(N == 1 && "Invalid number of operands!");
1526    // The constant encodes as the immediate, except for 32, which encodes as
1527    // zero.
1528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1529    unsigned Imm = CE->getValue();
1530    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1531  }
1532
1533  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1534    assert(N == 1 && "Invalid number of operands!");
1535    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1536    // the instruction as well.
1537    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1538    int Val = CE->getValue();
1539    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1540  }
1541
1542  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1543    assert(N == 1 && "Invalid number of operands!");
1544    // The operand is actually a t2_so_imm, but we have its bitwise
1545    // negation in the assembly source, so twiddle it here.
1546    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1547    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1548  }
1549
1550  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1551    assert(N == 1 && "Invalid number of operands!");
1552    // The operand is actually a t2_so_imm, but we have its
1553    // negation in the assembly source, so twiddle it here.
1554    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1555    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1556  }
1557
1558  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1559    assert(N == 1 && "Invalid number of operands!");
1560    // The operand is actually a so_imm, but we have its bitwise
1561    // negation in the assembly source, so twiddle it here.
1562    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1563    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1564  }
1565
1566  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1567    assert(N == 1 && "Invalid number of operands!");
1568    // The operand is actually a so_imm, but we have its
1569    // negation in the assembly source, so twiddle it here.
1570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1571    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1572  }
1573
1574  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1575    assert(N == 1 && "Invalid number of operands!");
1576    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1577  }
1578
1579  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1580    assert(N == 1 && "Invalid number of operands!");
1581    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1582  }
1583
1584  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1585    assert(N == 1 && "Invalid number of operands!");
1586    int32_t Imm = Memory.OffsetImm->getValue();
1587    // FIXME: Handle #-0
1588    if (Imm == INT32_MIN) Imm = 0;
1589    Inst.addOperand(MCOperand::CreateImm(Imm));
1590  }
1591
1592  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1593    assert(N == 2 && "Invalid number of operands!");
1594    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1595    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1596  }
1597
1598  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1599    assert(N == 3 && "Invalid number of operands!");
1600    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1601    if (!Memory.OffsetRegNum) {
1602      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1603      // Special case for #-0
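      // (The parser represents #-0 with INT32_MIN as a sentinel.)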
1604      if (Val == INT32_MIN) Val = 0;
1605      if (Val < 0) Val = -Val;
1606      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1607    } else {
1608      // For register offset, we encode the shift type and negation flag
1609      // here.
1610      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1611                              Memory.ShiftImm, Memory.ShiftType);
1612    }
1613    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1614    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1615    Inst.addOperand(MCOperand::CreateImm(Val));
1616  }
1617
1618  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1619    assert(N == 2 && "Invalid number of operands!");
1620    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1621    assert(CE && "non-constant AM2OffsetImm operand!");
1622    int32_t Val = CE->getValue();
1623    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1624    // Special case for #-0
1625    if (Val == INT32_MIN) Val = 0;
1626    if (Val < 0) Val = -Val;
1627    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1628    Inst.addOperand(MCOperand::CreateReg(0));
1629    Inst.addOperand(MCOperand::CreateImm(Val));
1630  }
1631
1632  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1633    assert(N == 3 && "Invalid number of operands!");
1634    // If we have an immediate that's not a constant, treat it as a label
1635    // reference needing a fixup. If it is a constant, it's something else
1636    // and we reject it.
1637    if (isImm()) {
1638      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1639      Inst.addOperand(MCOperand::CreateReg(0));
1640      Inst.addOperand(MCOperand::CreateImm(0));
1641      return;
1642    }
1643
1644    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1645    if (!Memory.OffsetRegNum) {
1646      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1647      // Special case for #-0
1648      if (Val == INT32_MIN) Val = 0;
1649      if (Val < 0) Val = -Val;
1650      Val = ARM_AM::getAM3Opc(AddSub, Val);
1651    } else {
1652      // For register offset, we encode the shift type and negation flag
1653      // here.
1654      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1655    }
1656    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1657    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1658    Inst.addOperand(MCOperand::CreateImm(Val));
1659  }
1660
1661  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1662    assert(N == 2 && "Invalid number of operands!");
1663    if (Kind == k_PostIndexRegister) {
1664      int32_t Val =
1665        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1666      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1667      Inst.addOperand(MCOperand::CreateImm(Val));
1668      return;
1669    }
1670
1671    // Constant offset.
1672    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1673    int32_t Val = CE->getValue();
1674    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1675    // Special case for #-0
1676    if (Val == INT32_MIN) Val = 0;
1677    if (Val < 0) Val = -Val;
1678    Val = ARM_AM::getAM3Opc(AddSub, Val);
1679    Inst.addOperand(MCOperand::CreateReg(0));
1680    Inst.addOperand(MCOperand::CreateImm(Val));
1681  }
1682
1683  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1684    assert(N == 2 && "Invalid number of operands!");
1685    // If we have an immediate that's not a constant, treat it as a label
1686    // reference needing a fixup. If it is a constant, it's something else
1687    // and we reject it.
1688    if (isImm()) {
1689      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1690      Inst.addOperand(MCOperand::CreateImm(0));
1691      return;
1692    }
1693
1694    // The lower two bits are always zero and as such are not encoded.
1695    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1696    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1697    // Special case for #-0
1698    if (Val == INT32_MIN) Val = 0;
1699    if (Val < 0) Val = -Val;
1700    Val = ARM_AM::getAM5Opc(AddSub, Val);
1701    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1702    Inst.addOperand(MCOperand::CreateImm(Val));
1703  }
1704
1705  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1706    assert(N == 2 && "Invalid number of operands!");
1707    // If we have an immediate that's not a constant, treat it as a label
1708    // reference needing a fixup. If it is a constant, it's something else
1709    // and we reject it.
1710    if (isImm()) {
1711      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1712      Inst.addOperand(MCOperand::CreateImm(0));
1713      return;
1714    }
1715
1716    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1717    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1718    Inst.addOperand(MCOperand::CreateImm(Val));
1719  }
1720
1721  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1722    assert(N == 2 && "Invalid number of operands!");
1723    // The lower two bits are always zero and as such are not encoded.
1724    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1725    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1726    Inst.addOperand(MCOperand::CreateImm(Val));
1727  }
1728
1729  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1730    assert(N == 2 && "Invalid number of operands!");
1731    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1732    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1733    Inst.addOperand(MCOperand::CreateImm(Val));
1734  }
1735
1736  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1737    addMemImm8OffsetOperands(Inst, N);
1738  }
1739
1740  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1741    addMemImm8OffsetOperands(Inst, N);
1742  }
1743
1744  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1745    assert(N == 2 && "Invalid number of operands!");
1746    // If this is an immediate, it's a label reference.
1747    if (isImm()) {
1748      addExpr(Inst, getImm());
1749      Inst.addOperand(MCOperand::CreateImm(0));
1750      return;
1751    }
1752
1753    // Otherwise, it's a normal memory reg+offset.
1754    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1755    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1756    Inst.addOperand(MCOperand::CreateImm(Val));
1757  }
1758
1759  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1760    assert(N == 2 && "Invalid number of operands!");
1761    // If this is an immediate, it's a label reference.
1762    if (isImm()) {
1763      addExpr(Inst, getImm());
1764      Inst.addOperand(MCOperand::CreateImm(0));
1765      return;
1766    }
1767
1768    // Otherwise, it's a normal memory reg+offset.
1769    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1770    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1771    Inst.addOperand(MCOperand::CreateImm(Val));
1772  }
1773
1774  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1775    assert(N == 2 && "Invalid number of operands!");
1776    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1777    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1778  }
1779
1780  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1781    assert(N == 2 && "Invalid number of operands!");
1782    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1783    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1784  }
1785
1786  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1787    assert(N == 3 && "Invalid number of operands!");
1788    unsigned Val =
1789      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1790                        Memory.ShiftImm, Memory.ShiftType);
1791    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1792    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1793    Inst.addOperand(MCOperand::CreateImm(Val));
1794  }
1795
1796  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1797    assert(N == 3 && "Invalid number of operands!");
1798    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1799    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1800    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1801  }
1802
1803  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1804    assert(N == 2 && "Invalid number of operands!");
1805    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1806    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1807  }
1808
1809  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1810    assert(N == 2 && "Invalid number of operands!");
1811    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1812    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1813    Inst.addOperand(MCOperand::CreateImm(Val));
1814  }
1815
1816  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1817    assert(N == 2 && "Invalid number of operands!");
1818    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1819    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1820    Inst.addOperand(MCOperand::CreateImm(Val));
1821  }
1822
1823  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1824    assert(N == 2 && "Invalid number of operands!");
1825    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1826    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1827    Inst.addOperand(MCOperand::CreateImm(Val));
1828  }
1829
1830  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1831    assert(N == 2 && "Invalid number of operands!");
1832    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1833    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1834    Inst.addOperand(MCOperand::CreateImm(Val));
1835  }
1836
1837  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1838    assert(N == 1 && "Invalid number of operands!");
1839    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1840    assert(CE && "non-constant post-idx-imm8 operand!");
1841    int Imm = CE->getValue();
1842    bool isAdd = Imm >= 0;
1843    if (Imm == INT32_MIN) Imm = 0;
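    // Encode the magnitude in the low 8 bits and the add/sub flag at bit 8.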
1844    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1845    Inst.addOperand(MCOperand::CreateImm(Imm));
1846  }
1847
1848  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1849    assert(N == 1 && "Invalid number of operands!");
1850    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1851    assert(CE && "non-constant post-idx-imm8s4 operand!");
1852    int Imm = CE->getValue();
1853    bool isAdd = Imm >= 0;
1854    if (Imm == INT32_MIN) Imm = 0;
1855    // Immediate is scaled by 4.
1856    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1857    Inst.addOperand(MCOperand::CreateImm(Imm));
1858  }
1859
1860  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1861    assert(N == 2 && "Invalid number of operands!");
1862    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1863    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1864  }
1865
1866  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1867    assert(N == 2 && "Invalid number of operands!");
1868    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1869    // The sign, shift type, and shift amount are encoded in a single operand
1870    // using the AM2 encoding helpers.
1871    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1872    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1873                                     PostIdxReg.ShiftTy);
1874    Inst.addOperand(MCOperand::CreateImm(Imm));
1875  }
1876
1877  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1878    assert(N == 1 && "Invalid number of operands!");
1879    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1880  }
1881
1882  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1883    assert(N == 1 && "Invalid number of operands!");
1884    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1885  }
1886
1887  void addVecListOperands(MCInst &Inst, unsigned N) const {
1888    assert(N == 1 && "Invalid number of operands!");
1889    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1890  }
1891
1892  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1893    assert(N == 2 && "Invalid number of operands!");
1894    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1895    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1896  }
1897
1898  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1899    assert(N == 1 && "Invalid number of operands!");
1900    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1901  }
1902
1903  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1904    assert(N == 1 && "Invalid number of operands!");
1905    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1906  }
1907
1908  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1909    assert(N == 1 && "Invalid number of operands!");
1910    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1911  }
1912
1913  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1914    assert(N == 1 && "Invalid number of operands!");
1915    // The immediate encodes the type of constant as well as the value.
1916    // Mask in that this is an i8 splat.
1917    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1918    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1919  }
1920
1921  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1922    assert(N == 1 && "Invalid number of operands!");
1923    // The immediate encodes the type of constant as well as the value.
1924    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1925    unsigned Value = CE->getValue();
1926    if (Value >= 256)
1927      Value = (Value >> 8) | 0xa00;
1928    else
1929      Value |= 0x800;
1930    Inst.addOperand(MCOperand::CreateImm(Value));
1931  }
1932
1933  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1934    assert(N == 1 && "Invalid number of operands!");
1935    // The immediate encodes the type of constant as well as the value.
1936    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1937    unsigned Value = CE->getValue();
1938    if (Value >= 256 && Value <= 0xff00)
1939      Value = (Value >> 8) | 0x200;
1940    else if (Value > 0xffff && Value <= 0xff0000)
1941      Value = (Value >> 16) | 0x400;
1942    else if (Value > 0xffffff)
1943      Value = (Value >> 24) | 0x600;
1944    Inst.addOperand(MCOperand::CreateImm(Value));
1945  }
1946
1947  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1948    assert(N == 1 && "Invalid number of operands!");
1949    // The immediate encodes the type of constant as well as the value.
1950    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1951    unsigned Value = CE->getValue();
1952    if (Value >= 256 && Value <= 0xffff)
1953      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1954    else if (Value > 0xffff && Value <= 0xffffff)
1955      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1956    else if (Value > 0xffffff)
1957      Value = (Value >> 24) | 0x600;
1958    Inst.addOperand(MCOperand::CreateImm(Value));
1959  }
1960
1961  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1962    assert(N == 1 && "Invalid number of operands!");
1963    // The immediate encodes the type of constant as well as the value.
1964    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1965    unsigned Value = ~CE->getValue();
1966    if (Value >= 256 && Value <= 0xffff)
1967      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1968    else if (Value > 0xffff && Value <= 0xffffff)
1969      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1970    else if (Value > 0xffffff)
1971      Value = (Value >> 24) | 0x600;
1972    Inst.addOperand(MCOperand::CreateImm(Value));
1973  }
1974
1975  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1976    assert(N == 1 && "Invalid number of operands!");
1977    // The immediate encodes the type of constant as well as the value.
1978    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1979    uint64_t Value = CE->getValue();
1980    unsigned Imm = 0;
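    // Collapse to one bit per byte; for a valid i64 splat each byte of the
    // value is either 0x00 or 0xff.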
1981    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1982      Imm |= (Value & 1) << i;
1983    }
1984    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1985  }
1986
1987  virtual void print(raw_ostream &OS) const;
1988
1989  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1990    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1991    Op->ITMask.Mask = Mask;
1992    Op->StartLoc = S;
1993    Op->EndLoc = S;
1994    return Op;
1995  }
1996
1997  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1998    ARMOperand *Op = new ARMOperand(k_CondCode);
1999    Op->CC.Val = CC;
2000    Op->StartLoc = S;
2001    Op->EndLoc = S;
2002    return Op;
2003  }
2004
2005  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2006    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2007    Op->Cop.Val = CopVal;
2008    Op->StartLoc = S;
2009    Op->EndLoc = S;
2010    return Op;
2011  }
2012
2013  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2014    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2015    Op->Cop.Val = CopVal;
2016    Op->StartLoc = S;
2017    Op->EndLoc = S;
2018    return Op;
2019  }
2020
2021  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2022    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2023    Op->Cop.Val = Val;
2024    Op->StartLoc = S;
2025    Op->EndLoc = E;
2026    return Op;
2027  }
2028
2029  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2030    ARMOperand *Op = new ARMOperand(k_CCOut);
2031    Op->Reg.RegNum = RegNum;
2032    Op->StartLoc = S;
2033    Op->EndLoc = S;
2034    return Op;
2035  }
2036
2037  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2038    ARMOperand *Op = new ARMOperand(k_Token);
2039    Op->Tok.Data = Str.data();
2040    Op->Tok.Length = Str.size();
2041    Op->StartLoc = S;
2042    Op->EndLoc = S;
2043    return Op;
2044  }
2045
2046  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2047    ARMOperand *Op = new ARMOperand(k_Register);
2048    Op->Reg.RegNum = RegNum;
2049    Op->StartLoc = S;
2050    Op->EndLoc = E;
2051    return Op;
2052  }
2053
2054  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2055                                           unsigned SrcReg,
2056                                           unsigned ShiftReg,
2057                                           unsigned ShiftImm,
2058                                           SMLoc S, SMLoc E) {
2059    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2060    Op->RegShiftedReg.ShiftTy = ShTy;
2061    Op->RegShiftedReg.SrcReg = SrcReg;
2062    Op->RegShiftedReg.ShiftReg = ShiftReg;
2063    Op->RegShiftedReg.ShiftImm = ShiftImm;
2064    Op->StartLoc = S;
2065    Op->EndLoc = E;
2066    return Op;
2067  }
2068
2069  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2070                                            unsigned SrcReg,
2071                                            unsigned ShiftImm,
2072                                            SMLoc S, SMLoc E) {
2073    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2074    Op->RegShiftedImm.ShiftTy = ShTy;
2075    Op->RegShiftedImm.SrcReg = SrcReg;
2076    Op->RegShiftedImm.ShiftImm = ShiftImm;
2077    Op->StartLoc = S;
2078    Op->EndLoc = E;
2079    return Op;
2080  }
2081
2082  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2083                                   SMLoc S, SMLoc E) {
2084    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2085    Op->ShifterImm.isASR = isASR;
2086    Op->ShifterImm.Imm = Imm;
2087    Op->StartLoc = S;
2088    Op->EndLoc = E;
2089    return Op;
2090  }
2091
2092  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2093    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2094    Op->RotImm.Imm = Imm;
2095    Op->StartLoc = S;
2096    Op->EndLoc = E;
2097    return Op;
2098  }
2099
2100  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2101                                    SMLoc S, SMLoc E) {
2102    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2103    Op->Bitfield.LSB = LSB;
2104    Op->Bitfield.Width = Width;
2105    Op->StartLoc = S;
2106    Op->EndLoc = E;
2107    return Op;
2108  }
2109
2110  static ARMOperand *
2111  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2112                SMLoc StartLoc, SMLoc EndLoc) {
2113    KindTy Kind = k_RegisterList;
2114
2115    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2116      Kind = k_DPRRegisterList;
2117    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2118             contains(Regs.front().first))
2119      Kind = k_SPRRegisterList;
2120
2121    ARMOperand *Op = new ARMOperand(Kind);
2122    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2123           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2124      Op->Registers.push_back(I->first);
2125    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2126    Op->StartLoc = StartLoc;
2127    Op->EndLoc = EndLoc;
2128    return Op;
2129  }
2130
2131  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2132                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2133    ARMOperand *Op = new ARMOperand(k_VectorList);
2134    Op->VectorList.RegNum = RegNum;
2135    Op->VectorList.Count = Count;
2136    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2137    Op->StartLoc = S;
2138    Op->EndLoc = E;
2139    return Op;
2140  }
2141
2142  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2143                                              bool isDoubleSpaced,
2144                                              SMLoc S, SMLoc E) {
2145    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2146    Op->VectorList.RegNum = RegNum;
2147    Op->VectorList.Count = Count;
2148    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2149    Op->StartLoc = S;
2150    Op->EndLoc = E;
2151    return Op;
2152  }
2153
2154  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2155                                             unsigned Index,
2156                                             bool isDoubleSpaced,
2157                                             SMLoc S, SMLoc E) {
2158    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2159    Op->VectorList.RegNum = RegNum;
2160    Op->VectorList.Count = Count;
2161    Op->VectorList.LaneIndex = Index;
2162    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2163    Op->StartLoc = S;
2164    Op->EndLoc = E;
2165    return Op;
2166  }
2167
2168  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2169                                       MCContext &Ctx) {
2170    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2171    Op->VectorIndex.Val = Idx;
2172    Op->StartLoc = S;
2173    Op->EndLoc = E;
2174    return Op;
2175  }
2176
2177  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2178    ARMOperand *Op = new ARMOperand(k_Immediate);
2179    Op->Imm.Val = Val;
2180    Op->StartLoc = S;
2181    Op->EndLoc = E;
2182    return Op;
2183  }
2184
2185  static ARMOperand *CreateMem(unsigned BaseRegNum,
2186                               const MCConstantExpr *OffsetImm,
2187                               unsigned OffsetRegNum,
2188                               ARM_AM::ShiftOpc ShiftType,
2189                               unsigned ShiftImm,
2190                               unsigned Alignment,
2191                               bool isNegative,
2192                               SMLoc S, SMLoc E) {
2193    ARMOperand *Op = new ARMOperand(k_Memory);
2194    Op->Memory.BaseRegNum = BaseRegNum;
2195    Op->Memory.OffsetImm = OffsetImm;
2196    Op->Memory.OffsetRegNum = OffsetRegNum;
2197    Op->Memory.ShiftType = ShiftType;
2198    Op->Memory.ShiftImm = ShiftImm;
2199    Op->Memory.Alignment = Alignment;
2200    Op->Memory.isNegative = isNegative;
2201    Op->StartLoc = S;
2202    Op->EndLoc = E;
2203    return Op;
2204  }
2205
2206  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2207                                      ARM_AM::ShiftOpc ShiftTy,
2208                                      unsigned ShiftImm,
2209                                      SMLoc S, SMLoc E) {
2210    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2211    Op->PostIdxReg.RegNum = RegNum;
2212    Op->PostIdxReg.isAdd = isAdd;
2213    Op->PostIdxReg.ShiftTy = ShiftTy;
2214    Op->PostIdxReg.ShiftImm = ShiftImm;
2215    Op->StartLoc = S;
2216    Op->EndLoc = E;
2217    return Op;
2218  }
2219
2220  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2221    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2222    Op->MBOpt.Val = Opt;
2223    Op->StartLoc = S;
2224    Op->EndLoc = S;
2225    return Op;
2226  }
2227
2228  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2229    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2230    Op->IFlags.Val = IFlags;
2231    Op->StartLoc = S;
2232    Op->EndLoc = S;
2233    return Op;
2234  }
2235
2236  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2237    ARMOperand *Op = new ARMOperand(k_MSRMask);
2238    Op->MMask.Val = MMask;
2239    Op->StartLoc = S;
2240    Op->EndLoc = S;
2241    return Op;
2242  }
2243};
2244
2245} // end anonymous namespace.
2246
2247void ARMOperand::print(raw_ostream &OS) const {
2248  switch (Kind) {
2249  case k_CondCode:
2250    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2251    break;
2252  case k_CCOut:
2253    OS << "<ccout " << getReg() << ">";
2254    break;
2255  case k_ITCondMask: {
2256    static const char *MaskStr[] = {
2257      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2258      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2259    };
2260    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2261    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2262    break;
2263  }
2264  case k_CoprocNum:
2265    OS << "<coprocessor number: " << getCoproc() << ">";
2266    break;
2267  case k_CoprocReg:
2268    OS << "<coprocessor register: " << getCoproc() << ">";
2269    break;
2270  case k_CoprocOption:
2271    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2272    break;
2273  case k_MSRMask:
2274    OS << "<mask: " << getMSRMask() << ">";
2275    break;
2276  case k_Immediate:
2277    getImm()->print(OS);
2278    break;
2279  case k_MemBarrierOpt:
2280    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2281    break;
2282  case k_Memory:
2283    OS << "<memory "
2284       << " base:" << Memory.BaseRegNum;
2285    OS << ">";
2286    break;
2287  case k_PostIndexRegister:
2288    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2289       << PostIdxReg.RegNum;
2290    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2291      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2292         << PostIdxReg.ShiftImm;
2293    OS << ">";
2294    break;
2295  case k_ProcIFlags: {
2296    OS << "<ARM_PROC::";
2297    unsigned IFlags = getProcIFlags();
2298    for (int i=2; i >= 0; --i)
2299      if (IFlags & (1 << i))
2300        OS << ARM_PROC::IFlagsToString(1 << i);
2301    OS << ">";
2302    break;
2303  }
2304  case k_Register:
2305    OS << "<register " << getReg() << ">";
2306    break;
2307  case k_ShifterImmediate:
2308    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2309       << " #" << ShifterImm.Imm << ">";
2310    break;
2311  case k_ShiftedRegister:
2312    OS << "<so_reg_reg "
2313       << RegShiftedReg.SrcReg << " "
2314       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2315       << " " << RegShiftedReg.ShiftReg << ">";
2316    break;
2317  case k_ShiftedImmediate:
2318    OS << "<so_reg_imm "
2319       << RegShiftedImm.SrcReg << " "
2320       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2321       << " #" << RegShiftedImm.ShiftImm << ">";
2322    break;
2323  case k_RotateImmediate:
2324    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2325    break;
2326  case k_BitfieldDescriptor:
2327    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2328       << ", width: " << Bitfield.Width << ">";
2329    break;
2330  case k_RegisterList:
2331  case k_DPRRegisterList:
2332  case k_SPRRegisterList: {
2333    OS << "<register_list ";
2334
2335    const SmallVectorImpl<unsigned> &RegList = getRegList();
2336    for (SmallVectorImpl<unsigned>::const_iterator
2337           I = RegList.begin(), E = RegList.end(); I != E; ) {
2338      OS << *I;
2339      if (++I < E) OS << ", ";
2340    }
2341
2342    OS << ">";
2343    break;
2344  }
2345  case k_VectorList:
2346    OS << "<vector_list " << VectorList.Count << " * "
2347       << VectorList.RegNum << ">";
2348    break;
2349  case k_VectorListAllLanes:
2350    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2351       << VectorList.RegNum << ">";
2352    break;
2353  case k_VectorListIndexed:
2354    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2355       << VectorList.Count << " * " << VectorList.RegNum << ">";
2356    break;
2357  case k_Token:
2358    OS << "'" << getToken() << "'";
2359    break;
2360  case k_VectorIndex:
2361    OS << "<vectorindex " << getVectorIndex() << ">";
2362    break;
2363  }
2364}
2365
2366/// @name Auto-generated Match Functions
2367/// {
2368
2369static unsigned MatchRegisterName(StringRef Name);
2370
2371/// }
2372
2373bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2374                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2375  StartLoc = Parser.getTok().getLoc();
2376  RegNo = tryParseRegister();
2377  EndLoc = Parser.getTok().getLoc();
2378
2379  return (RegNo == (unsigned)-1);
2380}
2381
2382/// Try to parse a register name.  The token must be an Identifier when called,
2383/// and if it is a register name the token is eaten and the register number is
2384/// returned.  Otherwise return -1.
2385///
2386int ARMAsmParser::tryParseRegister() {
2387  const AsmToken &Tok = Parser.getTok();
2388  if (Tok.isNot(AsmToken::Identifier)) return -1;
2389
2390  std::string lowerCase = Tok.getString().lower();
2391  unsigned RegNum = MatchRegisterName(lowerCase);
2392  if (!RegNum) {
2393    RegNum = StringSwitch<unsigned>(lowerCase)
2394      .Case("r13", ARM::SP)
2395      .Case("r14", ARM::LR)
2396      .Case("r15", ARM::PC)
2397      .Case("ip", ARM::R12)
2398      // Additional register name aliases for 'gas' compatibility.
2399      .Case("a1", ARM::R0)
2400      .Case("a2", ARM::R1)
2401      .Case("a3", ARM::R2)
2402      .Case("a4", ARM::R3)
2403      .Case("v1", ARM::R4)
2404      .Case("v2", ARM::R5)
2405      .Case("v3", ARM::R6)
2406      .Case("v4", ARM::R7)
2407      .Case("v5", ARM::R8)
2408      .Case("v6", ARM::R9)
2409      .Case("v7", ARM::R10)
2410      .Case("v8", ARM::R11)
2411      .Case("sb", ARM::R9)
2412      .Case("sl", ARM::R10)
2413      .Case("fp", ARM::R11)
2414      .Default(0);
2415  }
2416  if (!RegNum) {
2417    // Check for aliases registered via .req. Canonicalize to lower case.
2418    // That's more consistent since register names are case insensitive, and
2419    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2420    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2421    // If no match, return failure.
2422    if (Entry == RegisterReqs.end())
2423      return -1;
2424    Parser.Lex(); // Eat identifier token.
2425    return Entry->getValue();
2426  }
2427
2428  Parser.Lex(); // Eat identifier token.
2429
2430  return RegNum;
2431}
2432
2433// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2434// If a recoverable error occurs, return 1. If an irrecoverable error
2435// occurs, return -1. An irrecoverable error is one where tokens have been
2436// consumed in the process of trying to parse the shifter (i.e., when it is
2437// indeed a shifter operand, but malformed).
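// For example, in "add r0, r1, r2, lsl #3" this is called after "r2" has been
// parsed as an operand, and it folds that register together with "lsl #3"
// into a single shifted-register operand.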
2438int ARMAsmParser::tryParseShiftRegister(
2439                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2440  SMLoc S = Parser.getTok().getLoc();
2441  const AsmToken &Tok = Parser.getTok();
2442  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2443
2444  std::string lowerCase = Tok.getString().lower();
2445  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2446      .Case("asl", ARM_AM::lsl)
2447      .Case("lsl", ARM_AM::lsl)
2448      .Case("lsr", ARM_AM::lsr)
2449      .Case("asr", ARM_AM::asr)
2450      .Case("ror", ARM_AM::ror)
2451      .Case("rrx", ARM_AM::rrx)
2452      .Default(ARM_AM::no_shift);
2453
2454  if (ShiftTy == ARM_AM::no_shift)
2455    return 1;
2456
2457  Parser.Lex(); // Eat the operator.
2458
2459  // The source register for the shift has already been added to the
2460  // operand list, so we need to pop it off and combine it into the shifted
2461  // register operand instead.
2462  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2463  if (!PrevOp->isReg())
2464    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2465  int SrcReg = PrevOp->getReg();
2466  int64_t Imm = 0;
2467  int ShiftReg = 0;
2468  if (ShiftTy == ARM_AM::rrx) {
2469    // RRX doesn't have an explicit shift amount. The encoder expects
2470    // the shift register to be the same as the source register. Seems odd,
2471    // but OK.
2472    ShiftReg = SrcReg;
2473  } else {
2474    // Figure out if this is shifted by a constant or a register (for non-RRX).
2475    if (Parser.getTok().is(AsmToken::Hash) ||
2476        Parser.getTok().is(AsmToken::Dollar)) {
2477      Parser.Lex(); // Eat hash.
2478      SMLoc ImmLoc = Parser.getTok().getLoc();
2479      const MCExpr *ShiftExpr = 0;
2480      if (getParser().ParseExpression(ShiftExpr)) {
2481        Error(ImmLoc, "invalid immediate shift value");
2482        return -1;
2483      }
2484      // The expression must be evaluatable as an immediate.
2485      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2486      if (!CE) {
2487        Error(ImmLoc, "invalid immediate shift value");
2488        return -1;
2489      }
2490      // Range check the immediate.
2491      // lsl, ror: 0 <= imm <= 31
2492      // lsr, asr: 0 <= imm <= 32
2493      Imm = CE->getValue();
2494      if (Imm < 0 ||
2495          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2496          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2497        Error(ImmLoc, "immediate shift value out of range");
2498        return -1;
2499      }
2500      // A shift by zero is a nop. Always send it through as lsl.
2501      // ('as' compatibility)
2502      if (Imm == 0)
2503        ShiftTy = ARM_AM::lsl;
2504    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2505      ShiftReg = tryParseRegister();
2506      SMLoc L = Parser.getTok().getLoc();
2507      if (ShiftReg == -1) {
2508        Error (L, "expected immediate or register in shift operand");
2509        return -1;
2510      }
2511    } else {
2512      Error (Parser.getTok().getLoc(),
2513                    "expected immediate or register in shift operand");
2514      return -1;
2515    }
2516  }
2517
2518  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2519    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2520                                                         ShiftReg, Imm,
2521                                               S, Parser.getTok().getLoc()));
2522  else
2523    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2524                                               S, Parser.getTok().getLoc()));
2525
2526  return 0;
2527}
2528
2529
2530/// Try to parse a register name.  The token must be an Identifier when called.
2531/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2532/// if there is a "writeback". Returns 'true' if it's not a register.
2533///
2534/// TODO this is likely to change to allow different register types and or to
2535/// parse for a specific register type.
2536bool ARMAsmParser::
2537tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2538  SMLoc S = Parser.getTok().getLoc();
2539  int RegNo = tryParseRegister();
2540  if (RegNo == -1)
2541    return true;
2542
2543  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2544
2545  const AsmToken &ExclaimTok = Parser.getTok();
2546  if (ExclaimTok.is(AsmToken::Exclaim)) {
2547    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2548                                               ExclaimTok.getLoc()));
2549    Parser.Lex(); // Eat exclaim token
2550    return false;
2551  }
2552
2553  // Also check for an index operand. This is only legal for vector registers,
2554  // but that'll get caught OK in operand matching, so we don't need to
2555  // explicitly filter everything else out here.
2556  if (Parser.getTok().is(AsmToken::LBrac)) {
2557    SMLoc SIdx = Parser.getTok().getLoc();
2558    Parser.Lex(); // Eat left bracket token.
2559
2560    const MCExpr *ImmVal;
2561    if (getParser().ParseExpression(ImmVal))
2562      return true;
2563    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2564    if (!MCE)
2565      return TokError("immediate value expected for vector index");
2566
2567    SMLoc E = Parser.getTok().getLoc();
2568    if (Parser.getTok().isNot(AsmToken::RBrac))
2569      return Error(E, "']' expected");
2570
2571    Parser.Lex(); // Eat right bracket token.
2572
2573    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2574                                                     SIdx, E,
2575                                                     getContext()));
2576  }
2577
2578  return false;
2579}
2580
2581/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2582/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2583/// "c5", ...
2584static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2585  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2586  // but efficient.
2587  switch (Name.size()) {
2588  default: return -1;
2589  case 2:
2590    if (Name[0] != CoprocOp)
2591      return -1;
2592    switch (Name[1]) {
2593    default:  return -1;
2594    case '0': return 0;
2595    case '1': return 1;
2596    case '2': return 2;
2597    case '3': return 3;
2598    case '4': return 4;
2599    case '5': return 5;
2600    case '6': return 6;
2601    case '7': return 7;
2602    case '8': return 8;
2603    case '9': return 9;
2604    }
2605  case 3:
2606    if (Name[0] != CoprocOp || Name[1] != '1')
2607      return -1;
2608    switch (Name[2]) {
2609    default:  return -1;
2610    case '0': return 10;
2611    case '1': return 11;
2612    case '2': return 12;
2613    case '3': return 13;
2614    case '4': return 14;
2615    case '5': return 15;
2616    }
2617  }
2618}
2619
2620/// parseITCondCode - Try to parse a condition code for an IT instruction.
2621ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2622parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2623  SMLoc S = Parser.getTok().getLoc();
2624  const AsmToken &Tok = Parser.getTok();
2625  if (!Tok.is(AsmToken::Identifier))
2626    return MatchOperand_NoMatch;
2627  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2628    .Case("eq", ARMCC::EQ)
2629    .Case("ne", ARMCC::NE)
2630    .Case("hs", ARMCC::HS)
2631    .Case("cs", ARMCC::HS)
2632    .Case("lo", ARMCC::LO)
2633    .Case("cc", ARMCC::LO)
2634    .Case("mi", ARMCC::MI)
2635    .Case("pl", ARMCC::PL)
2636    .Case("vs", ARMCC::VS)
2637    .Case("vc", ARMCC::VC)
2638    .Case("hi", ARMCC::HI)
2639    .Case("ls", ARMCC::LS)
2640    .Case("ge", ARMCC::GE)
2641    .Case("lt", ARMCC::LT)
2642    .Case("gt", ARMCC::GT)
2643    .Case("le", ARMCC::LE)
2644    .Case("al", ARMCC::AL)
2645    .Default(~0U);
2646  if (CC == ~0U)
2647    return MatchOperand_NoMatch;
2648  Parser.Lex(); // Eat the token.
2649
2650  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2651
2652  return MatchOperand_Success;
2653}
2654
2655/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2656/// token must be an Identifier when called, and if it is a coprocessor
2657/// number, the token is eaten and the operand is added to the operand list.
2658ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2659parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2660  SMLoc S = Parser.getTok().getLoc();
2661  const AsmToken &Tok = Parser.getTok();
2662  if (Tok.isNot(AsmToken::Identifier))
2663    return MatchOperand_NoMatch;
2664
2665  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2666  if (Num == -1)
2667    return MatchOperand_NoMatch;
2668
2669  Parser.Lex(); // Eat identifier token.
2670  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2671  return MatchOperand_Success;
2672}
2673
2674/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2675/// token must be an Identifier when called, and if it is a coprocessor
2676/// register, the token is eaten and the operand is added to the operand list.
2677ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2678parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2679  SMLoc S = Parser.getTok().getLoc();
2680  const AsmToken &Tok = Parser.getTok();
2681  if (Tok.isNot(AsmToken::Identifier))
2682    return MatchOperand_NoMatch;
2683
2684  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2685  if (Reg == -1)
2686    return MatchOperand_NoMatch;
2687
2688  Parser.Lex(); // Eat identifier token.
2689  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2690  return MatchOperand_Success;
2691}
2692
2693/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2694/// coproc_option : '{' imm0_255 '}'
2695ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2696parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2697  SMLoc S = Parser.getTok().getLoc();
2698
2699  // If this isn't a '{', this isn't a coprocessor immediate operand.
2700  if (Parser.getTok().isNot(AsmToken::LCurly))
2701    return MatchOperand_NoMatch;
2702  Parser.Lex(); // Eat the '{'
2703
2704  const MCExpr *Expr;
2705  SMLoc Loc = Parser.getTok().getLoc();
2706  if (getParser().ParseExpression(Expr)) {
2707    Error(Loc, "illegal expression");
2708    return MatchOperand_ParseFail;
2709  }
2710  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2711  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2712    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2713    return MatchOperand_ParseFail;
2714  }
2715  int Val = CE->getValue();
2716
2717  // Check for and consume the closing '}'
2718  if (Parser.getTok().isNot(AsmToken::RCurly))
2719    return MatchOperand_ParseFail;
2720  SMLoc E = Parser.getTok().getLoc();
2721  Parser.Lex(); // Eat the '}'
2722
2723  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2724  return MatchOperand_Success;
2725}
2726
2727// For register list parsing, we need to map from raw GPR register numbering
2728// to the enumeration values. The enumeration values aren't sorted by
2729// register number due to our using "sp", "lr" and "pc" as canonical names.
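// For GPRs the successor of r12 is sp, then lr, then pc, wrapping back to r0.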
2730static unsigned getNextRegister(unsigned Reg) {
2731  // If this is a GPR, we need to do it manually, otherwise we can rely
2732  // on the sort ordering of the enumeration since the other reg-classes
2733  // are sane.
2734  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2735    return Reg + 1;
2736  switch(Reg) {
2737  default: llvm_unreachable("Invalid GPR number!");
2738  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2739  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2740  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2741  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2742  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2743  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2744  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2745  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2746  }
2747}
2748
2749// Return the low-subreg of a given Q register.
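// e.g., Q3 maps to D6.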
2750static unsigned getDRegFromQReg(unsigned QReg) {
2751  switch (QReg) {
2752  default: llvm_unreachable("expected a Q register!");
2753  case ARM::Q0:  return ARM::D0;
2754  case ARM::Q1:  return ARM::D2;
2755  case ARM::Q2:  return ARM::D4;
2756  case ARM::Q3:  return ARM::D6;
2757  case ARM::Q4:  return ARM::D8;
2758  case ARM::Q5:  return ARM::D10;
2759  case ARM::Q6:  return ARM::D12;
2760  case ARM::Q7:  return ARM::D14;
2761  case ARM::Q8:  return ARM::D16;
2762  case ARM::Q9:  return ARM::D18;
2763  case ARM::Q10: return ARM::D20;
2764  case ARM::Q11: return ARM::D22;
2765  case ARM::Q12: return ARM::D24;
2766  case ARM::Q13: return ARM::D26;
2767  case ARM::Q14: return ARM::D28;
2768  case ARM::Q15: return ARM::D30;
2769  }
2770}
2771
2772/// Parse a register list.
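/// e.g., "{r0, r4-r7, lr}" for GPRs or "{d0-d7}" for D registers.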
2773bool ARMAsmParser::
2774parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2775  assert(Parser.getTok().is(AsmToken::LCurly) &&
2776         "Token is not a Left Curly Brace");
2777  SMLoc S = Parser.getTok().getLoc();
2778  Parser.Lex(); // Eat '{' token.
2779  SMLoc RegLoc = Parser.getTok().getLoc();
2780
2781  // Check the first register in the list to see what register class
2782  // this is a list of.
2783  int Reg = tryParseRegister();
2784  if (Reg == -1)
2785    return Error(RegLoc, "register expected");
2786
2787  // The reglist instructions have at most 16 registers, so reserve
2788  // space for that many.
2789  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2790
2791  // Allow Q regs and just interpret them as the two D sub-registers.
2792  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2793    Reg = getDRegFromQReg(Reg);
2794    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2795    ++Reg;
2796  }
2797  const MCRegisterClass *RC;
2798  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2799    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2800  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2801    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2802  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2803    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2804  else
2805    return Error(RegLoc, "invalid register in register list");
2806
2807  // Store the register.
2808  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2809
2810  // This starts immediately after the first register token in the list,
2811  // so we can see either a comma or a minus (range separator) as a legal
2812  // next token.
2813  while (Parser.getTok().is(AsmToken::Comma) ||
2814         Parser.getTok().is(AsmToken::Minus)) {
2815    if (Parser.getTok().is(AsmToken::Minus)) {
2816      Parser.Lex(); // Eat the minus.
2817      SMLoc EndLoc = Parser.getTok().getLoc();
2818      int EndReg = tryParseRegister();
2819      if (EndReg == -1)
2820        return Error(EndLoc, "register expected");
2821      // Allow Q regs and just interpret them as the two D sub-registers.
2822      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2823        EndReg = getDRegFromQReg(EndReg) + 1;
2824      // If the register is the same as the start reg, there's nothing
2825      // more to do.
2826      if (Reg == EndReg)
2827        continue;
2828      // The register must be in the same register class as the first.
2829      if (!RC->contains(EndReg))
2830        return Error(EndLoc, "invalid register in register list");
2831      // Ranges must go from low to high.
2832      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2833        return Error(EndLoc, "bad range in register list");
2834
2835      // Add all the registers in the range to the register list.
2836      while (Reg != EndReg) {
2837        Reg = getNextRegister(Reg);
2838        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2839      }
2840      continue;
2841    }
2842    Parser.Lex(); // Eat the comma.
2843    RegLoc = Parser.getTok().getLoc();
2844    int OldReg = Reg;
2845    const AsmToken RegTok = Parser.getTok();
2846    Reg = tryParseRegister();
2847    if (Reg == -1)
2848      return Error(RegLoc, "register expected");
2849    // Allow Q regs and just interpret them as the two D sub-registers.
2850    bool isQReg = false;
2851    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2852      Reg = getDRegFromQReg(Reg);
2853      isQReg = true;
2854    }
2855    // The register must be in the same register class as the first.
2856    if (!RC->contains(Reg))
2857      return Error(RegLoc, "invalid register in register list");
2858    // List must be monotonically increasing.
2859    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2860      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2861        Warning(RegLoc, "register list not in ascending order");
2862      else
2863        return Error(RegLoc, "register list not in ascending order");
2864    }
2865    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2866      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2867              ") in register list");
2868      continue;
2869    }
2870    // VFP register lists must also be contiguous.
2871    // It's OK to use the enumeration values directly here, as the
2872    // VFP register classes have the enum sorted properly.
2873    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2874        Reg != OldReg + 1)
2875      return Error(RegLoc, "non-contiguous register range");
2876    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2877    if (isQReg)
2878      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2879  }
2880
2881  SMLoc E = Parser.getTok().getLoc();
2882  if (Parser.getTok().isNot(AsmToken::RCurly))
2883    return Error(E, "'}' expected");
2884  Parser.Lex(); // Eat '}' token.
2885
2886  // Push the register list operand.
2887  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2888
2889  // The ARM system instruction variants for LDM/STM have a '^' token here.
2890  if (Parser.getTok().is(AsmToken::Caret)) {
2891    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2892    Parser.Lex(); // Eat '^' token.
2893  }
2894
2895  return false;
2896}
2897
2898// Helper function to parse the lane index for vector lists.
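// For example, this handles the "[2]" in "d3[2]" (an indexed lane), the "[]"
// in "d3[]" (the 'all lanes' form), and returns NoLanes when no '[' follows
// the register.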
2899ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2900parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2901  Index = 0; // Always return a defined index value.
2902  if (Parser.getTok().is(AsmToken::LBrac)) {
2903    Parser.Lex(); // Eat the '['.
2904    if (Parser.getTok().is(AsmToken::RBrac)) {
2905      // "Dn[]" is the 'all lanes' syntax.
2906      LaneKind = AllLanes;
2907      Parser.Lex(); // Eat the ']'.
2908      return MatchOperand_Success;
2909    }
2910
2911    // There's an optional '#' token here. Normally there wouldn't be, but
2912    // inline assembly puts one in, and it's friendly to accept that.
2913    if (Parser.getTok().is(AsmToken::Hash))
2914      Parser.Lex(); // Eat the '#'
2915
2916    const MCExpr *LaneIndex;
2917    SMLoc Loc = Parser.getTok().getLoc();
2918    if (getParser().ParseExpression(LaneIndex)) {
2919      Error(Loc, "illegal expression");
2920      return MatchOperand_ParseFail;
2921    }
2922    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2923    if (!CE) {
2924      Error(Loc, "lane index must be empty or an integer");
2925      return MatchOperand_ParseFail;
2926    }
2927    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2928      Error(Parser.getTok().getLoc(), "']' expected");
2929      return MatchOperand_ParseFail;
2930    }
2931    Parser.Lex(); // Eat the ']'.
2932    int64_t Val = CE->getValue();
2933
2934    // FIXME: Make this range check context sensitive for .8, .16, .32.
2935    if (Val < 0 || Val > 7) {
2936      Error(Parser.getTok().getLoc(), "lane index out of range");
2937      return MatchOperand_ParseFail;
2938    }
2939    Index = Val;
2940    LaneKind = IndexedLane;
2941    return MatchOperand_Success;
2942  }
2943  LaneKind = NoLanes;
2944  return MatchOperand_Success;
2945}
2946
2947// Parse a vector register list.
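// Accepted forms include a braced list such as "{d0, d1, d2, d3}", a range
// like "{d0-d3}", either of those with a lane suffix ("{d0[], d1[]}" or
// "{d0[1], d1[1]}"), and the bare "d0" / "q0" extension described below.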
2948ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2949parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2950  VectorLaneTy LaneKind;
2951  unsigned LaneIndex;
2952  SMLoc S = Parser.getTok().getLoc();
2953  // As an extension (to match gas), support a plain D register or Q register
2954  // (without enclosing curly braces) as a single or double entry list,
2955  // respectively.
2956  if (Parser.getTok().is(AsmToken::Identifier)) {
2957    int Reg = tryParseRegister();
2958    if (Reg == -1)
2959      return MatchOperand_NoMatch;
2960    SMLoc E = Parser.getTok().getLoc();
2961    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2962      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2963      if (Res != MatchOperand_Success)
2964        return Res;
2965      switch (LaneKind) {
2966      case NoLanes:
2967        E = Parser.getTok().getLoc();
2968        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2969        break;
2970      case AllLanes:
2971        E = Parser.getTok().getLoc();
2972        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2973                                                                S, E));
2974        break;
2975      case IndexedLane:
2976        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2977                                                               LaneIndex,
2978                                                               false, S, E));
2979        break;
2980      }
2981      return MatchOperand_Success;
2982    }
2983    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2984      Reg = getDRegFromQReg(Reg);
2985      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2986      if (Res != MatchOperand_Success)
2987        return Res;
2988      switch (LaneKind) {
2989      case NoLanes:
2990        E = Parser.getTok().getLoc();
2991        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
2992                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
2993        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2994        break;
2995      case AllLanes:
2996        E = Parser.getTok().getLoc();
2997        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
2998                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
2999        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3000                                                                S, E));
3001        break;
3002      case IndexedLane:
3003        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3004                                                               LaneIndex,
3005                                                               false, S, E));
3006        break;
3007      }
3008      return MatchOperand_Success;
3009    }
3010    Error(S, "vector register expected");
3011    return MatchOperand_ParseFail;
3012  }
3013
3014  if (Parser.getTok().isNot(AsmToken::LCurly))
3015    return MatchOperand_NoMatch;
3016
3017  Parser.Lex(); // Eat '{' token.
3018  SMLoc RegLoc = Parser.getTok().getLoc();
3019
3020  int Reg = tryParseRegister();
3021  if (Reg == -1) {
3022    Error(RegLoc, "register expected");
3023    return MatchOperand_ParseFail;
3024  }
3025  unsigned Count = 1;
3026  int Spacing = 0;
3027  unsigned FirstReg = Reg;
3028  // The list is of D registers, but we also allow Q regs and just interpret
3029  // them as the two D sub-registers.
3030  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3031    FirstReg = Reg = getDRegFromQReg(Reg);
3032    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3033                 // it's ambiguous with four-register single spaced.
3034    ++Reg;
3035    ++Count;
3036  }
3037  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3038    return MatchOperand_ParseFail;
3039
3040  while (Parser.getTok().is(AsmToken::Comma) ||
3041         Parser.getTok().is(AsmToken::Minus)) {
3042    if (Parser.getTok().is(AsmToken::Minus)) {
3043      if (!Spacing)
3044        Spacing = 1; // Register range implies a single spaced list.
3045      else if (Spacing == 2) {
3046        Error(Parser.getTok().getLoc(),
3047              "sequential registers in double spaced list");
3048        return MatchOperand_ParseFail;
3049      }
3050      Parser.Lex(); // Eat the minus.
3051      SMLoc EndLoc = Parser.getTok().getLoc();
3052      int EndReg = tryParseRegister();
3053      if (EndReg == -1) {
3054        Error(EndLoc, "register expected");
3055        return MatchOperand_ParseFail;
3056      }
3057      // Allow Q regs and just interpret them as the two D sub-registers.
3058      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3059        EndReg = getDRegFromQReg(EndReg) + 1;
3060      // If the register is the same as the start reg, there's nothing
3061      // more to do.
3062      if (Reg == EndReg)
3063        continue;
3064      // The register must be in the same register class as the first.
3065      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3066        Error(EndLoc, "invalid register in register list");
3067        return MatchOperand_ParseFail;
3068      }
3069      // Ranges must go from low to high.
3070      if (Reg > EndReg) {
3071        Error(EndLoc, "bad range in register list");
3072        return MatchOperand_ParseFail;
3073      }
3074      // Parse the lane specifier if present.
3075      VectorLaneTy NextLaneKind;
3076      unsigned NextLaneIndex;
3077      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3078        return MatchOperand_ParseFail;
3079      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3080        Error(EndLoc, "mismatched lane index in register list");
3081        return MatchOperand_ParseFail;
3082      }
3083      EndLoc = Parser.getTok().getLoc();
3084
3085      // Add all the registers in the range to the register list.
3086      Count += EndReg - Reg;
3087      Reg = EndReg;
3088      continue;
3089    }
3090    Parser.Lex(); // Eat the comma.
3091    RegLoc = Parser.getTok().getLoc();
3092    int OldReg = Reg;
3093    Reg = tryParseRegister();
3094    if (Reg == -1) {
3095      Error(RegLoc, "register expected");
3096      return MatchOperand_ParseFail;
3097    }
3098    // Vector register lists must be contiguous.
3099    // It's OK to use the enumeration values directly here, as the
3100    // VFP register classes have the enum sorted properly.
3101    //
3102    // The list is of D registers, but we also allow Q regs and just interpret
3103    // them as the two D sub-registers.
3104    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3105      if (!Spacing)
3106        Spacing = 1; // Register range implies a single spaced list.
3107      else if (Spacing == 2) {
3108        Error(RegLoc,
3109              "invalid register in double-spaced list (must be 'D' register')");
3110        return MatchOperand_ParseFail;
3111      }
3112      Reg = getDRegFromQReg(Reg);
3113      if (Reg != OldReg + 1) {
3114        Error(RegLoc, "non-contiguous register range");
3115        return MatchOperand_ParseFail;
3116      }
3117      ++Reg;
3118      Count += 2;
3119      // Parse the lane specifier if present.
3120      VectorLaneTy NextLaneKind;
3121      unsigned NextLaneIndex;
3122      SMLoc EndLoc = Parser.getTok().getLoc();
3123      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3124        return MatchOperand_ParseFail;
3125      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3126        Error(EndLoc, "mismatched lane index in register list");
3127        return MatchOperand_ParseFail;
3128      }
3129      continue;
3130    }
3131    // Normal D register.
3132    // Figure out the register spacing (single or double) of the list if
3133    // we don't know it already.
3134    if (!Spacing)
3135      Spacing = 1 + (Reg == OldReg + 2);
3136
3137    // Just check that it's contiguous and keep going.
3138    if (Reg != OldReg + Spacing) {
3139      Error(RegLoc, "non-contiguous register range");
3140      return MatchOperand_ParseFail;
3141    }
3142    ++Count;
3143    // Parse the lane specifier if present.
3144    VectorLaneTy NextLaneKind;
3145    unsigned NextLaneIndex;
3146    SMLoc EndLoc = Parser.getTok().getLoc();
3147    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3148      return MatchOperand_ParseFail;
3149    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3150      Error(EndLoc, "mismatched lane index in register list");
3151      return MatchOperand_ParseFail;
3152    }
3153  }
3154
3155  SMLoc E = Parser.getTok().getLoc();
3156  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3157    Error(E, "'}' expected");
3158    return MatchOperand_ParseFail;
3159  }
3160  Parser.Lex(); // Eat '}' token.
3161
3162  switch (LaneKind) {
3163  case NoLanes:
3164    // Two-register operands have been converted to the
3165    // composite register classes.
3166    if (Count == 2) {
3167      const MCRegisterClass *RC = (Spacing == 1) ?
3168        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3169        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3170      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3171    }
3172
3173    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3174                                                    (Spacing == 2), S, E));
3175    break;
3176  case AllLanes:
3177    // Two-register operands have been converted to the
3178    // composite register classes.
3179    if (Count == 2) {
3180      const MCRegisterClass *RC = (Spacing == 1) ?
3181        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3182        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3183      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3184    }
3185    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3186                                                            (Spacing == 2),
3187                                                            S, E));
3188    break;
3189  case IndexedLane:
3190    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3191                                                           LaneIndex,
3192                                                           (Spacing == 2),
3193                                                           S, E));
3194    break;
3195  }
3196  return MatchOperand_Success;
3197}
3198
3199/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
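/// For example, this parses the "ish" in "dmb ish"; the accepted option names
/// are the ones listed in the StringSwitch below (e.g. "sy", "ishst", "osh").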
3200ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3201parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3202  SMLoc S = Parser.getTok().getLoc();
3203  const AsmToken &Tok = Parser.getTok();
3204  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3205  StringRef OptStr = Tok.getString();
3206
3207  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3208    .Case("sy",    ARM_MB::SY)
3209    .Case("st",    ARM_MB::ST)
3210    .Case("sh",    ARM_MB::ISH)
3211    .Case("ish",   ARM_MB::ISH)
3212    .Case("shst",  ARM_MB::ISHST)
3213    .Case("ishst", ARM_MB::ISHST)
3214    .Case("nsh",   ARM_MB::NSH)
3215    .Case("un",    ARM_MB::NSH)
3216    .Case("nshst", ARM_MB::NSHST)
3217    .Case("unst",  ARM_MB::NSHST)
3218    .Case("osh",   ARM_MB::OSH)
3219    .Case("oshst", ARM_MB::OSHST)
3220    .Default(~0U);
3221
3222  if (Opt == ~0U)
3223    return MatchOperand_NoMatch;
3224
3225  Parser.Lex(); // Eat identifier token.
3226  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3227  return MatchOperand_Success;
3228}
3229
3230/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
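/// For example, this parses the "if" in "cpsie if"; the operand is either the
/// word "none" or any combination of the letters 'a', 'i' and 'f'.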
3231ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3232parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3233  SMLoc S = Parser.getTok().getLoc();
3234  const AsmToken &Tok = Parser.getTok();
3235  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3236  StringRef IFlagsStr = Tok.getString();
3237
3238  // An iflags string of "none" is interpreted to mean that none of the AIF
3239  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3240  unsigned IFlags = 0;
3241  if (IFlagsStr != "none") {
3242    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3243      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3244        .Case("a", ARM_PROC::A)
3245        .Case("i", ARM_PROC::I)
3246        .Case("f", ARM_PROC::F)
3247        .Default(~0U);
3248
3249      // If some specific iflag is already set, it means that some letter is
3250      // present more than once, this is not acceptable.
3251      if (Flag == ~0U || (IFlags & Flag))
3252        return MatchOperand_NoMatch;
3253
3254      IFlags |= Flag;
3255    }
3256  }
3257
3258  Parser.Lex(); // Eat identifier token.
3259  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3260  return MatchOperand_Success;
3261}
3262
3263/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
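/// For M-class targets this accepts system register names such as "primask"
/// or "basepri" (e.g. "msr primask, r0"); otherwise it accepts "apsr", "cpsr"
/// or "spsr" with an optional flags suffix, e.g. "cpsr_fc" or "apsr_nzcvq".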
3264ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3265parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3266  SMLoc S = Parser.getTok().getLoc();
3267  const AsmToken &Tok = Parser.getTok();
3268  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3269  StringRef Mask = Tok.getString();
3270
3271  if (isMClass()) {
3272    // See ARMv6-M 10.1.1
3273    std::string Name = Mask.lower();
3274    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3275      .Case("apsr", 0)
3276      .Case("iapsr", 1)
3277      .Case("eapsr", 2)
3278      .Case("xpsr", 3)
3279      .Case("ipsr", 5)
3280      .Case("epsr", 6)
3281      .Case("iepsr", 7)
3282      .Case("msp", 8)
3283      .Case("psp", 9)
3284      .Case("primask", 16)
3285      .Case("basepri", 17)
3286      .Case("basepri_max", 18)
3287      .Case("faultmask", 19)
3288      .Case("control", 20)
3289      .Default(~0U);
3290
3291    if (FlagsVal == ~0U)
3292      return MatchOperand_NoMatch;
3293
3294    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3295      // basepri, basepri_max and faultmask only valid for V7m.
3296      return MatchOperand_NoMatch;
3297
3298    Parser.Lex(); // Eat identifier token.
3299    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3300    return MatchOperand_Success;
3301  }
3302
3303  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3304  size_t Start = 0, Next = Mask.find('_');
3305  StringRef Flags = "";
3306  std::string SpecReg = Mask.slice(Start, Next).lower();
3307  if (Next != StringRef::npos)
3308    Flags = Mask.slice(Next+1, Mask.size());
3309
3310  // FlagsVal contains the complete mask:
3311  // 3-0: Mask
3312  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3313  unsigned FlagsVal = 0;
3314
3315  if (SpecReg == "apsr") {
3316    FlagsVal = StringSwitch<unsigned>(Flags)
3317    .Case("nzcvq",  0x8) // same as CPSR_f
3318    .Case("g",      0x4) // same as CPSR_s
3319    .Case("nzcvqg", 0xc) // same as CPSR_fs
3320    .Default(~0U);
3321
3322    if (FlagsVal == ~0U) {
3323      if (!Flags.empty())
3324        return MatchOperand_NoMatch;
3325      else
3326        FlagsVal = 8; // No flag
3327    }
3328  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3329    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3330      Flags = "fc";
3331    for (int i = 0, e = Flags.size(); i != e; ++i) {
3332      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3333      .Case("c", 1)
3334      .Case("x", 2)
3335      .Case("s", 4)
3336      .Case("f", 8)
3337      .Default(~0U);
3338
3339      // If some specific flag is already set, it means that some letter is
3340      // present more than once, this is not acceptable.
3341      if (Flag == ~0U || (FlagsVal & Flag))
3342        return MatchOperand_NoMatch;
3343      FlagsVal |= Flag;
3344    }
3345  } else // No match for special register.
3346    return MatchOperand_NoMatch;
3347
3348  // Special register without flags is NOT equivalent to "fc" flags.
3349  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3350  // two lines would enable gas compatibility at the expense of breaking
3351  // round-tripping.
3352  //
3353  // if (!FlagsVal)
3354  //  FlagsVal = 0x9;
3355
3356  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3357  if (SpecReg == "spsr")
3358    FlagsVal |= 16;
3359
3360  Parser.Lex(); // Eat identifier token.
3361  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3362  return MatchOperand_Success;
3363}
3364
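// parsePKHImm - Parse the shifted-immediate operand of the PKH instructions,
// e.g. the "lsl #8" in "pkhbt r0, r1, r2, lsl #8". 'Op' names the expected
// shift operator and [Low,High] bounds the shift amount.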
3365ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3366parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3367            int Low, int High) {
3368  const AsmToken &Tok = Parser.getTok();
3369  if (Tok.isNot(AsmToken::Identifier)) {
3370    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3371    return MatchOperand_ParseFail;
3372  }
3373  StringRef ShiftName = Tok.getString();
3374  std::string LowerOp = Op.lower();
3375  std::string UpperOp = Op.upper();
3376  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3377    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3378    return MatchOperand_ParseFail;
3379  }
3380  Parser.Lex(); // Eat shift type token.
3381
3382  // There must be a '#' and a shift amount.
3383  if (Parser.getTok().isNot(AsmToken::Hash) &&
3384      Parser.getTok().isNot(AsmToken::Dollar)) {
3385    Error(Parser.getTok().getLoc(), "'#' expected");
3386    return MatchOperand_ParseFail;
3387  }
3388  Parser.Lex(); // Eat hash token.
3389
3390  const MCExpr *ShiftAmount;
3391  SMLoc Loc = Parser.getTok().getLoc();
3392  if (getParser().ParseExpression(ShiftAmount)) {
3393    Error(Loc, "illegal expression");
3394    return MatchOperand_ParseFail;
3395  }
3396  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3397  if (!CE) {
3398    Error(Loc, "constant expression expected");
3399    return MatchOperand_ParseFail;
3400  }
3401  int Val = CE->getValue();
3402  if (Val < Low || Val > High) {
3403    Error(Loc, "immediate value out of range");
3404    return MatchOperand_ParseFail;
3405  }
3406
3407  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3408
3409  return MatchOperand_Success;
3410}
3411
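// parseSetEndImm - Parse the endianness operand of SETEND, i.e. "be" or "le",
// as in "setend be".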
3412ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3413parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3414  const AsmToken &Tok = Parser.getTok();
3415  SMLoc S = Tok.getLoc();
3416  if (Tok.isNot(AsmToken::Identifier)) {
3417    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3418    return MatchOperand_ParseFail;
3419  }
3420  int Val = StringSwitch<int>(Tok.getString())
3421    .Case("be", 1)
3422    .Case("le", 0)
3423    .Default(-1);
3424  Parser.Lex(); // Eat the token.
3425
3426  if (Val == -1) {
3427    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3428    return MatchOperand_ParseFail;
3429  }
3430  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3431                                                                  getContext()),
3432                                           S, Parser.getTok().getLoc()));
3433  return MatchOperand_Success;
3434}
3435
3436/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3437/// instructions. Legal values are:
3438///     lsl #n  'n' in [0,31]
3439///     asr #n  'n' in [1,32]
3440///             n == 32 encoded as n == 0.
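/// For example, this parses the "lsl #4" in "ssat r0, #8, r1, lsl #4".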
3441ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3442parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3443  const AsmToken &Tok = Parser.getTok();
3444  SMLoc S = Tok.getLoc();
3445  if (Tok.isNot(AsmToken::Identifier)) {
3446    Error(S, "shift operator 'asr' or 'lsl' expected");
3447    return MatchOperand_ParseFail;
3448  }
3449  StringRef ShiftName = Tok.getString();
3450  bool isASR;
3451  if (ShiftName == "lsl" || ShiftName == "LSL")
3452    isASR = false;
3453  else if (ShiftName == "asr" || ShiftName == "ASR")
3454    isASR = true;
3455  else {
3456    Error(S, "shift operator 'asr' or 'lsl' expected");
3457    return MatchOperand_ParseFail;
3458  }
3459  Parser.Lex(); // Eat the operator.
3460
3461  // A '#' and a shift amount.
3462  if (Parser.getTok().isNot(AsmToken::Hash) &&
3463      Parser.getTok().isNot(AsmToken::Dollar)) {
3464    Error(Parser.getTok().getLoc(), "'#' expected");
3465    return MatchOperand_ParseFail;
3466  }
3467  Parser.Lex(); // Eat hash token.
3468
3469  const MCExpr *ShiftAmount;
3470  SMLoc E = Parser.getTok().getLoc();
3471  if (getParser().ParseExpression(ShiftAmount)) {
3472    Error(E, "malformed shift expression");
3473    return MatchOperand_ParseFail;
3474  }
3475  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3476  if (!CE) {
3477    Error(E, "shift amount must be an immediate");
3478    return MatchOperand_ParseFail;
3479  }
3480
3481  int64_t Val = CE->getValue();
3482  if (isASR) {
3483    // Shift amount must be in [1,32]
3484    if (Val < 1 || Val > 32) {
3485      Error(E, "'asr' shift amount must be in range [1,32]");
3486      return MatchOperand_ParseFail;
3487    }
3488    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3489    if (isThumb() && Val == 32) {
3490      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3491      return MatchOperand_ParseFail;
3492    }
3493    if (Val == 32) Val = 0;
3494  } else {
3495    // Shift amount must be in [0,31]
3496    if (Val < 0 || Val > 31) {
3497      Error(E, "'lsl' shift amount must be in range [0,31]");
3498      return MatchOperand_ParseFail;
3499    }
3500  }
3501
3502  E = Parser.getTok().getLoc();
3503  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3504
3505  return MatchOperand_Success;
3506}
3507
3508/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3509/// of instructions. Legal values are:
3510///     ror #n  'n' in {0, 8, 16, 24}
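/// For example, this parses the "ror #8" in "sxtb r0, r1, ror #8".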
3511ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3512parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3513  const AsmToken &Tok = Parser.getTok();
3514  SMLoc S = Tok.getLoc();
3515  if (Tok.isNot(AsmToken::Identifier))
3516    return MatchOperand_NoMatch;
3517  StringRef ShiftName = Tok.getString();
3518  if (ShiftName != "ror" && ShiftName != "ROR")
3519    return MatchOperand_NoMatch;
3520  Parser.Lex(); // Eat the operator.
3521
3522  // A '#' and a rotate amount.
3523  if (Parser.getTok().isNot(AsmToken::Hash) &&
3524      Parser.getTok().isNot(AsmToken::Dollar)) {
3525    Error(Parser.getTok().getLoc(), "'#' expected");
3526    return MatchOperand_ParseFail;
3527  }
3528  Parser.Lex(); // Eat hash token.
3529
3530  const MCExpr *ShiftAmount;
3531  SMLoc E = Parser.getTok().getLoc();
3532  if (getParser().ParseExpression(ShiftAmount)) {
3533    Error(E, "malformed rotate expression");
3534    return MatchOperand_ParseFail;
3535  }
3536  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3537  if (!CE) {
3538    Error(E, "rotate amount must be an immediate");
3539    return MatchOperand_ParseFail;
3540  }
3541
3542  int64_t Val = CE->getValue();
3543  // Shift amount must be in {0, 8, 16, 24}. Zero is an undocumented
3544  // extension; normally it is represented in asm by omitting the rotate
3545  // operand entirely.
3546  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3547    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3548    return MatchOperand_ParseFail;
3549  }
3550
3551  E = Parser.getTok().getLoc();
3552  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3553
3554  return MatchOperand_Success;
3555}
3556
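// parseBitfield - Parse the "#lsb, #width" operand pair of the bitfield
// instructions, e.g. the "#8, #4" in "bfi r0, r1, #8, #4".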
3557ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3558parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3559  SMLoc S = Parser.getTok().getLoc();
3560  // The bitfield descriptor is really two operands, the LSB and the width.
3561  if (Parser.getTok().isNot(AsmToken::Hash) &&
3562      Parser.getTok().isNot(AsmToken::Dollar)) {
3563    Error(Parser.getTok().getLoc(), "'#' expected");
3564    return MatchOperand_ParseFail;
3565  }
3566  Parser.Lex(); // Eat hash token.
3567
3568  const MCExpr *LSBExpr;
3569  SMLoc E = Parser.getTok().getLoc();
3570  if (getParser().ParseExpression(LSBExpr)) {
3571    Error(E, "malformed immediate expression");
3572    return MatchOperand_ParseFail;
3573  }
3574  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3575  if (!CE) {
3576    Error(E, "'lsb' operand must be an immediate");
3577    return MatchOperand_ParseFail;
3578  }
3579
3580  int64_t LSB = CE->getValue();
3581  // The LSB must be in the range [0,31]
3582  if (LSB < 0 || LSB > 31) {
3583    Error(E, "'lsb' operand must be in the range [0,31]");
3584    return MatchOperand_ParseFail;
3585  }
3586  E = Parser.getTok().getLoc();
3587
3588  // Expect another immediate operand.
3589  if (Parser.getTok().isNot(AsmToken::Comma)) {
3590    Error(Parser.getTok().getLoc(), "too few operands");
3591    return MatchOperand_ParseFail;
3592  }
3593  Parser.Lex(); // Eat the comma.
3594  if (Parser.getTok().isNot(AsmToken::Hash) &&
3595      Parser.getTok().isNot(AsmToken::Dollar)) {
3596    Error(Parser.getTok().getLoc(), "'#' expected");
3597    return MatchOperand_ParseFail;
3598  }
3599  Parser.Lex(); // Eat hash token.
3600
3601  const MCExpr *WidthExpr;
3602  if (getParser().ParseExpression(WidthExpr)) {
3603    Error(E, "malformed immediate expression");
3604    return MatchOperand_ParseFail;
3605  }
3606  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3607  if (!CE) {
3608    Error(E, "'width' operand must be an immediate");
3609    return MatchOperand_ParseFail;
3610  }
3611
3612  int64_t Width = CE->getValue();
3613  // The width must be in the range [1,32-lsb]
3614  if (Width < 1 || Width > 32 - LSB) {
3615    Error(E, "'width' operand must be in the range [1,32-lsb]");
3616    return MatchOperand_ParseFail;
3617  }
3618  E = Parser.getTok().getLoc();
3619
3620  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3621
3622  return MatchOperand_Success;
3623}
3624
3625ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3626parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3627  // Check for a post-index addressing register operand. Specifically:
3628  // postidx_reg := '+' register {, shift}
3629  //              | '-' register {, shift}
3630  //              | register {, shift}
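  // For example, this matches the "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2".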
3631
3632  // This method must return MatchOperand_NoMatch without consuming any tokens
3633  // in the case where there is no match, as other alternatives are handled
3634  // by other parse methods.
3635  AsmToken Tok = Parser.getTok();
3636  SMLoc S = Tok.getLoc();
3637  bool haveEaten = false;
3638  bool isAdd = true;
3639  int Reg = -1;
3640  if (Tok.is(AsmToken::Plus)) {
3641    Parser.Lex(); // Eat the '+' token.
3642    haveEaten = true;
3643  } else if (Tok.is(AsmToken::Minus)) {
3644    Parser.Lex(); // Eat the '-' token.
3645    isAdd = false;
3646    haveEaten = true;
3647  }
3648  if (Parser.getTok().is(AsmToken::Identifier))
3649    Reg = tryParseRegister();
3650  if (Reg == -1) {
3651    if (!haveEaten)
3652      return MatchOperand_NoMatch;
3653    Error(Parser.getTok().getLoc(), "register expected");
3654    return MatchOperand_ParseFail;
3655  }
3656  SMLoc E = Parser.getTok().getLoc();
3657
3658  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3659  unsigned ShiftImm = 0;
3660  if (Parser.getTok().is(AsmToken::Comma)) {
3661    Parser.Lex(); // Eat the ','.
3662    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3663      return MatchOperand_ParseFail;
3664  }
3665
3666  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3667                                                  ShiftImm, S, E));
3668
3669  return MatchOperand_Success;
3670}
3671
3672ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3673parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3674  // Check for a post-index addressing register operand. Specifically:
3675  // am3offset := '+' register
3676  //              | '-' register
3677  //              | register
3678  //              | # imm
3679  //              | # + imm
3680  //              | # - imm
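  // For example, this matches the "#-4" in "ldrd r0, r1, [r2], #-4" or the
  // "r3" in "strd r0, r1, [r2], r3".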
3681
3682  // This method must return MatchOperand_NoMatch without consuming any tokens
3683  // in the case where there is no match, as other alternatives are handled
3684  // by other parse methods.
3685  AsmToken Tok = Parser.getTok();
3686  SMLoc S = Tok.getLoc();
3687
3688  // Do immediates first, as we always parse those if we have a '#'.
3689  if (Parser.getTok().is(AsmToken::Hash) ||
3690      Parser.getTok().is(AsmToken::Dollar)) {
3691    Parser.Lex(); // Eat the '#'.
3692    // Explicitly look for a '-', as we need to encode negative zero
3693    // differently.
3694    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3695    const MCExpr *Offset;
3696    if (getParser().ParseExpression(Offset))
3697      return MatchOperand_ParseFail;
3698    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3699    if (!CE) {
3700      Error(S, "constant expression expected");
3701      return MatchOperand_ParseFail;
3702    }
3703    SMLoc E = Tok.getLoc();
3704    // Negative zero is encoded as the flag value INT32_MIN.
3705    int32_t Val = CE->getValue();
3706    if (isNegative && Val == 0)
3707      Val = INT32_MIN;
3708
3709    Operands.push_back(
3710      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3711
3712    return MatchOperand_Success;
3713  }
3714
3715
3716  bool haveEaten = false;
3717  bool isAdd = true;
3718  int Reg = -1;
3719  if (Tok.is(AsmToken::Plus)) {
3720    Parser.Lex(); // Eat the '+' token.
3721    haveEaten = true;
3722  } else if (Tok.is(AsmToken::Minus)) {
3723    Parser.Lex(); // Eat the '-' token.
3724    isAdd = false;
3725    haveEaten = true;
3726  }
3727  if (Parser.getTok().is(AsmToken::Identifier))
3728    Reg = tryParseRegister();
3729  if (Reg == -1) {
3730    if (!haveEaten)
3731      return MatchOperand_NoMatch;
3732    Error(Parser.getTok().getLoc(), "register expected");
3733    return MatchOperand_ParseFail;
3734  }
3735  SMLoc E = Parser.getTok().getLoc();
3736
3737  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3738                                                  0, S, E));
3739
3740  return MatchOperand_Success;
3741}
3742
3743/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3744/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3745/// when they refer multiple MIOperands inside a single one.
3746bool ARMAsmParser::
3747cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3748             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3749  // Rt, Rt2
3750  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3751  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3752  // Create a writeback register dummy placeholder.
3753  Inst.addOperand(MCOperand::CreateReg(0));
3754  // addr
3755  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3756  // pred
3757  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3758  return true;
3759}
3760
3761/// cvtT2StrdPre - Convert parsed operands to MCInst.
3762/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3763/// when they refer multiple MIOperands inside a single one.
3764bool ARMAsmParser::
3765cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3766             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3767  // Create a writeback register dummy placeholder.
3768  Inst.addOperand(MCOperand::CreateReg(0));
3769  // Rt, Rt2
3770  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3771  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3772  // addr
3773  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3774  // pred
3775  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3776  return true;
3777}
3778
3779/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3780/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3781/// when they refer multiple MIOperands inside a single one.
3782bool ARMAsmParser::
3783cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3784                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3785  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3786
3787  // Create a writeback register dummy placeholder.
3788  Inst.addOperand(MCOperand::CreateImm(0));
3789
3790  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3791  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3792  return true;
3793}
3794
3795/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3796/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3797/// when they refer multiple MIOperands inside a single one.
3798bool ARMAsmParser::
3799cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3800                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3801  // Create a writeback register dummy placeholder.
3802  Inst.addOperand(MCOperand::CreateImm(0));
3803  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3804  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3805  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3806  return true;
3807}
3808
3809/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3810/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3811/// when they refer multiple MIOperands inside a single one.
3812bool ARMAsmParser::
3813cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3814                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3815  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3816
3817  // Create a writeback register dummy placeholder.
3818  Inst.addOperand(MCOperand::CreateImm(0));
3819
3820  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3821  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3822  return true;
3823}
3824
3825/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3826/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3827/// when they refer multiple MIOperands inside a single one.
3828bool ARMAsmParser::
3829cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3830                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3831  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3832
3833  // Create a writeback register dummy placeholder.
3834  Inst.addOperand(MCOperand::CreateImm(0));
3835
3836  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3837  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3838  return true;
3839}
3840
3841
3842/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3843/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3844/// when they refer multiple MIOperands inside a single one.
3845bool ARMAsmParser::
3846cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3847                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3848  // Create a writeback register dummy placeholder.
3849  Inst.addOperand(MCOperand::CreateImm(0));
3850  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3851  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3852  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3853  return true;
3854}
3855
3856/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3857/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3858/// when they refer multiple MIOperands inside a single one.
3859bool ARMAsmParser::
3860cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3861                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3862  // Create a writeback register dummy placeholder.
3863  Inst.addOperand(MCOperand::CreateImm(0));
3864  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3865  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3866  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3867  return true;
3868}
3869
3870/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3871/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3872/// when they refer multiple MIOperands inside a single one.
3873bool ARMAsmParser::
3874cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3875                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3876  // Create a writeback register dummy placeholder.
3877  Inst.addOperand(MCOperand::CreateImm(0));
3878  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3879  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3880  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3881  return true;
3882}
3883
3884/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3885/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3886/// when they refer multiple MIOperands inside a single one.
3887bool ARMAsmParser::
3888cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3889                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3890  // Rt
3891  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3892  // Create a writeback register dummy placeholder.
3893  Inst.addOperand(MCOperand::CreateImm(0));
3894  // addr
3895  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3896  // offset
3897  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3898  // pred
3899  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3900  return true;
3901}
3902
3903/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3904/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3905/// when they refer multiple MIOperands inside a single one.
3906bool ARMAsmParser::
3907cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3908                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3909  // Rt
3910  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3911  // Create a writeback register dummy placeholder.
3912  Inst.addOperand(MCOperand::CreateImm(0));
3913  // addr
3914  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3915  // offset
3916  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3917  // pred
3918  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3919  return true;
3920}
3921
3922/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3923/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3924/// when they refer multiple MIOperands inside a single one.
3925bool ARMAsmParser::
3926cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3927                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3928  // Create a writeback register dummy placeholder.
3929  Inst.addOperand(MCOperand::CreateImm(0));
3930  // Rt
3931  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3932  // addr
3933  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3934  // offset
3935  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3936  // pred
3937  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3938  return true;
3939}
3940
3941/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3942/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3943/// when they refer multiple MIOperands inside a single one.
3944bool ARMAsmParser::
3945cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3946                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3947  // Create a writeback register dummy placeholder.
3948  Inst.addOperand(MCOperand::CreateImm(0));
3949  // Rt
3950  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3951  // addr
3952  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3953  // offset
3954  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3955  // pred
3956  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3957  return true;
3958}
3959
3960/// cvtLdrdPre - Convert parsed operands to MCInst.
3961/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3962/// when they refer multiple MIOperands inside a single one.
3963bool ARMAsmParser::
3964cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3965           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3966  // Rt, Rt2
3967  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3968  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3969  // Create a writeback register dummy placeholder.
3970  Inst.addOperand(MCOperand::CreateImm(0));
3971  // addr
3972  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3973  // pred
3974  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3975  return true;
3976}
3977
3978/// cvtStrdPre - Convert parsed operands to MCInst.
3979/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3980/// when they refer multiple MIOperands inside a single one.
3981bool ARMAsmParser::
3982cvtStrdPre(MCInst &Inst, unsigned Opcode,
3983           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3984  // Create a writeback register dummy placeholder.
3985  Inst.addOperand(MCOperand::CreateImm(0));
3986  // Rt, Rt2
3987  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3988  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3989  // addr
3990  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3991  // pred
3992  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3993  return true;
3994}
3995
3996/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3997/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3998/// when they refer multiple MIOperands inside a single one.
3999bool ARMAsmParser::
4000cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4001                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4002  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4003  // Create a writeback register dummy placeholder.
4004  Inst.addOperand(MCOperand::CreateImm(0));
4005  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4006  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4007  return true;
4008}
4009
4010/// cvtThumbMultiply - Convert parsed operands to MCInst.
4011/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4012/// when they refer multiple MIOperands inside a single one.
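/// For example, for the three-operand Thumb form "muls r0, r1, r0" the
/// destination must match one of the source registers; the check below
/// enforces that and selects Rn as the operand that is not Rd.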
4013bool ARMAsmParser::
4014cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4015           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4016  // The second source operand must be the same register as the destination
4017  // operand.
4018  if (Operands.size() == 6 &&
4019      (((ARMOperand*)Operands[3])->getReg() !=
4020       ((ARMOperand*)Operands[5])->getReg()) &&
4021      (((ARMOperand*)Operands[3])->getReg() !=
4022       ((ARMOperand*)Operands[4])->getReg())) {
4023    Error(Operands[3]->getStartLoc(),
4024          "destination register must match source register");
4025    return false;
4026  }
4027  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4028  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4029  // If we have a three-operand form, make sure to set Rn to be the operand
4030  // that isn't the same as Rd.
4031  unsigned RegOp = 4;
4032  if (Operands.size() == 6 &&
4033      ((ARMOperand*)Operands[4])->getReg() ==
4034        ((ARMOperand*)Operands[3])->getReg())
4035    RegOp = 5;
4036  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4037  Inst.addOperand(Inst.getOperand(0));
4038  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4039
4040  return true;
4041}
4042
4043bool ARMAsmParser::
4044cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4045              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4046  // Vd
4047  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4048  // Create a writeback register dummy placeholder.
4049  Inst.addOperand(MCOperand::CreateImm(0));
4050  // Vn
4051  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4052  // pred
4053  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4054  return true;
4055}
4056
4057bool ARMAsmParser::
4058cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4059                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4060  // Vd
4061  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4062  // Create a writeback register dummy placeholder.
4063  Inst.addOperand(MCOperand::CreateImm(0));
4064  // Vn
4065  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4066  // Vm
4067  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4068  // pred
4069  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4070  return true;
4071}
4072
4073bool ARMAsmParser::
4074cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4075              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4076  // Create a writeback register dummy placeholder.
4077  Inst.addOperand(MCOperand::CreateImm(0));
4078  // Vn
4079  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4080  // Vt
4081  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4082  // pred
4083  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4084  return true;
4085}
4086
4087bool ARMAsmParser::
4088cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4089                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4090  // Create a writeback register dummy placeholder.
4091  Inst.addOperand(MCOperand::CreateImm(0));
4092  // Vn
4093  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4094  // Vm
4095  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4096  // Vt
4097  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4098  // pred
4099  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4100  return true;
4101}
4102
4103/// Parse an ARM memory expression. Return false on success; on failure,
4104/// emit an error and return true. The first token must be a '[' when called.
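/// Forms handled below include "[r0]", "[r0, #-8]", "[r0, r1, lsl #2]", the
/// alignment variant "[r0, :128]", and an optional trailing '!' pre-indexed
/// writeback marker.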
4105bool ARMAsmParser::
4106parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4107  SMLoc S, E;
4108  assert(Parser.getTok().is(AsmToken::LBrac) &&
4109         "Token is not a Left Bracket");
4110  S = Parser.getTok().getLoc();
4111  Parser.Lex(); // Eat left bracket token.
4112
4113  const AsmToken &BaseRegTok = Parser.getTok();
4114  int BaseRegNum = tryParseRegister();
4115  if (BaseRegNum == -1)
4116    return Error(BaseRegTok.getLoc(), "register expected");
4117
4118  // The next token must either be a comma or a closing bracket.
4119  const AsmToken &Tok = Parser.getTok();
4120  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4121    return Error(Tok.getLoc(), "malformed memory operand");
4122
4123  if (Tok.is(AsmToken::RBrac)) {
4124    E = Tok.getLoc();
4125    Parser.Lex(); // Eat right bracket token.
4126
4127    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4128                                             0, 0, false, S, E));
4129
4130    // If there's a pre-indexing writeback marker, '!', just add it as a token
4131    // operand. It's rather odd, but syntactically valid.
4132    if (Parser.getTok().is(AsmToken::Exclaim)) {
4133      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4134      Parser.Lex(); // Eat the '!'.
4135    }
4136
4137    return false;
4138  }
4139
4140  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4141  Parser.Lex(); // Eat the comma.
4142
4143  // If we have a ':', it's an alignment specifier.
4144  if (Parser.getTok().is(AsmToken::Colon)) {
4145    Parser.Lex(); // Eat the ':'.
4146    E = Parser.getTok().getLoc();
4147
4148    const MCExpr *Expr;
4149    if (getParser().ParseExpression(Expr))
4150     return true;
4151
4152    // The expression has to be a constant. Memory references with relocations
4153    // don't come through here, as they use the <label> forms of the relevant
4154    // instructions.
4155    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4156    if (!CE)
4157      return Error (E, "constant expression expected");
4158
4159    unsigned Align = 0;
4160    switch (CE->getValue()) {
4161    default:
4162      return Error(E,
4163                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4164    case 16:  Align = 2; break;
4165    case 32:  Align = 4; break;
4166    case 64:  Align = 8; break;
4167    case 128: Align = 16; break;
4168    case 256: Align = 32; break;
4169    }
4170
4171    // Now we should have the closing ']'
4172    E = Parser.getTok().getLoc();
4173    if (Parser.getTok().isNot(AsmToken::RBrac))
4174      return Error(E, "']' expected");
4175    Parser.Lex(); // Eat right bracket token.
4176
4177    // Don't worry about range checking the value here. That's handled by
4178    // the is*() predicates.
4179    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4180                                             ARM_AM::no_shift, 0, Align,
4181                                             false, S, E));
4182
4183    // If there's a pre-indexing writeback marker, '!', just add it as a token
4184    // operand.
4185    if (Parser.getTok().is(AsmToken::Exclaim)) {
4186      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4187      Parser.Lex(); // Eat the '!'.
4188    }
4189
4190    return false;
4191  }
4192
4193  // If we have a '#', it's an immediate offset, else assume it's a register
4194  // offset. Be friendly and also accept a plain integer (without a leading
4195  // hash) for gas compatibility.
4196  if (Parser.getTok().is(AsmToken::Hash) ||
4197      Parser.getTok().is(AsmToken::Dollar) ||
4198      Parser.getTok().is(AsmToken::Integer)) {
4199    if (Parser.getTok().isNot(AsmToken::Integer))
4200      Parser.Lex(); // Eat the '#'.
4201    E = Parser.getTok().getLoc();
4202
4203    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4204    const MCExpr *Offset;
4205    if (getParser().ParseExpression(Offset))
4206     return true;
4207
4208    // The expression has to be a constant. Memory references with relocations
4209    // don't come through here, as they use the <label> forms of the relevant
4210    // instructions.
4211    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4212    if (!CE)
4213      return Error (E, "constant expression expected");
4214
4215    // If the constant was #-0, represent it as INT32_MIN.
4216    int32_t Val = CE->getValue();
4217    if (isNegative && Val == 0)
4218      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4219
4220    // Now we should have the closing ']'
4221    E = Parser.getTok().getLoc();
4222    if (Parser.getTok().isNot(AsmToken::RBrac))
4223      return Error(E, "']' expected");
4224    Parser.Lex(); // Eat right bracket token.
4225
4226    // Don't worry about range checking the value here. That's handled by
4227    // the is*() predicates.
4228    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4229                                             ARM_AM::no_shift, 0, 0,
4230                                             false, S, E));
4231
4232    // If there's a pre-indexing writeback marker, '!', just add it as a token
4233    // operand.
4234    if (Parser.getTok().is(AsmToken::Exclaim)) {
4235      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4236      Parser.Lex(); // Eat the '!'.
4237    }
4238
4239    return false;
4240  }
4241
4242  // The register offset is optionally preceded by a '+' or '-'
4243  bool isNegative = false;
4244  if (Parser.getTok().is(AsmToken::Minus)) {
4245    isNegative = true;
4246    Parser.Lex(); // Eat the '-'.
4247  } else if (Parser.getTok().is(AsmToken::Plus)) {
4248    // Nothing to do.
4249    Parser.Lex(); // Eat the '+'.
4250  }
4251
4252  E = Parser.getTok().getLoc();
4253  int OffsetRegNum = tryParseRegister();
4254  if (OffsetRegNum == -1)
4255    return Error(E, "register expected");
4256
4257  // If there's a shift operator, handle it.
4258  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4259  unsigned ShiftImm = 0;
4260  if (Parser.getTok().is(AsmToken::Comma)) {
4261    Parser.Lex(); // Eat the ','.
4262    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4263      return true;
4264  }
4265
4266  // Now we should have the closing ']'
4267  E = Parser.getTok().getLoc();
4268  if (Parser.getTok().isNot(AsmToken::RBrac))
4269    return Error(E, "']' expected");
4270  Parser.Lex(); // Eat right bracket token.
4271
4272  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4273                                           ShiftType, ShiftImm, 0, isNegative,
4274                                           S, E));
4275
4276  // If there's a pre-indexing writeback marker, '!', just add it as a token
4277  // operand.
4278  if (Parser.getTok().is(AsmToken::Exclaim)) {
4279    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4280    Parser.Lex(); // Eat the '!'.
4281  }
4282
4283  return false;
4284}
4285
4286/// parseMemRegOffsetShift - one of these two:
4287///   ( lsl | lsr | asr | ror ) , # shift_amount
4288///   rrx
4289/// Returns true on error, false if the shift was parsed successfully.
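/// For example, this parses the "lsl #2" in "[r0, r1, lsl #2]" or the "rrx"
/// in "[r0, r1, rrx]".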
4290bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4291                                          unsigned &Amount) {
4292  SMLoc Loc = Parser.getTok().getLoc();
4293  const AsmToken &Tok = Parser.getTok();
4294  if (Tok.isNot(AsmToken::Identifier))
4295    return true;
4296  StringRef ShiftName = Tok.getString();
4297  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4298      ShiftName == "asl" || ShiftName == "ASL")
4299    St = ARM_AM::lsl;
4300  else if (ShiftName == "lsr" || ShiftName == "LSR")
4301    St = ARM_AM::lsr;
4302  else if (ShiftName == "asr" || ShiftName == "ASR")
4303    St = ARM_AM::asr;
4304  else if (ShiftName == "ror" || ShiftName == "ROR")
4305    St = ARM_AM::ror;
4306  else if (ShiftName == "rrx" || ShiftName == "RRX")
4307    St = ARM_AM::rrx;
4308  else
4309    return Error(Loc, "illegal shift operator");
4310  Parser.Lex(); // Eat shift type token.
4311
4312  // rrx stands alone.
4313  Amount = 0;
4314  if (St != ARM_AM::rrx) {
4315    Loc = Parser.getTok().getLoc();
4316    // A '#' and a shift amount.
4317    const AsmToken &HashTok = Parser.getTok();
4318    if (HashTok.isNot(AsmToken::Hash) &&
4319        HashTok.isNot(AsmToken::Dollar))
4320      return Error(HashTok.getLoc(), "'#' expected");
4321    Parser.Lex(); // Eat hash token.
4322
4323    const MCExpr *Expr;
4324    if (getParser().ParseExpression(Expr))
4325      return true;
4326    // Range check the immediate.
4327    // lsl, ror: 0 <= imm <= 31
4328    // lsr, asr: 0 <= imm <= 32
4329    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4330    if (!CE)
4331      return Error(Loc, "shift amount must be an immediate");
4332    int64_t Imm = CE->getValue();
4333    if (Imm < 0 ||
4334        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4335        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4336      return Error(Loc, "immediate shift value out of range");
4337    Amount = Imm;
4338  }
4339
4340  return false;
4341}
4342
4343/// parseFPImm - A floating point immediate expression operand.
4344ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4345parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4346  // Anything that can accept a floating point constant as an operand
4347  // needs to go through here, as the regular ParseExpression is
4348  // integer only.
4349  //
4350  // This routine still creates a generic Immediate operand, containing
4351  // a bitcast of the 64-bit floating point value. The various operands
4352  // that accept floats can check whether the value is valid for them
4353  // via the standard is*() predicates.
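  //
  // For example (illustrative): "vmov.f32 s0, #1.0" produces an immediate
  // operand here holding the single-precision bit pattern 0x3f800000.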
4354
4355  SMLoc S = Parser.getTok().getLoc();
4356
4357  if (Parser.getTok().isNot(AsmToken::Hash) &&
4358      Parser.getTok().isNot(AsmToken::Dollar))
4359    return MatchOperand_NoMatch;
4360
4361  // Disambiguate the VMOV forms that can accept an FP immediate.
4362  // vmov.f32 <sreg>, #imm
4363  // vmov.f64 <dreg>, #imm
4364  // vmov.f32 <dreg>, #imm  @ vector f32x2
4365  // vmov.f32 <qreg>, #imm  @ vector f32x4
4366  //
4367  // There are also the NEON VMOV instructions which expect an
4368  // integer constant. Make sure we don't try to parse an FPImm
4369  // for these:
4370  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4371  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4372  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4373                           TyOp->getToken() != ".f64"))
4374    return MatchOperand_NoMatch;
4375
4376  Parser.Lex(); // Eat the '#'.
4377
4378  // Handle negation, as that still comes through as a separate token.
4379  bool isNegative = false;
4380  if (Parser.getTok().is(AsmToken::Minus)) {
4381    isNegative = true;
4382    Parser.Lex();
4383  }
4384  const AsmToken &Tok = Parser.getTok();
4385  SMLoc Loc = Tok.getLoc();
4386  if (Tok.is(AsmToken::Real)) {
4387    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4388    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4389    // If we had a '-' in front, toggle the sign bit.
4390    IntVal ^= (uint64_t)isNegative << 31;
4391    Parser.Lex(); // Eat the token.
4392    Operands.push_back(ARMOperand::CreateImm(
4393          MCConstantExpr::Create(IntVal, getContext()),
4394          S, Parser.getTok().getLoc()));
4395    return MatchOperand_Success;
4396  }
4397  // Also handle plain integers. Instructions which allow floating point
4398  // immediates also allow a raw encoded 8-bit value.
4399  if (Tok.is(AsmToken::Integer)) {
4400    int64_t Val = Tok.getIntVal();
4401    Parser.Lex(); // Eat the token.
4402    if (Val > 255 || Val < 0) {
4403      Error(Loc, "encoded floating point value out of range");
4404      return MatchOperand_ParseFail;
4405    }
4406    double RealVal = ARM_AM::getFPImmFloat(Val);
4407    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4408    Operands.push_back(ARMOperand::CreateImm(
4409        MCConstantExpr::Create(Val, getContext()), S,
4410        Parser.getTok().getLoc()));
4411    return MatchOperand_Success;
4412  }
4413
4414  Error(Loc, "invalid floating point immediate");
4415  return MatchOperand_ParseFail;
4416}
4417
4418/// Parse an ARM instruction operand.  For now this parses the operand regardless
4419/// of the mnemonic.
4420bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4421                                StringRef Mnemonic) {
4422  SMLoc S, E;
4423
4424  // Check if the current operand has a custom associated parser, if so, try to
4425  // custom parse the operand, or fallback to the general approach.
4426  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4427  if (ResTy == MatchOperand_Success)
4428    return false;
4429  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4430  // there was a match, but an error occurred, in which case, just return that
4431  // the operand parsing failed.
4432  if (ResTy == MatchOperand_ParseFail)
4433    return true;
4434
4435  switch (getLexer().getKind()) {
4436  default:
4437    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4438    return true;
4439  case AsmToken::Identifier: {
4440    if (!tryParseRegisterWithWriteBack(Operands))
4441      return false;
4442    int Res = tryParseShiftRegister(Operands);
4443    if (Res == 0) // success
4444      return false;
4445    else if (Res == -1) // irrecoverable error
4446      return true;
4447    // If this is VMRS, check for the apsr_nzcv operand.
4448    if (Mnemonic == "vmrs" &&
4449        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4450      S = Parser.getTok().getLoc();
4451      Parser.Lex();
4452      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4453      return false;
4454    }
4455
4456    // Fall though for the Identifier case that is not a register or a
4457    // special name.
4458  }
4459  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4460  case AsmToken::Integer: // things like 1f and 2b as branch targets
4461  case AsmToken::String:  // quoted label names.
4462  case AsmToken::Dot: {   // . as a branch target
4463    // This was not a register so parse other operands that start with an
4464    // identifier (like labels) as expressions and create them as immediates.
4465    const MCExpr *IdVal;
4466    S = Parser.getTok().getLoc();
4467    if (getParser().ParseExpression(IdVal))
4468      return true;
4469    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4470    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4471    return false;
4472  }
4473  case AsmToken::LBrac:
4474    return parseMemory(Operands);
4475  case AsmToken::LCurly:
4476    return parseRegisterList(Operands);
4477  case AsmToken::Dollar:
4478  case AsmToken::Hash: {
4479    // #42 -> immediate.
4480    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4481    S = Parser.getTok().getLoc();
4482    Parser.Lex();
4483    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4484    const MCExpr *ImmVal;
4485    if (getParser().ParseExpression(ImmVal))
4486      return true;
4487    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4488    if (CE) {
4489      int32_t Val = CE->getValue();
4490      if (isNegative && Val == 0)
4491        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4492    }
4493    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4494    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4495    return false;
4496  }
4497  case AsmToken::Colon: {
4498    // ":lower16:" and ":upper16:" expression prefixes
4499    // FIXME: Check it's an expression prefix,
4500    // e.g. (FOO - :lower16:BAR) isn't legal.
4501    ARMMCExpr::VariantKind RefKind;
4502    if (parsePrefix(RefKind))
4503      return true;
4504
4505    const MCExpr *SubExprVal;
4506    if (getParser().ParseExpression(SubExprVal))
4507      return true;
4508
4509    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4510                                                   getContext());
4511    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4512    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4513    return false;
4514  }
4515  }
4516}
4517
4518// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4519//  :lower16: and :upper16:.
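//  For example (illustrative): "movw r0, :lower16:sym" and
//  "movt r0, :upper16:sym".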
4520bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4521  RefKind = ARMMCExpr::VK_ARM_None;
4522
4523  // :lower16: and :upper16: modifiers
4524  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4525  Parser.Lex(); // Eat ':'
4526
4527  if (getLexer().isNot(AsmToken::Identifier)) {
4528    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4529    return true;
4530  }
4531
4532  StringRef IDVal = Parser.getTok().getIdentifier();
4533  if (IDVal == "lower16") {
4534    RefKind = ARMMCExpr::VK_ARM_LO16;
4535  } else if (IDVal == "upper16") {
4536    RefKind = ARMMCExpr::VK_ARM_HI16;
4537  } else {
4538    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4539    return true;
4540  }
4541  Parser.Lex();
4542
4543  if (getLexer().isNot(AsmToken::Colon)) {
4544    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4545    return true;
4546  }
4547  Parser.Lex(); // Eat the last ':'
4548  return false;
4549}
4550
4551/// \brief Given a mnemonic, split out possible predication code and carry
4552/// setting letters to form a canonical mnemonic and flags.
4553//
4554// FIXME: Would be nice to autogen this.
4555// FIXME: This is a bit of a maze of special cases.
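//
// For example (illustrative): "addseq" splits into the canonical mnemonic
// "add" with the carry-setting flag and predication code EQ; "cpsie" splits
// into "cps" with imod IE; "ittet" splits into "it" with ITMask "tet".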
4556StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4557                                      unsigned &PredicationCode,
4558                                      bool &CarrySetting,
4559                                      unsigned &ProcessorIMod,
4560                                      StringRef &ITMask) {
4561  PredicationCode = ARMCC::AL;
4562  CarrySetting = false;
4563  ProcessorIMod = 0;
4564
4565  // Ignore some mnemonics we know aren't predicated forms.
4566  //
4567  // FIXME: Would be nice to autogen this.
4568  if ((Mnemonic == "movs" && isThumb()) ||
4569      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4570      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4571      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4572      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4573      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4574      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4575      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4576      Mnemonic == "fmuls")
4577    return Mnemonic;
4578
4579  // First, split out any predication code. Ignore mnemonics we know aren't
4580  // predicated but end like one due to the carry-setting 's' (e.g. "adcs").
4581  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4582      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4583      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4584      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4585    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4586      .Case("eq", ARMCC::EQ)
4587      .Case("ne", ARMCC::NE)
4588      .Case("hs", ARMCC::HS)
4589      .Case("cs", ARMCC::HS)
4590      .Case("lo", ARMCC::LO)
4591      .Case("cc", ARMCC::LO)
4592      .Case("mi", ARMCC::MI)
4593      .Case("pl", ARMCC::PL)
4594      .Case("vs", ARMCC::VS)
4595      .Case("vc", ARMCC::VC)
4596      .Case("hi", ARMCC::HI)
4597      .Case("ls", ARMCC::LS)
4598      .Case("ge", ARMCC::GE)
4599      .Case("lt", ARMCC::LT)
4600      .Case("gt", ARMCC::GT)
4601      .Case("le", ARMCC::LE)
4602      .Case("al", ARMCC::AL)
4603      .Default(~0U);
4604    if (CC != ~0U) {
4605      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4606      PredicationCode = CC;
4607    }
4608  }
4609
4610  // Next, determine if we have a carry setting bit. We explicitly ignore all
4611  // the instructions whose mnemonics legitimately end in 's'.
4612  if (Mnemonic.endswith("s") &&
4613      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4614        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4615        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4616        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4617        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4618        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4619        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4620        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4621        (Mnemonic == "movs" && isThumb()))) {
4622    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4623    CarrySetting = true;
4624  }
4625
4626  // The "cps" instruction can have an interrupt mode operand which is glued
4627  // into the mnemonic. Check for this, split it out, and parse the imod op.
4628  if (Mnemonic.startswith("cps")) {
4629    // Split out any imod code.
4630    unsigned IMod =
4631      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4632      .Case("ie", ARM_PROC::IE)
4633      .Case("id", ARM_PROC::ID)
4634      .Default(~0U);
4635    if (IMod != ~0U) {
4636      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4637      ProcessorIMod = IMod;
4638    }
4639  }
4640
4641  // The "it" instruction has the condition mask on the end of the mnemonic.
4642  if (Mnemonic.startswith("it")) {
4643    ITMask = Mnemonic.slice(2, Mnemonic.size());
4644    Mnemonic = Mnemonic.slice(0, 2);
4645  }
4646
4647  return Mnemonic;
4648}
4649
4650/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4651/// inclusion of carry set or predication code operands.
4652//
4653// FIXME: It would be nice to autogen this.
4654void ARMAsmParser::
4655getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4656                      bool &CanAcceptPredicationCode) {
4657  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4658      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4659      Mnemonic == "add" || Mnemonic == "adc" ||
4660      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4661      Mnemonic == "orr" || Mnemonic == "mvn" ||
4662      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4663      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4664      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4665                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4666                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4667    CanAcceptCarrySet = true;
4668  } else
4669    CanAcceptCarrySet = false;
4670
4671  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4672      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4673      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4674      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4675      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4676      (Mnemonic == "clrex" && !isThumb()) ||
4677      (Mnemonic == "nop" && isThumbOne()) ||
4678      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4679        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4680        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4681      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4682       !isThumb()) ||
4683      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4684    CanAcceptPredicationCode = false;
4685  } else
4686    CanAcceptPredicationCode = true;
4687
4688  if (isThumb()) {
4689    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4690        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4691      CanAcceptPredicationCode = false;
4692  }
4693}
4694
4695bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4696                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4697  // FIXME: This is all horribly hacky. We really need a better way to deal
4698  // with optional operands like this in the matcher table.
4699
4700  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4701  // another does not. Specifically, the MOVW instruction does not. So we
4702  // special case it here and remove the defaulted (non-setting) cc_out
4703  // operand if that's the instruction we're trying to match.
4704  //
4705  // We do this as post-processing of the explicit operands rather than just
4706  // conditionally adding the cc_out in the first place because we need
4707  // to check the type of the parsed immediate operand.
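  //
  // For example (illustrative): in ARM mode "mov r0, #0xbeef" can only be
  // MOVW, so the cc_out is dropped, while "mov r0, #255" keeps the defaulted
  // cc_out since it can still match the flag-setting-capable MOV variants.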
4708  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4709      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4710      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4711      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4712    return true;
4713
4714  // Register-register 'add' for thumb does not have a cc_out operand
4715  // when there are only two register operands.
4716  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4717      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4718      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4719      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4720    return true;
4721  // Register-register 'add' for thumb does not have a cc_out operand
4722  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4723  // have to check the immediate range here since Thumb2 has a variant
4724  // that can handle a different range and has a cc_out operand.
4725  if (((isThumb() && Mnemonic == "add") ||
4726       (isThumbTwo() && Mnemonic == "sub")) &&
4727      Operands.size() == 6 &&
4728      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4729      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4730      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4731      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4732      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4733       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4734    return true;
4735  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4736  // imm0_4095 variant. That's the least-preferred variant when
4737  // selecting via the generic "add" mnemonic, so to know that we
4738  // should remove the cc_out operand, we have to explicitly check that
4739  // it's not one of the other variants. Ugh.
4740  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4741      Operands.size() == 6 &&
4742      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4743      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4744      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4745    // Nest conditions rather than one big 'if' statement for readability.
4746    //
4747    // If either register is a high reg, it's either one of the SP
4748    // variants (handled above) or a 32-bit encoding, so we just
4749    // check against T3. If the second register is the PC, this is an
4750    // alternate form of ADR, which uses encoding T4, so check for that too.
4751    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4752         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4753        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4754        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4755      return false;
4756    // If both registers are low, we're in an IT block, and the immediate is
4757    // in range, we should use encoding T1 instead, which has a cc_out.
4758    if (inITBlock() &&
4759        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4760        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4761        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4762      return false;
4763
4764    // Otherwise, we use encoding T4, which does not have a cc_out
4765    // operand.
4766    return true;
4767  }
4768
4769  // The thumb2 multiply instruction doesn't have a CCOut register, so
4770  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4771  // use the 16-bit encoding or not.
4772  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4773      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4774      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4775      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4776      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4777      // If the registers aren't low regs, the destination reg isn't the
4778      // same as one of the source regs, or the cc_out operand is zero
4779      // outside of an IT block, we have to use the 32-bit encoding, so
4780      // remove the cc_out operand.
4781      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4782       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4783       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4784       !inITBlock() ||
4785       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4786        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4787        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4788        static_cast<ARMOperand*>(Operands[4])->getReg())))
4789    return true;
4790
4791  // Also check the 'mul' syntax variant that doesn't specify an explicit
4792  // destination register.
4793  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4794      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4795      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4796      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4797      // If the registers aren't low regs or the cc_out operand is zero
4798      // outside of an IT block, we have to use the 32-bit encoding, so
4799      // remove the cc_out operand.
4800      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4801       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4802       !inITBlock()))
4803    return true;
4804
4805
4806
4807  // Register-register 'add/sub' for thumb does not have a cc_out operand
4808  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4809  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4810  // right, this will result in better diagnostics (which operand is off)
4811  // anyway.
4812  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4813      (Operands.size() == 5 || Operands.size() == 6) &&
4814      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4815      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4816      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4817    return true;
4818
4819  return false;
4820}
4821
4822static bool isDataTypeToken(StringRef Tok) {
4823  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4824    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4825    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4826    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4827    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4828    Tok == ".f" || Tok == ".d";
4829}
4830
4831// FIXME: This bit should probably be handled via an explicit match class
4832// in the .td files that matches the suffix instead of having it be
4833// a literal string token the way it is now.
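// For example (illustrative): "vldm.f64 r0, {d0-d3}" is parsed the same as
// "vldm r0, {d0-d3}"; the ".f64" suffix token is dropped in ParseInstruction()
// below.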
4834static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4835  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4836}
4837
4838static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4839/// Parse an ARM instruction mnemonic followed by its operands.
4840bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4841                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4842  // Apply mnemonic aliases before doing anything else, as the destination
4843  // mnemonic may include suffixes and we want to handle them normally.
4844  // The generic tblgen'erated code does this later, at the start of
4845  // MatchInstructionImpl(), but that's too late for aliases that include
4846  // any sort of suffix.
4847  unsigned AvailableFeatures = getAvailableFeatures();
4848  applyMnemonicAliases(Name, AvailableFeatures);
4849
4850  // First check for the ARM-specific .req directive.
4851  if (Parser.getTok().is(AsmToken::Identifier) &&
4852      Parser.getTok().getIdentifier() == ".req") {
4853    parseDirectiveReq(Name, NameLoc);
4854    // We always return 'error' for this, as we're done with this
4855    // statement and don't need to match the instruction.
4856    return true;
4857  }
4858
4859  // Create the leading tokens for the mnemonic, split by '.' characters.
4860  size_t Start = 0, Next = Name.find('.');
4861  StringRef Mnemonic = Name.slice(Start, Next);
4862
4863  // Split out the predication code and carry setting flag from the mnemonic.
4864  unsigned PredicationCode;
4865  unsigned ProcessorIMod;
4866  bool CarrySetting;
4867  StringRef ITMask;
4868  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4869                           ProcessorIMod, ITMask);
4870
4871  // In Thumb1, only the branch (B) instruction can be predicated.
4872  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4873    Parser.EatToEndOfStatement();
4874    return Error(NameLoc, "conditional execution not supported in Thumb1");
4875  }
4876
4877  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4878
4879  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4880  // is the mask as it will be for the IT encoding if the condition code
4881  // has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4882  // where the condition's bit0 is zero, the instruction post-processing
4883  // will adjust the mask accordingly.
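  //
  // For example (illustrative): at this point "itt" yields Mask 0b1100 and
  // "ite" yields Mask 0b0100, prior to the bit0-based adjustment mentioned
  // above.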
4884  if (Mnemonic == "it") {
4885    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4886    if (ITMask.size() > 3) {
4887      Parser.EatToEndOfStatement();
4888      return Error(Loc, "too many conditions on IT instruction");
4889    }
4890    unsigned Mask = 8;
4891    for (unsigned i = ITMask.size(); i != 0; --i) {
4892      char pos = ITMask[i - 1];
4893      if (pos != 't' && pos != 'e') {
4894        Parser.EatToEndOfStatement();
4895        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4896      }
4897      Mask >>= 1;
4898      if (ITMask[i - 1] == 't')
4899        Mask |= 8;
4900    }
4901    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4902  }
4903
4904  // FIXME: This is all a pretty gross hack. We should automatically handle
4905  // optional operands like this via tblgen.
4906
4907  // Next, add the CCOut and ConditionCode operands, if needed.
4908  //
4909  // For mnemonics which can ever incorporate a carry setting bit or predication
4910  // code, our matching model involves us always generating CCOut and
4911  // ConditionCode operands to match the mnemonic "as written" and then we let
4912  // the matcher deal with finding the right instruction or generating an
4913  // appropriate error.
4914  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4915  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4916
4917  // If we had a carry-set on an instruction that can't do that, issue an
4918  // error.
4919  if (!CanAcceptCarrySet && CarrySetting) {
4920    Parser.EatToEndOfStatement();
4921    return Error(NameLoc, "instruction '" + Mnemonic +
4922                 "' can not set flags, but 's' suffix specified");
4923  }
4924  // If we had a predication code on an instruction that can't do that, issue an
4925  // error.
4926  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4927    Parser.EatToEndOfStatement();
4928    return Error(NameLoc, "instruction '" + Mnemonic +
4929                 "' is not predicable, but condition code specified");
4930  }
4931
4932  // Add the carry setting operand, if necessary.
4933  if (CanAcceptCarrySet) {
4934    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4935    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4936                                               Loc));
4937  }
4938
4939  // Add the predication code operand, if necessary.
4940  if (CanAcceptPredicationCode) {
4941    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4942                                      CarrySetting);
4943    Operands.push_back(ARMOperand::CreateCondCode(
4944                         ARMCC::CondCodes(PredicationCode), Loc));
4945  }
4946
4947  // Add the processor imod operand, if necessary.
4948  if (ProcessorIMod) {
4949    Operands.push_back(ARMOperand::CreateImm(
4950          MCConstantExpr::Create(ProcessorIMod, getContext()),
4951                                 NameLoc, NameLoc));
4952  }
4953
4954  // Add the remaining tokens in the mnemonic.
4955  while (Next != StringRef::npos) {
4956    Start = Next;
4957    Next = Name.find('.', Start + 1);
4958    StringRef ExtraToken = Name.slice(Start, Next);
4959
4960    // Some NEON instructions have an optional datatype suffix that is
4961    // completely ignored. Check for that.
4962    if (isDataTypeToken(ExtraToken) &&
4963        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4964      continue;
4965
4966    if (ExtraToken != ".n") {
4967      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4968      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4969    }
4970  }
4971
4972  // Read the remaining operands.
4973  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4974    // Read the first operand.
4975    if (parseOperand(Operands, Mnemonic)) {
4976      Parser.EatToEndOfStatement();
4977      return true;
4978    }
4979
4980    while (getLexer().is(AsmToken::Comma)) {
4981      Parser.Lex();  // Eat the comma.
4982
4983      // Parse and remember the operand.
4984      if (parseOperand(Operands, Mnemonic)) {
4985        Parser.EatToEndOfStatement();
4986        return true;
4987      }
4988    }
4989  }
4990
4991  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4992    SMLoc Loc = getLexer().getLoc();
4993    Parser.EatToEndOfStatement();
4994    return Error(Loc, "unexpected token in argument list");
4995  }
4996
4997  Parser.Lex(); // Consume the EndOfStatement
4998
4999  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5000  // do and don't have a cc_out optional-def operand. With some spot-checks
5001  // of the operand list, we can figure out which variant we're trying to
5002  // parse and adjust accordingly before actually matching. We shouldn't ever
5003  // try to remove a cc_out operand that was explicitly set on the
5004  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5005  // table driven matcher doesn't fit well with the ARM instruction set.
5006  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5007    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5008    Operands.erase(Operands.begin() + 1);
5009    delete Op;
5010  }
5011
5012  // ARM mode 'blx' needs special handling, as the register operand version
5013  // is predicable, but the label operand version is not. So, we can't rely
5014  // on the Mnemonic based checking to correctly figure out when to put
5015  // a k_CondCode operand in the list. If we're trying to match the label
5016  // version, remove the k_CondCode operand here.
5017  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5018      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5019    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5020    Operands.erase(Operands.begin() + 1);
5021    delete Op;
5022  }
5023
5024  // The vector-compare-to-zero instructions have a literal token "#0" at
5025  // the end that comes through here as an immediate operand. Convert it to a
5026  // token to play nicely with the matcher.
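  //
  // For example (illustrative): the trailing "#0" in "vceq.i32 d0, d1, #0"
  // is rewritten here from an immediate operand into a "#0" token.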
5027  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5028      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5029      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5030    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5031    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5032    if (CE && CE->getValue() == 0) {
5033      Operands.erase(Operands.begin() + 5);
5034      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5035      delete Op;
5036    }
5037  }
5038  // VCMP{E} does the same thing, but with a different operand count.
5039  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5040      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5041    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5042    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5043    if (CE && CE->getValue() == 0) {
5044      Operands.erase(Operands.begin() + 4);
5045      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5046      delete Op;
5047    }
5048  }
5049  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5050  // end. Convert it to a token here. Take care not to convert those
5051  // that should hit the Thumb2 encoding.
5052  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5053      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5054      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5055      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5056    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5057    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5058    if (CE && CE->getValue() == 0 &&
5059        (isThumbOne() ||
5060         // The cc_out operand matches the IT block.
5061         ((inITBlock() != CarrySetting) &&
5062         // Neither register operand is a high register.
5063         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5064          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5065      Operands.erase(Operands.begin() + 5);
5066      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5067      delete Op;
5068    }
5069  }
5070
5071  return false;
5072}
5073
5074// Validate context-sensitive operand constraints.
5075
5076// return 'true' if the register list contains registers other than low GPRs
5077// (and the optionally allowed HiReg), 'false' otherwise. If Reg is found in
5078// the register list, set 'containsReg' to true.
5079static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5080                                 unsigned HiReg, bool &containsReg) {
5081  containsReg = false;
5082  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5083    unsigned OpReg = Inst.getOperand(i).getReg();
5084    if (OpReg == Reg)
5085      containsReg = true;
5086    // Anything other than a low register isn't legal here.
5087    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5088      return true;
5089  }
5090  return false;
5091}
5092
5093// Check if the specified register is in the register list of the inst,
5094// starting at the indicated operand number.
5095static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5096  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5097    unsigned OpReg = Inst.getOperand(i).getReg();
5098    if (OpReg == Reg)
5099      return true;
5100  }
5101  return false;
5102}
5103
5104// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5105// the ARMInsts array) instead. Getting that here requires awkward
5106// API changes, though. Better way?
5107namespace llvm {
5108extern const MCInstrDesc ARMInsts[];
5109}
5110static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5111  return ARMInsts[Opcode];
5112}
5113
5114// FIXME: We would really like to be able to tablegen'erate this.
5115bool ARMAsmParser::
5116validateInstruction(MCInst &Inst,
5117                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5118  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5119  SMLoc Loc = Operands[0]->getStartLoc();
5120  // Check the IT block state first.
5121  // NOTE: BKPT instruction has the interesting property of being
5122  // allowed in IT blocks, but not being predicable.  It just always
5123  // executes.
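  //
  // For example (illustrative): following "it eq", an "addeq r0, r1" is
  // accepted, while "addne r0, r1" triggers the condition mismatch error
  // below.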
5124  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5125      Inst.getOpcode() != ARM::BKPT) {
5126    unsigned bit = 1;
5127    if (ITState.FirstCond)
5128      ITState.FirstCond = false;
5129    else
5130      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5131    // The instruction must be predicable.
5132    if (!MCID.isPredicable())
5133      return Error(Loc, "instructions in IT block must be predicable");
5134    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5135    unsigned ITCond = bit ? ITState.Cond :
5136      ARMCC::getOppositeCondition(ITState.Cond);
5137    if (Cond != ITCond) {
5138      // Find the condition code Operand to get its SMLoc information.
5139      SMLoc CondLoc;
5140      for (unsigned i = 1; i < Operands.size(); ++i)
5141        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5142          CondLoc = Operands[i]->getStartLoc();
5143      return Error(CondLoc, "incorrect condition in IT block; got '" +
5144                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5145                   "', but expected '" +
5146                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5147    }
5148  // Check for non-'al' condition codes outside of the IT block.
5149  } else if (isThumbTwo() && MCID.isPredicable() &&
5150             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5151             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5152             Inst.getOpcode() != ARM::t2B)
5153    return Error(Loc, "predicated instructions must be in IT block");
5154
5155  switch (Inst.getOpcode()) {
5156  case ARM::LDRD:
5157  case ARM::LDRD_PRE:
5158  case ARM::LDRD_POST:
5159  case ARM::LDREXD: {
5160    // Rt2 must be Rt + 1.
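    // For example (illustrative): "ldrd r0, r1, [r2]" is accepted here,
    // while "ldrd r0, r2, [r3]" is rejected as non-sequential.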
5161    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5162    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5163    if (Rt2 != Rt + 1)
5164      return Error(Operands[3]->getStartLoc(),
5165                   "destination operands must be sequential");
5166    return false;
5167  }
5168  case ARM::STRD: {
5169    // Rt2 must be Rt + 1.
5170    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5171    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5172    if (Rt2 != Rt + 1)
5173      return Error(Operands[3]->getStartLoc(),
5174                   "source operands must be sequential");
5175    return false;
5176  }
5177  case ARM::STRD_PRE:
5178  case ARM::STRD_POST:
5179  case ARM::STREXD: {
5180    // Rt2 must be Rt + 1.
5181    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5182    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5183    if (Rt2 != Rt + 1)
5184      return Error(Operands[3]->getStartLoc(),
5185                   "source operands must be sequential");
5186    return false;
5187  }
5188  case ARM::SBFX:
5189  case ARM::UBFX: {
5190    // width must be in range [1, 32-lsb]
5191    unsigned lsb = Inst.getOperand(2).getImm();
5192    unsigned widthm1 = Inst.getOperand(3).getImm();
5193    if (widthm1 >= 32 - lsb)
5194      return Error(Operands[5]->getStartLoc(),
5195                   "bitfield width must be in range [1,32-lsb]");
5196    return false;
5197  }
5198  case ARM::tLDMIA: {
5199    // If we're parsing Thumb2, the .w variant is available and handles
5200    // most cases that are normally illegal for a Thumb1 LDM
5201    // instruction. We'll make the transformation in processInstruction()
5202    // if necessary.
5203    //
5204    // Thumb LDM instructions are writeback iff the base register is not
5205    // in the register list.
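    // For example (illustrative): "ldmia r0!, {r1, r2}" requires the '!'
    // since r0 is not in the list, while "ldmia r0, {r0, r1}" must omit it.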
5206    unsigned Rn = Inst.getOperand(0).getReg();
5207    bool hasWritebackToken =
5208      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5209       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5210    bool listContainsBase;
5211    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5212      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5213                   "registers must be in range r0-r7");
5214    // If we should have writeback, then there should be a '!' token.
5215    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5216      return Error(Operands[2]->getStartLoc(),
5217                   "writeback operator '!' expected");
5218    // If we should not have writeback, there must not be a '!'. This is
5219    // true even for the 32-bit wide encodings.
5220    if (listContainsBase && hasWritebackToken)
5221      return Error(Operands[3]->getStartLoc(),
5222                   "writeback operator '!' not allowed when base register "
5223                   "in register list");
5224
5225    break;
5226  }
5227  case ARM::t2LDMIA_UPD: {
5228    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5229      return Error(Operands[4]->getStartLoc(),
5230                   "writeback operator '!' not allowed when base register "
5231                   "in register list");
5232    break;
5233  }
5234  // Like ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5235  // so only issue a diagnostic for Thumb1. The instructions will be
5236  // switched to the t2 encodings in processInstruction() if necessary.
5237  case ARM::tPOP: {
5238    bool listContainsBase;
5239    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5240        !isThumbTwo())
5241      return Error(Operands[2]->getStartLoc(),
5242                   "registers must be in range r0-r7 or pc");
5243    break;
5244  }
5245  case ARM::tPUSH: {
5246    bool listContainsBase;
5247    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5248        !isThumbTwo())
5249      return Error(Operands[2]->getStartLoc(),
5250                   "registers must be in range r0-r7 or lr");
5251    break;
5252  }
5253  case ARM::tSTMIA_UPD: {
5254    bool listContainsBase;
5255    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5256      return Error(Operands[4]->getStartLoc(),
5257                   "registers must be in range r0-r7");
5258    break;
5259  }
5260  }
5261
5262  return false;
5263}
5264
5265static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5266  switch(Opc) {
5267  default: llvm_unreachable("unexpected opcode!");
5268  // VST1LN
5269  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5270  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5271  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5272  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5273  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5274  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5275  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5276  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5277  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5278
5279  // VST2LN
5280  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5281  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5282  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5283  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5284  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5285
5286  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5287  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5288  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5289  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5290  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5291
5292  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5293  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5294  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5295  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5296  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5297
5298  // VST3LN
5299  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5300  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5301  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5302  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5303  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5304  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5305  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5306  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5307  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5308  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5309  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5310  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5311  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5312  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5313  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5314
5315  // VST3
5316  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5317  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5318  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5319  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5320  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5321  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5322  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5323  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5324  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5325  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5326  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5327  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5328  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5329  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5330  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5331  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5332  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5333  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5334
5335  // VST4LN
5336  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5337  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5338  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5339  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5340  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5341  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5342  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5343  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5344  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5345  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5346  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5347  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5348  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5349  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5350  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5351
5352  // VST4
5353  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5354  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5355  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5356  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5357  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5358  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5359  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5360  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5361  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5362  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5363  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5364  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5365  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5366  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5367  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5368  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5369  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5370  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5371  }
5372}
5373
5374static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5375  switch(Opc) {
5376  default: llvm_unreachable("unexpected opcode!");
5377  // VLD1LN
5378  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5379  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5380  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5381  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5382  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5383  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5384  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5385  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5386  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5387
5388  // VLD2LN
5389  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5390  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5391  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5392  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5393  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5394  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5395  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5396  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5397  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5398  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5399  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5400  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5401  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5402  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5403  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5404
5405  // VLD3DUP
5406  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5407  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5408  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5409  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
5410  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
5411  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5412  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5413  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5414  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5415  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5416  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5417  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5418  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5419  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5420  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5421  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5422  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5423  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5424
5425  // VLD3LN
5426  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5427  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5428  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5429  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
5430  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5431  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5432  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5433  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5434  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5435  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5436  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5437  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5438  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5439  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5440  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5441
5442  // VLD3
5443  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5444  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5445  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5446  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5447  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5448  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5449  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5450  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5451  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5452  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5453  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5454  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5455  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5456  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5457  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5458  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5459  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5460  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5461
5462  // VLD4LN
5463  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5464  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5465  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5466  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5467  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5468  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5469  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5470  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5471  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5472  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5473  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5474  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5475  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5476  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5477  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5478
5479  // VLD4DUP
5480  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5481  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5482  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5483  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5484  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5485  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5486  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5487  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5488  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5489  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5490  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5491  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5492  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5493  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5494  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5495  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5496  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5497  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5498
5499  // VLD4
5500  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5501  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5502  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5503  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5504  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5505  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5506  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5507  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5508  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5509  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5510  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5511  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5512  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5513  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5514  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5515  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5516  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5517  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5518  }
5519}
5520
5521bool ARMAsmParser::
5522processInstruction(MCInst &Inst,
5523                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5524  switch (Inst.getOpcode()) {
5525  // Aliases for alternate PC+imm syntax of LDR instructions.
5526  case ARM::t2LDRpcrel:
5527    Inst.setOpcode(ARM::t2LDRpci);
5528    return true;
5529  case ARM::t2LDRBpcrel:
5530    Inst.setOpcode(ARM::t2LDRBpci);
5531    return true;
5532  case ARM::t2LDRHpcrel:
5533    Inst.setOpcode(ARM::t2LDRHpci);
5534    return true;
5535  case ARM::t2LDRSBpcrel:
5536    Inst.setOpcode(ARM::t2LDRSBpci);
5537    return true;
5538  case ARM::t2LDRSHpcrel:
5539    Inst.setOpcode(ARM::t2LDRSHpci);
5540    return true;
5541  // Handle NEON VST complex aliases.
5542  case ARM::VST1LNdWB_register_Asm_8:
5543  case ARM::VST1LNdWB_register_Asm_16:
5544  case ARM::VST1LNdWB_register_Asm_32: {
5545    MCInst TmpInst;
5546    // Shuffle the operands around so the lane index operand is in the
5547    // right place.
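        // For example, an alias such as "vst1.8 {d4[1]}, [r3], r5" is parsed
        // with the operand order (Vd, lane, Rn, align, Rm, pred) but the real
        // VST1LNd8_UPD expects (Rn_wb, Rn, align, Rm, Vd, lane, pred), which is
        // the order built below. (Register names are illustrative only.)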
5548    unsigned Spacing;
5549    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5550    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5551    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5552    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5553    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5554    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5555    TmpInst.addOperand(Inst.getOperand(1)); // lane
5556    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5557    TmpInst.addOperand(Inst.getOperand(6));
5558    Inst = TmpInst;
5559    return true;
5560  }
5561
5562  case ARM::VST2LNdWB_register_Asm_8:
5563  case ARM::VST2LNdWB_register_Asm_16:
5564  case ARM::VST2LNdWB_register_Asm_32:
5565  case ARM::VST2LNqWB_register_Asm_16:
5566  case ARM::VST2LNqWB_register_Asm_32: {
5567    MCInst TmpInst;
5568    // Shuffle the operands around so the lane index operand is in the
5569    // right place.
5570    unsigned Spacing;
5571    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5572    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5573    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5574    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5575    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5576    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5577    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5578                                            Spacing));
5579    TmpInst.addOperand(Inst.getOperand(1)); // lane
5580    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5581    TmpInst.addOperand(Inst.getOperand(6));
5582    Inst = TmpInst;
5583    return true;
5584  }
5585
5586  case ARM::VST3LNdWB_register_Asm_8:
5587  case ARM::VST3LNdWB_register_Asm_16:
5588  case ARM::VST3LNdWB_register_Asm_32:
5589  case ARM::VST3LNqWB_register_Asm_16:
5590  case ARM::VST3LNqWB_register_Asm_32: {
5591    MCInst TmpInst;
5592    // Shuffle the operands around so the lane index operand is in the
5593    // right place.
5594    unsigned Spacing;
5595    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5596    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5597    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5598    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5599    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5600    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5601    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5602                                            Spacing));
5603    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5604                                            Spacing * 2));
5605    TmpInst.addOperand(Inst.getOperand(1)); // lane
5606    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5607    TmpInst.addOperand(Inst.getOperand(6));
5608    Inst = TmpInst;
5609    return true;
5610  }
5611
5612  case ARM::VST4LNdWB_register_Asm_8:
5613  case ARM::VST4LNdWB_register_Asm_16:
5614  case ARM::VST4LNdWB_register_Asm_32:
5615  case ARM::VST4LNqWB_register_Asm_16:
5616  case ARM::VST4LNqWB_register_Asm_32: {
5617    MCInst TmpInst;
5618    // Shuffle the operands around so the lane index operand is in the
5619    // right place.
5620    unsigned Spacing;
5621    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5622    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5623    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5624    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5625    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5626    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5627    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5628                                            Spacing));
5629    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5630                                            Spacing * 2));
5631    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5632                                            Spacing * 3));
5633    TmpInst.addOperand(Inst.getOperand(1)); // lane
5634    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5635    TmpInst.addOperand(Inst.getOperand(6));
5636    Inst = TmpInst;
5637    return true;
5638  }
5639
5640  case ARM::VST1LNdWB_fixed_Asm_8:
5641  case ARM::VST1LNdWB_fixed_Asm_16:
5642  case ARM::VST1LNdWB_fixed_Asm_32: {
5643    MCInst TmpInst;
5644    // Shuffle the operands around so the lane index operand is in the
5645    // right place.
5646    unsigned Spacing;
5647    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5648    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5649    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5650    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5651    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5652    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5653    TmpInst.addOperand(Inst.getOperand(1)); // lane
5654    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5655    TmpInst.addOperand(Inst.getOperand(5));
5656    Inst = TmpInst;
5657    return true;
5658  }
5659
5660  case ARM::VST2LNdWB_fixed_Asm_8:
5661  case ARM::VST2LNdWB_fixed_Asm_16:
5662  case ARM::VST2LNdWB_fixed_Asm_32:
5663  case ARM::VST2LNqWB_fixed_Asm_16:
5664  case ARM::VST2LNqWB_fixed_Asm_32: {
5665    MCInst TmpInst;
5666    // Shuffle the operands around so the lane index operand is in the
5667    // right place.
5668    unsigned Spacing;
5669    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5670    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5671    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5672    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5673    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5674    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5675    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5676                                            Spacing));
5677    TmpInst.addOperand(Inst.getOperand(1)); // lane
5678    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5679    TmpInst.addOperand(Inst.getOperand(5));
5680    Inst = TmpInst;
5681    return true;
5682  }
5683
5684  case ARM::VST3LNdWB_fixed_Asm_8:
5685  case ARM::VST3LNdWB_fixed_Asm_16:
5686  case ARM::VST3LNdWB_fixed_Asm_32:
5687  case ARM::VST3LNqWB_fixed_Asm_16:
5688  case ARM::VST3LNqWB_fixed_Asm_32: {
5689    MCInst TmpInst;
5690    // Shuffle the operands around so the lane index operand is in the
5691    // right place.
5692    unsigned Spacing;
5693    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5694    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5695    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5696    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5697    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5698    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5699    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5700                                            Spacing));
5701    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5702                                            Spacing * 2));
5703    TmpInst.addOperand(Inst.getOperand(1)); // lane
5704    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5705    TmpInst.addOperand(Inst.getOperand(5));
5706    Inst = TmpInst;
5707    return true;
5708  }
5709
5710  case ARM::VST4LNdWB_fixed_Asm_8:
5711  case ARM::VST4LNdWB_fixed_Asm_16:
5712  case ARM::VST4LNdWB_fixed_Asm_32:
5713  case ARM::VST4LNqWB_fixed_Asm_16:
5714  case ARM::VST4LNqWB_fixed_Asm_32: {
5715    MCInst TmpInst;
5716    // Shuffle the operands around so the lane index operand is in the
5717    // right place.
5718    unsigned Spacing;
5719    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5720    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5721    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5722    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5723    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5724    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5725    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5726                                            Spacing));
5727    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5728                                            Spacing * 2));
5729    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5730                                            Spacing * 3));
5731    TmpInst.addOperand(Inst.getOperand(1)); // lane
5732    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5733    TmpInst.addOperand(Inst.getOperand(5));
5734    Inst = TmpInst;
5735    return true;
5736  }
5737
5738  case ARM::VST1LNdAsm_8:
5739  case ARM::VST1LNdAsm_16:
5740  case ARM::VST1LNdAsm_32: {
5741    MCInst TmpInst;
5742    // Shuffle the operands around so the lane index operand is in the
5743    // right place.
5744    unsigned Spacing;
5745    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5746    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5747    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5748    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5749    TmpInst.addOperand(Inst.getOperand(1)); // lane
5750    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5751    TmpInst.addOperand(Inst.getOperand(5));
5752    Inst = TmpInst;
5753    return true;
5754  }
5755
5756  case ARM::VST2LNdAsm_8:
5757  case ARM::VST2LNdAsm_16:
5758  case ARM::VST2LNdAsm_32:
5759  case ARM::VST2LNqAsm_16:
5760  case ARM::VST2LNqAsm_32: {
5761    MCInst TmpInst;
5762    // Shuffle the operands around so the lane index operand is in the
5763    // right place.
5764    unsigned Spacing;
5765    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5766    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5767    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5768    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5769    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5770                                            Spacing));
5771    TmpInst.addOperand(Inst.getOperand(1)); // lane
5772    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5773    TmpInst.addOperand(Inst.getOperand(5));
5774    Inst = TmpInst;
5775    return true;
5776  }
5777
5778  case ARM::VST3LNdAsm_8:
5779  case ARM::VST3LNdAsm_16:
5780  case ARM::VST3LNdAsm_32:
5781  case ARM::VST3LNqAsm_16:
5782  case ARM::VST3LNqAsm_32: {
5783    MCInst TmpInst;
5784    // Shuffle the operands around so the lane index operand is in the
5785    // right place.
5786    unsigned Spacing;
5787    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5788    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5789    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5790    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5791    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5792                                            Spacing));
5793    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5794                                            Spacing * 2));
5795    TmpInst.addOperand(Inst.getOperand(1)); // lane
5796    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5797    TmpInst.addOperand(Inst.getOperand(5));
5798    Inst = TmpInst;
5799    return true;
5800  }
5801
5802  case ARM::VST4LNdAsm_8:
5803  case ARM::VST4LNdAsm_16:
5804  case ARM::VST4LNdAsm_32:
5805  case ARM::VST4LNqAsm_16:
5806  case ARM::VST4LNqAsm_32: {
5807    MCInst TmpInst;
5808    // Shuffle the operands around so the lane index operand is in the
5809    // right place.
5810    unsigned Spacing;
5811    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5812    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5813    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5814    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5815    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5816                                            Spacing));
5817    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5818                                            Spacing * 2));
5819    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5820                                            Spacing * 3));
5821    TmpInst.addOperand(Inst.getOperand(1)); // lane
5822    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5823    TmpInst.addOperand(Inst.getOperand(5));
5824    Inst = TmpInst;
5825    return true;
5826  }
5827
5828  // Handle NEON VLD complex aliases.
5829  case ARM::VLD1LNdWB_register_Asm_8:
5830  case ARM::VLD1LNdWB_register_Asm_16:
5831  case ARM::VLD1LNdWB_register_Asm_32: {
5832    MCInst TmpInst;
5833    // Shuffle the operands around so the lane index operand is in the
5834    // right place.
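        // For example, "vld1.8 {d2[3]}, [r1], r4" loads a single byte lane; the
        // real VLD1LNd8_UPD also takes a tied source register (== Vd) so the
        // untouched lanes are preserved, which is why Vd is added a second time
        // below. (Register names are illustrative only.)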
5835    unsigned Spacing;
5836    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5837    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5838    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5839    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5840    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5841    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5842    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5843    TmpInst.addOperand(Inst.getOperand(1)); // lane
5844    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5845    TmpInst.addOperand(Inst.getOperand(6));
5846    Inst = TmpInst;
5847    return true;
5848  }
5849
5850  case ARM::VLD2LNdWB_register_Asm_8:
5851  case ARM::VLD2LNdWB_register_Asm_16:
5852  case ARM::VLD2LNdWB_register_Asm_32:
5853  case ARM::VLD2LNqWB_register_Asm_16:
5854  case ARM::VLD2LNqWB_register_Asm_32: {
5855    MCInst TmpInst;
5856    // Shuffle the operands around so the lane index operand is in the
5857    // right place.
5858    unsigned Spacing;
5859    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5860    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5861    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5862                                            Spacing));
5863    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5864    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5865    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5866    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5867    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5868    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5869                                            Spacing));
5870    TmpInst.addOperand(Inst.getOperand(1)); // lane
5871    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5872    TmpInst.addOperand(Inst.getOperand(6));
5873    Inst = TmpInst;
5874    return true;
5875  }
5876
5877  case ARM::VLD3LNdWB_register_Asm_8:
5878  case ARM::VLD3LNdWB_register_Asm_16:
5879  case ARM::VLD3LNdWB_register_Asm_32:
5880  case ARM::VLD3LNqWB_register_Asm_16:
5881  case ARM::VLD3LNqWB_register_Asm_32: {
5882    MCInst TmpInst;
5883    // Shuffle the operands around so the lane index operand is in the
5884    // right place.
5885    unsigned Spacing;
5886    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5887    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5888    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5889                                            Spacing));
5890    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5891                                            Spacing * 2));
5892    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5893    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5894    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5895    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5896    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5897    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5898                                            Spacing));
5899    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5900                                            Spacing * 2));
5901    TmpInst.addOperand(Inst.getOperand(1)); // lane
5902    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5903    TmpInst.addOperand(Inst.getOperand(6));
5904    Inst = TmpInst;
5905    return true;
5906  }
5907
5908  case ARM::VLD4LNdWB_register_Asm_8:
5909  case ARM::VLD4LNdWB_register_Asm_16:
5910  case ARM::VLD4LNdWB_register_Asm_32:
5911  case ARM::VLD4LNqWB_register_Asm_16:
5912  case ARM::VLD4LNqWB_register_Asm_32: {
5913    MCInst TmpInst;
5914    // Shuffle the operands around so the lane index operand is in the
5915    // right place.
5916    unsigned Spacing;
5917    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5918    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5919    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5920                                            Spacing));
5921    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5922                                            Spacing * 2));
5923    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5924                                            Spacing * 3));
5925    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5926    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5927    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5928    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5929    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5930    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5931                                            Spacing));
5932    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5933                                            Spacing * 2));
5934    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5935                                            Spacing * 3));
5936    TmpInst.addOperand(Inst.getOperand(1)); // lane
5937    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5938    TmpInst.addOperand(Inst.getOperand(6));
5939    Inst = TmpInst;
5940    return true;
5941  }
5942
5943  case ARM::VLD1LNdWB_fixed_Asm_8:
5944  case ARM::VLD1LNdWB_fixed_Asm_16:
5945  case ARM::VLD1LNdWB_fixed_Asm_32: {
5946    MCInst TmpInst;
5947    // Shuffle the operands around so the lane index operand is in the
5948    // right place.
5949    unsigned Spacing;
5950    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5951    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5952    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5953    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5954    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5955    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5956    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5957    TmpInst.addOperand(Inst.getOperand(1)); // lane
5958    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5959    TmpInst.addOperand(Inst.getOperand(5));
5960    Inst = TmpInst;
5961    return true;
5962  }
5963
5964  case ARM::VLD2LNdWB_fixed_Asm_8:
5965  case ARM::VLD2LNdWB_fixed_Asm_16:
5966  case ARM::VLD2LNdWB_fixed_Asm_32:
5967  case ARM::VLD2LNqWB_fixed_Asm_16:
5968  case ARM::VLD2LNqWB_fixed_Asm_32: {
5969    MCInst TmpInst;
5970    // Shuffle the operands around so the lane index operand is in the
5971    // right place.
5972    unsigned Spacing;
5973    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5974    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5975    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5976                                            Spacing));
5977    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5978    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5979    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5980    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5981    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5982    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5983                                            Spacing));
5984    TmpInst.addOperand(Inst.getOperand(1)); // lane
5985    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5986    TmpInst.addOperand(Inst.getOperand(5));
5987    Inst = TmpInst;
5988    return true;
5989  }
5990
5991  case ARM::VLD3LNdWB_fixed_Asm_8:
5992  case ARM::VLD3LNdWB_fixed_Asm_16:
5993  case ARM::VLD3LNdWB_fixed_Asm_32:
5994  case ARM::VLD3LNqWB_fixed_Asm_16:
5995  case ARM::VLD3LNqWB_fixed_Asm_32: {
5996    MCInst TmpInst;
5997    // Shuffle the operands around so the lane index operand is in the
5998    // right place.
5999    unsigned Spacing;
6000    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6001    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6002    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6003                                            Spacing));
6004    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6005                                            Spacing * 2));
6006    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6007    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6008    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6009    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6010    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6011    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6012                                            Spacing));
6013    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6014                                            Spacing * 2));
6015    TmpInst.addOperand(Inst.getOperand(1)); // lane
6016    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6017    TmpInst.addOperand(Inst.getOperand(5));
6018    Inst = TmpInst;
6019    return true;
6020  }
6021
6022  case ARM::VLD4LNdWB_fixed_Asm_8:
6023  case ARM::VLD4LNdWB_fixed_Asm_16:
6024  case ARM::VLD4LNdWB_fixed_Asm_32:
6025  case ARM::VLD4LNqWB_fixed_Asm_16:
6026  case ARM::VLD4LNqWB_fixed_Asm_32: {
6027    MCInst TmpInst;
6028    // Shuffle the operands around so the lane index operand is in the
6029    // right place.
6030    unsigned Spacing;
6031    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6032    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6033    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6034                                            Spacing));
6035    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6036                                            Spacing * 2));
6037    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6038                                            Spacing * 3));
6039    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6040    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6041    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6042    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6043    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6044    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6045                                            Spacing));
6046    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6047                                            Spacing * 2));
6048    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6049                                            Spacing * 3));
6050    TmpInst.addOperand(Inst.getOperand(1)); // lane
6051    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6052    TmpInst.addOperand(Inst.getOperand(5));
6053    Inst = TmpInst;
6054    return true;
6055  }
6056
6057  case ARM::VLD1LNdAsm_8:
6058  case ARM::VLD1LNdAsm_16:
6059  case ARM::VLD1LNdAsm_32: {
6060    MCInst TmpInst;
6061    // Shuffle the operands around so the lane index operand is in the
6062    // right place.
6063    unsigned Spacing;
6064    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6065    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6066    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6067    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6068    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6069    TmpInst.addOperand(Inst.getOperand(1)); // lane
6070    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6071    TmpInst.addOperand(Inst.getOperand(5));
6072    Inst = TmpInst;
6073    return true;
6074  }
6075
6076  case ARM::VLD2LNdAsm_8:
6077  case ARM::VLD2LNdAsm_16:
6078  case ARM::VLD2LNdAsm_32:
6079  case ARM::VLD2LNqAsm_16:
6080  case ARM::VLD2LNqAsm_32: {
6081    MCInst TmpInst;
6082    // Shuffle the operands around so the lane index operand is in the
6083    // right place.
6084    unsigned Spacing;
6085    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6086    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6087    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6088                                            Spacing));
6089    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6090    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6091    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6092    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6093                                            Spacing));
6094    TmpInst.addOperand(Inst.getOperand(1)); // lane
6095    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6096    TmpInst.addOperand(Inst.getOperand(5));
6097    Inst = TmpInst;
6098    return true;
6099  }
6100
6101  case ARM::VLD3LNdAsm_8:
6102  case ARM::VLD3LNdAsm_16:
6103  case ARM::VLD3LNdAsm_32:
6104  case ARM::VLD3LNqAsm_16:
6105  case ARM::VLD3LNqAsm_32: {
6106    MCInst TmpInst;
6107    // Shuffle the operands around so the lane index operand is in the
6108    // right place.
6109    unsigned Spacing;
6110    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6111    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6112    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6113                                            Spacing));
6114    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6115                                            Spacing * 2));
6116    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6117    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6118    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6119    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6120                                            Spacing));
6121    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6122                                            Spacing * 2));
6123    TmpInst.addOperand(Inst.getOperand(1)); // lane
6124    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6125    TmpInst.addOperand(Inst.getOperand(5));
6126    Inst = TmpInst;
6127    return true;
6128  }
6129
6130  case ARM::VLD4LNdAsm_8:
6131  case ARM::VLD4LNdAsm_16:
6132  case ARM::VLD4LNdAsm_32:
6133  case ARM::VLD4LNqAsm_16:
6134  case ARM::VLD4LNqAsm_32: {
6135    MCInst TmpInst;
6136    // Shuffle the operands around so the lane index operand is in the
6137    // right place.
6138    unsigned Spacing;
6139    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6140    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6141    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6142                                            Spacing));
6143    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6144                                            Spacing * 2));
6145    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6146                                            Spacing * 3));
6147    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6148    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6149    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6150    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6151                                            Spacing));
6152    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6153                                            Spacing * 2));
6154    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6155                                            Spacing * 3));
6156    TmpInst.addOperand(Inst.getOperand(1)); // lane
6157    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6158    TmpInst.addOperand(Inst.getOperand(5));
6159    Inst = TmpInst;
6160    return true;
6161  }
6162
6163  // VLD3DUP single 3-element structure to all lanes instructions.
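      // For example, "vld3.16 {d0[], d1[], d2[]}, [r4]" maps to VLD3DUPd16
      // (Spacing == 1), while the every-other-register list
      // "vld3.16 {d0[], d2[], d4[]}, [r4]" maps to VLD3DUPq16 (Spacing == 2).
      // The pseudo carries only the first register of the list, so the other
      // two are derived from it below. (Register choices are illustrative.)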
6164  case ARM::VLD3DUPdAsm_8:
6165  case ARM::VLD3DUPdAsm_16:
6166  case ARM::VLD3DUPdAsm_32:
6167  case ARM::VLD3DUPqAsm_8:
6168  case ARM::VLD3DUPqAsm_16:
6169  case ARM::VLD3DUPqAsm_32: {
6170    MCInst TmpInst;
6171    unsigned Spacing;
6172    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6173    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6174    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6175                                            Spacing));
6176    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6177                                            Spacing * 2));
6178    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6179    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6180    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6181    TmpInst.addOperand(Inst.getOperand(4));
6182    Inst = TmpInst;
6183    return true;
6184  }
6185
6186  case ARM::VLD3DUPdWB_fixed_Asm_8:
6187  case ARM::VLD3DUPdWB_fixed_Asm_16:
6188  case ARM::VLD3DUPdWB_fixed_Asm_32:
6189  case ARM::VLD3DUPqWB_fixed_Asm_8:
6190  case ARM::VLD3DUPqWB_fixed_Asm_16:
6191  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6192    MCInst TmpInst;
6193    unsigned Spacing;
6194    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6195    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6196    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6197                                            Spacing));
6198    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6199                                            Spacing * 2));
6200    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6201    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6202    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6203    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6204    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6205    TmpInst.addOperand(Inst.getOperand(4));
6206    Inst = TmpInst;
6207    return true;
6208  }
6209
6210  case ARM::VLD3DUPdWB_register_Asm_8:
6211  case ARM::VLD3DUPdWB_register_Asm_16:
6212  case ARM::VLD3DUPdWB_register_Asm_32:
6213  case ARM::VLD3DUPqWB_register_Asm_8:
6214  case ARM::VLD3DUPqWB_register_Asm_16:
6215  case ARM::VLD3DUPqWB_register_Asm_32: {
6216    MCInst TmpInst;
6217    unsigned Spacing;
6218    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6219    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6220    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6221                                            Spacing));
6222    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6223                                            Spacing * 2));
6224    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6225    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6226    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6227    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6228    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6229    TmpInst.addOperand(Inst.getOperand(5));
6230    Inst = TmpInst;
6231    return true;
6232  }
6233
6234  // VLD3 multiple 3-element structure instructions.
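      // For example, "vld3.8 {d0, d1, d2}, [r0]" selects the d-register form
      // (Spacing == 1) and "vld3.8 {d0, d2, d4}, [r0]" the q-register form
      // (Spacing == 2); as with the all-lanes variants above, the second and
      // third registers are reconstructed from the first operand plus Spacing.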
6235  case ARM::VLD3dAsm_8:
6236  case ARM::VLD3dAsm_16:
6237  case ARM::VLD3dAsm_32:
6238  case ARM::VLD3qAsm_8:
6239  case ARM::VLD3qAsm_16:
6240  case ARM::VLD3qAsm_32: {
6241    MCInst TmpInst;
6242    unsigned Spacing;
6243    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6244    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6245    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6246                                            Spacing));
6247    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6248                                            Spacing * 2));
6249    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6250    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6251    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6252    TmpInst.addOperand(Inst.getOperand(4));
6253    Inst = TmpInst;
6254    return true;
6255  }
6256
6257  case ARM::VLD3dWB_fixed_Asm_8:
6258  case ARM::VLD3dWB_fixed_Asm_16:
6259  case ARM::VLD3dWB_fixed_Asm_32:
6260  case ARM::VLD3qWB_fixed_Asm_8:
6261  case ARM::VLD3qWB_fixed_Asm_16:
6262  case ARM::VLD3qWB_fixed_Asm_32: {
6263    MCInst TmpInst;
6264    unsigned Spacing;
6265    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6266    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6267    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6268                                            Spacing));
6269    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6270                                            Spacing * 2));
6271    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6272    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6273    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6274    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6275    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6276    TmpInst.addOperand(Inst.getOperand(4));
6277    Inst = TmpInst;
6278    return true;
6279  }
6280
6281  case ARM::VLD3dWB_register_Asm_8:
6282  case ARM::VLD3dWB_register_Asm_16:
6283  case ARM::VLD3dWB_register_Asm_32:
6284  case ARM::VLD3qWB_register_Asm_8:
6285  case ARM::VLD3qWB_register_Asm_16:
6286  case ARM::VLD3qWB_register_Asm_32: {
6287    MCInst TmpInst;
6288    unsigned Spacing;
6289    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6290    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6291    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6292                                            Spacing));
6293    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6294                                            Spacing * 2));
6295    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6296    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6297    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6298    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6299    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6300    TmpInst.addOperand(Inst.getOperand(5));
6301    Inst = TmpInst;
6302    return true;
6303  }
6304
6305  // VLD4DUP single 4-element structure to all lanes instructions.
6306  case ARM::VLD4DUPdAsm_8:
6307  case ARM::VLD4DUPdAsm_16:
6308  case ARM::VLD4DUPdAsm_32:
6309  case ARM::VLD4DUPqAsm_8:
6310  case ARM::VLD4DUPqAsm_16:
6311  case ARM::VLD4DUPqAsm_32: {
6312    MCInst TmpInst;
6313    unsigned Spacing;
6314    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6315    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6316    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6317                                            Spacing));
6318    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6319                                            Spacing * 2));
6320    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6321                                            Spacing * 3));
6322    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6323    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6324    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6325    TmpInst.addOperand(Inst.getOperand(4));
6326    Inst = TmpInst;
6327    return true;
6328  }
6329
6330  case ARM::VLD4DUPdWB_fixed_Asm_8:
6331  case ARM::VLD4DUPdWB_fixed_Asm_16:
6332  case ARM::VLD4DUPdWB_fixed_Asm_32:
6333  case ARM::VLD4DUPqWB_fixed_Asm_8:
6334  case ARM::VLD4DUPqWB_fixed_Asm_16:
6335  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6336    MCInst TmpInst;
6337    unsigned Spacing;
6338    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6339    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6340    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6341                                            Spacing));
6342    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6343                                            Spacing * 2));
6344    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6345                                            Spacing * 3));
6346    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6347    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6348    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6349    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6350    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6351    TmpInst.addOperand(Inst.getOperand(4));
6352    Inst = TmpInst;
6353    return true;
6354  }
6355
6356  case ARM::VLD4DUPdWB_register_Asm_8:
6357  case ARM::VLD4DUPdWB_register_Asm_16:
6358  case ARM::VLD4DUPdWB_register_Asm_32:
6359  case ARM::VLD4DUPqWB_register_Asm_8:
6360  case ARM::VLD4DUPqWB_register_Asm_16:
6361  case ARM::VLD4DUPqWB_register_Asm_32: {
6362    MCInst TmpInst;
6363    unsigned Spacing;
6364    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6365    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6366    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6367                                            Spacing));
6368    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6369                                            Spacing * 2));
6370    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6371                                            Spacing * 3));
6372    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6373    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6374    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6375    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6376    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6377    TmpInst.addOperand(Inst.getOperand(5));
6378    Inst = TmpInst;
6379    return true;
6380  }
6381
6382  // VLD4 multiple 4-element structure instructions.
6383  case ARM::VLD4dAsm_8:
6384  case ARM::VLD4dAsm_16:
6385  case ARM::VLD4dAsm_32:
6386  case ARM::VLD4qAsm_8:
6387  case ARM::VLD4qAsm_16:
6388  case ARM::VLD4qAsm_32: {
6389    MCInst TmpInst;
6390    unsigned Spacing;
6391    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6392    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6393    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6394                                            Spacing));
6395    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6396                                            Spacing * 2));
6397    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6398                                            Spacing * 3));
6399    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6400    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6401    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6402    TmpInst.addOperand(Inst.getOperand(4));
6403    Inst = TmpInst;
6404    return true;
6405  }
6406
6407  case ARM::VLD4dWB_fixed_Asm_8:
6408  case ARM::VLD4dWB_fixed_Asm_16:
6409  case ARM::VLD4dWB_fixed_Asm_32:
6410  case ARM::VLD4qWB_fixed_Asm_8:
6411  case ARM::VLD4qWB_fixed_Asm_16:
6412  case ARM::VLD4qWB_fixed_Asm_32: {
6413    MCInst TmpInst;
6414    unsigned Spacing;
6415    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6416    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6417    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6418                                            Spacing));
6419    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6420                                            Spacing * 2));
6421    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6422                                            Spacing * 3));
6423    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6424    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6425    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6426    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6427    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6428    TmpInst.addOperand(Inst.getOperand(4));
6429    Inst = TmpInst;
6430    return true;
6431  }
6432
6433  case ARM::VLD4dWB_register_Asm_8:
6434  case ARM::VLD4dWB_register_Asm_16:
6435  case ARM::VLD4dWB_register_Asm_32:
6436  case ARM::VLD4qWB_register_Asm_8:
6437  case ARM::VLD4qWB_register_Asm_16:
6438  case ARM::VLD4qWB_register_Asm_32: {
6439    MCInst TmpInst;
6440    unsigned Spacing;
6441    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6442    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6443    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6444                                            Spacing));
6445    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6446                                            Spacing * 2));
6447    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6448                                            Spacing * 3));
6449    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6450    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6451    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6452    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6453    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6454    TmpInst.addOperand(Inst.getOperand(5));
6455    Inst = TmpInst;
6456    return true;
6457  }
6458
6459  // VST3 multiple 3-element structure instructions.
6460  case ARM::VST3dAsm_8:
6461  case ARM::VST3dAsm_16:
6462  case ARM::VST3dAsm_32:
6463  case ARM::VST3qAsm_8:
6464  case ARM::VST3qAsm_16:
6465  case ARM::VST3qAsm_32: {
6466    MCInst TmpInst;
6467    unsigned Spacing;
6468    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6469    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6470    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6471    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6472    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6473                                            Spacing));
6474    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6475                                            Spacing * 2));
6476    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6477    TmpInst.addOperand(Inst.getOperand(4));
6478    Inst = TmpInst;
6479    return true;
6480  }
6481
6482  case ARM::VST3dWB_fixed_Asm_8:
6483  case ARM::VST3dWB_fixed_Asm_16:
6484  case ARM::VST3dWB_fixed_Asm_32:
6485  case ARM::VST3qWB_fixed_Asm_8:
6486  case ARM::VST3qWB_fixed_Asm_16:
6487  case ARM::VST3qWB_fixed_Asm_32: {
6488    MCInst TmpInst;
6489    unsigned Spacing;
6490    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6491    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6492    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6493    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6494    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6495    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6496    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6497                                            Spacing));
6498    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6499                                            Spacing * 2));
6500    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6501    TmpInst.addOperand(Inst.getOperand(4));
6502    Inst = TmpInst;
6503    return true;
6504  }
6505
6506  case ARM::VST3dWB_register_Asm_8:
6507  case ARM::VST3dWB_register_Asm_16:
6508  case ARM::VST3dWB_register_Asm_32:
6509  case ARM::VST3qWB_register_Asm_8:
6510  case ARM::VST3qWB_register_Asm_16:
6511  case ARM::VST3qWB_register_Asm_32: {
6512    MCInst TmpInst;
6513    unsigned Spacing;
6514    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6515    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6516    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6517    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6518    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6519    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6520    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6521                                            Spacing));
6522    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6523                                            Spacing * 2));
6524    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6525    TmpInst.addOperand(Inst.getOperand(5));
6526    Inst = TmpInst;
6527    return true;
6528  }
6529
6530  // VST4 multiple 4-element structure instructions.
6531  case ARM::VST4dAsm_8:
6532  case ARM::VST4dAsm_16:
6533  case ARM::VST4dAsm_32:
6534  case ARM::VST4qAsm_8:
6535  case ARM::VST4qAsm_16:
6536  case ARM::VST4qAsm_32: {
6537    MCInst TmpInst;
6538    unsigned Spacing;
6539    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6540    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6541    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6542    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6543    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6544                                            Spacing));
6545    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6546                                            Spacing * 2));
6547    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6548                                            Spacing * 3));
6549    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6550    TmpInst.addOperand(Inst.getOperand(4));
6551    Inst = TmpInst;
6552    return true;
6553  }
6554
6555  case ARM::VST4dWB_fixed_Asm_8:
6556  case ARM::VST4dWB_fixed_Asm_16:
6557  case ARM::VST4dWB_fixed_Asm_32:
6558  case ARM::VST4qWB_fixed_Asm_8:
6559  case ARM::VST4qWB_fixed_Asm_16:
6560  case ARM::VST4qWB_fixed_Asm_32: {
6561    MCInst TmpInst;
6562    unsigned Spacing;
6563    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6564    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6565    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6566    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6567    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6568    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6569    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6570                                            Spacing));
6571    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6572                                            Spacing * 2));
6573    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6574                                            Spacing * 3));
6575    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6576    TmpInst.addOperand(Inst.getOperand(4));
6577    Inst = TmpInst;
6578    return true;
6579  }
6580
6581  case ARM::VST4dWB_register_Asm_8:
6582  case ARM::VST4dWB_register_Asm_16:
6583  case ARM::VST4dWB_register_Asm_32:
6584  case ARM::VST4qWB_register_Asm_8:
6585  case ARM::VST4qWB_register_Asm_16:
6586  case ARM::VST4qWB_register_Asm_32: {
6587    MCInst TmpInst;
6588    unsigned Spacing;
6589    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6590    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6591    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6592    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6593    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6594    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6595    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6596                                            Spacing));
6597    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6598                                            Spacing * 2));
6599    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6600                                            Spacing * 3));
6601    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6602    TmpInst.addOperand(Inst.getOperand(5));
6603    Inst = TmpInst;
6604    return true;
6605  }
6606
6607  // Handle the Thumb2 mode MOV complex aliases.
6608  case ARM::t2MOVsr:
6609  case ARM::t2MOVSsr: {
6610    // Which instruction to expand to depends on the CCOut operand and,
6611    // when the register operands are all low registers, whether we're
6612    // inside an IT block.
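        // For example, "lsls r2, r3" outside an IT block can use the 16-bit
        // tLSLrr encoding (which sets flags), whereas the non-flag-setting
        // "lsl r2, r3" only has a narrow encoding inside an IT block; in all
        // other cases the 32-bit t2LSLrr encoding is required.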
6613    bool isNarrow = false;
6614    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6615        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6616        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6617        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6618        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6619      isNarrow = true;
6620    MCInst TmpInst;
6621    unsigned newOpc;
6622    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6623    default: llvm_unreachable("unexpected opcode!");
6624    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6625    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6626    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6627    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6628    }
6629    TmpInst.setOpcode(newOpc);
6630    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6631    if (isNarrow)
6632      TmpInst.addOperand(MCOperand::CreateReg(
6633          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6634    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6635    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6636    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6637    TmpInst.addOperand(Inst.getOperand(5));
6638    if (!isNarrow)
6639      TmpInst.addOperand(MCOperand::CreateReg(
6640          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6641    Inst = TmpInst;
6642    return true;
6643  }
6644  case ARM::t2MOVsi:
6645  case ARM::t2MOVSsi: {
6646    // Which instruction to expand to depends on the CCOut operand and,
6647    // when the register operands are all low registers, whether we're
6648    // inside an IT block.
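        // For example, "lsrs r0, r1, #2" outside an IT block can use the
        // 16-bit tLSRri encoding; rotates have no 16-bit immediate form, so
        // "ror" always falls back to the 32-bit t2RORri (see the switch below).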
6649    bool isNarrow = false;
6650    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6651        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6652        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6653      isNarrow = true;
6654    MCInst TmpInst;
6655    unsigned newOpc;
6656    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6657    default: llvm_unreachable("unexpected opcode!");
6658    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6659    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6660    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6661    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6662    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6663    }
6664    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6665    if (Amount == 32) Amount = 0;
6666    TmpInst.setOpcode(newOpc);
6667    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6668    if (isNarrow)
6669      TmpInst.addOperand(MCOperand::CreateReg(
6670          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6671    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6672    if (newOpc != ARM::t2RRX)
6673      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6674    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6675    TmpInst.addOperand(Inst.getOperand(4));
6676    if (!isNarrow)
6677      TmpInst.addOperand(MCOperand::CreateReg(
6678          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6679    Inst = TmpInst;
6680    return true;
6681  }
6682  // Handle the ARM mode MOV complex aliases.
6683  case ARM::ASRr:
6684  case ARM::LSRr:
6685  case ARM::LSLr:
6686  case ARM::RORr: {
6687    ARM_AM::ShiftOpc ShiftTy;
6688    switch(Inst.getOpcode()) {
6689    default: llvm_unreachable("unexpected opcode!");
6690    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6691    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6692    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6693    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6694    }
6695    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6696    MCInst TmpInst;
6697    TmpInst.setOpcode(ARM::MOVsr);
6698    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6699    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6700    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6701    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6702    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6703    TmpInst.addOperand(Inst.getOperand(4));
6704    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6705    Inst = TmpInst;
6706    return true;
6707  }
6708  case ARM::ASRi:
6709  case ARM::LSRi:
6710  case ARM::LSLi:
6711  case ARM::RORi: {
6712    ARM_AM::ShiftOpc ShiftTy;
6713    switch(Inst.getOpcode()) {
6714    default: llvm_unreachable("unexpected opcode!");
6715    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6716    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6717    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6718    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6719    }
6720    // A shift by zero is a plain MOVr, not a MOVsi.
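    // e.g. 'lsl r0, r1, #0' is emitted as a plain 'mov r0, r1'.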
6721    unsigned Amt = Inst.getOperand(2).getImm();
6722    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6723    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6724    MCInst TmpInst;
6725    TmpInst.setOpcode(Opc);
6726    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6727    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6728    if (Opc == ARM::MOVsi)
6729      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6730    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6731    TmpInst.addOperand(Inst.getOperand(4));
6732    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6733    Inst = TmpInst;
6734    return true;
6735  }
6736  case ARM::RRXi: {
6737    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6738    MCInst TmpInst;
6739    TmpInst.setOpcode(ARM::MOVsi);
6740    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6741    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6742    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6743    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6744    TmpInst.addOperand(Inst.getOperand(3));
6745    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6746    Inst = TmpInst;
6747    return true;
6748  }
6749  case ARM::t2LDMIA_UPD: {
6750    // If this is a load of a single register, then we should use
6751    // a post-indexed LDR instruction instead, per the ARM ARM.
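    // e.g. 'ldmia r0!, {r1}' becomes 'ldr r1, [r0], #4'.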
6752    if (Inst.getNumOperands() != 5)
6753      return false;
6754    MCInst TmpInst;
6755    TmpInst.setOpcode(ARM::t2LDR_POST);
6756    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6757    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6758    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6759    TmpInst.addOperand(MCOperand::CreateImm(4));
6760    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6761    TmpInst.addOperand(Inst.getOperand(3));
6762    Inst = TmpInst;
6763    return true;
6764  }
6765  case ARM::t2STMDB_UPD: {
6766    // If this is a store of a single register, then we should use
6767    // a pre-indexed STR instruction instead, per the ARM ARM.
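    // e.g. 'stmdb r0!, {r1}' becomes 'str r1, [r0, #-4]!'.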
6768    if (Inst.getNumOperands() != 5)
6769      return false;
6770    MCInst TmpInst;
6771    TmpInst.setOpcode(ARM::t2STR_PRE);
6772    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6773    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6774    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6775    TmpInst.addOperand(MCOperand::CreateImm(-4));
6776    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6777    TmpInst.addOperand(Inst.getOperand(3));
6778    Inst = TmpInst;
6779    return true;
6780  }
6781  case ARM::LDMIA_UPD:
6782    // If this is a load of a single register via a 'pop', then we should use
6783    // a post-indexed LDR instruction instead, per the ARM ARM.
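    // e.g. 'pop {r0}' in ARM mode becomes 'ldr r0, [sp], #4'.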
6784    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6785        Inst.getNumOperands() == 5) {
6786      MCInst TmpInst;
6787      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6788      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6789      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6790      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6791      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6792      TmpInst.addOperand(MCOperand::CreateImm(4));
6793      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6794      TmpInst.addOperand(Inst.getOperand(3));
6795      Inst = TmpInst;
6796      return true;
6797    }
6798    break;
6799  case ARM::STMDB_UPD:
6800    // If this is a store of a single register via a 'push', then we should use
6801    // a pre-indexed STR instruction instead, per the ARM ARM.
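    // e.g. 'push {r0}' in ARM mode becomes 'str r0, [sp, #-4]!'.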
6802    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6803        Inst.getNumOperands() == 5) {
6804      MCInst TmpInst;
6805      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6806      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6807      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6808      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6809      TmpInst.addOperand(MCOperand::CreateImm(-4));
6810      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6811      TmpInst.addOperand(Inst.getOperand(3));
6812      Inst = TmpInst;
6813    }
6814    break;
6815  case ARM::t2ADDri12:
6816    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6817    // mnemonic was used (not "addw"), encoding T3 is preferred.
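    // e.g. plain 'add r0, r1, #10' prefers encoding T3 (t2ADDri), while
    // 'addw r0, r1, #10' keeps encoding T4 (t2ADDri12).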
6818    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6819        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6820      break;
6821    Inst.setOpcode(ARM::t2ADDri);
6822    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6823    break;
6824  case ARM::t2SUBri12:
6825    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6826    // mnemonic was used (not "subw"), encoding T3 is preferred.
6827    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6828        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6829      break;
6830    Inst.setOpcode(ARM::t2SUBri);
6831    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6832    break;
6833  case ARM::tADDi8:
6834    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6835    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6836    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6837    // to encoding T1 if <Rd> is omitted."
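    // e.g. 'adds r1, r1, #3' (with <Rd> written) prefers encoding T1
    // (tADDi3), while 'adds r1, #3' keeps encoding T2 (tADDi8).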
6838    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6839      Inst.setOpcode(ARM::tADDi3);
6840      return true;
6841    }
6842    break;
6843  case ARM::tSUBi8:
6844    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6845    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6846    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6847    // to encoding T1 if <Rd> is omitted."
6848    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6849      Inst.setOpcode(ARM::tSUBi3);
6850      return true;
6851    }
6852    break;
6853  case ARM::t2ADDrr: {
6854    // If the destination and first source operand are the same, and
6855    // there's no setting of the flags, use encoding T2 instead of T3.
6856    // Note that this is only for ADD, not SUB. This mirrors the system
6857    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
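    // e.g. 'add r0, r0, r8' can use the 16-bit tADDhirr encoding, while
    // 'add.w r0, r0, r8' stays 32-bit.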
6858    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6859        Inst.getOperand(5).getReg() != 0 ||
6860        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6861         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6862      break;
6863    MCInst TmpInst;
6864    TmpInst.setOpcode(ARM::tADDhirr);
6865    TmpInst.addOperand(Inst.getOperand(0));
6866    TmpInst.addOperand(Inst.getOperand(0));
6867    TmpInst.addOperand(Inst.getOperand(2));
6868    TmpInst.addOperand(Inst.getOperand(3));
6869    TmpInst.addOperand(Inst.getOperand(4));
6870    Inst = TmpInst;
6871    return true;
6872  }
6873  case ARM::tB:
6874    // A Thumb conditional branch outside of an IT block is a tBcc.
6875    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6876      Inst.setOpcode(ARM::tBcc);
6877      return true;
6878    }
6879    break;
6880  case ARM::t2B:
6881    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6882    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6883      Inst.setOpcode(ARM::t2Bcc);
6884      return true;
6885    }
6886    break;
6887  case ARM::t2Bcc:
6888    // If the conditional is AL or we're in an IT block, we really want t2B.
6889    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6890      Inst.setOpcode(ARM::t2B);
6891      return true;
6892    }
6893    break;
6894  case ARM::tBcc:
6895    // If the conditional is AL, we really want tB.
6896    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6897      Inst.setOpcode(ARM::tB);
6898      return true;
6899    }
6900    break;
6901  case ARM::tLDMIA: {
6902    // If the register list contains any high registers, or if the writeback
6903    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6904    // instead if we're in Thumb2. Otherwise, this should have generated
6905    // an error in validateInstruction().
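    // e.g. 'ldmia r0, {r1, r8}' (high register in the list) needs the
    // 32-bit t2LDMIA encoding.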
6906    unsigned Rn = Inst.getOperand(0).getReg();
6907    bool hasWritebackToken =
6908      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6909       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6910    bool listContainsBase;
6911    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6912        (!listContainsBase && !hasWritebackToken) ||
6913        (listContainsBase && hasWritebackToken)) {
6914      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6915      assert(isThumbTwo());
6916      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6917      // If we're switching to the updating version, we need to insert
6918      // the writeback tied operand.
6919      if (hasWritebackToken)
6920        Inst.insert(Inst.begin(),
6921                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6922      return true;
6923    }
6924    break;
6925  }
6926  case ARM::tSTMIA_UPD: {
6927    // If the register list contains any high registers, we need to use
6928    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6929    // should have generated an error in validateInstruction().
6930    unsigned Rn = Inst.getOperand(0).getReg();
6931    bool listContainsBase;
6932    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6933      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6934      assert(isThumbTwo());
6935      Inst.setOpcode(ARM::t2STMIA_UPD);
6936      return true;
6937    }
6938    break;
6939  }
6940  case ARM::tPOP: {
6941    bool listContainsBase;
6942    // If the register list contains any high registers, we need to use
6943    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6944    // should have generated an error in validateInstruction().
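    // e.g. 'pop {r0, r8}' needs the 32-bit t2LDMIA_UPD form with SP writeback.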
6945    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6946      return false;
6947    assert(isThumbTwo());
6948    Inst.setOpcode(ARM::t2LDMIA_UPD);
6949    // Add the base register and writeback operands.
6950    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6951    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6952    return true;
6953  }
6954  case ARM::tPUSH: {
6955    bool listContainsBase;
6956    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6957      return false;
6958    assert(isThumbTwo());
6959    Inst.setOpcode(ARM::t2STMDB_UPD);
6960    // Add the base register and writeback operands.
6961    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6962    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6963    return true;
6964  }
6965  case ARM::t2MOVi: {
6966    // If we can use the 16-bit encoding and the user didn't explicitly
6967    // request the 32-bit variant, transform it here.
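    // e.g. outside an IT block, 'movs r0, #1' can use the 16-bit tMOVi8.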
6968    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6969        Inst.getOperand(1).getImm() <= 255 &&
6970        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6971         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6972        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6973        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6974         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6975      // The operands aren't in the same order for tMOVi8...
6976      MCInst TmpInst;
6977      TmpInst.setOpcode(ARM::tMOVi8);
6978      TmpInst.addOperand(Inst.getOperand(0));
6979      TmpInst.addOperand(Inst.getOperand(4));
6980      TmpInst.addOperand(Inst.getOperand(1));
6981      TmpInst.addOperand(Inst.getOperand(2));
6982      TmpInst.addOperand(Inst.getOperand(3));
6983      Inst = TmpInst;
6984      return true;
6985    }
6986    break;
6987  }
6988  case ARM::t2MOVr: {
6989    // If we can use the 16-bit encoding and the user didn't explicitly
6990    // request the 32-bit variant, transform it here.
6991    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6992        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6993        Inst.getOperand(2).getImm() == ARMCC::AL &&
6994        Inst.getOperand(4).getReg() == ARM::CPSR &&
6995        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6996         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6997      // The operands aren't the same for tMOV[S]r... (no cc_out)
6998      MCInst TmpInst;
6999      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7000      TmpInst.addOperand(Inst.getOperand(0));
7001      TmpInst.addOperand(Inst.getOperand(1));
7002      TmpInst.addOperand(Inst.getOperand(2));
7003      TmpInst.addOperand(Inst.getOperand(3));
7004      Inst = TmpInst;
7005      return true;
7006    }
7007    break;
7008  }
7009  case ARM::t2SXTH:
7010  case ARM::t2SXTB:
7011  case ARM::t2UXTH:
7012  case ARM::t2UXTB: {
7013    // If we can use the 16-bit encoding and the user didn't explicitly
7014    // request the 32-bit variant, transform it here.
7015    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7016        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7017        Inst.getOperand(2).getImm() == 0 &&
7018        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7019         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7020      unsigned NewOpc;
7021      switch (Inst.getOpcode()) {
7022      default: llvm_unreachable("Illegal opcode!");
7023      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7024      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7025      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7026      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7027      }
7028      // The operands aren't the same for Thumb1 (no rotate operand).
7029      MCInst TmpInst;
7030      TmpInst.setOpcode(NewOpc);
7031      TmpInst.addOperand(Inst.getOperand(0));
7032      TmpInst.addOperand(Inst.getOperand(1));
7033      TmpInst.addOperand(Inst.getOperand(3));
7034      TmpInst.addOperand(Inst.getOperand(4));
7035      Inst = TmpInst;
7036      return true;
7037    }
7038    break;
7039  }
7040  case ARM::MOVsi: {
7041    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7042    if (SOpc == ARM_AM::rrx) return false;
7043    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7044      // Shifting by zero is accepted as a vanilla 'MOVr'
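      // e.g. 'mov r0, r1, lsl #0' is emitted as 'mov r0, r1'.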
7045      MCInst TmpInst;
7046      TmpInst.setOpcode(ARM::MOVr);
7047      TmpInst.addOperand(Inst.getOperand(0));
7048      TmpInst.addOperand(Inst.getOperand(1));
7049      TmpInst.addOperand(Inst.getOperand(3));
7050      TmpInst.addOperand(Inst.getOperand(4));
7051      TmpInst.addOperand(Inst.getOperand(5));
7052      Inst = TmpInst;
7053      return true;
7054    }
7055    return false;
7056  }
7057  case ARM::ANDrsi:
7058  case ARM::ORRrsi:
7059  case ARM::EORrsi:
7060  case ARM::BICrsi:
7061  case ARM::SUBrsi:
7062  case ARM::ADDrsi: {
7063    unsigned newOpc;
7064    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7065    if (SOpc == ARM_AM::rrx) return false;
7066    switch (Inst.getOpcode()) {
7067    default: llvm_unreachable("unexpected opcode!");
7068    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7069    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7070    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7071    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7072    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7073    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7074    }
7075    // If the shift is by zero, use the non-shifted instruction definition.
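    // e.g. 'and r0, r1, r2, lsl #0' is emitted as 'and r0, r1, r2'.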
7076    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7077      MCInst TmpInst;
7078      TmpInst.setOpcode(newOpc);
7079      TmpInst.addOperand(Inst.getOperand(0));
7080      TmpInst.addOperand(Inst.getOperand(1));
7081      TmpInst.addOperand(Inst.getOperand(2));
7082      TmpInst.addOperand(Inst.getOperand(4));
7083      TmpInst.addOperand(Inst.getOperand(5));
7084      TmpInst.addOperand(Inst.getOperand(6));
7085      Inst = TmpInst;
7086      return true;
7087    }
7088    return false;
7089  }
7090  case ARM::ITasm:
7091  case ARM::t2IT: {
7092    // The mask bits for all but the first condition are represented as
7093    // the low bit of the condition code value implies 't'. We currently
7094    // always have 1 implies 't', so XOR toggle the bits if the low bit
7095    // of the condition code is zero. The encoding also expects the low
7096    // bit of the condition to be encoded as bit 4 of the mask operand,
7097    // so mask that in if needed
7098    MCOperand &MO = Inst.getOperand(1);
7099    unsigned Mask = MO.getImm();
7100    unsigned OrigMask = Mask;
7101    unsigned TZ = CountTrailingZeros_32(Mask);
7102    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7103      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7104      for (unsigned i = 3; i != TZ; --i)
7105        Mask ^= 1 << i;
7106    } else
7107      Mask |= 0x10;
7108    MO.setImm(Mask);
7109
7110    // Set up the IT block state according to the IT instruction we just
7111    // matched.
7112    assert(!inITBlock() && "nested IT blocks?!");
7113    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7114    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7115    ITState.CurPosition = 0;
7116    ITState.FirstCond = true;
7117    break;
7118  }
7119  }
7120  return false;
7121}
7122
7123unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7124  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7125  // suffix depending on whether they're in an IT block or not.
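  // e.g. the 16-bit 'adds r0, r1, r2' is the required form outside an IT
  // block, while the non-flag-setting 'add r0, r1, r2' is required inside one.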
7126  unsigned Opc = Inst.getOpcode();
7127  const MCInstrDesc &MCID = getInstDesc(Opc);
7128  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7129    assert(MCID.hasOptionalDef() &&
7130           "optionally flag setting instruction missing optional def operand");
7131    assert(MCID.NumOperands == Inst.getNumOperands() &&
7132           "operand count mismatch!");
7133    // Find the optional-def operand (cc_out).
7134    unsigned OpNo;
7135    for (OpNo = 0;
7136         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7137         ++OpNo)
7138      ;
7139    // If we're parsing Thumb1, reject it completely.
7140    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7141      return Match_MnemonicFail;
7142    // If we're parsing Thumb2, which form is legal depends on whether we're
7143    // in an IT block.
7144    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7145        !inITBlock())
7146      return Match_RequiresITBlock;
7147    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7148        inITBlock())
7149      return Match_RequiresNotITBlock;
7150  }
7151  // Some Thumb1 encodings that support high registers only allow both
7152  // registers to be from r0-r7 when assembling for Thumb2.
7153  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7154           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7155           isARMLowRegister(Inst.getOperand(2).getReg()))
7156    return Match_RequiresThumb2;
7157  // Others only require ARMv6 or later.
7158  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7159           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7160           isARMLowRegister(Inst.getOperand(1).getReg()))
7161    return Match_RequiresV6;
7162  return Match_Success;
7163}
7164
7165bool ARMAsmParser::
7166MatchAndEmitInstruction(SMLoc IDLoc,
7167                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7168                        MCStreamer &Out) {
7169  MCInst Inst;
7170  unsigned ErrorInfo;
7171  unsigned MatchResult;
7172  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7173  switch (MatchResult) {
7174  default: break;
7175  case Match_Success:
7176    // Context-sensitive operand constraints aren't handled by the matcher,
7177    // so check them here.
7178    if (validateInstruction(Inst, Operands)) {
7179      // Still progress the IT block, otherwise one wrong condition causes
7180      // nasty cascading errors.
7181      forwardITPosition();
7182      return true;
7183    }
7184
7185    // Some instructions need post-processing to, for example, tweak which
7186    // encoding is selected. Loop on it while changes happen so the
7187    // individual transformations can chain off each other. E.g.,
7188    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7189    while (processInstruction(Inst, Operands))
7190      ;
7191
7192    // Only move forward at the very end so that everything in validate
7193    // and process gets a consistent answer about whether we're in an IT
7194    // block.
7195    forwardITPosition();
7196
7197    // ITasm is an ARM mode pseudo-instruction that just sets the IT block
7198    // doesn't actually encode.
7199    if (Inst.getOpcode() == ARM::ITasm)
7200      return false;
7201
7202    Inst.setLoc(IDLoc);
7203    Out.EmitInstruction(Inst);
7204    return false;
7205  case Match_MissingFeature:
7206    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7207    return true;
7208  case Match_InvalidOperand: {
7209    SMLoc ErrorLoc = IDLoc;
7210    if (ErrorInfo != ~0U) {
7211      if (ErrorInfo >= Operands.size())
7212        return Error(IDLoc, "too few operands for instruction");
7213
7214      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7215      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7216    }
7217
7218    return Error(ErrorLoc, "invalid operand for instruction");
7219  }
7220  case Match_MnemonicFail:
7221    return Error(IDLoc, "invalid instruction");
7222  case Match_ConversionFail:
7223    // The converter function will have already emitted a diagnostic.
7224    return true;
7225  case Match_RequiresNotITBlock:
7226    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7227  case Match_RequiresITBlock:
7228    return Error(IDLoc, "instruction only valid inside IT block");
7229  case Match_RequiresV6:
7230    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7231  case Match_RequiresThumb2:
7232    return Error(IDLoc, "instruction variant requires Thumb2");
7233  }
7234
7235  llvm_unreachable("Implement any new match types added!");
7236}
7237
7238/// parseDirective parses the ARM-specific directives.
7239bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7240  StringRef IDVal = DirectiveID.getIdentifier();
7241  if (IDVal == ".word")
7242    return parseDirectiveWord(4, DirectiveID.getLoc());
7243  else if (IDVal == ".thumb")
7244    return parseDirectiveThumb(DirectiveID.getLoc());
7245  else if (IDVal == ".arm")
7246    return parseDirectiveARM(DirectiveID.getLoc());
7247  else if (IDVal == ".thumb_func")
7248    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7249  else if (IDVal == ".code")
7250    return parseDirectiveCode(DirectiveID.getLoc());
7251  else if (IDVal == ".syntax")
7252    return parseDirectiveSyntax(DirectiveID.getLoc());
7253  else if (IDVal == ".unreq")
7254    return parseDirectiveUnreq(DirectiveID.getLoc());
7255  else if (IDVal == ".arch")
7256    return parseDirectiveArch(DirectiveID.getLoc());
7257  else if (IDVal == ".eabi_attribute")
7258    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7259  return true;
7260}
7261
7262/// parseDirectiveWord
7263///  ::= .word [ expression (, expression)* ]
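///  e.g. '.word 0x1234, label' emits two 4-byte values.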
7264bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7265  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7266    for (;;) {
7267      const MCExpr *Value;
7268      if (getParser().ParseExpression(Value))
7269        return true;
7270
7271      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7272
7273      if (getLexer().is(AsmToken::EndOfStatement))
7274        break;
7275
7276      // FIXME: Improve diagnostic.
7277      if (getLexer().isNot(AsmToken::Comma))
7278        return Error(L, "unexpected token in directive");
7279      Parser.Lex();
7280    }
7281  }
7282
7283  Parser.Lex();
7284  return false;
7285}
7286
7287/// parseDirectiveThumb
7288///  ::= .thumb
7289bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7290  if (getLexer().isNot(AsmToken::EndOfStatement))
7291    return Error(L, "unexpected token in directive");
7292  Parser.Lex();
7293
7294  if (!isThumb())
7295    SwitchMode();
7296  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7297  return false;
7298}
7299
7300/// parseDirectiveARM
7301///  ::= .arm
7302bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7303  if (getLexer().isNot(AsmToken::EndOfStatement))
7304    return Error(L, "unexpected token in directive");
7305  Parser.Lex();
7306
7307  if (isThumb())
7308    SwitchMode();
7309  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7310  return false;
7311}
7312
7313/// parseDirectiveThumbFunc
7314///  ::= .thumb_func symbol_name
7315bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7316  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7317  bool isMachO = MAI.hasSubsectionsViaSymbols();
7318  StringRef Name;
7319  bool needFuncName = true;
7320
7321  // Darwin asm optionally allows a function name after the .thumb_func
7322  // directive; ELF doesn't.
7323  if (isMachO) {
7324    const AsmToken &Tok = Parser.getTok();
7325    if (Tok.isNot(AsmToken::EndOfStatement)) {
7326      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7327        return Error(L, "unexpected token in .thumb_func directive");
7328      Name = Tok.getIdentifier();
7329      Parser.Lex(); // Consume the identifier token.
7330      needFuncName = false;
7331    }
7332  }
7333
7334  if (getLexer().isNot(AsmToken::EndOfStatement))
7335    return Error(L, "unexpected token in directive");
7336
7337  // Eat the end of statement and any blank lines that follow.
7338  while (getLexer().is(AsmToken::EndOfStatement))
7339    Parser.Lex();
7340
7341  // FIXME: assuming the function name is on the line following .thumb_func.
7342  // We really should be checking the next symbol definition even if there's
7343  // stuff in between.
7344  if (needFuncName) {
7345    Name = Parser.getTok().getIdentifier();
7346  }
7347
7348  // Mark symbol as a thumb symbol.
7349  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7350  getParser().getStreamer().EmitThumbFunc(Func);
7351  return false;
7352}
7353
7354/// parseDirectiveSyntax
7355///  ::= .syntax unified | divided
7356bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7357  const AsmToken &Tok = Parser.getTok();
7358  if (Tok.isNot(AsmToken::Identifier))
7359    return Error(L, "unexpected token in .syntax directive");
7360  StringRef Mode = Tok.getString();
7361  if (Mode == "unified" || Mode == "UNIFIED")
7362    Parser.Lex();
7363  else if (Mode == "divided" || Mode == "DIVIDED")
7364    return Error(L, "'.syntax divided' arm assembly not supported");
7365  else
7366    return Error(L, "unrecognized syntax mode in .syntax directive");
7367
7368  if (getLexer().isNot(AsmToken::EndOfStatement))
7369    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7370  Parser.Lex();
7371
7372  // TODO tell the MC streamer the mode
7373  // getParser().getStreamer().Emit???();
7374  return false;
7375}
7376
7377/// parseDirectiveCode
7378///  ::= .code 16 | 32
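///  e.g. '.code 16' switches the assembler to Thumb mode.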
7379bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7380  const AsmToken &Tok = Parser.getTok();
7381  if (Tok.isNot(AsmToken::Integer))
7382    return Error(L, "unexpected token in .code directive");
7383  int64_t Val = Parser.getTok().getIntVal();
7384  if (Val == 16)
7385    Parser.Lex();
7386  else if (Val == 32)
7387    Parser.Lex();
7388  else
7389    return Error(L, "invalid operand to .code directive");
7390
7391  if (getLexer().isNot(AsmToken::EndOfStatement))
7392    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7393  Parser.Lex();
7394
7395  if (Val == 16) {
7396    if (!isThumb())
7397      SwitchMode();
7398    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7399  } else {
7400    if (isThumb())
7401      SwitchMode();
7402    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7403  }
7404
7405  return false;
7406}
7407
7408/// parseDirectiveReq
7409///  ::= name .req registername
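///  e.g. 'fp .req r11' lets 'fp' be used wherever r11 is expected.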
7410bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7411  Parser.Lex(); // Eat the '.req' token.
7412  unsigned Reg;
7413  SMLoc SRegLoc, ERegLoc;
7414  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7415    Parser.EatToEndOfStatement();
7416    return Error(SRegLoc, "register name expected");
7417  }
7418
7419  // Shouldn't be anything else.
7420  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7421    Parser.EatToEndOfStatement();
7422    return Error(Parser.getTok().getLoc(),
7423                 "unexpected input in .req directive.");
7424  }
7425
7426  Parser.Lex(); // Consume the EndOfStatement
7427
7428  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7429    return Error(SRegLoc, "redefinition of '" + Name +
7430                          "' does not match original.");
7431
7432  return false;
7433}
7434
7435/// parseDirectiveUnreq
7436///  ::= .unreq registername
7437bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7438  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7439    Parser.EatToEndOfStatement();
7440    return Error(L, "unexpected input in .unreq directive.");
7441  }
7442  RegisterReqs.erase(Parser.getTok().getIdentifier());
7443  Parser.Lex(); // Eat the identifier.
7444  return false;
7445}
7446
7447/// parseDirectiveArch
7448///  ::= .arch token
7449bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7450  return true;
7451}
7452
7453/// parseDirectiveEabiAttr
7454///  ::= .eabi_attribute int, int
7455bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7456  return true;
7457}
7458
7459extern "C" void LLVMInitializeARMAsmLexer();
7460
7461/// Force static initialization.
7462extern "C" void LLVMInitializeARMAsmParser() {
7463  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7464  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7465  LLVMInitializeARMAsmLexer();
7466}
7467
7468#define GET_REGISTER_MATCHER
7469#define GET_MATCHER_IMPLEMENTATION
7470#include "ARMGenAsmMatcher.inc"
7471