ARMAsmParser.cpp revision 1ced208be9cab0f994c5df9000da36bc313b2507
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "llvm/MC/MCTargetAsmParser.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMBaseInfo.h"
13#include "MCTargetDesc/ARMMCExpr.h"
14#include "llvm/ADT/BitVector.h"
15#include "llvm/ADT/OwningPtr.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/StringSwitch.h"
19#include "llvm/ADT/Twine.h"
20#include "llvm/MC/MCAsmInfo.h"
21#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCExpr.h"
23#include "llvm/MC/MCInst.h"
24#include "llvm/MC/MCInstrDesc.h"
25#include "llvm/MC/MCParser/MCAsmLexer.h"
26#include "llvm/MC/MCParser/MCAsmParser.h"
27#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
28#include "llvm/MC/MCRegisterInfo.h"
29#include "llvm/MC/MCStreamer.h"
30#include "llvm/MC/MCSubtargetInfo.h"
31#include "llvm/Support/MathExtras.h"
32#include "llvm/Support/SourceMgr.h"
33#include "llvm/Support/TargetRegistry.h"
34#include "llvm/Support/raw_ostream.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registers via the .req directive.
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // First instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  bool Warning(SMLoc L, const Twine &Msg,
86               ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
87    return Parser.Warning(L, Msg, Ranges);
88  }
89  bool Error(SMLoc L, const Twine &Msg,
90             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
91    return Parser.Error(L, Msg, Ranges);
92  }
93
94  int tryParseRegister();
95  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
96  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
97  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
98  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
99  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
100  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
101  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
102                              unsigned &ShiftAmount);
103  bool parseDirectiveWord(unsigned Size, SMLoc L);
104  bool parseDirectiveThumb(SMLoc L);
105  bool parseDirectiveARM(SMLoc L);
106  bool parseDirectiveThumbFunc(SMLoc L);
107  bool parseDirectiveCode(SMLoc L);
108  bool parseDirectiveSyntax(SMLoc L);
109  bool parseDirectiveReq(StringRef Name, SMLoc L);
110  bool parseDirectiveUnreq(SMLoc L);
111  bool parseDirectiveArch(SMLoc L);
112  bool parseDirectiveEabiAttr(SMLoc L);
113
114  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
115                          bool &CarrySetting, unsigned &ProcessorIMod,
116                          StringRef &ITMask);
117  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
118                             bool &CanAcceptPredicationCode);
119
120  bool isThumb() const {
121    // FIXME: Can tablegen auto-generate this?
122    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
123  }
124  bool isThumbOne() const {
125    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
126  }
127  bool isThumbTwo() const {
128    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
129  }
130  bool hasV6Ops() const {
131    return STI.getFeatureBits() & ARM::HasV6Ops;
132  }
133  bool hasV7Ops() const {
134    return STI.getFeatureBits() & ARM::HasV7Ops;
135  }
136  void SwitchMode() {
137    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
138    setAvailableFeatures(FB);
139  }
140  bool isMClass() const {
141    return STI.getFeatureBits() & ARM::FeatureMClass;
142  }
143
144  /// @name Auto-generated Match Functions
145  /// {
146
147#define GET_ASSEMBLER_HEADER
148#include "ARMGenAsmMatcher.inc"
149
150  /// }
151
152  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseCoprocNumOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseCoprocRegOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseCoprocOptionOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parseMemBarrierOptOperand(
160    SmallVectorImpl<MCParsedAsmOperand*>&);
161  OperandMatchResultTy parseProcIFlagsOperand(
162    SmallVectorImpl<MCParsedAsmOperand*>&);
163  OperandMatchResultTy parseMSRMaskOperand(
164    SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
166                                   StringRef Op, int Low, int High);
167  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
168    return parsePKHImm(O, "lsl", 0, 31);
169  }
170  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
171    return parsePKHImm(O, "asr", 1, 32);
172  }
173  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
176  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
177  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
178  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
179  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
180  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
181  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
182                                       SMLoc &EndLoc);
183
184  // Asm Match Converter Methods
185  void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
186  void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
187  void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
196                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
197  void cvtStWriteBackRegAddrMode2(MCInst &Inst,
198                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
199  void cvtStWriteBackRegAddrMode3(MCInst &Inst,
200                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
201  void cvtLdExtTWriteBackImm(MCInst &Inst,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  void cvtLdExtTWriteBackReg(MCInst &Inst,
204                             const SmallVectorImpl<MCParsedAsmOperand*> &);
205  void cvtStExtTWriteBackImm(MCInst &Inst,
206                             const SmallVectorImpl<MCParsedAsmOperand*> &);
207  void cvtStExtTWriteBackReg(MCInst &Inst,
208                             const SmallVectorImpl<MCParsedAsmOperand*> &);
209  void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
210  void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
211  void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
212                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
213  void cvtThumbMultiply(MCInst &Inst,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  void cvtVLDwbFixed(MCInst &Inst,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  void cvtVLDwbRegister(MCInst &Inst,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219  void cvtVSTwbFixed(MCInst &Inst,
220                     const SmallVectorImpl<MCParsedAsmOperand*> &);
221  void cvtVSTwbRegister(MCInst &Inst,
222                        const SmallVectorImpl<MCParsedAsmOperand*> &);
223  bool validateInstruction(MCInst &Inst,
224                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
225  bool processInstruction(MCInst &Inst,
226                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
227  bool shouldOmitCCOutOperand(StringRef Mnemonic,
228                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
229
230public:
231  enum ARMMatchResultTy {
232    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
233    Match_RequiresNotITBlock,
234    Match_RequiresV6,
235    Match_RequiresThumb2,
236#define GET_OPERAND_DIAGNOSTIC_TYPES
237#include "ARMGenAsmMatcher.inc"
238
239  };
240
241  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
242    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
243    MCAsmParserExtension::Initialize(_Parser);
244
245    // Cache the MCRegisterInfo.
246    MRI = &getContext().getRegisterInfo();
247
248    // Initialize the set of available features.
249    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
250
251    // Not in an ITBlock to start with.
252    ITState.CurPosition = ~0U;
253  }
254
255  // Implementation of the MCTargetAsmParser interface:
256  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
257  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
258                        SMLoc NameLoc,
259                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
260  bool ParseDirective(AsmToken DirectiveID);
261
262  unsigned checkTargetMatchPredicate(MCInst &Inst);
263
264  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
265                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
266                               MCStreamer &Out, unsigned &ErrorInfo,
267                               bool MatchingInlineAsm);
268};
269} // end anonymous namespace
270
271namespace {
272
273/// ARMOperand - Instances of this class represent a parsed ARM machine
274/// instruction.
275class ARMOperand : public MCParsedAsmOperand {
276  enum KindTy {
277    k_CondCode,
278    k_CCOut,
279    k_ITCondMask,
280    k_CoprocNum,
281    k_CoprocReg,
282    k_CoprocOption,
283    k_Immediate,
284    k_MemBarrierOpt,
285    k_Memory,
286    k_PostIndexRegister,
287    k_MSRMask,
288    k_ProcIFlags,
289    k_VectorIndex,
290    k_Register,
291    k_RegisterList,
292    k_DPRRegisterList,
293    k_SPRRegisterList,
294    k_VectorList,
295    k_VectorListAllLanes,
296    k_VectorListIndexed,
297    k_ShiftedRegister,
298    k_ShiftedImmediate,
299    k_ShifterImmediate,
300    k_RotateImmediate,
301    k_BitfieldDescriptor,
302    k_Token
303  } Kind;
304
305  SMLoc StartLoc, EndLoc;
306  SmallVector<unsigned, 8> Registers;
307
308  union {
309    struct {
310      ARMCC::CondCodes Val;
311    } CC;
312
313    struct {
314      unsigned Val;
315    } Cop;
316
317    struct {
318      unsigned Val;
319    } CoprocOption;
320
321    struct {
322      unsigned Mask:4;
323    } ITMask;
324
325    struct {
326      ARM_MB::MemBOpt Val;
327    } MBOpt;
328
329    struct {
330      ARM_PROC::IFlags Val;
331    } IFlags;
332
333    struct {
334      unsigned Val;
335    } MMask;
336
337    struct {
338      const char *Data;
339      unsigned Length;
340    } Tok;
341
342    struct {
343      unsigned RegNum;
344    } Reg;
345
346    // A vector register list is a sequential list of 1 to 4 registers.
347    struct {
348      unsigned RegNum;
349      unsigned Count;
350      unsigned LaneIndex;
351      bool isDoubleSpaced;
352    } VectorList;
353
354    struct {
355      unsigned Val;
356    } VectorIndex;
357
358    struct {
359      const MCExpr *Val;
360    } Imm;
361
362    /// Combined record for all forms of ARM address expressions.
363    struct {
364      unsigned BaseRegNum;
365      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
366      // was specified.
367      const MCConstantExpr *OffsetImm;  // Offset immediate value
368      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
369      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
370      unsigned ShiftImm;        // shift for OffsetReg.
371      unsigned Alignment;       // 0 = no alignment specified
372                                // n = alignment in bytes (2, 4, 8, 16, or 32)
373      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
374    } Memory;
375
376    struct {
377      unsigned RegNum;
378      bool isAdd;
379      ARM_AM::ShiftOpc ShiftTy;
380      unsigned ShiftImm;
381    } PostIdxReg;
382
383    struct {
384      bool isASR;
385      unsigned Imm;
386    } ShifterImm;
387    struct {
388      ARM_AM::ShiftOpc ShiftTy;
389      unsigned SrcReg;
390      unsigned ShiftReg;
391      unsigned ShiftImm;
392    } RegShiftedReg;
393    struct {
394      ARM_AM::ShiftOpc ShiftTy;
395      unsigned SrcReg;
396      unsigned ShiftImm;
397    } RegShiftedImm;
398    struct {
399      unsigned Imm;
400    } RotImm;
401    struct {
402      unsigned LSB;
403      unsigned Width;
404    } Bitfield;
405  };
406
407  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
408public:
409  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
410    Kind = o.Kind;
411    StartLoc = o.StartLoc;
412    EndLoc = o.EndLoc;
413    switch (Kind) {
414    case k_CondCode:
415      CC = o.CC;
416      break;
417    case k_ITCondMask:
418      ITMask = o.ITMask;
419      break;
420    case k_Token:
421      Tok = o.Tok;
422      break;
423    case k_CCOut:
424    case k_Register:
425      Reg = o.Reg;
426      break;
427    case k_RegisterList:
428    case k_DPRRegisterList:
429    case k_SPRRegisterList:
430      Registers = o.Registers;
431      break;
432    case k_VectorList:
433    case k_VectorListAllLanes:
434    case k_VectorListIndexed:
435      VectorList = o.VectorList;
436      break;
437    case k_CoprocNum:
438    case k_CoprocReg:
439      Cop = o.Cop;
440      break;
441    case k_CoprocOption:
442      CoprocOption = o.CoprocOption;
443      break;
444    case k_Immediate:
445      Imm = o.Imm;
446      break;
447    case k_MemBarrierOpt:
448      MBOpt = o.MBOpt;
449      break;
450    case k_Memory:
451      Memory = o.Memory;
452      break;
453    case k_PostIndexRegister:
454      PostIdxReg = o.PostIdxReg;
455      break;
456    case k_MSRMask:
457      MMask = o.MMask;
458      break;
459    case k_ProcIFlags:
460      IFlags = o.IFlags;
461      break;
462    case k_ShifterImmediate:
463      ShifterImm = o.ShifterImm;
464      break;
465    case k_ShiftedRegister:
466      RegShiftedReg = o.RegShiftedReg;
467      break;
468    case k_ShiftedImmediate:
469      RegShiftedImm = o.RegShiftedImm;
470      break;
471    case k_RotateImmediate:
472      RotImm = o.RotImm;
473      break;
474    case k_BitfieldDescriptor:
475      Bitfield = o.Bitfield;
476      break;
477    case k_VectorIndex:
478      VectorIndex = o.VectorIndex;
479      break;
480    }
481  }
482
483  /// getStartLoc - Get the location of the first token of this operand.
484  SMLoc getStartLoc() const { return StartLoc; }
485  /// getEndLoc - Get the location of the last token of this operand.
486  SMLoc getEndLoc() const { return EndLoc; }
487  /// getLocRange - Get the range between the first and last token of this
488  /// operand.
489  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
490
491  ARMCC::CondCodes getCondCode() const {
492    assert(Kind == k_CondCode && "Invalid access!");
493    return CC.Val;
494  }
495
496  unsigned getCoproc() const {
497    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
498    return Cop.Val;
499  }
500
501  StringRef getToken() const {
502    assert(Kind == k_Token && "Invalid access!");
503    return StringRef(Tok.Data, Tok.Length);
504  }
505
506  unsigned getReg() const {
507    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
508    return Reg.RegNum;
509  }
510
511  const SmallVectorImpl<unsigned> &getRegList() const {
512    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
513            Kind == k_SPRRegisterList) && "Invalid access!");
514    return Registers;
515  }
516
517  const MCExpr *getImm() const {
518    assert(isImm() && "Invalid access!");
519    return Imm.Val;
520  }
521
522  unsigned getVectorIndex() const {
523    assert(Kind == k_VectorIndex && "Invalid access!");
524    return VectorIndex.Val;
525  }
526
527  ARM_MB::MemBOpt getMemBarrierOpt() const {
528    assert(Kind == k_MemBarrierOpt && "Invalid access!");
529    return MBOpt.Val;
530  }
531
532  ARM_PROC::IFlags getProcIFlags() const {
533    assert(Kind == k_ProcIFlags && "Invalid access!");
534    return IFlags.Val;
535  }
536
537  unsigned getMSRMask() const {
538    assert(Kind == k_MSRMask && "Invalid access!");
539    return MMask.Val;
540  }
541
542  bool isCoprocNum() const { return Kind == k_CoprocNum; }
543  bool isCoprocReg() const { return Kind == k_CoprocReg; }
544  bool isCoprocOption() const { return Kind == k_CoprocOption; }
545  bool isCondCode() const { return Kind == k_CondCode; }
546  bool isCCOut() const { return Kind == k_CCOut; }
547  bool isITMask() const { return Kind == k_ITCondMask; }
548  bool isITCondCode() const { return Kind == k_CondCode; }
549  bool isImm() const { return Kind == k_Immediate; }
550  bool isFPImm() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
555    return Val != -1;
556  }
557  bool isFBits16() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return Value >= 0 && Value <= 16;
563  }
564  bool isFBits32() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return Value >= 1 && Value <= 32;
570  }
571  bool isImm8s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
577  }
578  bool isImm0_1020s4() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
584  }
585  bool isImm0_508s4() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
591  }
592  bool isImm0_508s4Neg() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = -CE->getValue();
597    // explicitly exclude zero. we want that to use the normal 0_508 version.
598    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
599  }
600  bool isImm0_255() const {
601    if (!isImm()) return false;
602    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
603    if (!CE) return false;
604    int64_t Value = CE->getValue();
605    return Value >= 0 && Value < 256;
606  }
607  bool isImm0_4095() const {
608    if (!isImm()) return false;
609    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
610    if (!CE) return false;
611    int64_t Value = CE->getValue();
612    return Value >= 0 && Value < 4096;
613  }
614  bool isImm0_4095Neg() const {
615    if (!isImm()) return false;
616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
617    if (!CE) return false;
618    int64_t Value = -CE->getValue();
619    return Value > 0 && Value < 4096;
620  }
621  bool isImm0_1() const {
622    if (!isImm()) return false;
623    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
624    if (!CE) return false;
625    int64_t Value = CE->getValue();
626    return Value >= 0 && Value < 2;
627  }
628  bool isImm0_3() const {
629    if (!isImm()) return false;
630    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
631    if (!CE) return false;
632    int64_t Value = CE->getValue();
633    return Value >= 0 && Value < 4;
634  }
635  bool isImm0_7() const {
636    if (!isImm()) return false;
637    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
638    if (!CE) return false;
639    int64_t Value = CE->getValue();
640    return Value >= 0 && Value < 8;
641  }
642  bool isImm0_15() const {
643    if (!isImm()) return false;
644    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
645    if (!CE) return false;
646    int64_t Value = CE->getValue();
647    return Value >= 0 && Value < 16;
648  }
649  bool isImm0_31() const {
650    if (!isImm()) return false;
651    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
652    if (!CE) return false;
653    int64_t Value = CE->getValue();
654    return Value >= 0 && Value < 32;
655  }
656  bool isImm0_63() const {
657    if (!isImm()) return false;
658    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
659    if (!CE) return false;
660    int64_t Value = CE->getValue();
661    return Value >= 0 && Value < 64;
662  }
663  bool isImm8() const {
664    if (!isImm()) return false;
665    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
666    if (!CE) return false;
667    int64_t Value = CE->getValue();
668    return Value == 8;
669  }
670  bool isImm16() const {
671    if (!isImm()) return false;
672    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
673    if (!CE) return false;
674    int64_t Value = CE->getValue();
675    return Value == 16;
676  }
677  bool isImm32() const {
678    if (!isImm()) return false;
679    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
680    if (!CE) return false;
681    int64_t Value = CE->getValue();
682    return Value == 32;
683  }
684  bool isShrImm8() const {
685    if (!isImm()) return false;
686    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
687    if (!CE) return false;
688    int64_t Value = CE->getValue();
689    return Value > 0 && Value <= 8;
690  }
691  bool isShrImm16() const {
692    if (!isImm()) return false;
693    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
694    if (!CE) return false;
695    int64_t Value = CE->getValue();
696    return Value > 0 && Value <= 16;
697  }
698  bool isShrImm32() const {
699    if (!isImm()) return false;
700    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
701    if (!CE) return false;
702    int64_t Value = CE->getValue();
703    return Value > 0 && Value <= 32;
704  }
705  bool isShrImm64() const {
706    if (!isImm()) return false;
707    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
708    if (!CE) return false;
709    int64_t Value = CE->getValue();
710    return Value > 0 && Value <= 64;
711  }
712  bool isImm1_7() const {
713    if (!isImm()) return false;
714    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
715    if (!CE) return false;
716    int64_t Value = CE->getValue();
717    return Value > 0 && Value < 8;
718  }
719  bool isImm1_15() const {
720    if (!isImm()) return false;
721    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
722    if (!CE) return false;
723    int64_t Value = CE->getValue();
724    return Value > 0 && Value < 16;
725  }
726  bool isImm1_31() const {
727    if (!isImm()) return false;
728    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
729    if (!CE) return false;
730    int64_t Value = CE->getValue();
731    return Value > 0 && Value < 32;
732  }
733  bool isImm1_16() const {
734    if (!isImm()) return false;
735    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
736    if (!CE) return false;
737    int64_t Value = CE->getValue();
738    return Value > 0 && Value < 17;
739  }
740  bool isImm1_32() const {
741    if (!isImm()) return false;
742    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
743    if (!CE) return false;
744    int64_t Value = CE->getValue();
745    return Value > 0 && Value < 33;
746  }
747  bool isImm0_32() const {
748    if (!isImm()) return false;
749    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
750    if (!CE) return false;
751    int64_t Value = CE->getValue();
752    return Value >= 0 && Value < 33;
753  }
754  bool isImm0_65535() const {
755    if (!isImm()) return false;
756    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
757    if (!CE) return false;
758    int64_t Value = CE->getValue();
759    return Value >= 0 && Value < 65536;
760  }
761  bool isImm0_65535Expr() const {
762    if (!isImm()) return false;
763    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764    // If it's not a constant expression, it'll generate a fixup and be
765    // handled later.
766    if (!CE) return true;
767    int64_t Value = CE->getValue();
768    return Value >= 0 && Value < 65536;
769  }
770  bool isImm24bit() const {
771    if (!isImm()) return false;
772    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
773    if (!CE) return false;
774    int64_t Value = CE->getValue();
775    return Value >= 0 && Value <= 0xffffff;
776  }
777  bool isImmThumbSR() const {
778    if (!isImm()) return false;
779    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
780    if (!CE) return false;
781    int64_t Value = CE->getValue();
782    return Value > 0 && Value < 33;
783  }
784  bool isPKHLSLImm() const {
785    if (!isImm()) return false;
786    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
787    if (!CE) return false;
788    int64_t Value = CE->getValue();
789    return Value >= 0 && Value < 32;
790  }
791  bool isPKHASRImm() const {
792    if (!isImm()) return false;
793    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
794    if (!CE) return false;
795    int64_t Value = CE->getValue();
796    return Value > 0 && Value <= 32;
797  }
798  bool isAdrLabel() const {
799    // If we have an immediate that's not a constant, treat it as a label
800    // reference needing a fixup. If it is a constant, but it can't fit
801    // into shift immediate encoding, we reject it.
802    if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
803    else return (isARMSOImm() || isARMSOImmNeg());
804  }
805  bool isARMSOImm() const {
806    if (!isImm()) return false;
807    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
808    if (!CE) return false;
809    int64_t Value = CE->getValue();
810    return ARM_AM::getSOImmVal(Value) != -1;
811  }
812  bool isARMSOImmNot() const {
813    if (!isImm()) return false;
814    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815    if (!CE) return false;
816    int64_t Value = CE->getValue();
817    return ARM_AM::getSOImmVal(~Value) != -1;
818  }
819  bool isARMSOImmNeg() const {
820    if (!isImm()) return false;
821    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
822    if (!CE) return false;
823    int64_t Value = CE->getValue();
824    // Only use this when not representable as a plain so_imm.
825    return ARM_AM::getSOImmVal(Value) == -1 &&
826      ARM_AM::getSOImmVal(-Value) != -1;
827  }
828  bool isT2SOImm() const {
829    if (!isImm()) return false;
830    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
831    if (!CE) return false;
832    int64_t Value = CE->getValue();
833    return ARM_AM::getT2SOImmVal(Value) != -1;
834  }
835  bool isT2SOImmNot() const {
836    if (!isImm()) return false;
837    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
838    if (!CE) return false;
839    int64_t Value = CE->getValue();
840    return ARM_AM::getT2SOImmVal(~Value) != -1;
841  }
842  bool isT2SOImmNeg() const {
843    if (!isImm()) return false;
844    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845    if (!CE) return false;
846    int64_t Value = CE->getValue();
847    // Only use this when not representable as a plain so_imm.
848    return ARM_AM::getT2SOImmVal(Value) == -1 &&
849      ARM_AM::getT2SOImmVal(-Value) != -1;
850  }
851  bool isSetEndImm() const {
852    if (!isImm()) return false;
853    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
854    if (!CE) return false;
855    int64_t Value = CE->getValue();
856    return Value == 1 || Value == 0;
857  }
858  bool isReg() const { return Kind == k_Register; }
859  bool isRegList() const { return Kind == k_RegisterList; }
860  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
861  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
862  bool isToken() const { return Kind == k_Token; }
863  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
864  bool isMem() const { return Kind == k_Memory; }
865  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
866  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
867  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
868  bool isRotImm() const { return Kind == k_RotateImmediate; }
869  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
870  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
871  bool isPostIdxReg() const {
872    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
873  }
874  bool isMemNoOffset(bool alignOK = false) const {
875    if (!isMem())
876      return false;
877    // No offset of any kind.
878    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
879     (alignOK || Memory.Alignment == 0);
880  }
881  bool isMemPCRelImm12() const {
882    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
883      return false;
884    // Base register must be PC.
885    if (Memory.BaseRegNum != ARM::PC)
886      return false;
887    // Immediate offset in range [-4095, 4095].
888    if (!Memory.OffsetImm) return true;
889    int64_t Val = Memory.OffsetImm->getValue();
890    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
891  }
892  bool isAlignedMemory() const {
893    return isMemNoOffset(true);
894  }
895  bool isAddrMode2() const {
896    if (!isMem() || Memory.Alignment != 0) return false;
897    // Check for register offset.
898    if (Memory.OffsetRegNum) return true;
899    // Immediate offset in range [-4095, 4095].
900    if (!Memory.OffsetImm) return true;
901    int64_t Val = Memory.OffsetImm->getValue();
902    return Val > -4096 && Val < 4096;
903  }
904  bool isAM2OffsetImm() const {
905    if (!isImm()) return false;
906    // Immediate offset in range [-4095, 4095].
907    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
908    if (!CE) return false;
909    int64_t Val = CE->getValue();
910    return Val > -4096 && Val < 4096;
911  }
912  bool isAddrMode3() const {
913    // If we have an immediate that's not a constant, treat it as a label
914    // reference needing a fixup. If it is a constant, it's something else
915    // and we reject it.
916    if (isImm() && !isa<MCConstantExpr>(getImm()))
917      return true;
918    if (!isMem() || Memory.Alignment != 0) return false;
919    // No shifts are legal for AM3.
920    if (Memory.ShiftType != ARM_AM::no_shift) return false;
921    // Check for register offset.
922    if (Memory.OffsetRegNum) return true;
923    // Immediate offset in range [-255, 255].
924    if (!Memory.OffsetImm) return true;
925    int64_t Val = Memory.OffsetImm->getValue();
926    // The #-0 offset is encoded as INT32_MIN, and we have to check
927    // for this too.
928    return (Val > -256 && Val < 256) || Val == INT32_MIN;
929  }
930  bool isAM3Offset() const {
931    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
932      return false;
933    if (Kind == k_PostIndexRegister)
934      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
935    // Immediate offset in range [-255, 255].
936    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
937    if (!CE) return false;
938    int64_t Val = CE->getValue();
939    // Special case, #-0 is INT32_MIN.
940    return (Val > -256 && Val < 256) || Val == INT32_MIN;
941  }
942  bool isAddrMode5() const {
943    // If we have an immediate that's not a constant, treat it as a label
944    // reference needing a fixup. If it is a constant, it's something else
945    // and we reject it.
946    if (isImm() && !isa<MCConstantExpr>(getImm()))
947      return true;
948    if (!isMem() || Memory.Alignment != 0) return false;
949    // Check for register offset.
950    if (Memory.OffsetRegNum) return false;
951    // Immediate offset in range [-1020, 1020] and a multiple of 4.
952    if (!Memory.OffsetImm) return true;
953    int64_t Val = Memory.OffsetImm->getValue();
954    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
955      Val == INT32_MIN;
956  }
957  bool isMemTBB() const {
958    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
959        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
960      return false;
961    return true;
962  }
963  bool isMemTBH() const {
964    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
965        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
966        Memory.Alignment != 0 )
967      return false;
968    return true;
969  }
970  bool isMemRegOffset() const {
971    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
972      return false;
973    return true;
974  }
975  bool isT2MemRegOffset() const {
976    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
977        Memory.Alignment != 0)
978      return false;
979    // Only lsl #{0, 1, 2, 3} allowed.
980    if (Memory.ShiftType == ARM_AM::no_shift)
981      return true;
982    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
983      return false;
984    return true;
985  }
986  bool isMemThumbRR() const {
987    // Thumb reg+reg addressing is simple. Just two registers, a base and
988    // an offset. No shifts, negations or any other complicating factors.
989    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
990        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
991      return false;
992    return isARMLowRegister(Memory.BaseRegNum) &&
993      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
994  }
995  bool isMemThumbRIs4() const {
996    if (!isMem() || Memory.OffsetRegNum != 0 ||
997        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
998      return false;
999    // Immediate offset, multiple of 4 in range [0, 124].
1000    if (!Memory.OffsetImm) return true;
1001    int64_t Val = Memory.OffsetImm->getValue();
1002    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1003  }
1004  bool isMemThumbRIs2() const {
1005    if (!isMem() || Memory.OffsetRegNum != 0 ||
1006        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1007      return false;
1008    // Immediate offset, multiple of 4 in range [0, 62].
1009    if (!Memory.OffsetImm) return true;
1010    int64_t Val = Memory.OffsetImm->getValue();
1011    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1012  }
1013  bool isMemThumbRIs1() const {
1014    if (!isMem() || Memory.OffsetRegNum != 0 ||
1015        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1016      return false;
1017    // Immediate offset in range [0, 31].
1018    if (!Memory.OffsetImm) return true;
1019    int64_t Val = Memory.OffsetImm->getValue();
1020    return Val >= 0 && Val <= 31;
1021  }
1022  bool isMemThumbSPI() const {
1023    if (!isMem() || Memory.OffsetRegNum != 0 ||
1024        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1025      return false;
1026    // Immediate offset, multiple of 4 in range [0, 1020].
1027    if (!Memory.OffsetImm) return true;
1028    int64_t Val = Memory.OffsetImm->getValue();
1029    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1030  }
1031  bool isMemImm8s4Offset() const {
1032    // If we have an immediate that's not a constant, treat it as a label
1033    // reference needing a fixup. If it is a constant, it's something else
1034    // and we reject it.
1035    if (isImm() && !isa<MCConstantExpr>(getImm()))
1036      return true;
1037    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1038      return false;
1039    // Immediate offset a multiple of 4 in range [-1020, 1020].
1040    if (!Memory.OffsetImm) return true;
1041    int64_t Val = Memory.OffsetImm->getValue();
1042    // Special case, #-0 is INT32_MIN.
1043    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1044  }
1045  bool isMemImm0_1020s4Offset() const {
1046    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1047      return false;
1048    // Immediate offset a multiple of 4 in range [0, 1020].
1049    if (!Memory.OffsetImm) return true;
1050    int64_t Val = Memory.OffsetImm->getValue();
1051    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1052  }
1053  bool isMemImm8Offset() const {
1054    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1055      return false;
1056    // Base reg of PC isn't allowed for these encodings.
1057    if (Memory.BaseRegNum == ARM::PC) return false;
1058    // Immediate offset in range [-255, 255].
1059    if (!Memory.OffsetImm) return true;
1060    int64_t Val = Memory.OffsetImm->getValue();
1061    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1062  }
1063  bool isMemPosImm8Offset() const {
1064    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1065      return false;
1066    // Immediate offset in range [0, 255].
1067    if (!Memory.OffsetImm) return true;
1068    int64_t Val = Memory.OffsetImm->getValue();
1069    return Val >= 0 && Val < 256;
1070  }
1071  bool isMemNegImm8Offset() const {
1072    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1073      return false;
1074    // Base reg of PC isn't allowed for these encodings.
1075    if (Memory.BaseRegNum == ARM::PC) return false;
1076    // Immediate offset in range [-255, -1].
1077    if (!Memory.OffsetImm) return false;
1078    int64_t Val = Memory.OffsetImm->getValue();
1079    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1080  }
1081  bool isMemUImm12Offset() const {
1082    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1083      return false;
1084    // Immediate offset in range [0, 4095].
1085    if (!Memory.OffsetImm) return true;
1086    int64_t Val = Memory.OffsetImm->getValue();
1087    return (Val >= 0 && Val < 4096);
1088  }
1089  bool isMemImm12Offset() const {
1090    // If we have an immediate that's not a constant, treat it as a label
1091    // reference needing a fixup. If it is a constant, it's something else
1092    // and we reject it.
1093    if (isImm() && !isa<MCConstantExpr>(getImm()))
1094      return true;
1095
1096    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1097      return false;
1098    // Immediate offset in range [-4095, 4095].
1099    if (!Memory.OffsetImm) return true;
1100    int64_t Val = Memory.OffsetImm->getValue();
1101    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1102  }
1103  bool isPostIdxImm8() const {
1104    if (!isImm()) return false;
1105    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1106    if (!CE) return false;
1107    int64_t Val = CE->getValue();
1108    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1109  }
1110  bool isPostIdxImm8s4() const {
1111    if (!isImm()) return false;
1112    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1113    if (!CE) return false;
1114    int64_t Val = CE->getValue();
1115    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1116      (Val == INT32_MIN);
1117  }
1118
1119  bool isMSRMask() const { return Kind == k_MSRMask; }
1120  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1121
1122  // NEON operands.
1123  bool isSingleSpacedVectorList() const {
1124    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1125  }
1126  bool isDoubleSpacedVectorList() const {
1127    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1128  }
1129  bool isVecListOneD() const {
1130    if (!isSingleSpacedVectorList()) return false;
1131    return VectorList.Count == 1;
1132  }
1133
1134  bool isVecListDPair() const {
1135    if (!isSingleSpacedVectorList()) return false;
1136    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1137              .contains(VectorList.RegNum));
1138  }
1139
1140  bool isVecListThreeD() const {
1141    if (!isSingleSpacedVectorList()) return false;
1142    return VectorList.Count == 3;
1143  }
1144
1145  bool isVecListFourD() const {
1146    if (!isSingleSpacedVectorList()) return false;
1147    return VectorList.Count == 4;
1148  }
1149
1150  bool isVecListDPairSpaced() const {
1151    if (isSingleSpacedVectorList()) return false;
1152    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1153              .contains(VectorList.RegNum));
1154  }
1155
1156  bool isVecListThreeQ() const {
1157    if (!isDoubleSpacedVectorList()) return false;
1158    return VectorList.Count == 3;
1159  }
1160
1161  bool isVecListFourQ() const {
1162    if (!isDoubleSpacedVectorList()) return false;
1163    return VectorList.Count == 4;
1164  }
1165
1166  bool isSingleSpacedVectorAllLanes() const {
1167    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1168  }
1169  bool isDoubleSpacedVectorAllLanes() const {
1170    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1171  }
1172  bool isVecListOneDAllLanes() const {
1173    if (!isSingleSpacedVectorAllLanes()) return false;
1174    return VectorList.Count == 1;
1175  }
1176
1177  bool isVecListDPairAllLanes() const {
1178    if (!isSingleSpacedVectorAllLanes()) return false;
1179    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1180              .contains(VectorList.RegNum));
1181  }
1182
1183  bool isVecListDPairSpacedAllLanes() const {
1184    if (!isDoubleSpacedVectorAllLanes()) return false;
1185    return VectorList.Count == 2;
1186  }
1187
1188  bool isVecListThreeDAllLanes() const {
1189    if (!isSingleSpacedVectorAllLanes()) return false;
1190    return VectorList.Count == 3;
1191  }
1192
1193  bool isVecListThreeQAllLanes() const {
1194    if (!isDoubleSpacedVectorAllLanes()) return false;
1195    return VectorList.Count == 3;
1196  }
1197
1198  bool isVecListFourDAllLanes() const {
1199    if (!isSingleSpacedVectorAllLanes()) return false;
1200    return VectorList.Count == 4;
1201  }
1202
1203  bool isVecListFourQAllLanes() const {
1204    if (!isDoubleSpacedVectorAllLanes()) return false;
1205    return VectorList.Count == 4;
1206  }
1207
1208  bool isSingleSpacedVectorIndexed() const {
1209    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1210  }
1211  bool isDoubleSpacedVectorIndexed() const {
1212    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1213  }
1214  bool isVecListOneDByteIndexed() const {
1215    if (!isSingleSpacedVectorIndexed()) return false;
1216    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1217  }
1218
1219  bool isVecListOneDHWordIndexed() const {
1220    if (!isSingleSpacedVectorIndexed()) return false;
1221    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1222  }
1223
1224  bool isVecListOneDWordIndexed() const {
1225    if (!isSingleSpacedVectorIndexed()) return false;
1226    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1227  }
1228
1229  bool isVecListTwoDByteIndexed() const {
1230    if (!isSingleSpacedVectorIndexed()) return false;
1231    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1232  }
1233
1234  bool isVecListTwoDHWordIndexed() const {
1235    if (!isSingleSpacedVectorIndexed()) return false;
1236    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1237  }
1238
1239  bool isVecListTwoQWordIndexed() const {
1240    if (!isDoubleSpacedVectorIndexed()) return false;
1241    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1242  }
1243
1244  bool isVecListTwoQHWordIndexed() const {
1245    if (!isDoubleSpacedVectorIndexed()) return false;
1246    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1247  }
1248
1249  bool isVecListTwoDWordIndexed() const {
1250    if (!isSingleSpacedVectorIndexed()) return false;
1251    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1252  }
1253
1254  bool isVecListThreeDByteIndexed() const {
1255    if (!isSingleSpacedVectorIndexed()) return false;
1256    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1257  }
1258
1259  bool isVecListThreeDHWordIndexed() const {
1260    if (!isSingleSpacedVectorIndexed()) return false;
1261    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1262  }
1263
1264  bool isVecListThreeQWordIndexed() const {
1265    if (!isDoubleSpacedVectorIndexed()) return false;
1266    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1267  }
1268
1269  bool isVecListThreeQHWordIndexed() const {
1270    if (!isDoubleSpacedVectorIndexed()) return false;
1271    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1272  }
1273
1274  bool isVecListThreeDWordIndexed() const {
1275    if (!isSingleSpacedVectorIndexed()) return false;
1276    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1277  }
1278
1279  bool isVecListFourDByteIndexed() const {
1280    if (!isSingleSpacedVectorIndexed()) return false;
1281    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1282  }
1283
1284  bool isVecListFourDHWordIndexed() const {
1285    if (!isSingleSpacedVectorIndexed()) return false;
1286    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1287  }
1288
1289  bool isVecListFourQWordIndexed() const {
1290    if (!isDoubleSpacedVectorIndexed()) return false;
1291    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1292  }
1293
1294  bool isVecListFourQHWordIndexed() const {
1295    if (!isDoubleSpacedVectorIndexed()) return false;
1296    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1297  }
1298
1299  bool isVecListFourDWordIndexed() const {
1300    if (!isSingleSpacedVectorIndexed()) return false;
1301    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1302  }
1303
1304  bool isVectorIndex8() const {
1305    if (Kind != k_VectorIndex) return false;
1306    return VectorIndex.Val < 8;
1307  }
1308  bool isVectorIndex16() const {
1309    if (Kind != k_VectorIndex) return false;
1310    return VectorIndex.Val < 4;
1311  }
1312  bool isVectorIndex32() const {
1313    if (Kind != k_VectorIndex) return false;
1314    return VectorIndex.Val < 2;
1315  }
1316
1317  bool isNEONi8splat() const {
1318    if (!isImm()) return false;
1319    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1320    // Must be a constant.
1321    if (!CE) return false;
1322    int64_t Value = CE->getValue();
1323    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1324    // value.
1325    return Value >= 0 && Value < 256;
1326  }
1327
1328  bool isNEONi16splat() const {
1329    if (!isImm()) return false;
1330    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1331    // Must be a constant.
1332    if (!CE) return false;
1333    int64_t Value = CE->getValue();
1334    // i16 value in the range [0,255] or [0x0100, 0xff00]
1335    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1336  }
1337
1338  bool isNEONi32splat() const {
1339    if (!isImm()) return false;
1340    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1341    // Must be a constant.
1342    if (!CE) return false;
1343    int64_t Value = CE->getValue();
1344    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1345    return (Value >= 0 && Value < 256) ||
1346      (Value >= 0x0100 && Value <= 0xff00) ||
1347      (Value >= 0x010000 && Value <= 0xff0000) ||
1348      (Value >= 0x01000000 && Value <= 0xff000000);
1349  }
1350
1351  bool isNEONi32vmov() const {
1352    if (!isImm()) return false;
1353    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1354    // Must be a constant.
1355    if (!CE) return false;
1356    int64_t Value = CE->getValue();
1357    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1358    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1359    return (Value >= 0 && Value < 256) ||
1360      (Value >= 0x0100 && Value <= 0xff00) ||
1361      (Value >= 0x010000 && Value <= 0xff0000) ||
1362      (Value >= 0x01000000 && Value <= 0xff000000) ||
1363      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1364      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1365  }
1366  bool isNEONi32vmovNeg() const {
1367    if (!isImm()) return false;
1368    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1369    // Must be a constant.
1370    if (!CE) return false;
1371    int64_t Value = ~CE->getValue();
1372    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1373    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1374    return (Value >= 0 && Value < 256) ||
1375      (Value >= 0x0100 && Value <= 0xff00) ||
1376      (Value >= 0x010000 && Value <= 0xff0000) ||
1377      (Value >= 0x01000000 && Value <= 0xff000000) ||
1378      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1379      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1380  }
1381
1382  bool isNEONi64splat() const {
1383    if (!isImm()) return false;
1384    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1385    // Must be a constant.
1386    if (!CE) return false;
1387    uint64_t Value = CE->getValue();
1388    // i64 value with each byte being either 0 or 0xff.
1389    for (unsigned i = 0; i < 8; ++i)
1390      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1391    return true;
1392  }
1393
1394  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1395    // Add as immediates when possible.  Null MCExpr = 0.
1396    if (Expr == 0)
1397      Inst.addOperand(MCOperand::CreateImm(0));
1398    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1399      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1400    else
1401      Inst.addOperand(MCOperand::CreateExpr(Expr));
1402  }
1403
1404  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1405    assert(N == 2 && "Invalid number of operands!");
1406    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1407    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1408    Inst.addOperand(MCOperand::CreateReg(RegNum));
1409  }
1410
1411  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1412    assert(N == 1 && "Invalid number of operands!");
1413    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1414  }
1415
1416  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1417    assert(N == 1 && "Invalid number of operands!");
1418    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1419  }
1420
1421  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1422    assert(N == 1 && "Invalid number of operands!");
1423    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1424  }
1425
1426  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1427    assert(N == 1 && "Invalid number of operands!");
1428    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1429  }
1430
1431  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1432    assert(N == 1 && "Invalid number of operands!");
1433    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1434  }
1435
1436  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1437    assert(N == 1 && "Invalid number of operands!");
1438    Inst.addOperand(MCOperand::CreateReg(getReg()));
1439  }
1440
1441  void addRegOperands(MCInst &Inst, unsigned N) const {
1442    assert(N == 1 && "Invalid number of operands!");
1443    Inst.addOperand(MCOperand::CreateReg(getReg()));
1444  }
1445
1446  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1447    assert(N == 3 && "Invalid number of operands!");
1448    assert(isRegShiftedReg() &&
1449           "addRegShiftedRegOperands() on non RegShiftedReg!");
1450    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1451    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1452    Inst.addOperand(MCOperand::CreateImm(
1453      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1454  }
1455
1456  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1457    assert(N == 2 && "Invalid number of operands!");
1458    assert(isRegShiftedImm() &&
1459           "addRegShiftedImmOperands() on non RegShiftedImm!");
1460    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1461    // Shift of #32 is encoded as 0 where permitted
1462    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1463    Inst.addOperand(MCOperand::CreateImm(
1464      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1465  }
1466
1467  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1468    assert(N == 1 && "Invalid number of operands!");
1469    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1470                                         ShifterImm.Imm));
1471  }
1472
1473  void addRegListOperands(MCInst &Inst, unsigned N) const {
1474    assert(N == 1 && "Invalid number of operands!");
1475    const SmallVectorImpl<unsigned> &RegList = getRegList();
1476    for (SmallVectorImpl<unsigned>::const_iterator
1477           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1478      Inst.addOperand(MCOperand::CreateReg(*I));
1479  }
1480
1481  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1482    addRegListOperands(Inst, N);
1483  }
1484
1485  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1486    addRegListOperands(Inst, N);
1487  }
1488
1489  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1490    assert(N == 1 && "Invalid number of operands!");
1491    // Encoded as val>>3. The printer handles display as 8, 16, 24.
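    // e.g., 'ror #16' is stored here as 2.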
1492    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1493  }
1494
1495  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1496    assert(N == 1 && "Invalid number of operands!");
1497    // Munge the lsb/width into a bitfield mask.
1498    unsigned lsb = Bitfield.LSB;
1499    unsigned width = Bitfield.Width;
1500    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1501    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1502                      (32 - (lsb + width)));
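    // e.g., lsb=8, width=8 gives 0xffff00ff (bits [15:8] clear).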
1503    Inst.addOperand(MCOperand::CreateImm(Mask));
1504  }
1505
1506  void addImmOperands(MCInst &Inst, unsigned N) const {
1507    assert(N == 1 && "Invalid number of operands!");
1508    addExpr(Inst, getImm());
1509  }
1510
1511  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1512    assert(N == 1 && "Invalid number of operands!");
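    // The asm syntax gives the number of fraction bits; the MCInst operand
    // stores 16 minus that value.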
1513    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1514    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1515  }
1516
1517  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1518    assert(N == 1 && "Invalid number of operands!");
1519    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1520    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1521  }
1522
1523  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1524    assert(N == 1 && "Invalid number of operands!");
1525    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1526    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1527    Inst.addOperand(MCOperand::CreateImm(Val));
1528  }
1529
1530  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1531    assert(N == 1 && "Invalid number of operands!");
1532    // FIXME: We really want to scale the value here, but the LDRD/STRD
1533    // instructions don't encode operands that way yet.
1534    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1535    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1536  }
1537
1538  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1539    assert(N == 1 && "Invalid number of operands!");
1540    // The immediate is scaled by four in the encoding and is stored
1541    // in the MCInst as such. Lop off the low two bits here.
1542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1543    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1544  }
1545
1546  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1547    assert(N == 1 && "Invalid number of operands!");
1548    // The immediate is scaled by four in the encoding and is stored
1549    // in the MCInst as such. Lop off the low two bits here.
1550    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1551    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1552  }
1553
1554  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1555    assert(N == 1 && "Invalid number of operands!");
1556    // The immediate is scaled by four in the encoding and is stored
1557    // in the MCInst as such. Lop off the low two bits here.
1558    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1559    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1560  }
1561
1562  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1563    assert(N == 1 && "Invalid number of operands!");
1564    // The constant encodes as the immediate-1, and we store in the instruction
1565    // the bits as encoded, so subtract off one here.
1566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1567    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1568  }
1569
1570  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1571    assert(N == 1 && "Invalid number of operands!");
1572    // The constant encodes as the immediate-1, and we store in the instruction
1573    // the bits as encoded, so subtract off one here.
1574    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1575    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1576  }
1577
1578  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1579    assert(N == 1 && "Invalid number of operands!");
1580    // The constant encodes as the immediate, except for 32, which encodes as
1581    // zero.
1582    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1583    unsigned Imm = CE->getValue();
1584    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1585  }
1586
1587  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1588    assert(N == 1 && "Invalid number of operands!");
1589    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1590    // the instruction as well.
1591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1592    int Val = CE->getValue();
1593    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1594  }
1595
1596  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1597    assert(N == 1 && "Invalid number of operands!");
1598    // The operand is actually a t2_so_imm, but we have its bitwise
1599    // negation in the assembly source, so twiddle it here.
1600    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1601    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1602  }
1603
1604  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1605    assert(N == 1 && "Invalid number of operands!");
1606    // The operand is actually a t2_so_imm, but we have its
1607    // negation in the assembly source, so twiddle it here.
1608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1609    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1610  }
1611
1612  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1613    assert(N == 1 && "Invalid number of operands!");
1614    // The operand is actually an imm0_4095, but we have its
1615    // negation in the assembly source, so twiddle it here.
1616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1617    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1618  }
1619
1620  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1621    assert(N == 1 && "Invalid number of operands!");
1622    // The operand is actually a so_imm, but we have its bitwise
1623    // negation in the assembly source, so twiddle it here.
1624    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1625    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1626  }
1627
1628  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1629    assert(N == 1 && "Invalid number of operands!");
1630    // The operand is actually a so_imm, but we have its
1631    // negation in the assembly source, so twiddle it here.
1632    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1633    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1634  }
1635
1636  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1637    assert(N == 1 && "Invalid number of operands!");
1638    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1639  }
1640
1641  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1642    assert(N == 1 && "Invalid number of operands!");
1643    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1644  }
1645
1646  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1647    assert(N == 1 && "Invalid number of operands!");
1648    int32_t Imm = Memory.OffsetImm->getValue();
1649    // FIXME: Handle #-0
1650    if (Imm == INT32_MIN) Imm = 0;
1651    Inst.addOperand(MCOperand::CreateImm(Imm));
1652  }
1653
1654  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1655    assert(N == 1 && "Invalid number of operands!");
1656    assert(isImm() && "Not an immediate!");
1657
1658    // If we have an immediate that's not a constant, treat it as a label
1659    // reference needing a fixup.
1660    if (!isa<MCConstantExpr>(getImm())) {
1661      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1662      return;
1663    }
1664
1665    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1666    int Val = CE->getValue();
1667    Inst.addOperand(MCOperand::CreateImm(Val));
1668  }
1669
1670  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1671    assert(N == 2 && "Invalid number of operands!");
1672    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1673    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1674  }
1675
1676  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1677    assert(N == 3 && "Invalid number of operands!");
1678    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1679    if (!Memory.OffsetRegNum) {
1680      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1681      // Special case for #-0
1682      if (Val == INT32_MIN) Val = 0;
1683      if (Val < 0) Val = -Val;
1684      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1685    } else {
1686      // For register offset, we encode the shift type and negation flag
1687      // here.
1688      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1689                              Memory.ShiftImm, Memory.ShiftType);
1690    }
1691    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1692    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1693    Inst.addOperand(MCOperand::CreateImm(Val));
1694  }
1695
1696  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1697    assert(N == 2 && "Invalid number of operands!");
1698    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1699    assert(CE && "non-constant AM2OffsetImm operand!");
1700    int32_t Val = CE->getValue();
1701    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1702    // Special case for #-0
1703    if (Val == INT32_MIN) Val = 0;
1704    if (Val < 0) Val = -Val;
1705    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1706    Inst.addOperand(MCOperand::CreateReg(0));
1707    Inst.addOperand(MCOperand::CreateImm(Val));
1708  }
1709
1710  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1711    assert(N == 3 && "Invalid number of operands!");
1712    // If we have an immediate that's not a constant, treat it as a label
1713    // reference needing a fixup. If it is a constant, it's something else
1714    // and we reject it.
1715    if (isImm()) {
1716      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1717      Inst.addOperand(MCOperand::CreateReg(0));
1718      Inst.addOperand(MCOperand::CreateImm(0));
1719      return;
1720    }
1721
1722    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1723    if (!Memory.OffsetRegNum) {
1724      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1725      // Special case for #-0
1726      if (Val == INT32_MIN) Val = 0;
1727      if (Val < 0) Val = -Val;
1728      Val = ARM_AM::getAM3Opc(AddSub, Val);
1729    } else {
1730      // For register offset, only the negation flag is encoded here; AM3
1731      // register offsets have no shift.
1732      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1733    }
1734    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1735    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1736    Inst.addOperand(MCOperand::CreateImm(Val));
1737  }
1738
1739  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1740    assert(N == 2 && "Invalid number of operands!");
1741    if (Kind == k_PostIndexRegister) {
1742      int32_t Val =
1743        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1744      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1745      Inst.addOperand(MCOperand::CreateImm(Val));
1746      return;
1747    }
1748
1749    // Constant offset.
1750    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1751    int32_t Val = CE->getValue();
1752    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1753    // Special case for #-0
1754    if (Val == INT32_MIN) Val = 0;
1755    if (Val < 0) Val = -Val;
1756    Val = ARM_AM::getAM3Opc(AddSub, Val);
1757    Inst.addOperand(MCOperand::CreateReg(0));
1758    Inst.addOperand(MCOperand::CreateImm(Val));
1759  }
1760
1761  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1762    assert(N == 2 && "Invalid number of operands!");
1763    // If we have an immediate that's not a constant, treat it as a label
1764    // reference needing a fixup. If it is a constant, it's something else
1765    // and we reject it.
1766    if (isImm()) {
1767      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1768      Inst.addOperand(MCOperand::CreateImm(0));
1769      return;
1770    }
1771
1772    // The lower two bits are always zero and as such are not encoded.
1773    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1774    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1775    // Special case for #-0
1776    if (Val == INT32_MIN) Val = 0;
1777    if (Val < 0) Val = -Val;
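    // e.g., an offset of #-8 is passed to getAM5Opc as (sub, 2).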
1778    Val = ARM_AM::getAM5Opc(AddSub, Val);
1779    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1780    Inst.addOperand(MCOperand::CreateImm(Val));
1781  }
1782
1783  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1784    assert(N == 2 && "Invalid number of operands!");
1785    // If we have an immediate that's not a constant, treat it as a label
1786    // reference needing a fixup. If it is a constant, it's something else
1787    // and we reject it.
1788    if (isImm()) {
1789      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1790      Inst.addOperand(MCOperand::CreateImm(0));
1791      return;
1792    }
1793
1794    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1795    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1796    Inst.addOperand(MCOperand::CreateImm(Val));
1797  }
1798
1799  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 2 && "Invalid number of operands!");
1801    // The lower two bits are always zero and as such are not encoded.
1802    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1803    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1804    Inst.addOperand(MCOperand::CreateImm(Val));
1805  }
1806
1807  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1808    assert(N == 2 && "Invalid number of operands!");
1809    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1810    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1811    Inst.addOperand(MCOperand::CreateImm(Val));
1812  }
1813
1814  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1815    addMemImm8OffsetOperands(Inst, N);
1816  }
1817
1818  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1819    addMemImm8OffsetOperands(Inst, N);
1820  }
1821
1822  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1823    assert(N == 2 && "Invalid number of operands!");
1824    // If this is an immediate, it's a label reference.
1825    if (isImm()) {
1826      addExpr(Inst, getImm());
1827      Inst.addOperand(MCOperand::CreateImm(0));
1828      return;
1829    }
1830
1831    // Otherwise, it's a normal memory reg+offset.
1832    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1833    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1834    Inst.addOperand(MCOperand::CreateImm(Val));
1835  }
1836
1837  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1838    assert(N == 2 && "Invalid number of operands!");
1839    // If this is an immediate, it's a label reference.
1840    if (isImm()) {
1841      addExpr(Inst, getImm());
1842      Inst.addOperand(MCOperand::CreateImm(0));
1843      return;
1844    }
1845
1846    // Otherwise, it's a normal memory reg+offset.
1847    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1848    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1849    Inst.addOperand(MCOperand::CreateImm(Val));
1850  }
1851
1852  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1853    assert(N == 2 && "Invalid number of operands!");
1854    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1855    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1856  }
1857
1858  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1859    assert(N == 2 && "Invalid number of operands!");
1860    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1861    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1862  }
1863
1864  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1865    assert(N == 3 && "Invalid number of operands!");
1866    unsigned Val =
1867      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1868                        Memory.ShiftImm, Memory.ShiftType);
1869    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1870    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1871    Inst.addOperand(MCOperand::CreateImm(Val));
1872  }
1873
1874  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1875    assert(N == 3 && "Invalid number of operands!");
1876    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1877    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1878    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1879  }
1880
1881  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1882    assert(N == 2 && "Invalid number of operands!");
1883    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1884    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1885  }
1886
1887  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1888    assert(N == 2 && "Invalid number of operands!");
1889    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1890    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1891    Inst.addOperand(MCOperand::CreateImm(Val));
1892  }
1893
1894  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1895    assert(N == 2 && "Invalid number of operands!");
1896    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1897    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1898    Inst.addOperand(MCOperand::CreateImm(Val));
1899  }
1900
1901  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1902    assert(N == 2 && "Invalid number of operands!");
1903    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1904    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1905    Inst.addOperand(MCOperand::CreateImm(Val));
1906  }
1907
1908  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1909    assert(N == 2 && "Invalid number of operands!");
1910    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1911    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1912    Inst.addOperand(MCOperand::CreateImm(Val));
1913  }
1914
1915  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1916    assert(N == 1 && "Invalid number of operands!");
1917    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1918    assert(CE && "non-constant post-idx-imm8 operand!");
1919    int Imm = CE->getValue();
1920    bool isAdd = Imm >= 0;
1921    if (Imm == INT32_MIN) Imm = 0;
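    // Pack the magnitude in bits [7:0] and the add/sub flag in bit 8,
    // e.g. #4 becomes 0x104 and #-4 becomes 0x004.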
1922    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1923    Inst.addOperand(MCOperand::CreateImm(Imm));
1924  }
1925
1926  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1927    assert(N == 1 && "Invalid number of operands!");
1928    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1929    assert(CE && "non-constant post-idx-imm8s4 operand!");
1930    int Imm = CE->getValue();
1931    bool isAdd = Imm >= 0;
1932    if (Imm == INT32_MIN) Imm = 0;
1933    // Immediate is scaled by 4.
1934    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1935    Inst.addOperand(MCOperand::CreateImm(Imm));
1936  }
1937
1938  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1939    assert(N == 2 && "Invalid number of operands!");
1940    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1941    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1942  }
1943
1944  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1945    assert(N == 2 && "Invalid number of operands!");
1946    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1947    // The sign, shift type, and shift amount are encoded in a single operand
1948    // using the AM2 encoding helpers.
1949    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1950    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1951                                     PostIdxReg.ShiftTy);
1952    Inst.addOperand(MCOperand::CreateImm(Imm));
1953  }
1954
1955  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1956    assert(N == 1 && "Invalid number of operands!");
1957    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1958  }
1959
1960  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1961    assert(N == 1 && "Invalid number of operands!");
1962    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1963  }
1964
1965  void addVecListOperands(MCInst &Inst, unsigned N) const {
1966    assert(N == 1 && "Invalid number of operands!");
1967    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1968  }
1969
1970  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1971    assert(N == 2 && "Invalid number of operands!");
1972    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1973    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1974  }
1975
1976  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1977    assert(N == 1 && "Invalid number of operands!");
1978    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1979  }
1980
1981  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1982    assert(N == 1 && "Invalid number of operands!");
1983    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1984  }
1985
1986  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1987    assert(N == 1 && "Invalid number of operands!");
1988    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1989  }
1990
1991  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1992    assert(N == 1 && "Invalid number of operands!");
1993    // The immediate encodes the type of constant as well as the value.
1994    // Mask in that this is an i8 splat.
1995    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1996    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1997  }
1998
1999  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2000    assert(N == 1 && "Invalid number of operands!");
2001    // The immediate encodes the type of constant as well as the value.
2002    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2003    unsigned Value = CE->getValue();
2004    if (Value >= 256)
2005      Value = (Value >> 8) | 0xa00;
2006    else
2007      Value |= 0x800;
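    // e.g., 0x00ab encodes as 0x8ab and 0xab00 encodes as 0xaab.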
2008    Inst.addOperand(MCOperand::CreateImm(Value));
2009  }
2010
2011  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2012    assert(N == 1 && "Invalid number of operands!");
2013    // The immediate encodes the type of constant as well as the value.
2014    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2015    unsigned Value = CE->getValue();
2016    if (Value >= 256 && Value <= 0xff00)
2017      Value = (Value >> 8) | 0x200;
2018    else if (Value > 0xffff && Value <= 0xff0000)
2019      Value = (Value >> 16) | 0x400;
2020    else if (Value > 0xffffff)
2021      Value = (Value >> 24) | 0x600;
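    // Values below 256 pass through unchanged; e.g., 0x1200 becomes 0x212,
    // 0x120000 becomes 0x412, and 0x12000000 becomes 0x612.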
2022    Inst.addOperand(MCOperand::CreateImm(Value));
2023  }
2024
2025  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2026    assert(N == 1 && "Invalid number of operands!");
2027    // The immediate encodes the type of constant as well as the value.
2028    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2029    unsigned Value = CE->getValue();
2030    if (Value >= 256 && Value <= 0xffff)
2031      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2032    else if (Value > 0xffff && Value <= 0xffffff)
2033      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2034    else if (Value > 0xffffff)
2035      Value = (Value >> 24) | 0x600;
2036    Inst.addOperand(MCOperand::CreateImm(Value));
2037  }
2038
2039  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2040    assert(N == 1 && "Invalid number of operands!");
2041    // The immediate encodes the type of constant as well as the value.
2042    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2043    unsigned Value = ~CE->getValue();
2044    if (Value >= 256 && Value <= 0xffff)
2045      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2046    else if (Value > 0xffff && Value <= 0xffffff)
2047      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2048    else if (Value > 0xffffff)
2049      Value = (Value >> 24) | 0x600;
2050    Inst.addOperand(MCOperand::CreateImm(Value));
2051  }
2052
2053  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2054    assert(N == 1 && "Invalid number of operands!");
2055    // The immediate encodes the type of constant as well as the value.
2056    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2057    uint64_t Value = CE->getValue();
2058    unsigned Imm = 0;
2059    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2060      Imm |= (Value & 1) << i;
2061    }
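    // e.g., 0x00ff00ff00ff00ff yields the byte mask 0x55, emitted as 0x1e55.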
2062    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2063  }
2064
2065  virtual void print(raw_ostream &OS) const;
2066
2067  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2068    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2069    Op->ITMask.Mask = Mask;
2070    Op->StartLoc = S;
2071    Op->EndLoc = S;
2072    return Op;
2073  }
2074
2075  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2076    ARMOperand *Op = new ARMOperand(k_CondCode);
2077    Op->CC.Val = CC;
2078    Op->StartLoc = S;
2079    Op->EndLoc = S;
2080    return Op;
2081  }
2082
2083  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2084    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2085    Op->Cop.Val = CopVal;
2086    Op->StartLoc = S;
2087    Op->EndLoc = S;
2088    return Op;
2089  }
2090
2091  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2092    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2093    Op->Cop.Val = CopVal;
2094    Op->StartLoc = S;
2095    Op->EndLoc = S;
2096    return Op;
2097  }
2098
2099  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2100    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2101    Op->Cop.Val = Val;
2102    Op->StartLoc = S;
2103    Op->EndLoc = E;
2104    return Op;
2105  }
2106
2107  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2108    ARMOperand *Op = new ARMOperand(k_CCOut);
2109    Op->Reg.RegNum = RegNum;
2110    Op->StartLoc = S;
2111    Op->EndLoc = S;
2112    return Op;
2113  }
2114
2115  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2116    ARMOperand *Op = new ARMOperand(k_Token);
2117    Op->Tok.Data = Str.data();
2118    Op->Tok.Length = Str.size();
2119    Op->StartLoc = S;
2120    Op->EndLoc = S;
2121    return Op;
2122  }
2123
2124  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2125    ARMOperand *Op = new ARMOperand(k_Register);
2126    Op->Reg.RegNum = RegNum;
2127    Op->StartLoc = S;
2128    Op->EndLoc = E;
2129    return Op;
2130  }
2131
2132  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2133                                           unsigned SrcReg,
2134                                           unsigned ShiftReg,
2135                                           unsigned ShiftImm,
2136                                           SMLoc S, SMLoc E) {
2137    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2138    Op->RegShiftedReg.ShiftTy = ShTy;
2139    Op->RegShiftedReg.SrcReg = SrcReg;
2140    Op->RegShiftedReg.ShiftReg = ShiftReg;
2141    Op->RegShiftedReg.ShiftImm = ShiftImm;
2142    Op->StartLoc = S;
2143    Op->EndLoc = E;
2144    return Op;
2145  }
2146
2147  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2148                                            unsigned SrcReg,
2149                                            unsigned ShiftImm,
2150                                            SMLoc S, SMLoc E) {
2151    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2152    Op->RegShiftedImm.ShiftTy = ShTy;
2153    Op->RegShiftedImm.SrcReg = SrcReg;
2154    Op->RegShiftedImm.ShiftImm = ShiftImm;
2155    Op->StartLoc = S;
2156    Op->EndLoc = E;
2157    return Op;
2158  }
2159
2160  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2161                                   SMLoc S, SMLoc E) {
2162    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2163    Op->ShifterImm.isASR = isASR;
2164    Op->ShifterImm.Imm = Imm;
2165    Op->StartLoc = S;
2166    Op->EndLoc = E;
2167    return Op;
2168  }
2169
2170  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2171    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2172    Op->RotImm.Imm = Imm;
2173    Op->StartLoc = S;
2174    Op->EndLoc = E;
2175    return Op;
2176  }
2177
2178  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2179                                    SMLoc S, SMLoc E) {
2180    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2181    Op->Bitfield.LSB = LSB;
2182    Op->Bitfield.Width = Width;
2183    Op->StartLoc = S;
2184    Op->EndLoc = E;
2185    return Op;
2186  }
2187
2188  static ARMOperand *
2189  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2190                SMLoc StartLoc, SMLoc EndLoc) {
2191    KindTy Kind = k_RegisterList;
2192
2193    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2194      Kind = k_DPRRegisterList;
2195    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2196             contains(Regs.front().first))
2197      Kind = k_SPRRegisterList;
2198
2199    ARMOperand *Op = new ARMOperand(Kind);
2200    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2201           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2202      Op->Registers.push_back(I->first);
2203    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2204    Op->StartLoc = StartLoc;
2205    Op->EndLoc = EndLoc;
2206    return Op;
2207  }
2208
2209  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2210                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2211    ARMOperand *Op = new ARMOperand(k_VectorList);
2212    Op->VectorList.RegNum = RegNum;
2213    Op->VectorList.Count = Count;
2214    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2215    Op->StartLoc = S;
2216    Op->EndLoc = E;
2217    return Op;
2218  }
2219
2220  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2221                                              bool isDoubleSpaced,
2222                                              SMLoc S, SMLoc E) {
2223    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2224    Op->VectorList.RegNum = RegNum;
2225    Op->VectorList.Count = Count;
2226    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2227    Op->StartLoc = S;
2228    Op->EndLoc = E;
2229    return Op;
2230  }
2231
2232  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2233                                             unsigned Index,
2234                                             bool isDoubleSpaced,
2235                                             SMLoc S, SMLoc E) {
2236    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2237    Op->VectorList.RegNum = RegNum;
2238    Op->VectorList.Count = Count;
2239    Op->VectorList.LaneIndex = Index;
2240    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2241    Op->StartLoc = S;
2242    Op->EndLoc = E;
2243    return Op;
2244  }
2245
2246  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2247                                       MCContext &Ctx) {
2248    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2249    Op->VectorIndex.Val = Idx;
2250    Op->StartLoc = S;
2251    Op->EndLoc = E;
2252    return Op;
2253  }
2254
2255  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2256    ARMOperand *Op = new ARMOperand(k_Immediate);
2257    Op->Imm.Val = Val;
2258    Op->StartLoc = S;
2259    Op->EndLoc = E;
2260    return Op;
2261  }
2262
2263  static ARMOperand *CreateMem(unsigned BaseRegNum,
2264                               const MCConstantExpr *OffsetImm,
2265                               unsigned OffsetRegNum,
2266                               ARM_AM::ShiftOpc ShiftType,
2267                               unsigned ShiftImm,
2268                               unsigned Alignment,
2269                               bool isNegative,
2270                               SMLoc S, SMLoc E) {
2271    ARMOperand *Op = new ARMOperand(k_Memory);
2272    Op->Memory.BaseRegNum = BaseRegNum;
2273    Op->Memory.OffsetImm = OffsetImm;
2274    Op->Memory.OffsetRegNum = OffsetRegNum;
2275    Op->Memory.ShiftType = ShiftType;
2276    Op->Memory.ShiftImm = ShiftImm;
2277    Op->Memory.Alignment = Alignment;
2278    Op->Memory.isNegative = isNegative;
2279    Op->StartLoc = S;
2280    Op->EndLoc = E;
2281    return Op;
2282  }
2283
2284  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2285                                      ARM_AM::ShiftOpc ShiftTy,
2286                                      unsigned ShiftImm,
2287                                      SMLoc S, SMLoc E) {
2288    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2289    Op->PostIdxReg.RegNum = RegNum;
2290    Op->PostIdxReg.isAdd = isAdd;
2291    Op->PostIdxReg.ShiftTy = ShiftTy;
2292    Op->PostIdxReg.ShiftImm = ShiftImm;
2293    Op->StartLoc = S;
2294    Op->EndLoc = E;
2295    return Op;
2296  }
2297
2298  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2299    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2300    Op->MBOpt.Val = Opt;
2301    Op->StartLoc = S;
2302    Op->EndLoc = S;
2303    return Op;
2304  }
2305
2306  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2307    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2308    Op->IFlags.Val = IFlags;
2309    Op->StartLoc = S;
2310    Op->EndLoc = S;
2311    return Op;
2312  }
2313
2314  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2315    ARMOperand *Op = new ARMOperand(k_MSRMask);
2316    Op->MMask.Val = MMask;
2317    Op->StartLoc = S;
2318    Op->EndLoc = S;
2319    return Op;
2320  }
2321};
2322
2323} // end anonymous namespace.
2324
2325void ARMOperand::print(raw_ostream &OS) const {
2326  switch (Kind) {
2327  case k_CondCode:
2328    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2329    break;
2330  case k_CCOut:
2331    OS << "<ccout " << getReg() << ">";
2332    break;
2333  case k_ITCondMask: {
2334    static const char *const MaskStr[] = {
2335      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2336      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2337    };
2338    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2339    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2340    break;
2341  }
2342  case k_CoprocNum:
2343    OS << "<coprocessor number: " << getCoproc() << ">";
2344    break;
2345  case k_CoprocReg:
2346    OS << "<coprocessor register: " << getCoproc() << ">";
2347    break;
2348  case k_CoprocOption:
2349    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2350    break;
2351  case k_MSRMask:
2352    OS << "<mask: " << getMSRMask() << ">";
2353    break;
2354  case k_Immediate:
2355    getImm()->print(OS);
2356    break;
2357  case k_MemBarrierOpt:
2358    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2359    break;
2360  case k_Memory:
2361    OS << "<memory "
2362       << " base:" << Memory.BaseRegNum;
2363    OS << ">";
2364    break;
2365  case k_PostIndexRegister:
2366    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2367       << PostIdxReg.RegNum;
2368    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2369      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2370         << PostIdxReg.ShiftImm;
2371    OS << ">";
2372    break;
2373  case k_ProcIFlags: {
2374    OS << "<ARM_PROC::";
2375    unsigned IFlags = getProcIFlags();
2376    for (int i=2; i >= 0; --i)
2377      if (IFlags & (1 << i))
2378        OS << ARM_PROC::IFlagsToString(1 << i);
2379    OS << ">";
2380    break;
2381  }
2382  case k_Register:
2383    OS << "<register " << getReg() << ">";
2384    break;
2385  case k_ShifterImmediate:
2386    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2387       << " #" << ShifterImm.Imm << ">";
2388    break;
2389  case k_ShiftedRegister:
2390    OS << "<so_reg_reg "
2391       << RegShiftedReg.SrcReg << " "
2392       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2393       << " " << RegShiftedReg.ShiftReg << ">";
2394    break;
2395  case k_ShiftedImmediate:
2396    OS << "<so_reg_imm "
2397       << RegShiftedImm.SrcReg << " "
2398       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2399       << " #" << RegShiftedImm.ShiftImm << ">";
2400    break;
2401  case k_RotateImmediate:
2402    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2403    break;
2404  case k_BitfieldDescriptor:
2405    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2406       << ", width: " << Bitfield.Width << ">";
2407    break;
2408  case k_RegisterList:
2409  case k_DPRRegisterList:
2410  case k_SPRRegisterList: {
2411    OS << "<register_list ";
2412
2413    const SmallVectorImpl<unsigned> &RegList = getRegList();
2414    for (SmallVectorImpl<unsigned>::const_iterator
2415           I = RegList.begin(), E = RegList.end(); I != E; ) {
2416      OS << *I;
2417      if (++I < E) OS << ", ";
2418    }
2419
2420    OS << ">";
2421    break;
2422  }
2423  case k_VectorList:
2424    OS << "<vector_list " << VectorList.Count << " * "
2425       << VectorList.RegNum << ">";
2426    break;
2427  case k_VectorListAllLanes:
2428    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2429       << VectorList.RegNum << ">";
2430    break;
2431  case k_VectorListIndexed:
2432    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2433       << VectorList.Count << " * " << VectorList.RegNum << ">";
2434    break;
2435  case k_Token:
2436    OS << "'" << getToken() << "'";
2437    break;
2438  case k_VectorIndex:
2439    OS << "<vectorindex " << getVectorIndex() << ">";
2440    break;
2441  }
2442}
2443
2444/// @name Auto-generated Match Functions
2445/// {
2446
2447static unsigned MatchRegisterName(StringRef Name);
2448
2449/// }
2450
2451bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2452                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2453  StartLoc = Parser.getTok().getLoc();
2454  EndLoc = Parser.getTok().getEndLoc();
2455  RegNo = tryParseRegister();
2456
2457  return (RegNo == (unsigned)-1);
2458}
2459
2460/// Try to parse a register name.  The token must be an Identifier when called,
2461/// and if it is a register name the token is eaten and the register number is
2462/// returned.  Otherwise return -1.
2463///
2464int ARMAsmParser::tryParseRegister() {
2465  const AsmToken &Tok = Parser.getTok();
2466  if (Tok.isNot(AsmToken::Identifier)) return -1;
2467
2468  std::string lowerCase = Tok.getString().lower();
2469  unsigned RegNum = MatchRegisterName(lowerCase);
2470  if (!RegNum) {
2471    RegNum = StringSwitch<unsigned>(lowerCase)
2472      .Case("r13", ARM::SP)
2473      .Case("r14", ARM::LR)
2474      .Case("r15", ARM::PC)
2475      .Case("ip", ARM::R12)
2476      // Additional register name aliases for 'gas' compatibility.
2477      .Case("a1", ARM::R0)
2478      .Case("a2", ARM::R1)
2479      .Case("a3", ARM::R2)
2480      .Case("a4", ARM::R3)
2481      .Case("v1", ARM::R4)
2482      .Case("v2", ARM::R5)
2483      .Case("v3", ARM::R6)
2484      .Case("v4", ARM::R7)
2485      .Case("v5", ARM::R8)
2486      .Case("v6", ARM::R9)
2487      .Case("v7", ARM::R10)
2488      .Case("v8", ARM::R11)
2489      .Case("sb", ARM::R9)
2490      .Case("sl", ARM::R10)
2491      .Case("fp", ARM::R11)
2492      .Default(0);
2493  }
2494  if (!RegNum) {
2495    // Check for aliases registered via .req. Canonicalize to lower case.
2496    // That's more consistent since register names are case insensitive, and
2497    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2498    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2499    // If no match, return failure.
2500    if (Entry == RegisterReqs.end())
2501      return -1;
2502    Parser.Lex(); // Eat identifier token.
2503    return Entry->getValue();
2504  }
2505
2506  Parser.Lex(); // Eat identifier token.
2507
2508  return RegNum;
2509}
2510
2511// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2512// If a recoverable error occurs, return 1. If an irrecoverable error
2513// occurs, return -1. An irrecoverable error is one where tokens have been
2514// consumed in the process of trying to parse the shifter (i.e., when it is
2515// indeed a shifter operand, but malformed).
2516int ARMAsmParser::tryParseShiftRegister(
2517                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2518  SMLoc S = Parser.getTok().getLoc();
2519  const AsmToken &Tok = Parser.getTok();
2520  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2521
2522  std::string lowerCase = Tok.getString().lower();
2523  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2524      .Case("asl", ARM_AM::lsl)
2525      .Case("lsl", ARM_AM::lsl)
2526      .Case("lsr", ARM_AM::lsr)
2527      .Case("asr", ARM_AM::asr)
2528      .Case("ror", ARM_AM::ror)
2529      .Case("rrx", ARM_AM::rrx)
2530      .Default(ARM_AM::no_shift);
2531
2532  if (ShiftTy == ARM_AM::no_shift)
2533    return 1;
2534
2535  Parser.Lex(); // Eat the operator.
2536
2537  // The source register for the shift has already been added to the
2538  // operand list, so we need to pop it off and combine it into the shifted
2539  // register operand instead.
2540  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2541  if (!PrevOp->isReg())
2542    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2543  int SrcReg = PrevOp->getReg();
2544
2545  SMLoc EndLoc;
2546  int64_t Imm = 0;
2547  int ShiftReg = 0;
2548  if (ShiftTy == ARM_AM::rrx) {
2549    // RRX doesn't have an explicit shift amount. The encoder expects
2550    // the shift register to be the same as the source register. Seems odd,
2551    // but OK.
2552    ShiftReg = SrcReg;
2553  } else {
2554    // Figure out if this is shifted by a constant or a register (for non-RRX).
2555    if (Parser.getTok().is(AsmToken::Hash) ||
2556        Parser.getTok().is(AsmToken::Dollar)) {
2557      Parser.Lex(); // Eat hash.
2558      SMLoc ImmLoc = Parser.getTok().getLoc();
2559      const MCExpr *ShiftExpr = 0;
2560      if (getParser().ParseExpression(ShiftExpr, EndLoc)) {
2561        Error(ImmLoc, "invalid immediate shift value");
2562        return -1;
2563      }
2564      // The expression must be evaluatable as an immediate.
2565      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2566      if (!CE) {
2567        Error(ImmLoc, "invalid immediate shift value");
2568        return -1;
2569      }
2570      // Range check the immediate.
2571      // lsl, ror: 0 <= imm <= 31
2572      // lsr, asr: 0 <= imm <= 32
2573      Imm = CE->getValue();
2574      if (Imm < 0 ||
2575          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2576          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2577        Error(ImmLoc, "immediate shift value out of range");
2578        return -1;
2579      }
2580      // Shift by zero is a nop. Always send it through as lsl.
2581      // ('as' compatibility)
2582      if (Imm == 0)
2583        ShiftTy = ARM_AM::lsl;
2584    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2585      SMLoc L = Parser.getTok().getLoc();
2586      EndLoc = Parser.getTok().getEndLoc();
2587      ShiftReg = tryParseRegister();
2588      if (ShiftReg == -1) {
2589        Error(L, "expected immediate or register in shift operand");
2590        return -1;
2591      }
2592    } else {
2593      Error(Parser.getTok().getLoc(),
2594            "expected immediate or register in shift operand");
2595      return -1;
2596    }
2597  }
2598
2599  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2600    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2601                                                         ShiftReg, Imm,
2602                                                         S, EndLoc));
2603  else
2604    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2605                                                          S, EndLoc));
2606
2607  return 0;
2608}
2609
2610
2611/// Try to parse a register name.  The token must be an Identifier when called.
2612/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2613/// if there is a "writeback". Returns 'true' if it's not a register.
2614///
2615/// TODO this is likely to change to allow different register types and or to
2616/// parse for a specific register type.
2617bool ARMAsmParser::
2618tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2619  const AsmToken &RegTok = Parser.getTok();
2620  int RegNo = tryParseRegister();
2621  if (RegNo == -1)
2622    return true;
2623
2624  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
2625                                           RegTok.getEndLoc()));
2626
2627  const AsmToken &ExclaimTok = Parser.getTok();
2628  if (ExclaimTok.is(AsmToken::Exclaim)) {
2629    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2630                                               ExclaimTok.getLoc()));
2631    Parser.Lex(); // Eat exclaim token
2632    return false;
2633  }
2634
2635  // Also check for an index operand. This is only legal for vector registers,
2636  // but that'll get caught OK in operand matching, so we don't need to
2637  // explicitly filter everything else out here.
2638  if (Parser.getTok().is(AsmToken::LBrac)) {
2639    SMLoc SIdx = Parser.getTok().getLoc();
2640    Parser.Lex(); // Eat left bracket token.
2641
2642    const MCExpr *ImmVal;
2643    if (getParser().ParseExpression(ImmVal))
2644      return true;
2645    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2646    if (!MCE)
2647      return TokError("immediate value expected for vector index");
2648
2649    if (Parser.getTok().isNot(AsmToken::RBrac))
2650      return Error(Parser.getTok().getLoc(), "']' expected");
2651
2652    SMLoc E = Parser.getTok().getEndLoc();
2653    Parser.Lex(); // Eat right bracket token.
2654
2655    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2656                                                     SIdx, E,
2657                                                     getContext()));
2658  }
2659
2660  return false;
2661}
2662
2663/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2664/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2665/// "c5", ...
2666static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2667  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2668  // but efficient.
2669  switch (Name.size()) {
2670  default: return -1;
2671  case 2:
2672    if (Name[0] != CoprocOp)
2673      return -1;
2674    switch (Name[1]) {
2675    default:  return -1;
2676    case '0': return 0;
2677    case '1': return 1;
2678    case '2': return 2;
2679    case '3': return 3;
2680    case '4': return 4;
2681    case '5': return 5;
2682    case '6': return 6;
2683    case '7': return 7;
2684    case '8': return 8;
2685    case '9': return 9;
2686    }
2687  case 3:
2688    if (Name[0] != CoprocOp || Name[1] != '1')
2689      return -1;
2690    switch (Name[2]) {
2691    default:  return -1;
2692    case '0': return 10;
2693    case '1': return 11;
2694    case '2': return 12;
2695    case '3': return 13;
2696    case '4': return 14;
2697    case '5': return 15;
2698    }
2699  }
2700}
2701
2702/// parseITCondCode - Try to parse a condition code for an IT instruction.
2703ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2704parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2705  SMLoc S = Parser.getTok().getLoc();
2706  const AsmToken &Tok = Parser.getTok();
2707  if (!Tok.is(AsmToken::Identifier))
2708    return MatchOperand_NoMatch;
2709  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2710    .Case("eq", ARMCC::EQ)
2711    .Case("ne", ARMCC::NE)
2712    .Case("hs", ARMCC::HS)
2713    .Case("cs", ARMCC::HS)
2714    .Case("lo", ARMCC::LO)
2715    .Case("cc", ARMCC::LO)
2716    .Case("mi", ARMCC::MI)
2717    .Case("pl", ARMCC::PL)
2718    .Case("vs", ARMCC::VS)
2719    .Case("vc", ARMCC::VC)
2720    .Case("hi", ARMCC::HI)
2721    .Case("ls", ARMCC::LS)
2722    .Case("ge", ARMCC::GE)
2723    .Case("lt", ARMCC::LT)
2724    .Case("gt", ARMCC::GT)
2725    .Case("le", ARMCC::LE)
2726    .Case("al", ARMCC::AL)
2727    .Default(~0U);
2728  if (CC == ~0U)
2729    return MatchOperand_NoMatch;
2730  Parser.Lex(); // Eat the token.
2731
2732  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2733
2734  return MatchOperand_Success;
2735}
2736
2737/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2738/// token must be an Identifier when called, and if it is a coprocessor
2739/// number, the token is eaten and the operand is added to the operand list.
2740ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2741parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2742  SMLoc S = Parser.getTok().getLoc();
2743  const AsmToken &Tok = Parser.getTok();
2744  if (Tok.isNot(AsmToken::Identifier))
2745    return MatchOperand_NoMatch;
2746
2747  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2748  if (Num == -1)
2749    return MatchOperand_NoMatch;
2750
2751  Parser.Lex(); // Eat identifier token.
2752  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2753  return MatchOperand_Success;
2754}
2755
2756/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2757/// token must be an Identifier when called, and if it is a coprocessor
2758/// register, the token is eaten and the operand is added to the operand list.
2759ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2760parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2761  SMLoc S = Parser.getTok().getLoc();
2762  const AsmToken &Tok = Parser.getTok();
2763  if (Tok.isNot(AsmToken::Identifier))
2764    return MatchOperand_NoMatch;
2765
2766  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2767  if (Reg == -1)
2768    return MatchOperand_NoMatch;
2769
2770  Parser.Lex(); // Eat identifier token.
2771  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2772  return MatchOperand_Success;
2773}
2774
2775/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2776/// coproc_option : '{' imm0_255 '}'
2777ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2778parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2779  SMLoc S = Parser.getTok().getLoc();
2780
2781  // If this isn't a '{', this isn't a coprocessor immediate operand.
2782  if (Parser.getTok().isNot(AsmToken::LCurly))
2783    return MatchOperand_NoMatch;
2784  Parser.Lex(); // Eat the '{'
2785
2786  const MCExpr *Expr;
2787  SMLoc Loc = Parser.getTok().getLoc();
2788  if (getParser().ParseExpression(Expr)) {
2789    Error(Loc, "illegal expression");
2790    return MatchOperand_ParseFail;
2791  }
2792  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2793  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2794    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2795    return MatchOperand_ParseFail;
2796  }
2797  int Val = CE->getValue();
2798
2799  // Check for and consume the closing '}'
2800  if (Parser.getTok().isNot(AsmToken::RCurly))
2801    return MatchOperand_ParseFail;
2802  SMLoc E = Parser.getTok().getEndLoc();
2803  Parser.Lex(); // Eat the '}'
2804
2805  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2806  return MatchOperand_Success;
2807}
2808
2809// For register list parsing, we need to map from raw GPR register numbering
2810// to the enumeration values. The enumeration values aren't sorted by
2811// register number due to our using "sp", "lr" and "pc" as canonical names.
2812static unsigned getNextRegister(unsigned Reg) {
2813  // If this is a GPR, we need to do it manually, otherwise we can rely
2814  // on the sort ordering of the enumeration since the other reg-classes
2815  // are sane.
2816  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2817    return Reg + 1;
2818  switch(Reg) {
2819  default: llvm_unreachable("Invalid GPR number!");
2820  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2821  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2822  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2823  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2824  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2825  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2826  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2827  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2828  }
2829}
2830
2831// Return the low-subreg of a given Q register.
2832static unsigned getDRegFromQReg(unsigned QReg) {
2833  switch (QReg) {
2834  default: llvm_unreachable("expected a Q register!");
2835  case ARM::Q0:  return ARM::D0;
2836  case ARM::Q1:  return ARM::D2;
2837  case ARM::Q2:  return ARM::D4;
2838  case ARM::Q3:  return ARM::D6;
2839  case ARM::Q4:  return ARM::D8;
2840  case ARM::Q5:  return ARM::D10;
2841  case ARM::Q6:  return ARM::D12;
2842  case ARM::Q7:  return ARM::D14;
2843  case ARM::Q8:  return ARM::D16;
2844  case ARM::Q9:  return ARM::D18;
2845  case ARM::Q10: return ARM::D20;
2846  case ARM::Q11: return ARM::D22;
2847  case ARM::Q12: return ARM::D24;
2848  case ARM::Q13: return ARM::D26;
2849  case ARM::Q14: return ARM::D28;
2850  case ARM::Q15: return ARM::D30;
2851  }
2852}
2853
2854/// Parse a register list.
2855bool ARMAsmParser::
2856parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2857  assert(Parser.getTok().is(AsmToken::LCurly) &&
2858         "Token is not a Left Curly Brace");
2859  SMLoc S = Parser.getTok().getLoc();
2860  Parser.Lex(); // Eat '{' token.
2861  SMLoc RegLoc = Parser.getTok().getLoc();
2862
2863  // Check the first register in the list to see what register class
2864  // this is a list of.
2865  int Reg = tryParseRegister();
2866  if (Reg == -1)
2867    return Error(RegLoc, "register expected");
2868
2869  // The reglist instructions have at most 16 registers, so reserve
2870  // space for that many.
2871  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2872
2873  // Allow Q regs and just interpret them as the two D sub-registers.
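  // e.g., 'q0' contributes d0 and d1 to the list.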
2874  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2875    Reg = getDRegFromQReg(Reg);
2876    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2877    ++Reg;
2878  }
2879  const MCRegisterClass *RC;
2880  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2881    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2882  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2883    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2884  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2885    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2886  else
2887    return Error(RegLoc, "invalid register in register list");
2888
2889  // Store the register.
2890  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2891
2892  // This starts immediately after the first register token in the list,
2893  // so we can see either a comma or a minus (range separator) as a legal
2894  // next token.
2895  while (Parser.getTok().is(AsmToken::Comma) ||
2896         Parser.getTok().is(AsmToken::Minus)) {
2897    if (Parser.getTok().is(AsmToken::Minus)) {
2898      Parser.Lex(); // Eat the minus.
2899      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
2900      int EndReg = tryParseRegister();
2901      if (EndReg == -1)
2902        return Error(AfterMinusLoc, "register expected");
2903      // Allow Q regs and just interpret them as the two D sub-registers.
2904      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2905        EndReg = getDRegFromQReg(EndReg) + 1;
2906      // If the register is the same as the start reg, there's nothing
2907      // more to do.
2908      if (Reg == EndReg)
2909        continue;
2910      // The register must be in the same register class as the first.
2911      if (!RC->contains(EndReg))
2912        return Error(AfterMinusLoc, "invalid register in register list");
2913      // Ranges must go from low to high.
2914      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
2915        return Error(AfterMinusLoc, "bad range in register list");
2916
2917      // Add all the registers in the range to the register list.
2918      while (Reg != EndReg) {
2919        Reg = getNextRegister(Reg);
2920        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2921      }
2922      continue;
2923    }
2924    Parser.Lex(); // Eat the comma.
2925    RegLoc = Parser.getTok().getLoc();
2926    int OldReg = Reg;
2927    const AsmToken RegTok = Parser.getTok();
2928    Reg = tryParseRegister();
2929    if (Reg == -1)
2930      return Error(RegLoc, "register expected");
2931    // Allow Q regs and just interpret them as the two D sub-registers.
2932    bool isQReg = false;
2933    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2934      Reg = getDRegFromQReg(Reg);
2935      isQReg = true;
2936    }
2937    // The register must be in the same register class as the first.
2938    if (!RC->contains(Reg))
2939      return Error(RegLoc, "invalid register in register list");
2940    // List must be monotonically increasing.
2941    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
2942      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2943        Warning(RegLoc, "register list not in ascending order");
2944      else
2945        return Error(RegLoc, "register list not in ascending order");
2946    }
2947    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
2948      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2949              ") in register list");
2950      continue;
2951    }
2952    // VFP register lists must also be contiguous.
2953    // It's OK to use the enumeration values directly here, as the
2954    // VFP register classes have the enum sorted properly.
2955    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2956        Reg != OldReg + 1)
2957      return Error(RegLoc, "non-contiguous register range");
2958    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2959    if (isQReg)
2960      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2961  }
2962
2963  if (Parser.getTok().isNot(AsmToken::RCurly))
2964    return Error(Parser.getTok().getLoc(), "'}' expected");
2965  SMLoc E = Parser.getTok().getEndLoc();
2966  Parser.Lex(); // Eat '}' token.
2967
2968  // Push the register list operand.
2969  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2970
2971  // The ARM system instruction variants for LDM/STM have a '^' token here.
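  // For example: 'ldmfd sp!, {r0-r3, pc}^'.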
2972  if (Parser.getTok().is(AsmToken::Caret)) {
2973    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2974    Parser.Lex(); // Eat '^' token.
2975  }
2976
2977  return false;
2978}
2979
2980// Helper function to parse the lane index for vector lists.
2981ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2982parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
2983  Index = 0; // Always return a defined index value.
2984  if (Parser.getTok().is(AsmToken::LBrac)) {
2985    Parser.Lex(); // Eat the '['.
2986    if (Parser.getTok().is(AsmToken::RBrac)) {
2987      // "Dn[]" is the 'all lanes' syntax.
2988      LaneKind = AllLanes;
2989      EndLoc = Parser.getTok().getEndLoc();
2990      Parser.Lex(); // Eat the ']'.
2991      return MatchOperand_Success;
2992    }
2993
2994    // There's an optional '#' token here. Normally there wouldn't be, but
2995    // inline assembly puts one in, and it's friendly to accept that.
2996    if (Parser.getTok().is(AsmToken::Hash))
2997      Parser.Lex(); // Eat the '#'
2998
2999    const MCExpr *LaneIndex;
3000    SMLoc Loc = Parser.getTok().getLoc();
3001    if (getParser().ParseExpression(LaneIndex)) {
3002      Error(Loc, "illegal expression");
3003      return MatchOperand_ParseFail;
3004    }
3005    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3006    if (!CE) {
3007      Error(Loc, "lane index must be empty or an integer");
3008      return MatchOperand_ParseFail;
3009    }
3010    if (Parser.getTok().isNot(AsmToken::RBrac)) {
3011      Error(Parser.getTok().getLoc(), "']' expected");
3012      return MatchOperand_ParseFail;
3013    }
3014    EndLoc = Parser.getTok().getEndLoc();
3015    Parser.Lex(); // Eat the ']'.
3016    int64_t Val = CE->getValue();
3017
3018    // FIXME: Make this range check context sensitive for .8, .16, .32.
3019    if (Val < 0 || Val > 7) {
3020      Error(Parser.getTok().getLoc(), "lane index out of range");
3021      return MatchOperand_ParseFail;
3022    }
3023    Index = Val;
3024    LaneKind = IndexedLane;
3025    return MatchOperand_Success;
3026  }
3027  LaneKind = NoLanes;
3028  return MatchOperand_Success;
3029}
3030
3031// Parse a vector register list.
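// e.g. {d0, d1, d2, d3}, {d0-d3}, {q0, q1}, or {d0[], d1[]}; Q registers are
// expanded to their D sub-registers and double-spaced lists such as
// {d0, d2, d4, d6} are also handled.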
3032ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3033parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3034  VectorLaneTy LaneKind;
3035  unsigned LaneIndex;
3036  SMLoc S = Parser.getTok().getLoc();
3037  // As an extension (to match gas), support a plain D register or Q register
3038  // (without enclosing curly braces) as a single- or double-entry list,
3039  // respectively.
3040  if (Parser.getTok().is(AsmToken::Identifier)) {
3041    SMLoc E = Parser.getTok().getEndLoc();
3042    int Reg = tryParseRegister();
3043    if (Reg == -1)
3044      return MatchOperand_NoMatch;
3045    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3046      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3047      if (Res != MatchOperand_Success)
3048        return Res;
3049      switch (LaneKind) {
3050      case NoLanes:
3051        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3052        break;
3053      case AllLanes:
3054        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3055                                                                S, E));
3056        break;
3057      case IndexedLane:
3058        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3059                                                               LaneIndex,
3060                                                               false, S, E));
3061        break;
3062      }
3063      return MatchOperand_Success;
3064    }
3065    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3066      Reg = getDRegFromQReg(Reg);
3067      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3068      if (Res != MatchOperand_Success)
3069        return Res;
3070      switch (LaneKind) {
3071      case NoLanes:
3072        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3073                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3074        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3075        break;
3076      case AllLanes:
3077        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3078                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3079        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3080                                                                S, E));
3081        break;
3082      case IndexedLane:
3083        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3084                                                               LaneIndex,
3085                                                               false, S, E));
3086        break;
3087      }
3088      return MatchOperand_Success;
3089    }
3090    Error(S, "vector register expected");
3091    return MatchOperand_ParseFail;
3092  }
3093
3094  if (Parser.getTok().isNot(AsmToken::LCurly))
3095    return MatchOperand_NoMatch;
3096
3097  Parser.Lex(); // Eat '{' token.
3098  SMLoc RegLoc = Parser.getTok().getLoc();
3099
3100  int Reg = tryParseRegister();
3101  if (Reg == -1) {
3102    Error(RegLoc, "register expected");
3103    return MatchOperand_ParseFail;
3104  }
3105  unsigned Count = 1;
3106  int Spacing = 0;
3107  unsigned FirstReg = Reg;
3108  // The list is of D registers, but we also allow Q regs and just interpret
3109  // them as the two D sub-registers.
3110  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3111    FirstReg = Reg = getDRegFromQReg(Reg);
3112    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3113                 // it's ambiguous with four-register single spaced.
3114    ++Reg;
3115    ++Count;
3116  }
3117
3118  SMLoc E;
3119  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3120    return MatchOperand_ParseFail;
3121
3122  while (Parser.getTok().is(AsmToken::Comma) ||
3123         Parser.getTok().is(AsmToken::Minus)) {
3124    if (Parser.getTok().is(AsmToken::Minus)) {
3125      if (!Spacing)
3126        Spacing = 1; // Register range implies a single spaced list.
3127      else if (Spacing == 2) {
3128        Error(Parser.getTok().getLoc(),
3129              "sequential registers in double spaced list");
3130        return MatchOperand_ParseFail;
3131      }
3132      Parser.Lex(); // Eat the minus.
3133      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3134      int EndReg = tryParseRegister();
3135      if (EndReg == -1) {
3136        Error(AfterMinusLoc, "register expected");
3137        return MatchOperand_ParseFail;
3138      }
3139      // Allow Q regs and just interpret them as the two D sub-registers.
3140      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3141        EndReg = getDRegFromQReg(EndReg) + 1;
3142      // If the register is the same as the start reg, there's nothing
3143      // more to do.
3144      if (Reg == EndReg)
3145        continue;
3146      // The register must be in the same register class as the first.
3147      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3148        Error(AfterMinusLoc, "invalid register in register list");
3149        return MatchOperand_ParseFail;
3150      }
3151      // Ranges must go from low to high.
3152      if (Reg > EndReg) {
3153        Error(AfterMinusLoc, "bad range in register list");
3154        return MatchOperand_ParseFail;
3155      }
3156      // Parse the lane specifier if present.
3157      VectorLaneTy NextLaneKind;
3158      unsigned NextLaneIndex;
3159      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3160          MatchOperand_Success)
3161        return MatchOperand_ParseFail;
3162      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3163        Error(AfterMinusLoc, "mismatched lane index in register list");
3164        return MatchOperand_ParseFail;
3165      }
3166
3167      // Add all the registers in the range to the register list.
3168      Count += EndReg - Reg;
3169      Reg = EndReg;
3170      continue;
3171    }
3172    Parser.Lex(); // Eat the comma.
3173    RegLoc = Parser.getTok().getLoc();
3174    int OldReg = Reg;
3175    Reg = tryParseRegister();
3176    if (Reg == -1) {
3177      Error(RegLoc, "register expected");
3178      return MatchOperand_ParseFail;
3179    }
3180    // Vector register lists must be contiguous.
3181    // It's OK to use the enumeration values directly here, as the
3182    // VFP register classes have the enum sorted properly.
3183    //
3184    // The list is of D registers, but we also allow Q regs and just interpret
3185    // them as the two D sub-registers.
3186    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3187      if (!Spacing)
3188        Spacing = 1; // Register range implies a single spaced list.
3189      else if (Spacing == 2) {
3190        Error(RegLoc,
3191              "invalid register in double-spaced list (must be 'D' register)");
3192        return MatchOperand_ParseFail;
3193      }
3194      Reg = getDRegFromQReg(Reg);
3195      if (Reg != OldReg + 1) {
3196        Error(RegLoc, "non-contiguous register range");
3197        return MatchOperand_ParseFail;
3198      }
3199      ++Reg;
3200      Count += 2;
3201      // Parse the lane specifier if present.
3202      VectorLaneTy NextLaneKind;
3203      unsigned NextLaneIndex;
3204      SMLoc LaneLoc = Parser.getTok().getLoc();
3205      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3206          MatchOperand_Success)
3207        return MatchOperand_ParseFail;
3208      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3209        Error(LaneLoc, "mismatched lane index in register list");
3210        return MatchOperand_ParseFail;
3211      }
3212      continue;
3213    }
3214    // Normal D register.
3215    // Figure out the register spacing (single or double) of the list if
3216    // we don't know it already.
3217    if (!Spacing)
3218      Spacing = 1 + (Reg == OldReg + 2);
3219
3220    // Just check that it's contiguous and keep going.
3221    if (Reg != OldReg + Spacing) {
3222      Error(RegLoc, "non-contiguous register range");
3223      return MatchOperand_ParseFail;
3224    }
3225    ++Count;
3226    // Parse the lane specifier if present.
3227    VectorLaneTy NextLaneKind;
3228    unsigned NextLaneIndex;
3229    SMLoc EndLoc = Parser.getTok().getLoc();
3230    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3231      return MatchOperand_ParseFail;
3232    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3233      Error(EndLoc, "mismatched lane index in register list");
3234      return MatchOperand_ParseFail;
3235    }
3236  }
3237
3238  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3239    Error(Parser.getTok().getLoc(), "'}' expected");
3240    return MatchOperand_ParseFail;
3241  }
3242  E = Parser.getTok().getEndLoc();
3243  Parser.Lex(); // Eat '}' token.
3244
3245  switch (LaneKind) {
3246  case NoLanes:
3247    // Two-register operands have been converted to the
3248    // composite register classes.
3249    if (Count == 2) {
3250      const MCRegisterClass *RC = (Spacing == 1) ?
3251        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3252        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3253      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3254    }
3255
3256    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3257                                                    (Spacing == 2), S, E));
3258    break;
3259  case AllLanes:
3260    // Two-register operands have been converted to the
3261    // composite register classes.
3262    if (Count == 2) {
3263      const MCRegisterClass *RC = (Spacing == 1) ?
3264        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3265        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3266      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3267    }
3268    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3269                                                            (Spacing == 2),
3270                                                            S, E));
3271    break;
3272  case IndexedLane:
3273    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3274                                                           LaneIndex,
3275                                                           (Spacing == 2),
3276                                                           S, E));
3277    break;
3278  }
3279  return MatchOperand_Success;
3280}
3281
3282/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
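/// e.g. the 'ish' in 'dmb ish' or the 'sy' in 'dsb sy'; a raw immediate in the
/// range [0,15] is also accepted in place of the named option.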
3283ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3284parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3285  SMLoc S = Parser.getTok().getLoc();
3286  const AsmToken &Tok = Parser.getTok();
3287  unsigned Opt;
3288
3289  if (Tok.is(AsmToken::Identifier)) {
3290    StringRef OptStr = Tok.getString();
3291
3292    Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
3293      .Case("sy",    ARM_MB::SY)
3294      .Case("st",    ARM_MB::ST)
3295      .Case("sh",    ARM_MB::ISH)
3296      .Case("ish",   ARM_MB::ISH)
3297      .Case("shst",  ARM_MB::ISHST)
3298      .Case("ishst", ARM_MB::ISHST)
3299      .Case("nsh",   ARM_MB::NSH)
3300      .Case("un",    ARM_MB::NSH)
3301      .Case("nshst", ARM_MB::NSHST)
3302      .Case("unst",  ARM_MB::NSHST)
3303      .Case("osh",   ARM_MB::OSH)
3304      .Case("oshst", ARM_MB::OSHST)
3305      .Default(~0U);
3306
3307    if (Opt == ~0U)
3308      return MatchOperand_NoMatch;
3309
3310    Parser.Lex(); // Eat identifier token.
3311  } else if (Tok.is(AsmToken::Hash) ||
3312             Tok.is(AsmToken::Dollar) ||
3313             Tok.is(AsmToken::Integer)) {
3314    if (Parser.getTok().isNot(AsmToken::Integer))
3315      Parser.Lex(); // Eat the '#'.
3316    SMLoc Loc = Parser.getTok().getLoc();
3317
3318    const MCExpr *MemBarrierID;
3319    if (getParser().ParseExpression(MemBarrierID)) {
3320      Error(Loc, "illegal expression");
3321      return MatchOperand_ParseFail;
3322    }
3323
3324    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3325    if (!CE) {
3326      Error(Loc, "constant expression expected");
3327      return MatchOperand_ParseFail;
3328    }
3329
3330    int Val = CE->getValue();
3331    if (Val & ~0xf) {
3332      Error(Loc, "immediate value out of range");
3333      return MatchOperand_ParseFail;
3334    }
3335
3336    Opt = ARM_MB::RESERVED_0 + Val;
3337  } else
3338    return MatchOperand_ParseFail;
3339
3340  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3341  return MatchOperand_Success;
3342}
3343
3344/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
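/// e.g. the 'if' in 'cpsie if' or the 'aif' in 'cpsid aif'; the string 'none'
/// selects no AIF bits at all.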
3345ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3346parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3347  SMLoc S = Parser.getTok().getLoc();
3348  const AsmToken &Tok = Parser.getTok();
3349  if (!Tok.is(AsmToken::Identifier))
3350    return MatchOperand_NoMatch;
3351  StringRef IFlagsStr = Tok.getString();
3352
3353  // An iflags string of "none" is interpreted to mean that none of the AIF
3354  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3355  unsigned IFlags = 0;
3356  if (IFlagsStr != "none") {
3357    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3358      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3359        .Case("a", ARM_PROC::A)
3360        .Case("i", ARM_PROC::I)
3361        .Case("f", ARM_PROC::F)
3362        .Default(~0U);
3363
3364      // If some specific iflag is already set, it means that some letter is
3365      // present more than once, which is not acceptable.
3366      if (Flag == ~0U || (IFlags & Flag))
3367        return MatchOperand_NoMatch;
3368
3369      IFlags |= Flag;
3370    }
3371  }
3372
3373  Parser.Lex(); // Eat identifier token.
3374  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3375  return MatchOperand_Success;
3376}
3377
3378/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
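/// e.g. the 'apsr_nzcvq' in 'msr apsr_nzcvq, r0' or the 'cpsr_fc' in
/// 'msr cpsr_fc, r1'; on M-class cores, system registers such as 'primask' and
/// 'basepri' are handled here as well.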
3379ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3380parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3381  SMLoc S = Parser.getTok().getLoc();
3382  const AsmToken &Tok = Parser.getTok();
3383  if (!Tok.is(AsmToken::Identifier))
3384    return MatchOperand_NoMatch;
3385  StringRef Mask = Tok.getString();
3386
3387  if (isMClass()) {
3388    // See ARMv6-M 10.1.1
3389    std::string Name = Mask.lower();
3390    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3391      // Note: in the documentation:
3392      //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3393      //  for MSR APSR_nzcvq.
3394      // but we do make it an alias here.  This is done to get the "mask encoding"
3395      // bits correct on MSR APSR writes.
3396      //
3397      // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3398      // should really only be allowed when writing a special register.  Note
3399      // they get dropped in the MRS instruction reading a special register as
3400      // the SYSm field is only 8 bits.
3401      //
3402      // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3403      // includes the DSP extension but that is not checked.
3404      .Case("apsr", 0x800)
3405      .Case("apsr_nzcvq", 0x800)
3406      .Case("apsr_g", 0x400)
3407      .Case("apsr_nzcvqg", 0xc00)
3408      .Case("iapsr", 0x801)
3409      .Case("iapsr_nzcvq", 0x801)
3410      .Case("iapsr_g", 0x401)
3411      .Case("iapsr_nzcvqg", 0xc01)
3412      .Case("eapsr", 0x802)
3413      .Case("eapsr_nzcvq", 0x802)
3414      .Case("eapsr_g", 0x402)
3415      .Case("eapsr_nzcvqg", 0xc02)
3416      .Case("xpsr", 0x803)
3417      .Case("xpsr_nzcvq", 0x803)
3418      .Case("xpsr_g", 0x403)
3419      .Case("xpsr_nzcvqg", 0xc03)
3420      .Case("ipsr", 0x805)
3421      .Case("epsr", 0x806)
3422      .Case("iepsr", 0x807)
3423      .Case("msp", 0x808)
3424      .Case("psp", 0x809)
3425      .Case("primask", 0x810)
3426      .Case("basepri", 0x811)
3427      .Case("basepri_max", 0x812)
3428      .Case("faultmask", 0x813)
3429      .Case("control", 0x814)
3430      .Default(~0U);
3431
3432    if (FlagsVal == ~0U)
3433      return MatchOperand_NoMatch;
3434
3435    if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3436      // basepri, basepri_max and faultmask are only valid for ARMv7-M.
3437      return MatchOperand_NoMatch;
3438
3439    Parser.Lex(); // Eat identifier token.
3440    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3441    return MatchOperand_Success;
3442  }
3443
3444  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3445  size_t Start = 0, Next = Mask.find('_');
3446  StringRef Flags = "";
3447  std::string SpecReg = Mask.slice(Start, Next).lower();
3448  if (Next != StringRef::npos)
3449    Flags = Mask.slice(Next+1, Mask.size());
3450
3451  // FlagsVal contains the complete mask:
3452  // 3-0: Mask
3453  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3454  unsigned FlagsVal = 0;
3455
3456  if (SpecReg == "apsr") {
3457    FlagsVal = StringSwitch<unsigned>(Flags)
3458    .Case("nzcvq",  0x8) // same as CPSR_f
3459    .Case("g",      0x4) // same as CPSR_s
3460    .Case("nzcvqg", 0xc) // same as CPSR_fs
3461    .Default(~0U);
3462
3463    if (FlagsVal == ~0U) {
3464      if (!Flags.empty())
3465        return MatchOperand_NoMatch;
3466      else
3467        FlagsVal = 8; // No flag
3468    }
3469  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3470    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3471    if (Flags == "all" || Flags == "")
3472      Flags = "fc";
3473    for (int i = 0, e = Flags.size(); i != e; ++i) {
3474      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3475      .Case("c", 1)
3476      .Case("x", 2)
3477      .Case("s", 4)
3478      .Case("f", 8)
3479      .Default(~0U);
3480
3481      // If some specific flag is already set, it means that some letter is
3482      // present more than once, which is not acceptable.
3483      if (FlagsVal == ~0U || (FlagsVal & Flag))
3484        return MatchOperand_NoMatch;
3485      FlagsVal |= Flag;
3486    }
3487  } else // No match for special register.
3488    return MatchOperand_NoMatch;
3489
3490  // Special register without flags is NOT equivalent to "fc" flags.
3491  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3492  // two lines would enable gas compatibility at the expense of breaking
3493  // round-tripping.
3494  //
3495  // if (!FlagsVal)
3496  //  FlagsVal = 0x9;
3497
3498  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3499  if (SpecReg == "spsr")
3500    FlagsVal |= 16;
3501
3502  Parser.Lex(); // Eat identifier token.
3503  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3504  return MatchOperand_Success;
3505}
3506
3507ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3508parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3509            int Low, int High) {
3510  const AsmToken &Tok = Parser.getTok();
3511  if (Tok.isNot(AsmToken::Identifier)) {
3512    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3513    return MatchOperand_ParseFail;
3514  }
3515  StringRef ShiftName = Tok.getString();
3516  std::string LowerOp = Op.lower();
3517  std::string UpperOp = Op.upper();
3518  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3519    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3520    return MatchOperand_ParseFail;
3521  }
3522  Parser.Lex(); // Eat shift type token.
3523
3524  // There must be a '#' and a shift amount.
3525  if (Parser.getTok().isNot(AsmToken::Hash) &&
3526      Parser.getTok().isNot(AsmToken::Dollar)) {
3527    Error(Parser.getTok().getLoc(), "'#' expected");
3528    return MatchOperand_ParseFail;
3529  }
3530  Parser.Lex(); // Eat hash token.
3531
3532  const MCExpr *ShiftAmount;
3533  SMLoc Loc = Parser.getTok().getLoc();
3534  SMLoc EndLoc;
3535  if (getParser().ParseExpression(ShiftAmount, EndLoc)) {
3536    Error(Loc, "illegal expression");
3537    return MatchOperand_ParseFail;
3538  }
3539  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3540  if (!CE) {
3541    Error(Loc, "constant expression expected");
3542    return MatchOperand_ParseFail;
3543  }
3544  int Val = CE->getValue();
3545  if (Val < Low || Val > High) {
3546    Error(Loc, "immediate value out of range");
3547    return MatchOperand_ParseFail;
3548  }
3549
3550  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
3551
3552  return MatchOperand_Success;
3553}
3554
3555ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3556parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3557  const AsmToken &Tok = Parser.getTok();
3558  SMLoc S = Tok.getLoc();
3559  if (Tok.isNot(AsmToken::Identifier)) {
3560    Error(S, "'be' or 'le' operand expected");
3561    return MatchOperand_ParseFail;
3562  }
3563  int Val = StringSwitch<int>(Tok.getString())
3564    .Case("be", 1)
3565    .Case("le", 0)
3566    .Default(-1);
3567  Parser.Lex(); // Eat the token.
3568
3569  if (Val == -1) {
3570    Error(S, "'be' or 'le' operand expected");
3571    return MatchOperand_ParseFail;
3572  }
3573  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3574                                                                  getContext()),
3575                                           S, Tok.getEndLoc()));
3576  return MatchOperand_Success;
3577}
3578
3579/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3580/// instructions. Legal values are:
3581///     lsl #n  'n' in [0,31]
3582///     asr #n  'n' in [1,32]
3583///             n == 32 encoded as n == 0.
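/// For example, the 'lsl #4' in 'ssat r0, #8, r1, lsl #4'.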
3584ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3585parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3586  const AsmToken &Tok = Parser.getTok();
3587  SMLoc S = Tok.getLoc();
3588  if (Tok.isNot(AsmToken::Identifier)) {
3589    Error(S, "shift operator 'asr' or 'lsl' expected");
3590    return MatchOperand_ParseFail;
3591  }
3592  StringRef ShiftName = Tok.getString();
3593  bool isASR;
3594  if (ShiftName == "lsl" || ShiftName == "LSL")
3595    isASR = false;
3596  else if (ShiftName == "asr" || ShiftName == "ASR")
3597    isASR = true;
3598  else {
3599    Error(S, "shift operator 'asr' or 'lsl' expected");
3600    return MatchOperand_ParseFail;
3601  }
3602  Parser.Lex(); // Eat the operator.
3603
3604  // A '#' and a shift amount.
3605  if (Parser.getTok().isNot(AsmToken::Hash) &&
3606      Parser.getTok().isNot(AsmToken::Dollar)) {
3607    Error(Parser.getTok().getLoc(), "'#' expected");
3608    return MatchOperand_ParseFail;
3609  }
3610  Parser.Lex(); // Eat hash token.
3611  SMLoc ExLoc = Parser.getTok().getLoc();
3612
3613  const MCExpr *ShiftAmount;
3614  SMLoc EndLoc;
3615  if (getParser().ParseExpression(ShiftAmount, EndLoc)) {
3616    Error(ExLoc, "malformed shift expression");
3617    return MatchOperand_ParseFail;
3618  }
3619  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3620  if (!CE) {
3621    Error(ExLoc, "shift amount must be an immediate");
3622    return MatchOperand_ParseFail;
3623  }
3624
3625  int64_t Val = CE->getValue();
3626  if (isASR) {
3627    // Shift amount must be in [1,32]
3628    if (Val < 1 || Val > 32) {
3629      Error(ExLoc, "'asr' shift amount must be in range [1,32]");
3630      return MatchOperand_ParseFail;
3631    }
3632    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3633    if (isThumb() && Val == 32) {
3634      Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
3635      return MatchOperand_ParseFail;
3636    }
3637    if (Val == 32) Val = 0;
3638  } else {
3639    // Shift amount must be in [0,31]
3640    if (Val < 0 || Val > 31) {
3641      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
3642      return MatchOperand_ParseFail;
3643    }
3644  }
3645
3646  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
3647
3648  return MatchOperand_Success;
3649}
3650
3651/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3652/// of instructions. Legal values are:
3653///     ror #n  'n' in {0, 8, 16, 24}
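/// For example, the 'ror #16' in 'sxtb r0, r1, ror #16'.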
3654ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3655parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3656  const AsmToken &Tok = Parser.getTok();
3657  SMLoc S = Tok.getLoc();
3658  if (Tok.isNot(AsmToken::Identifier))
3659    return MatchOperand_NoMatch;
3660  StringRef ShiftName = Tok.getString();
3661  if (ShiftName != "ror" && ShiftName != "ROR")
3662    return MatchOperand_NoMatch;
3663  Parser.Lex(); // Eat the operator.
3664
3665  // A '#' and a rotate amount.
3666  if (Parser.getTok().isNot(AsmToken::Hash) &&
3667      Parser.getTok().isNot(AsmToken::Dollar)) {
3668    Error(Parser.getTok().getLoc(), "'#' expected");
3669    return MatchOperand_ParseFail;
3670  }
3671  Parser.Lex(); // Eat hash token.
3672  SMLoc ExLoc = Parser.getTok().getLoc();
3673
3674  const MCExpr *ShiftAmount;
3675  SMLoc EndLoc;
3676  if (getParser().ParseExpression(ShiftAmount, EndLoc)) {
3677    Error(ExLoc, "malformed rotate expression");
3678    return MatchOperand_ParseFail;
3679  }
3680  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3681  if (!CE) {
3682    Error(ExLoc, "rotate amount must be an immediate");
3683    return MatchOperand_ParseFail;
3684  }
3685
3686  int64_t Val = CE->getValue();
3687  // Rotate amount must be in {0, 8, 16, 24}. Zero is an undocumented
3688  // extension; normally, zero is represented in asm by omitting the rotate
3689  // operand entirely.
3690  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3691    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
3692    return MatchOperand_ParseFail;
3693  }
3694
3695  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
3696
3697  return MatchOperand_Success;
3698}
3699
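/// parseBitfield - Parse the '#lsb, #width' bitfield descriptor used by
/// instructions such as BFC/BFI, e.g. the '#8, #4' in 'bfi r0, r1, #8, #4'.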
3700ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3701parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3702  SMLoc S = Parser.getTok().getLoc();
3703  // The bitfield descriptor is really two operands, the LSB and the width.
3704  if (Parser.getTok().isNot(AsmToken::Hash) &&
3705      Parser.getTok().isNot(AsmToken::Dollar)) {
3706    Error(Parser.getTok().getLoc(), "'#' expected");
3707    return MatchOperand_ParseFail;
3708  }
3709  Parser.Lex(); // Eat hash token.
3710
3711  const MCExpr *LSBExpr;
3712  SMLoc E = Parser.getTok().getLoc();
3713  if (getParser().ParseExpression(LSBExpr)) {
3714    Error(E, "malformed immediate expression");
3715    return MatchOperand_ParseFail;
3716  }
3717  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3718  if (!CE) {
3719    Error(E, "'lsb' operand must be an immediate");
3720    return MatchOperand_ParseFail;
3721  }
3722
3723  int64_t LSB = CE->getValue();
3724  // The LSB must be in the range [0,31]
3725  if (LSB < 0 || LSB > 31) {
3726    Error(E, "'lsb' operand must be in the range [0,31]");
3727    return MatchOperand_ParseFail;
3728  }
3729  E = Parser.getTok().getLoc();
3730
3731  // Expect another immediate operand.
3732  if (Parser.getTok().isNot(AsmToken::Comma)) {
3733    Error(Parser.getTok().getLoc(), "too few operands");
3734    return MatchOperand_ParseFail;
3735  }
3736  Parser.Lex(); // Eat the comma.
3737  if (Parser.getTok().isNot(AsmToken::Hash) &&
3738      Parser.getTok().isNot(AsmToken::Dollar)) {
3739    Error(Parser.getTok().getLoc(), "'#' expected");
3740    return MatchOperand_ParseFail;
3741  }
3742  Parser.Lex(); // Eat hash token.
3743
3744  const MCExpr *WidthExpr;
3745  SMLoc EndLoc;
3746  if (getParser().ParseExpression(WidthExpr, EndLoc)) {
3747    Error(E, "malformed immediate expression");
3748    return MatchOperand_ParseFail;
3749  }
3750  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3751  if (!CE) {
3752    Error(E, "'width' operand must be an immediate");
3753    return MatchOperand_ParseFail;
3754  }
3755
3756  int64_t Width = CE->getValue();
3757  // The width must be in the range [1,32-lsb]
3758  if (Width < 1 || Width > 32 - LSB) {
3759    Error(E, "'width' operand must be in the range [1,32-lsb]");
3760    return MatchOperand_ParseFail;
3761  }
3762
3763  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
3764
3765  return MatchOperand_Success;
3766}
3767
3768ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3769parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3770  // Check for a post-index addressing register operand. Specifically:
3771  // postidx_reg := '+' register {, shift}
3772  //              | '-' register {, shift}
3773  //              | register {, shift}
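  // e.g. the 'r2' in 'ldr r0, [r1], r2' or the '-r2, lsl #2' in
  // 'str r0, [r1], -r2, lsl #2'.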
3774
3775  // This method must return MatchOperand_NoMatch without consuming any tokens
3776  // in the case where there is no match, as other parse methods will try the
3777  // remaining alternatives.
3778  AsmToken Tok = Parser.getTok();
3779  SMLoc S = Tok.getLoc();
3780  bool haveEaten = false;
3781  bool isAdd = true;
3782  if (Tok.is(AsmToken::Plus)) {
3783    Parser.Lex(); // Eat the '+' token.
3784    haveEaten = true;
3785  } else if (Tok.is(AsmToken::Minus)) {
3786    Parser.Lex(); // Eat the '-' token.
3787    isAdd = false;
3788    haveEaten = true;
3789  }
3790
3791  SMLoc E = Parser.getTok().getEndLoc();
3792  int Reg = tryParseRegister();
3793  if (Reg == -1) {
3794    if (!haveEaten)
3795      return MatchOperand_NoMatch;
3796    Error(Parser.getTok().getLoc(), "register expected");
3797    return MatchOperand_ParseFail;
3798  }
3799
3800  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3801  unsigned ShiftImm = 0;
3802  if (Parser.getTok().is(AsmToken::Comma)) {
3803    Parser.Lex(); // Eat the ','.
3804    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3805      return MatchOperand_ParseFail;
3806
3807    // FIXME: Only approximates end...may include intervening whitespace.
3808    E = Parser.getTok().getLoc();
3809  }
3810
3811  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3812                                                  ShiftImm, S, E));
3813
3814  return MatchOperand_Success;
3815}
3816
3817ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3818parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3819  // Check for a post-index addressing register operand. Specifically:
3820  // am3offset := '+' register
3821  //              | '-' register
3822  //              | register
3823  //              | # imm
3824  //              | # + imm
3825  //              | # - imm
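  // e.g. the '#4' in 'ldrd r0, r1, [r2], #4' or the '-r3' in 'ldrh r0, [r2], -r3'.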
3826
3827  // This method must return MatchOperand_NoMatch without consuming any tokens
3828  // in the case where there is no match, as other parse methods will try the
3829  // remaining alternatives.
3830  AsmToken Tok = Parser.getTok();
3831  SMLoc S = Tok.getLoc();
3832
3833  // Do immediates first, as we always parse those if we have a '#'.
3834  if (Parser.getTok().is(AsmToken::Hash) ||
3835      Parser.getTok().is(AsmToken::Dollar)) {
3836    Parser.Lex(); // Eat the '#'.
3837    // Explicitly look for a '-', as we need to encode negative zero
3838    // differently.
3839    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3840    const MCExpr *Offset;
3841    SMLoc E;
3842    if (getParser().ParseExpression(Offset, E))
3843      return MatchOperand_ParseFail;
3844    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3845    if (!CE) {
3846      Error(S, "constant expression expected");
3847      return MatchOperand_ParseFail;
3848    }
3849    // Negative zero is encoded as the flag value INT32_MIN.
3850    int32_t Val = CE->getValue();
3851    if (isNegative && Val == 0)
3852      Val = INT32_MIN;
3853
3854    Operands.push_back(
3855      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3856
3857    return MatchOperand_Success;
3858  }
3859
3860
3861  bool haveEaten = false;
3862  bool isAdd = true;
3863  if (Tok.is(AsmToken::Plus)) {
3864    Parser.Lex(); // Eat the '+' token.
3865    haveEaten = true;
3866  } else if (Tok.is(AsmToken::Minus)) {
3867    Parser.Lex(); // Eat the '-' token.
3868    isAdd = false;
3869    haveEaten = true;
3870  }
3871
3872  Tok = Parser.getTok();
3873  int Reg = tryParseRegister();
3874  if (Reg == -1) {
3875    if (!haveEaten)
3876      return MatchOperand_NoMatch;
3877    Error(Tok.getLoc(), "register expected");
3878    return MatchOperand_ParseFail;
3879  }
3880
3881  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3882                                                  0, S, Tok.getEndLoc()));
3883
3884  return MatchOperand_Success;
3885}
3886
3887/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3888/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3889/// when they refer to multiple MIOperands inside a single one.
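// A rough sketch of the expected layout for e.g. 'ldrd r0, r1, [r2, #8]!':
//   Operands[0] mnemonic token, [1] predicate, [2] Rt, [3] Rt2, [4] memory op.
// The tied writeback result has no source operand, so a dummy register is
// synthesized below.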
3890void ARMAsmParser::
3891cvtT2LdrdPre(MCInst &Inst,
3892             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3893  // Rt, Rt2
3894  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3895  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3896  // Create a writeback register dummy placeholder.
3897  Inst.addOperand(MCOperand::CreateReg(0));
3898  // addr
3899  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3900  // pred
3901  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3902}
3903
3904/// cvtT2StrdPre - Convert parsed operands to MCInst.
3905/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3906/// when they refer to multiple MIOperands inside a single one.
3907void ARMAsmParser::
3908cvtT2StrdPre(MCInst &Inst,
3909             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3910  // Create a writeback register dummy placeholder.
3911  Inst.addOperand(MCOperand::CreateReg(0));
3912  // Rt, Rt2
3913  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3914  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3915  // addr
3916  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3917  // pred
3918  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3919}
3920
3921/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3922/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3923/// when they refer to multiple MIOperands inside a single one.
3924void ARMAsmParser::
3925cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
3926                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3927  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3928
3929  // Create a writeback register dummy placeholder.
3930  Inst.addOperand(MCOperand::CreateImm(0));
3931
3932  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3933  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3934}
3935
3936/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3937/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3938/// when they refer to multiple MIOperands inside a single one.
3939void ARMAsmParser::
3940cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
3941                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3942  // Create a writeback register dummy placeholder.
3943  Inst.addOperand(MCOperand::CreateImm(0));
3944  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3945  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3946  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3947}
3948
3949/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3950/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3951/// when they refer to multiple MIOperands inside a single one.
3952void ARMAsmParser::
3953cvtLdWriteBackRegAddrMode2(MCInst &Inst,
3954                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3955  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3956
3957  // Create a writeback register dummy placeholder.
3958  Inst.addOperand(MCOperand::CreateImm(0));
3959
3960  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3961  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3962}
3963
3964/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3965/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3966/// when they refer to multiple MIOperands inside a single one.
3967void ARMAsmParser::
3968cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
3969                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3970  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3971
3972  // Create a writeback register dummy placeholder.
3973  Inst.addOperand(MCOperand::CreateImm(0));
3974
3975  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3976  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3977}
3978
3979
3980/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3981/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3982/// when they refer to multiple MIOperands inside a single one.
3983void ARMAsmParser::
3984cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
3985                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3986  // Create a writeback register dummy placeholder.
3987  Inst.addOperand(MCOperand::CreateImm(0));
3988  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3989  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3990  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3991}
3992
3993/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3994/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3995/// when they refer to multiple MIOperands inside a single one.
3996void ARMAsmParser::
3997cvtStWriteBackRegAddrMode2(MCInst &Inst,
3998                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3999  // Create a writeback register dummy placeholder.
4000  Inst.addOperand(MCOperand::CreateImm(0));
4001  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4002  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
4003  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4004}
4005
4006/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4007/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4008/// when they refer to multiple MIOperands inside a single one.
4009void ARMAsmParser::
4010cvtStWriteBackRegAddrMode3(MCInst &Inst,
4011                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4012  // Create a writeback register dummy placeholder.
4013  Inst.addOperand(MCOperand::CreateImm(0));
4014  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4015  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4016  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4017}
4018
4019/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
4020/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4021/// when they refer to multiple MIOperands inside a single one.
4022void ARMAsmParser::
4023cvtLdExtTWriteBackImm(MCInst &Inst,
4024                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4025  // Rt
4026  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4027  // Create a writeback register dummy placeholder.
4028  Inst.addOperand(MCOperand::CreateImm(0));
4029  // addr
4030  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4031  // offset
4032  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4033  // pred
4034  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4035}
4036
4037/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
4038/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4039/// when they refer to multiple MIOperands inside a single one.
4040void ARMAsmParser::
4041cvtLdExtTWriteBackReg(MCInst &Inst,
4042                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4043  // Rt
4044  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4045  // Create a writeback register dummy placeholder.
4046  Inst.addOperand(MCOperand::CreateImm(0));
4047  // addr
4048  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4049  // offset
4050  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4051  // pred
4052  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4053}
4054
4055/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
4056/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4057/// when they refer to multiple MIOperands inside a single one.
4058void ARMAsmParser::
4059cvtStExtTWriteBackImm(MCInst &Inst,
4060                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4061  // Create a writeback register dummy placeholder.
4062  Inst.addOperand(MCOperand::CreateImm(0));
4063  // Rt
4064  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4065  // addr
4066  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4067  // offset
4068  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4069  // pred
4070  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4071}
4072
4073/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
4074/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4075/// when they refer to multiple MIOperands inside a single one.
4076void ARMAsmParser::
4077cvtStExtTWriteBackReg(MCInst &Inst,
4078                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4079  // Create a writeback register dummy placeholder.
4080  Inst.addOperand(MCOperand::CreateImm(0));
4081  // Rt
4082  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4083  // addr
4084  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4085  // offset
4086  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4087  // pred
4088  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4089}
4090
4091/// cvtLdrdPre - Convert parsed operands to MCInst.
4092/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4093/// when they refer to multiple MIOperands inside a single one.
4094void ARMAsmParser::
4095cvtLdrdPre(MCInst &Inst,
4096           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4097  // Rt, Rt2
4098  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4099  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4100  // Create a writeback register dummy placeholder.
4101  Inst.addOperand(MCOperand::CreateImm(0));
4102  // addr
4103  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4104  // pred
4105  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4106}
4107
4108/// cvtStrdPre - Convert parsed operands to MCInst.
4109/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4110/// when they refer to multiple MIOperands inside a single one.
4111void ARMAsmParser::
4112cvtStrdPre(MCInst &Inst,
4113           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4114  // Create a writeback register dummy placeholder.
4115  Inst.addOperand(MCOperand::CreateImm(0));
4116  // Rt, Rt2
4117  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4118  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4119  // addr
4120  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4121  // pred
4122  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4123}
4124
4125/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4126/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4127/// when they refer to multiple MIOperands inside a single one.
4128void ARMAsmParser::
4129cvtLdWriteBackRegAddrMode3(MCInst &Inst,
4130                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4131  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4132  // Create a writeback register dummy placeholder.
4133  Inst.addOperand(MCOperand::CreateImm(0));
4134  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4135  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4136}
4137
4138/// cvtThumbMultiply - Convert parsed operands to MCInst.
4139/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4140/// when they refer to multiple MIOperands inside a single one.
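// e.g. for 'muls r0, r0, r1' the destination register also appears as a source,
// so Rn must be taken from whichever source operand is not Rd.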
4141void ARMAsmParser::
4142cvtThumbMultiply(MCInst &Inst,
4143           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4144  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4145  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4146  // If we have a three-operand form, make sure to set Rn to be the operand
4147  // that isn't the same as Rd.
4148  unsigned RegOp = 4;
4149  if (Operands.size() == 6 &&
4150      ((ARMOperand*)Operands[4])->getReg() ==
4151        ((ARMOperand*)Operands[3])->getReg())
4152    RegOp = 5;
4153  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4154  Inst.addOperand(Inst.getOperand(0));
4155  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4156}
4157
4158void ARMAsmParser::
4159cvtVLDwbFixed(MCInst &Inst,
4160              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4161  // Vd
4162  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4163  // Create a writeback register dummy placeholder.
4164  Inst.addOperand(MCOperand::CreateImm(0));
4165  // Vn
4166  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4167  // pred
4168  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4169}
4170
4171void ARMAsmParser::
4172cvtVLDwbRegister(MCInst &Inst,
4173                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4174  // Vd
4175  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4176  // Create a writeback register dummy placeholder.
4177  Inst.addOperand(MCOperand::CreateImm(0));
4178  // Vn
4179  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4180  // Vm
4181  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4182  // pred
4183  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4184}
4185
4186void ARMAsmParser::
4187cvtVSTwbFixed(MCInst &Inst,
4188              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4189  // Create a writeback register dummy placeholder.
4190  Inst.addOperand(MCOperand::CreateImm(0));
4191  // Vn
4192  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4193  // Vt
4194  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4195  // pred
4196  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4197}
4198
4199void ARMAsmParser::
4200cvtVSTwbRegister(MCInst &Inst,
4201                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4202  // Create a writeback register dummy placeholder.
4203  Inst.addOperand(MCOperand::CreateImm(0));
4204  // Vn
4205  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4206  // Vm
4207  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4208  // Vt
4209  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4210  // pred
4211  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4212}
4213
4214/// Parse an ARM memory expression. Return false on success; otherwise return
4215/// true after emitting an error. The first token must be a '[' when called.
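/// Handles forms such as '[r0]', '[r0, #4]', '[r0, r1, lsl #2]', and the
/// alignment syntax '[r0, :128]', optionally followed by a '!' writeback marker.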
4216bool ARMAsmParser::
4217parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4218  SMLoc S, E;
4219  assert(Parser.getTok().is(AsmToken::LBrac) &&
4220         "Token is not a Left Bracket");
4221  S = Parser.getTok().getLoc();
4222  Parser.Lex(); // Eat left bracket token.
4223
4224  const AsmToken &BaseRegTok = Parser.getTok();
4225  int BaseRegNum = tryParseRegister();
4226  if (BaseRegNum == -1)
4227    return Error(BaseRegTok.getLoc(), "register expected");
4228
4229  // The next token must either be a comma or a closing bracket.
4230  const AsmToken &Tok = Parser.getTok();
4231  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4232    return Error(Tok.getLoc(), "malformed memory operand");
4233
4234  if (Tok.is(AsmToken::RBrac)) {
4235    E = Tok.getEndLoc();
4236    Parser.Lex(); // Eat right bracket token.
4237
4238    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4239                                             0, 0, false, S, E));
4240
4241    // If there's a pre-indexing writeback marker, '!', just add it as a token
4242    // operand. It's rather odd, but syntactically valid.
4243    if (Parser.getTok().is(AsmToken::Exclaim)) {
4244      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4245      Parser.Lex(); // Eat the '!'.
4246    }
4247
4248    return false;
4249  }
4250
4251  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4252  Parser.Lex(); // Eat the comma.
4253
4254  // If we have a ':', it's an alignment specifier.
4255  if (Parser.getTok().is(AsmToken::Colon)) {
4256    Parser.Lex(); // Eat the ':'.
4257    E = Parser.getTok().getLoc();
4258
4259    const MCExpr *Expr;
4260    if (getParser().ParseExpression(Expr))
4261     return true;
4262
4263    // The expression has to be a constant. Memory references with relocations
4264    // don't come through here, as they use the <label> forms of the relevant
4265    // instructions.
4266    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4267    if (!CE)
4268      return Error (E, "constant expression expected");
4269
4270    unsigned Align = 0;
4271    switch (CE->getValue()) {
4272    default:
4273      return Error(E,
4274                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4275    case 16:  Align = 2; break;
4276    case 32:  Align = 4; break;
4277    case 64:  Align = 8; break;
4278    case 128: Align = 16; break;
4279    case 256: Align = 32; break;
4280    }
4281
4282    // Now we should have the closing ']'
4283    if (Parser.getTok().isNot(AsmToken::RBrac))
4284      return Error(Parser.getTok().getLoc(), "']' expected");
4285    E = Parser.getTok().getEndLoc();
4286    Parser.Lex(); // Eat right bracket token.
4287
4288    // Don't worry about range checking the value here. That's handled by
4289    // the is*() predicates.
4290    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4291                                             ARM_AM::no_shift, 0, Align,
4292                                             false, S, E));
4293
4294    // If there's a pre-indexing writeback marker, '!', just add it as a token
4295    // operand.
4296    if (Parser.getTok().is(AsmToken::Exclaim)) {
4297      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4298      Parser.Lex(); // Eat the '!'.
4299    }
4300
4301    return false;
4302  }
4303
4304  // If we have a '#', it's an immediate offset, else assume it's a register
4305  // offset. Be friendly and also accept a plain integer (without a leading
4306  // hash) for gas compatibility.
4307  if (Parser.getTok().is(AsmToken::Hash) ||
4308      Parser.getTok().is(AsmToken::Dollar) ||
4309      Parser.getTok().is(AsmToken::Integer)) {
4310    if (Parser.getTok().isNot(AsmToken::Integer))
4311      Parser.Lex(); // Eat the '#'.
4312    E = Parser.getTok().getLoc();
4313
4314    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4315    const MCExpr *Offset;
4316    if (getParser().ParseExpression(Offset))
4317     return true;
4318
4319    // The expression has to be a constant. Memory references with relocations
4320    // don't come through here, as they use the <label> forms of the relevant
4321    // instructions.
4322    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4323    if (!CE)
4324      return Error (E, "constant expression expected");
4325
4326    // If the constant was #-0, represent it as INT32_MIN.
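    // Using INT32_MIN as a sentinel preserves the distinction between #0 and
    // #-0, which is needed when selecting the add/subtract (U bit) encoding.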
4327    int32_t Val = CE->getValue();
4328    if (isNegative && Val == 0)
4329      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4330
4331    // Now we should have the closing ']'
4332    if (Parser.getTok().isNot(AsmToken::RBrac))
4333      return Error(Parser.getTok().getLoc(), "']' expected");
4334    E = Parser.getTok().getEndLoc();
4335    Parser.Lex(); // Eat right bracket token.
4336
4337    // Don't worry about range checking the value here. That's handled by
4338    // the is*() predicates.
4339    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4340                                             ARM_AM::no_shift, 0, 0,
4341                                             false, S, E));
4342
4343    // If there's a pre-indexing writeback marker, '!', just add it as a token
4344    // operand.
4345    if (Parser.getTok().is(AsmToken::Exclaim)) {
4346      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4347      Parser.Lex(); // Eat the '!'.
4348    }
4349
4350    return false;
4351  }
4352
4353  // The register offset is optionally preceded by a '+' or '-'
4354  bool isNegative = false;
4355  if (Parser.getTok().is(AsmToken::Minus)) {
4356    isNegative = true;
4357    Parser.Lex(); // Eat the '-'.
4358  } else if (Parser.getTok().is(AsmToken::Plus)) {
4359    // Nothing to do.
4360    Parser.Lex(); // Eat the '+'.
4361  }
4362
4363  E = Parser.getTok().getLoc();
4364  int OffsetRegNum = tryParseRegister();
4365  if (OffsetRegNum == -1)
4366    return Error(E, "register expected");
4367
4368  // If there's a shift operator, handle it.
4369  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4370  unsigned ShiftImm = 0;
4371  if (Parser.getTok().is(AsmToken::Comma)) {
4372    Parser.Lex(); // Eat the ','.
4373    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4374      return true;
4375  }
4376
4377  // Now we should have the closing ']'
4378  if (Parser.getTok().isNot(AsmToken::RBrac))
4379    return Error(Parser.getTok().getLoc(), "']' expected");
4380  E = Parser.getTok().getEndLoc();
4381  Parser.Lex(); // Eat right bracket token.
4382
4383  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4384                                           ShiftType, ShiftImm, 0, isNegative,
4385                                           S, E));
4386
4387  // If there's a pre-indexing writeback marker, '!', just add it as a token
4388  // operand.
4389  if (Parser.getTok().is(AsmToken::Exclaim)) {
4390    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4391    Parser.Lex(); // Eat the '!'.
4392  }
4393
4394  return false;
4395}
4396
4397/// parseMemRegOffsetShift - Parse one of these two forms:
4398///   ( lsl | lsr | asr | ror ) , # shift_amount
4399///   rrx
4400/// Return true on error, false if the shift was parsed successfully.
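/// For example, in "ldr r0, [r1, r2, lsl #2]" this routine parses the
/// "lsl #2" part; the leading comma has already been consumed by the caller.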
4401bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4402                                          unsigned &Amount) {
4403  SMLoc Loc = Parser.getTok().getLoc();
4404  const AsmToken &Tok = Parser.getTok();
4405  if (Tok.isNot(AsmToken::Identifier))
4406    return true;
4407  StringRef ShiftName = Tok.getString();
4408  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4409      ShiftName == "asl" || ShiftName == "ASL")
4410    St = ARM_AM::lsl;
4411  else if (ShiftName == "lsr" || ShiftName == "LSR")
4412    St = ARM_AM::lsr;
4413  else if (ShiftName == "asr" || ShiftName == "ASR")
4414    St = ARM_AM::asr;
4415  else if (ShiftName == "ror" || ShiftName == "ROR")
4416    St = ARM_AM::ror;
4417  else if (ShiftName == "rrx" || ShiftName == "RRX")
4418    St = ARM_AM::rrx;
4419  else
4420    return Error(Loc, "illegal shift operator");
4421  Parser.Lex(); // Eat shift type token.
4422
4423  // rrx stands alone.
4424  Amount = 0;
4425  if (St != ARM_AM::rrx) {
4426    Loc = Parser.getTok().getLoc();
4427    // A '#' and a shift amount.
4428    const AsmToken &HashTok = Parser.getTok();
4429    if (HashTok.isNot(AsmToken::Hash) &&
4430        HashTok.isNot(AsmToken::Dollar))
4431      return Error(HashTok.getLoc(), "'#' expected");
4432    Parser.Lex(); // Eat hash token.
4433
4434    const MCExpr *Expr;
4435    if (getParser().ParseExpression(Expr))
4436      return true;
4437    // Range check the immediate.
4438    // lsl, ror: 0 <= imm <= 31
4439    // lsr, asr: 0 <= imm <= 32
4440    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4441    if (!CE)
4442      return Error(Loc, "shift amount must be an immediate");
4443    int64_t Imm = CE->getValue();
4444    if (Imm < 0 ||
4445        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4446        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4447      return Error(Loc, "immediate shift value out of range");
4448    // If the shift is <ShiftTy> #0, canonicalize it to lsl #0 (no shift).
4449    if (Imm == 0)
4450      St = ARM_AM::lsl;
4451    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
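    // (In the shifter encoding an immediate field of 0 with lsr/asr denotes a
    // shift of 32, so this mirrors the hardware convention.)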
4452    if (Imm == 32)
4453      Imm = 0;
4454    Amount = Imm;
4455  }
4456
4457  return false;
4458}
4459
4460/// parseFPImm - A floating point immediate expression operand.
4461ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4462parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4463  // Anything that can accept a floating point constant as an operand
4464  // needs to go through here, as the regular ParseExpression is
4465  // integer only.
4466  //
4467  // This routine still creates a generic Immediate operand, containing
4468  // a bitcast of the 64-bit floating point value. The various operands
4469  // that accept floats can check whether the value is valid for them
4470  // via the standard is*() predicates.
4471
4472  SMLoc S = Parser.getTok().getLoc();
4473
4474  if (Parser.getTok().isNot(AsmToken::Hash) &&
4475      Parser.getTok().isNot(AsmToken::Dollar))
4476    return MatchOperand_NoMatch;
4477
4478  // Disambiguate the VMOV forms that can accept an FP immediate.
4479  // vmov.f32 <sreg>, #imm
4480  // vmov.f64 <dreg>, #imm
4481  // vmov.f32 <dreg>, #imm  @ vector f32x2
4482  // vmov.f32 <qreg>, #imm  @ vector f32x4
4483  //
4484  // There are also the NEON VMOV instructions which expect an
4485  // integer constant. Make sure we don't try to parse an FPImm
4486  // for these:
4487  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4488  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4489  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4490                           TyOp->getToken() != ".f64"))
4491    return MatchOperand_NoMatch;
4492
4493  Parser.Lex(); // Eat the '#'.
4494
4495  // Handle negation, as that still comes through as a separate token.
4496  bool isNegative = false;
4497  if (Parser.getTok().is(AsmToken::Minus)) {
4498    isNegative = true;
4499    Parser.Lex();
4500  }
4501  const AsmToken &Tok = Parser.getTok();
4502  SMLoc Loc = Tok.getLoc();
4503  if (Tok.is(AsmToken::Real)) {
4504    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4505    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4506    // If we had a '-' in front, toggle the sign bit.
4507    IntVal ^= (uint64_t)isNegative << 31;
4508    Parser.Lex(); // Eat the token.
4509    Operands.push_back(ARMOperand::CreateImm(
4510          MCConstantExpr::Create(IntVal, getContext()),
4511          S, Parser.getTok().getLoc()));
4512    return MatchOperand_Success;
4513  }
4514  // Also handle plain integers. Instructions which allow floating point
4515  // immediates also allow a raw encoded 8-bit value.
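  // For example, the raw value 112 (0x70) decodes to +1.0 under the 8-bit
  // VFP/NEON floating point immediate encoding.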
4516  if (Tok.is(AsmToken::Integer)) {
4517    int64_t Val = Tok.getIntVal();
4518    Parser.Lex(); // Eat the token.
4519    if (Val > 255 || Val < 0) {
4520      Error(Loc, "encoded floating point value out of range");
4521      return MatchOperand_ParseFail;
4522    }
4523    double RealVal = ARM_AM::getFPImmFloat(Val);
4524    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4525    Operands.push_back(ARMOperand::CreateImm(
4526        MCConstantExpr::Create(Val, getContext()), S,
4527        Parser.getTok().getLoc()));
4528    return MatchOperand_Success;
4529  }
4530
4531  Error(Loc, "invalid floating point immediate");
4532  return MatchOperand_ParseFail;
4533}
4534
4535/// Parse an ARM instruction operand.  For now this parses the operand
4536/// regardless of the mnemonic.
4537bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4538                                StringRef Mnemonic) {
4539  SMLoc S, E;
4540
4541  // Check if the current operand has a custom associated parser, if so, try to
4542  // custom parse the operand, or fallback to the general approach.
4543  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4544  if (ResTy == MatchOperand_Success)
4545    return false;
4546  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4547  // there was a match, but an error occurred, in which case, just return that
4548  // the operand parsing failed.
4549  if (ResTy == MatchOperand_ParseFail)
4550    return true;
4551
4552  switch (getLexer().getKind()) {
4553  default:
4554    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4555    return true;
4556  case AsmToken::Identifier: {
4557    if (!tryParseRegisterWithWriteBack(Operands))
4558      return false;
4559    int Res = tryParseShiftRegister(Operands);
4560    if (Res == 0) // success
4561      return false;
4562    else if (Res == -1) // irrecoverable error
4563      return true;
4564    // If this is VMRS, check for the apsr_nzcv operand.
4565    if (Mnemonic == "vmrs" &&
4566        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4567      S = Parser.getTok().getLoc();
4568      Parser.Lex();
4569      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4570      return false;
4571    }
4572
4573    // Fall through for the Identifier case that is not a register or a
4574    // special name.
4575  }
4576  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4577  case AsmToken::Integer: // things like 1f and 2b as branch targets
4578  case AsmToken::String:  // quoted label names.
4579  case AsmToken::Dot: {   // . as a branch target
4580    // This was not a register so parse other operands that start with an
4581    // identifier (like labels) as expressions and create them as immediates.
4582    const MCExpr *IdVal;
4583    S = Parser.getTok().getLoc();
4584    if (getParser().ParseExpression(IdVal))
4585      return true;
4586    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4587    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4588    return false;
4589  }
4590  case AsmToken::LBrac:
4591    return parseMemory(Operands);
4592  case AsmToken::LCurly:
4593    return parseRegisterList(Operands);
4594  case AsmToken::Dollar:
4595  case AsmToken::Hash: {
4596    // #42 -> immediate.
4597    S = Parser.getTok().getLoc();
4598    Parser.Lex();
4599
4600    if (Parser.getTok().isNot(AsmToken::Colon)) {
4601      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4602      const MCExpr *ImmVal;
4603      if (getParser().ParseExpression(ImmVal))
4604        return true;
4605      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4606      if (CE) {
4607        int32_t Val = CE->getValue();
4608        if (isNegative && Val == 0)
4609          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4610      }
4611      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4612      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4613      return false;
4614    }
4615    // w/ a ':' after the '#', it's just like a plain ':'.
4616    // FALLTHROUGH
4617  }
4618  case AsmToken::Colon: {
4619    // ":lower16:" and ":upper16:" expression prefixes
4620    // FIXME: Check it's an expression prefix,
4621    // e.g. (FOO - :lower16:BAR) isn't legal.
4622    ARMMCExpr::VariantKind RefKind;
4623    if (parsePrefix(RefKind))
4624      return true;
4625
4626    const MCExpr *SubExprVal;
4627    if (getParser().ParseExpression(SubExprVal))
4628      return true;
4629
4630    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4631                                              getContext());
4632    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4633    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4634    return false;
4635  }
4636  }
4637}
4638
4639// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4640//  :lower16: or :upper16:.
4641bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4642  RefKind = ARMMCExpr::VK_ARM_None;
4643
4644  // :lower16: and :upper16: modifiers
4645  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4646  Parser.Lex(); // Eat ':'
4647
4648  if (getLexer().isNot(AsmToken::Identifier)) {
4649    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4650    return true;
4651  }
4652
4653  StringRef IDVal = Parser.getTok().getIdentifier();
4654  if (IDVal == "lower16") {
4655    RefKind = ARMMCExpr::VK_ARM_LO16;
4656  } else if (IDVal == "upper16") {
4657    RefKind = ARMMCExpr::VK_ARM_HI16;
4658  } else {
4659    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4660    return true;
4661  }
4662  Parser.Lex();
4663
4664  if (getLexer().isNot(AsmToken::Colon)) {
4665    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4666    return true;
4667  }
4668  Parser.Lex(); // Eat the last ':'
4669  return false;
4670}
4671
4672/// \brief Given a mnemonic, split out possible predication code and carry
4673/// setting letters to form a canonical mnemonic and flags.
4674//
4675// FIXME: Would be nice to autogen this.
4676// FIXME: This is a bit of a maze of special cases.
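// For example, "addseq" splits into "add" with CarrySetting == true and
// PredicationCode == ARMCC::EQ, while "cpsie" splits into "cps" with
// ProcessorIMod == ARM_PROC::IE.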
4677StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4678                                      unsigned &PredicationCode,
4679                                      bool &CarrySetting,
4680                                      unsigned &ProcessorIMod,
4681                                      StringRef &ITMask) {
4682  PredicationCode = ARMCC::AL;
4683  CarrySetting = false;
4684  ProcessorIMod = 0;
4685
4686  // Ignore some mnemonics we know aren't predicated forms.
4687  //
4688  // FIXME: Would be nice to autogen this.
4689  if ((Mnemonic == "movs" && isThumb()) ||
4690      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4691      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4692      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4693      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4694      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4695      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4696      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4697      Mnemonic == "fmuls")
4698    return Mnemonic;
4699
4700  // First, split out any predication code. Ignore mnemonics we know aren't
4701  // predicated but do have a carry-set and so weren't caught above.
4702  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4703      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4704      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4705      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4706    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4707      .Case("eq", ARMCC::EQ)
4708      .Case("ne", ARMCC::NE)
4709      .Case("hs", ARMCC::HS)
4710      .Case("cs", ARMCC::HS)
4711      .Case("lo", ARMCC::LO)
4712      .Case("cc", ARMCC::LO)
4713      .Case("mi", ARMCC::MI)
4714      .Case("pl", ARMCC::PL)
4715      .Case("vs", ARMCC::VS)
4716      .Case("vc", ARMCC::VC)
4717      .Case("hi", ARMCC::HI)
4718      .Case("ls", ARMCC::LS)
4719      .Case("ge", ARMCC::GE)
4720      .Case("lt", ARMCC::LT)
4721      .Case("gt", ARMCC::GT)
4722      .Case("le", ARMCC::LE)
4723      .Case("al", ARMCC::AL)
4724      .Default(~0U);
4725    if (CC != ~0U) {
4726      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4727      PredicationCode = CC;
4728    }
4729  }
4730
4731  // Next, determine if we have a carry setting bit. We explicitly ignore all
4732  // the instructions we know end in 's'.
4733  if (Mnemonic.endswith("s") &&
4734      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4735        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4736        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4737        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4738        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4739        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4740        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4741        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4742        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4743        (Mnemonic == "movs" && isThumb()))) {
4744    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4745    CarrySetting = true;
4746  }
4747
4748  // The "cps" instruction can have a interrupt mode operand which is glued into
4749  // the mnemonic. Check if this is the case, split it and parse the imod op
4750  if (Mnemonic.startswith("cps")) {
4751    // Split out any imod code.
4752    unsigned IMod =
4753      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4754      .Case("ie", ARM_PROC::IE)
4755      .Case("id", ARM_PROC::ID)
4756      .Default(~0U);
4757    if (IMod != ~0U) {
4758      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4759      ProcessorIMod = IMod;
4760    }
4761  }
4762
4763  // The "it" instruction has the condition mask on the end of the mnemonic.
4764  if (Mnemonic.startswith("it")) {
4765    ITMask = Mnemonic.slice(2, Mnemonic.size());
4766    Mnemonic = Mnemonic.slice(0, 2);
4767  }
4768
4769  return Mnemonic;
4770}
4771
4772/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4773/// inclusion of carry set or predication code operands.
4774//
4775// FIXME: It would be nice to autogen this.
4776void ARMAsmParser::
4777getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4778                      bool &CanAcceptPredicationCode) {
4779  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4780      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4781      Mnemonic == "add" || Mnemonic == "adc" ||
4782      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4783      Mnemonic == "orr" || Mnemonic == "mvn" ||
4784      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4785      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4786      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4787      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4788                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4789                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4790    CanAcceptCarrySet = true;
4791  } else
4792    CanAcceptCarrySet = false;
4793
4794  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4795      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4796      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4797      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4798      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4799      (Mnemonic == "clrex" && !isThumb()) ||
4800      (Mnemonic == "nop" && isThumbOne()) ||
4801      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4802        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4803        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4804      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4805       !isThumb()) ||
4806      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4807    CanAcceptPredicationCode = false;
4808  } else
4809    CanAcceptPredicationCode = true;
4810
4811  if (isThumb()) {
4812    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4813        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4814      CanAcceptPredicationCode = false;
4815  }
4816}
4817
4818bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4819                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4820  // FIXME: This is all horribly hacky. We really need a better way to deal
4821  // with optional operands like this in the matcher table.
4822
4823  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4824  // another does not. Specifically, the MOVW instruction does not. So we
4825  // special case it here and remove the defaulted (non-setting) cc_out
4826  // operand if that's the instruction we're trying to match.
4827  //
4828  // We do this as post-processing of the explicit operands rather than just
4829  // conditionally adding the cc_out in the first place because we need
4830  // to check the type of the parsed immediate operand.
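  // For example, "mov r0, #0x1234" has no SO-immediate encoding, but the
  // value fits MOVW, so the defaulted cc_out operand must be removed.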
4831  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4832      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4833      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4834      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4835    return true;
4836
4837  // Register-register 'add' for thumb does not have a cc_out operand
4838  // when there are only two register operands.
4839  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4840      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4841      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4842      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4843    return true;
4844  // Register-register 'add' for thumb does not have a cc_out operand
4845  // when it's an ADD Rdm, SP, {Rdm|#imm0_1020} instruction. We do
4846  // have to check the immediate range here since Thumb2 has a variant
4847  // that can handle a different range and has a cc_out operand.
4848  if (((isThumb() && Mnemonic == "add") ||
4849       (isThumbTwo() && Mnemonic == "sub")) &&
4850      Operands.size() == 6 &&
4851      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4852      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4853      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4854      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4855      ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4856       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4857    return true;
4858  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4859  // imm0_4095 variant. That's the least-preferred variant when
4860  // selecting via the generic "add" mnemonic, so to know that we
4861  // should remove the cc_out operand, we have to explicitly check that
4862  // it's not one of the other variants. Ugh.
4863  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4864      Operands.size() == 6 &&
4865      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4866      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4867      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4868    // Nest conditions rather than one big 'if' statement for readability.
4869    //
4870    // If either register is a high reg, it's either one of the SP
4871    // variants (handled above) or a 32-bit encoding, so we just
4872    // check against T3. If the second register is the PC, this is an
4873    // alternate form of ADR, which uses encoding T4, so check for that too.
4874    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4875         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4876        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4877        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4878      return false;
4879    // If both registers are low, we're in an IT block, and the immediate is
4880    // in range, we should use encoding T1 instead, which has a cc_out.
4881    if (inITBlock() &&
4882        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4883        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4884        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4885      return false;
4886
4887    // Otherwise, we use encoding T4, which does not have a cc_out
4888    // operand.
4889    return true;
4890  }
4891
4892  // The thumb2 multiply instruction doesn't have a CCOut register, so
4893  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4894  // use the 16-bit encoding or not.
4895  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4896      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4897      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4898      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4899      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4900      // If the registers aren't low regs, the destination reg isn't the
4901      // same as one of the source regs, or the cc_out operand is zero
4902      // outside of an IT block, we have to use the 32-bit encoding, so
4903      // remove the cc_out operand.
4904      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4905       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4906       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4907       !inITBlock() ||
4908       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4909        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4910        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4911        static_cast<ARMOperand*>(Operands[4])->getReg())))
4912    return true;
4913
4914  // Also check the 'mul' syntax variant that doesn't specify an explicit
4915  // destination register.
4916  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4917      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4918      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4919      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4920      // If the registers aren't low regs  or the cc_out operand is zero
4921      // outside of an IT block, we have to use the 32-bit encoding, so
4922      // remove the cc_out operand.
4923      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4924       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4925       !inITBlock()))
4926    return true;
4927
4928
4929
4930  // Register-register 'add/sub' for thumb does not have a cc_out operand
4931  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4932  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4933  // right, this will result in better diagnostics (which operand is off)
4934  // anyway.
4935  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4936      (Operands.size() == 5 || Operands.size() == 6) &&
4937      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4938      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4939      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4940      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4941       (Operands.size() == 6 &&
4942        static_cast<ARMOperand*>(Operands[5])->isImm())))
4943    return true;
4944
4945  return false;
4946}
4947
4948static bool isDataTypeToken(StringRef Tok) {
4949  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4950    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4951    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4952    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4953    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4954    Tok == ".f" || Tok == ".d";
4955}
4956
4957// FIXME: This bit should probably be handled via an explicit match class
4958// in the .td files that matches the suffix instead of having it be
4959// a literal string token the way it is now.
4960static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4961  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4962}
4963
4964static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4965/// Parse an ARM instruction mnemonic followed by its operands.
4966bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
4967                                    SMLoc NameLoc,
4968                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4969  // Apply mnemonic aliases before doing anything else, as the destination
4970  // mnemonic may include suffixes and we want to handle them normally.
4971  // The generic tblgen'erated code does this later, at the start of
4972  // MatchInstructionImpl(), but that's too late for aliases that include
4973  // any sort of suffix.
4974  unsigned AvailableFeatures = getAvailableFeatures();
4975  applyMnemonicAliases(Name, AvailableFeatures);
4976
4977  // First check for the ARM-specific .req directive.
4978  if (Parser.getTok().is(AsmToken::Identifier) &&
4979      Parser.getTok().getIdentifier() == ".req") {
4980    parseDirectiveReq(Name, NameLoc);
4981    // We always return 'error' for this, as we're done with this
4982    // statement and don't need to match the instruction.
4983    return true;
4984  }
4985
4986  // Create the leading tokens for the mnemonic, split by '.' characters.
4987  size_t Start = 0, Next = Name.find('.');
4988  StringRef Mnemonic = Name.slice(Start, Next);
4989
4990  // Split out the predication code and carry setting flag from the mnemonic.
4991  unsigned PredicationCode;
4992  unsigned ProcessorIMod;
4993  bool CarrySetting;
4994  StringRef ITMask;
4995  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4996                           ProcessorIMod, ITMask);
4997
4998  // In Thumb1, only the branch (B) instruction can be predicated.
4999  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5000    Parser.EatToEndOfStatement();
5001    return Error(NameLoc, "conditional execution not supported in Thumb1");
5002  }
5003
5004  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5005
5006  // Handle the IT instruction ITMask. Convert it to a bitmask. This
5007  // is the mask as it will be for the IT encoding if the conditional
5008  // encoding has a '1' as its bit 0 (i.e. 't' ==> '1'). In the case
5009  // where the conditional bit0 is zero, the instruction post-processing
5010  // will adjust the mask accordingly.
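  // For example, "itte" carries ITMask == "te", which the loop below turns
  // into the mask 0b1010 (assuming the condition's bit 0 is 1).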
5011  if (Mnemonic == "it") {
5012    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5013    if (ITMask.size() > 3) {
5014      Parser.EatToEndOfStatement();
5015      return Error(Loc, "too many conditions on IT instruction");
5016    }
5017    unsigned Mask = 8;
5018    for (unsigned i = ITMask.size(); i != 0; --i) {
5019      char pos = ITMask[i - 1];
5020      if (pos != 't' && pos != 'e') {
5021        Parser.EatToEndOfStatement();
5022        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5023      }
5024      Mask >>= 1;
5025      if (ITMask[i - 1] == 't')
5026        Mask |= 8;
5027    }
5028    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5029  }
5030
5031  // FIXME: This is all a pretty gross hack. We should automatically handle
5032  // optional operands like this via tblgen.
5033
5034  // Next, add the CCOut and ConditionCode operands, if needed.
5035  //
5036  // For mnemonics which can ever incorporate a carry setting bit or predication
5037  // code, our matching model involves us always generating CCOut and
5038  // ConditionCode operands to match the mnemonic "as written" and then we let
5039  // the matcher deal with finding the right instruction or generating an
5040  // appropriate error.
5041  bool CanAcceptCarrySet, CanAcceptPredicationCode;
5042  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
5043
5044  // If we had a carry-set on an instruction that can't do that, issue an
5045  // error.
5046  if (!CanAcceptCarrySet && CarrySetting) {
5047    Parser.EatToEndOfStatement();
5048    return Error(NameLoc, "instruction '" + Mnemonic +
5049                 "' can not set flags, but 's' suffix specified");
5050  }
5051  // If we had a predication code on an instruction that can't do that, issue an
5052  // error.
5053  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5054    Parser.EatToEndOfStatement();
5055    return Error(NameLoc, "instruction '" + Mnemonic +
5056                 "' is not predicable, but condition code specified");
5057  }
5058
5059  // Add the carry setting operand, if necessary.
5060  if (CanAcceptCarrySet) {
5061    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5062    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5063                                               Loc));
5064  }
5065
5066  // Add the predication code operand, if necessary.
5067  if (CanAcceptPredicationCode) {
5068    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5069                                      CarrySetting);
5070    Operands.push_back(ARMOperand::CreateCondCode(
5071                         ARMCC::CondCodes(PredicationCode), Loc));
5072  }
5073
5074  // Add the processor imod operand, if necessary.
5075  if (ProcessorIMod) {
5076    Operands.push_back(ARMOperand::CreateImm(
5077          MCConstantExpr::Create(ProcessorIMod, getContext()),
5078                                 NameLoc, NameLoc));
5079  }
5080
5081  // Add the remaining tokens in the mnemonic.
5082  while (Next != StringRef::npos) {
5083    Start = Next;
5084    Next = Name.find('.', Start + 1);
5085    StringRef ExtraToken = Name.slice(Start, Next);
5086
5087    // Some NEON instructions have an optional datatype suffix that is
5088    // completely ignored. Check for that.
5089    if (isDataTypeToken(ExtraToken) &&
5090        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5091      continue;
5092
5093    if (ExtraToken != ".n") {
5094      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5095      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5096    }
5097  }
5098
5099  // Read the remaining operands.
5100  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5101    // Read the first operand.
5102    if (parseOperand(Operands, Mnemonic)) {
5103      Parser.EatToEndOfStatement();
5104      return true;
5105    }
5106
5107    while (getLexer().is(AsmToken::Comma)) {
5108      Parser.Lex();  // Eat the comma.
5109
5110      // Parse and remember the operand.
5111      if (parseOperand(Operands, Mnemonic)) {
5112        Parser.EatToEndOfStatement();
5113        return true;
5114      }
5115    }
5116  }
5117
5118  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5119    SMLoc Loc = getLexer().getLoc();
5120    Parser.EatToEndOfStatement();
5121    return Error(Loc, "unexpected token in argument list");
5122  }
5123
5124  Parser.Lex(); // Consume the EndOfStatement
5125
5126  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5127  // do and don't have a cc_out optional-def operand. With some spot-checks
5128  // of the operand list, we can figure out which variant we're trying to
5129  // parse and adjust accordingly before actually matching. We shouldn't ever
5130  // try to remove a cc_out operand that was explicitly set on the
5131  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5132  // table-driven matcher doesn't fit well with the ARM instruction set.
5133  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5134    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5135    Operands.erase(Operands.begin() + 1);
5136    delete Op;
5137  }
5138
5139  // ARM mode 'blx' need special handling, as the register operand version
5140  // is predicable, but the label operand version is not. So, we can't rely
5141  // on the Mnemonic based checking to correctly figure out when to put
5142  // a k_CondCode operand in the list. If we're trying to match the label
5143  // version, remove the k_CondCode operand here.
5144  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5145      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5146    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5147    Operands.erase(Operands.begin() + 1);
5148    delete Op;
5149  }
5150
5151  // The vector-compare-to-zero instructions have a literal token "#0" at
5152  // the end that comes to here as an immediate operand. Convert it to a
5153  // token to play nicely with the matcher.
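  // e.g. the trailing "#0" in "vceq.i32 d0, d1, #0" becomes the token "#0".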
5154  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5155      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5156      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5157    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5158    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5159    if (CE && CE->getValue() == 0) {
5160      Operands.erase(Operands.begin() + 5);
5161      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5162      delete Op;
5163    }
5164  }
5165  // VCMP{E} does the same thing, but with a different operand count.
5166  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5167      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5168    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5169    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5170    if (CE && CE->getValue() == 0) {
5171      Operands.erase(Operands.begin() + 4);
5172      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5173      delete Op;
5174    }
5175  }
5176  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5177  // end. Convert it to a token here. Take care not to convert those
5178  // that should hit the Thumb2 encoding.
5179  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5180      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5181      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5182      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5183    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5184    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5185    if (CE && CE->getValue() == 0 &&
5186        (isThumbOne() ||
5187         // The cc_out operand matches the IT block.
5188         ((inITBlock() != CarrySetting) &&
5189         // Neither register operand is a high register.
5190         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5191          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5192      Operands.erase(Operands.begin() + 5);
5193      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5194      delete Op;
5195    }
5196  }
5197
5198  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5199  // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
5200  // a single GPRPair reg operand is used in the .td file to replace the two
5201  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5202  // expressed as a GPRPair, so we have to manually merge them.
5203  // FIXME: We would really like to be able to tablegen'erate this.
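  // For example, for "ldrexd r0, r1, [r2]" the r0/r1 operands are replaced
  // below with the single GPRPair super-register covering r0 and r1.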
5204  if (!isThumb() && Operands.size() > 4 &&
5205      (Mnemonic == "ldrexd" || Mnemonic == "strexd")) {
5206    bool isLoad = (Mnemonic == "ldrexd");
5207    unsigned Idx = isLoad ? 2 : 3;
5208    ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
5209    ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
5210
5211    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5212    // Adjust only if Op1 and Op2 are GPRs.
5213    if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
5214        MRC.contains(Op2->getReg())) {
5215      unsigned Reg1 = Op1->getReg();
5216      unsigned Reg2 = Op2->getReg();
5217      unsigned Rt = MRI->getEncodingValue(Reg1);
5218      unsigned Rt2 = MRI->getEncodingValue(Reg2);
5219
5220      // Rt2 must be Rt + 1 and Rt must be even.
5221      if (Rt + 1 != Rt2 || (Rt & 1)) {
5222        Error(Op2->getStartLoc(), isLoad ?
5223            "destination operands must be sequential" :
5224            "source operands must be sequential");
5225        return true;
5226      }
5227      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5228          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5229      Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
5230      Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
5231            NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
5232      delete Op1;
5233      delete Op2;
5234    }
5235  }
5236
5237  return false;
5238}
5239
5240// Validate context-sensitive operand constraints.
5241
5242// Return 'true' if the register list contains a register that is neither a
5243// low GPR nor the permitted HiReg, 'false' otherwise. If Reg appears in the
5244// register list, set 'containsReg' to true.
5245static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5246                                 unsigned HiReg, bool &containsReg) {
5247  containsReg = false;
5248  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5249    unsigned OpReg = Inst.getOperand(i).getReg();
5250    if (OpReg == Reg)
5251      containsReg = true;
5252    // Anything other than a low register isn't legal here.
5253    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5254      return true;
5255  }
5256  return false;
5257}
5258
5259// Check if the specified register is in the register list of the inst,
5260// starting at the indicated operand number.
5261static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5262  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5263    unsigned OpReg = Inst.getOperand(i).getReg();
5264    if (OpReg == Reg)
5265      return true;
5266  }
5267  return false;
5268}
5269
5270// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5271// the ARMInsts array) instead. Getting that here requires awkward
5272// API changes, though. Better way?
5273namespace llvm {
5274extern const MCInstrDesc ARMInsts[];
5275}
5276static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5277  return ARMInsts[Opcode];
5278}
5279
5280// FIXME: We would really like to be able to tablegen'erate this.
5281bool ARMAsmParser::
5282validateInstruction(MCInst &Inst,
5283                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5284  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5285  SMLoc Loc = Operands[0]->getStartLoc();
5286  // Check the IT block state first.
5287  // NOTE: BKPT instruction has the interesting property of being
5288  // allowed in IT blocks, but not being predicable.  It just always
5289  // executes.
5290  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5291      Inst.getOpcode() != ARM::BKPT) {
5292    unsigned bit = 1;
5293    if (ITState.FirstCond)
5294      ITState.FirstCond = false;
5295    else
5296      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5297    // The instruction must be predicable.
5298    if (!MCID.isPredicable())
5299      return Error(Loc, "instructions in IT block must be predicable");
5300    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5301    unsigned ITCond = bit ? ITState.Cond :
5302      ARMCC::getOppositeCondition(ITState.Cond);
5303    if (Cond != ITCond) {
5304      // Find the condition code Operand to get its SMLoc information.
5305      SMLoc CondLoc;
5306      for (unsigned i = 1; i < Operands.size(); ++i)
5307        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5308          CondLoc = Operands[i]->getStartLoc();
5309      return Error(CondLoc, "incorrect condition in IT block; got '" +
5310                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5311                   "', but expected '" +
5312                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5313    }
5314  // Check for non-'al' condition codes outside of the IT block.
5315  } else if (isThumbTwo() && MCID.isPredicable() &&
5316             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5317             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5318             Inst.getOpcode() != ARM::t2B)
5319    return Error(Loc, "predicated instructions must be in IT block");
5320
5321  switch (Inst.getOpcode()) {
5322  case ARM::LDRD:
5323  case ARM::LDRD_PRE:
5324  case ARM::LDRD_POST: {
5325    // Rt2 must be Rt + 1.
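    // e.g. "ldrd r0, r1, [r2]" is accepted, but "ldrd r0, r2, [r3]" is
    // rejected because r2 is not the register immediately following r0.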
5326    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5327    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5328    if (Rt2 != Rt + 1)
5329      return Error(Operands[3]->getStartLoc(),
5330                   "destination operands must be sequential");
5331    return false;
5332  }
5333  case ARM::STRD: {
5334    // Rt2 must be Rt + 1.
5335    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5336    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5337    if (Rt2 != Rt + 1)
5338      return Error(Operands[3]->getStartLoc(),
5339                   "source operands must be sequential");
5340    return false;
5341  }
5342  case ARM::STRD_PRE:
5343  case ARM::STRD_POST: {
5344    // Rt2 must be Rt + 1.
5345    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5346    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5347    if (Rt2 != Rt + 1)
5348      return Error(Operands[3]->getStartLoc(),
5349                   "source operands must be sequential");
5350    return false;
5351  }
5352  case ARM::SBFX:
5353  case ARM::UBFX: {
5354    // width must be in range [1, 32-lsb]
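    // e.g. with lsb == 8, the widest legal bitfield is 24 bits (8 + 24 == 32).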
5355    unsigned lsb = Inst.getOperand(2).getImm();
5356    unsigned widthm1 = Inst.getOperand(3).getImm();
5357    if (widthm1 >= 32 - lsb)
5358      return Error(Operands[5]->getStartLoc(),
5359                   "bitfield width must be in range [1,32-lsb]");
5360    return false;
5361  }
5362  case ARM::tLDMIA: {
5363    // If we're parsing Thumb2, the .w variant is available and handles
5364    // most cases that are normally illegal for a Thumb1 LDM
5365    // instruction. We'll make the transformation in processInstruction()
5366    // if necessary.
5367    //
5368    // Thumb LDM instructions are writeback iff the base register is not
5369    // in the register list.
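    // e.g. "ldmia r0!, {r1, r2}" writes back, whereas "ldmia r0, {r0, r1}"
    // must not (and therefore must not carry the '!' marker).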
5370    unsigned Rn = Inst.getOperand(0).getReg();
5371    bool hasWritebackToken =
5372      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5373       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5374    bool listContainsBase;
5375    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5376      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5377                   "registers must be in range r0-r7");
5378    // If we should have writeback, then there should be a '!' token.
5379    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5380      return Error(Operands[2]->getStartLoc(),
5381                   "writeback operator '!' expected");
5382    // If we should not have writeback, there must not be a '!'. This is
5383    // true even for the 32-bit wide encodings.
5384    if (listContainsBase && hasWritebackToken)
5385      return Error(Operands[3]->getStartLoc(),
5386                   "writeback operator '!' not allowed when base register "
5387                   "in register list");
5388
5389    break;
5390  }
5391  case ARM::t2LDMIA_UPD: {
5392    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5393      return Error(Operands[4]->getStartLoc(),
5394                   "writeback operator '!' not allowed when base register "
5395                   "in register list");
5396    break;
5397  }
5398  case ARM::tMUL: {
5399    // The second source operand must be the same register as the destination
5400    // operand.
5401    //
5402    // In this case, we must directly check the parsed operands because the
5403    // cvtThumbMultiply() function is written in such a way that it guarantees
5404    // this first statement is always true for the new Inst.  Essentially, the
5405    // destination is unconditionally copied into the second source operand
5406    // without checking to see if it matches what we actually parsed.
5407    if (Operands.size() == 6 &&
5408        (((ARMOperand*)Operands[3])->getReg() !=
5409         ((ARMOperand*)Operands[5])->getReg()) &&
5410        (((ARMOperand*)Operands[3])->getReg() !=
5411         ((ARMOperand*)Operands[4])->getReg())) {
5412      return Error(Operands[3]->getStartLoc(),
5413                   "destination register must match source register");
5414    }
5415    break;
5416  }
5417  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5418  // so only issue a diagnostic for thumb1. The instructions will be
5419  // switched to the t2 encodings in processInstruction() if necessary.
5420  case ARM::tPOP: {
5421    bool listContainsBase;
5422    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5423        !isThumbTwo())
5424      return Error(Operands[2]->getStartLoc(),
5425                   "registers must be in range r0-r7 or pc");
5426    break;
5427  }
5428  case ARM::tPUSH: {
5429    bool listContainsBase;
5430    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5431        !isThumbTwo())
5432      return Error(Operands[2]->getStartLoc(),
5433                   "registers must be in range r0-r7 or lr");
5434    break;
5435  }
5436  case ARM::tSTMIA_UPD: {
5437    bool listContainsBase;
5438    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5439      return Error(Operands[4]->getStartLoc(),
5440                   "registers must be in range r0-r7");
5441    break;
5442  }
5443  case ARM::tADDrSP: {
5444    // If the non-SP source operand and the destination operand are not the
5445    // same, we need thumb2 (for the wide encoding), or we have an error.
5446    if (!isThumbTwo() &&
5447        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5448      return Error(Operands[4]->getStartLoc(),
5449                   "source register must be the same as destination");
5450    }
5451    break;
5452  }
5453  }
5454
5455  return false;
5456}
5457
5458static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
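  // On return, Spacing is the stride between the D registers in the list:
  // 1 for single-spaced forms, 2 for double-spaced (every other register) forms.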
5459  switch(Opc) {
5460  default: llvm_unreachable("unexpected opcode!");
5461  // VST1LN
5462  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5463  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5464  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5465  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5466  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5467  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5468  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5469  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5470  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5471
5472  // VST2LN
5473  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5474  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5475  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5476  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5477  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5478
5479  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5480  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5481  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5482  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5483  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5484
5485  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5486  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5487  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5488  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5489  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5490
5491  // VST3LN
5492  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5493  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5494  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5495  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5496  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5497  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5498  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5499  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5500  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5501  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5502  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5503  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5504  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5505  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5506  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5507
5508  // VST3
5509  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5510  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5511  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5512  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5513  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5514  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5515  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5516  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5517  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5518  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5519  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5520  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5521  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5522  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5523  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5524  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5525  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5526  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5527
5528  // VST4LN
5529  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5530  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5531  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5532  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5533  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5534  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5535  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5536  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5537  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5538  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5539  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5540  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5541  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5542  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5543  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5544
5545  // VST4
5546  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5547  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5548  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5549  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5550  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5551  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5552  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5553  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5554  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5555  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5556  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5557  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5558  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5559  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5560  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5561  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5562  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5563  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5564  }
5565}
5566
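// Map one of the pseudo "...Asm" VLD opcodes, produced by the matcher for the
// NEON load aliases handled in processInstruction() below, onto the real VLDn
// instruction opcode. Spacing is set to the stride between the D registers in
// the implied register list: 1 for consecutive registers, 2 for the
// every-other-register (Q) forms.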
5567static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5568  switch(Opc) {
5569  default: llvm_unreachable("unexpected opcode!");
5570  // VLD1LN
5571  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5572  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5573  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5574  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5575  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5576  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5577  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5578  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5579  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5580
5581  // VLD2LN
5582  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5583  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5584  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5585  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5586  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5587  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5588  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5589  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5590  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5591  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5592  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5593  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5594  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5595  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5596  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5597
5598  // VLD3DUP
5599  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5600  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5601  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5602  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5603  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5604  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5605  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5606  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5607  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5608  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5609  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5610  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5611  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5612  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5613  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5614  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5615  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5616  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5617
5618  // VLD3LN
5619  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5620  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5621  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5622  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5623  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5624  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5625  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5626  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5627  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5628  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5629  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5630  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5631  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5632  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5633  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5634
5635  // VLD3
5636  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5637  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5638  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5639  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5640  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5641  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5642  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5643  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5644  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5645  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5646  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5647  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5648  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5649  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5650  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5651  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5652  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5653  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5654
5655  // VLD4LN
5656  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5657  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5658  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5659  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5660  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5661  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5662  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5663  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5664  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5665  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5666  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5667  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5668  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5669  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5670  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5671
5672  // VLD4DUP
5673  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5674  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5675  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5676  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5677  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5678  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5679  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5680  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5681  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5682  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5683  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5684  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5685  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5686  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5687  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5688  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5689  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5690  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5691
5692  // VLD4
5693  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5694  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5695  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5696  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5697  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5698  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5699  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5700  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5701  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5702  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5703  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5704  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5705  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5706  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5707  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5708  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5709  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5710  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5711  }
5712}
5713
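// Rewrite pseudo and alias opcodes produced by the matcher into real MCInst
// forms: pick narrow encodings where they fit, reorder operands, and expand
// the NEON VLD/VST register lists. Returns true if the instruction was
// rewritten and false if it was left untouched.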
5714bool ARMAsmParser::
5715processInstruction(MCInst &Inst,
5716                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5717  switch (Inst.getOpcode()) {
5718  // Alias for alternate form of 'ADR Rd, #imm' instruction.
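  // For example, "add r0, pc, #8" (as long as it does not set flags) is
  // caught here and rewritten into the equivalent "adr r0, #8".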
5719  case ARM::ADDri: {
5720    if (Inst.getOperand(1).getReg() != ARM::PC ||
5721        Inst.getOperand(5).getReg() != 0)
5722      return false;
5723    MCInst TmpInst;
5724    TmpInst.setOpcode(ARM::ADR);
5725    TmpInst.addOperand(Inst.getOperand(0));
5726    TmpInst.addOperand(Inst.getOperand(2));
5727    TmpInst.addOperand(Inst.getOperand(3));
5728    TmpInst.addOperand(Inst.getOperand(4));
5729    Inst = TmpInst;
5730    return true;
5731  }
5732  // Aliases for alternate PC+imm syntax of LDR instructions.
5733  case ARM::t2LDRpcrel:
5734    // Select the narrow version if the immediate will fit.
5735    if (Inst.getOperand(1).getImm() > 0 &&
5736        Inst.getOperand(1).getImm() <= 0xff)
5737      Inst.setOpcode(ARM::tLDRpci);
5738    else
5739      Inst.setOpcode(ARM::t2LDRpci);
5740    return true;
5741  case ARM::t2LDRBpcrel:
5742    Inst.setOpcode(ARM::t2LDRBpci);
5743    return true;
5744  case ARM::t2LDRHpcrel:
5745    Inst.setOpcode(ARM::t2LDRHpci);
5746    return true;
5747  case ARM::t2LDRSBpcrel:
5748    Inst.setOpcode(ARM::t2LDRSBpci);
5749    return true;
5750  case ARM::t2LDRSHpcrel:
5751    Inst.setOpcode(ARM::t2LDRSHpci);
5752    return true;
5753  // Handle NEON VST complex aliases.
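  // These "...Asm" opcodes stand in for store aliases such as
  // "vst1.8 {d0[2]}, [r1]!". Each case below builds the real instruction by
  // reordering the parsed operands and materializing the rest of the register
  // list from Vd plus the spacing reported by getRealVSTOpcode().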
5754  case ARM::VST1LNdWB_register_Asm_8:
5755  case ARM::VST1LNdWB_register_Asm_16:
5756  case ARM::VST1LNdWB_register_Asm_32: {
5757    MCInst TmpInst;
5758    // Shuffle the operands around so the lane index operand is in the
5759    // right place.
5760    unsigned Spacing;
5761    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5762    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5763    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5764    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5765    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5766    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5767    TmpInst.addOperand(Inst.getOperand(1)); // lane
5768    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5769    TmpInst.addOperand(Inst.getOperand(6));
5770    Inst = TmpInst;
5771    return true;
5772  }
5773
5774  case ARM::VST2LNdWB_register_Asm_8:
5775  case ARM::VST2LNdWB_register_Asm_16:
5776  case ARM::VST2LNdWB_register_Asm_32:
5777  case ARM::VST2LNqWB_register_Asm_16:
5778  case ARM::VST2LNqWB_register_Asm_32: {
5779    MCInst TmpInst;
5780    // Shuffle the operands around so the lane index operand is in the
5781    // right place.
5782    unsigned Spacing;
5783    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5784    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5785    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5786    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5787    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5788    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5789    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5790                                            Spacing));
5791    TmpInst.addOperand(Inst.getOperand(1)); // lane
5792    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5793    TmpInst.addOperand(Inst.getOperand(6));
5794    Inst = TmpInst;
5795    return true;
5796  }
5797
5798  case ARM::VST3LNdWB_register_Asm_8:
5799  case ARM::VST3LNdWB_register_Asm_16:
5800  case ARM::VST3LNdWB_register_Asm_32:
5801  case ARM::VST3LNqWB_register_Asm_16:
5802  case ARM::VST3LNqWB_register_Asm_32: {
5803    MCInst TmpInst;
5804    // Shuffle the operands around so the lane index operand is in the
5805    // right place.
5806    unsigned Spacing;
5807    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5808    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5809    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5810    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5811    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5812    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5813    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5814                                            Spacing));
5815    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5816                                            Spacing * 2));
5817    TmpInst.addOperand(Inst.getOperand(1)); // lane
5818    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5819    TmpInst.addOperand(Inst.getOperand(6));
5820    Inst = TmpInst;
5821    return true;
5822  }
5823
5824  case ARM::VST4LNdWB_register_Asm_8:
5825  case ARM::VST4LNdWB_register_Asm_16:
5826  case ARM::VST4LNdWB_register_Asm_32:
5827  case ARM::VST4LNqWB_register_Asm_16:
5828  case ARM::VST4LNqWB_register_Asm_32: {
5829    MCInst TmpInst;
5830    // Shuffle the operands around so the lane index operand is in the
5831    // right place.
5832    unsigned Spacing;
5833    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5834    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5835    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5836    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5837    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5838    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5839    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5840                                            Spacing));
5841    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5842                                            Spacing * 2));
5843    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5844                                            Spacing * 3));
5845    TmpInst.addOperand(Inst.getOperand(1)); // lane
5846    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5847    TmpInst.addOperand(Inst.getOperand(6));
5848    Inst = TmpInst;
5849    return true;
5850  }
5851
5852  case ARM::VST1LNdWB_fixed_Asm_8:
5853  case ARM::VST1LNdWB_fixed_Asm_16:
5854  case ARM::VST1LNdWB_fixed_Asm_32: {
5855    MCInst TmpInst;
5856    // Shuffle the operands around so the lane index operand is in the
5857    // right place.
5858    unsigned Spacing;
5859    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5860    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5861    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5862    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5863    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5864    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5865    TmpInst.addOperand(Inst.getOperand(1)); // lane
5866    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5867    TmpInst.addOperand(Inst.getOperand(5));
5868    Inst = TmpInst;
5869    return true;
5870  }
5871
5872  case ARM::VST2LNdWB_fixed_Asm_8:
5873  case ARM::VST2LNdWB_fixed_Asm_16:
5874  case ARM::VST2LNdWB_fixed_Asm_32:
5875  case ARM::VST2LNqWB_fixed_Asm_16:
5876  case ARM::VST2LNqWB_fixed_Asm_32: {
5877    MCInst TmpInst;
5878    // Shuffle the operands around so the lane index operand is in the
5879    // right place.
5880    unsigned Spacing;
5881    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5882    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5883    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5884    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5885    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5886    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5887    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5888                                            Spacing));
5889    TmpInst.addOperand(Inst.getOperand(1)); // lane
5890    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5891    TmpInst.addOperand(Inst.getOperand(5));
5892    Inst = TmpInst;
5893    return true;
5894  }
5895
5896  case ARM::VST3LNdWB_fixed_Asm_8:
5897  case ARM::VST3LNdWB_fixed_Asm_16:
5898  case ARM::VST3LNdWB_fixed_Asm_32:
5899  case ARM::VST3LNqWB_fixed_Asm_16:
5900  case ARM::VST3LNqWB_fixed_Asm_32: {
5901    MCInst TmpInst;
5902    // Shuffle the operands around so the lane index operand is in the
5903    // right place.
5904    unsigned Spacing;
5905    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5906    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5907    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5908    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5909    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5910    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5911    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5912                                            Spacing));
5913    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5914                                            Spacing * 2));
5915    TmpInst.addOperand(Inst.getOperand(1)); // lane
5916    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5917    TmpInst.addOperand(Inst.getOperand(5));
5918    Inst = TmpInst;
5919    return true;
5920  }
5921
5922  case ARM::VST4LNdWB_fixed_Asm_8:
5923  case ARM::VST4LNdWB_fixed_Asm_16:
5924  case ARM::VST4LNdWB_fixed_Asm_32:
5925  case ARM::VST4LNqWB_fixed_Asm_16:
5926  case ARM::VST4LNqWB_fixed_Asm_32: {
5927    MCInst TmpInst;
5928    // Shuffle the operands around so the lane index operand is in the
5929    // right place.
5930    unsigned Spacing;
5931    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5932    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5933    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5934    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5935    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5936    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5937    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5938                                            Spacing));
5939    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5940                                            Spacing * 2));
5941    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5942                                            Spacing * 3));
5943    TmpInst.addOperand(Inst.getOperand(1)); // lane
5944    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5945    TmpInst.addOperand(Inst.getOperand(5));
5946    Inst = TmpInst;
5947    return true;
5948  }
5949
5950  case ARM::VST1LNdAsm_8:
5951  case ARM::VST1LNdAsm_16:
5952  case ARM::VST1LNdAsm_32: {
5953    MCInst TmpInst;
5954    // Shuffle the operands around so the lane index operand is in the
5955    // right place.
5956    unsigned Spacing;
5957    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5958    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5959    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5960    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5961    TmpInst.addOperand(Inst.getOperand(1)); // lane
5962    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5963    TmpInst.addOperand(Inst.getOperand(5));
5964    Inst = TmpInst;
5965    return true;
5966  }
5967
5968  case ARM::VST2LNdAsm_8:
5969  case ARM::VST2LNdAsm_16:
5970  case ARM::VST2LNdAsm_32:
5971  case ARM::VST2LNqAsm_16:
5972  case ARM::VST2LNqAsm_32: {
5973    MCInst TmpInst;
5974    // Shuffle the operands around so the lane index operand is in the
5975    // right place.
5976    unsigned Spacing;
5977    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5978    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5979    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5980    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5981    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5982                                            Spacing));
5983    TmpInst.addOperand(Inst.getOperand(1)); // lane
5984    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5985    TmpInst.addOperand(Inst.getOperand(5));
5986    Inst = TmpInst;
5987    return true;
5988  }
5989
5990  case ARM::VST3LNdAsm_8:
5991  case ARM::VST3LNdAsm_16:
5992  case ARM::VST3LNdAsm_32:
5993  case ARM::VST3LNqAsm_16:
5994  case ARM::VST3LNqAsm_32: {
5995    MCInst TmpInst;
5996    // Shuffle the operands around so the lane index operand is in the
5997    // right place.
5998    unsigned Spacing;
5999    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6000    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6001    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6002    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6003    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6004                                            Spacing));
6005    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6006                                            Spacing * 2));
6007    TmpInst.addOperand(Inst.getOperand(1)); // lane
6008    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6009    TmpInst.addOperand(Inst.getOperand(5));
6010    Inst = TmpInst;
6011    return true;
6012  }
6013
6014  case ARM::VST4LNdAsm_8:
6015  case ARM::VST4LNdAsm_16:
6016  case ARM::VST4LNdAsm_32:
6017  case ARM::VST4LNqAsm_16:
6018  case ARM::VST4LNqAsm_32: {
6019    MCInst TmpInst;
6020    // Shuffle the operands around so the lane index operand is in the
6021    // right place.
6022    unsigned Spacing;
6023    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6024    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6025    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6026    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6027    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6028                                            Spacing));
6029    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6030                                            Spacing * 2));
6031    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6032                                            Spacing * 3));
6033    TmpInst.addOperand(Inst.getOperand(1)); // lane
6034    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6035    TmpInst.addOperand(Inst.getOperand(5));
6036    Inst = TmpInst;
6037    return true;
6038  }
6039
6040  // Handle NEON VLD complex aliases.
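  // Same approach as the VST aliases above, but for loads: e.g.
  // "vld1.8 {d2[2]}, [r3]" is matched as VLD1LNdAsm_8 and rebuilt below as the
  // real VLD1LNd8, including the tied source operand for the unchanged lanes.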
6041  case ARM::VLD1LNdWB_register_Asm_8:
6042  case ARM::VLD1LNdWB_register_Asm_16:
6043  case ARM::VLD1LNdWB_register_Asm_32: {
6044    MCInst TmpInst;
6045    // Shuffle the operands around so the lane index operand is in the
6046    // right place.
6047    unsigned Spacing;
6048    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6049    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6050    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6051    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6052    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6053    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6054    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6055    TmpInst.addOperand(Inst.getOperand(1)); // lane
6056    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6057    TmpInst.addOperand(Inst.getOperand(6));
6058    Inst = TmpInst;
6059    return true;
6060  }
6061
6062  case ARM::VLD2LNdWB_register_Asm_8:
6063  case ARM::VLD2LNdWB_register_Asm_16:
6064  case ARM::VLD2LNdWB_register_Asm_32:
6065  case ARM::VLD2LNqWB_register_Asm_16:
6066  case ARM::VLD2LNqWB_register_Asm_32: {
6067    MCInst TmpInst;
6068    // Shuffle the operands around so the lane index operand is in the
6069    // right place.
6070    unsigned Spacing;
6071    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6072    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6073    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6074                                            Spacing));
6075    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6076    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6077    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6078    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6079    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6080    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6081                                            Spacing));
6082    TmpInst.addOperand(Inst.getOperand(1)); // lane
6083    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6084    TmpInst.addOperand(Inst.getOperand(6));
6085    Inst = TmpInst;
6086    return true;
6087  }
6088
6089  case ARM::VLD3LNdWB_register_Asm_8:
6090  case ARM::VLD3LNdWB_register_Asm_16:
6091  case ARM::VLD3LNdWB_register_Asm_32:
6092  case ARM::VLD3LNqWB_register_Asm_16:
6093  case ARM::VLD3LNqWB_register_Asm_32: {
6094    MCInst TmpInst;
6095    // Shuffle the operands around so the lane index operand is in the
6096    // right place.
6097    unsigned Spacing;
6098    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6099    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6100    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6101                                            Spacing));
6102    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6103                                            Spacing * 2));
6104    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6105    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6106    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6107    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6108    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6109    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6110                                            Spacing));
6111    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6112                                            Spacing * 2));
6113    TmpInst.addOperand(Inst.getOperand(1)); // lane
6114    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6115    TmpInst.addOperand(Inst.getOperand(6));
6116    Inst = TmpInst;
6117    return true;
6118  }
6119
6120  case ARM::VLD4LNdWB_register_Asm_8:
6121  case ARM::VLD4LNdWB_register_Asm_16:
6122  case ARM::VLD4LNdWB_register_Asm_32:
6123  case ARM::VLD4LNqWB_register_Asm_16:
6124  case ARM::VLD4LNqWB_register_Asm_32: {
6125    MCInst TmpInst;
6126    // Shuffle the operands around so the lane index operand is in the
6127    // right place.
6128    unsigned Spacing;
6129    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6130    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6131    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6132                                            Spacing));
6133    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6134                                            Spacing * 2));
6135    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6136                                            Spacing * 3));
6137    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6138    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6139    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6140    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6141    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6142    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6143                                            Spacing));
6144    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6145                                            Spacing * 2));
6146    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6147                                            Spacing * 3));
6148    TmpInst.addOperand(Inst.getOperand(1)); // lane
6149    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6150    TmpInst.addOperand(Inst.getOperand(6));
6151    Inst = TmpInst;
6152    return true;
6153  }
6154
6155  case ARM::VLD1LNdWB_fixed_Asm_8:
6156  case ARM::VLD1LNdWB_fixed_Asm_16:
6157  case ARM::VLD1LNdWB_fixed_Asm_32: {
6158    MCInst TmpInst;
6159    // Shuffle the operands around so the lane index operand is in the
6160    // right place.
6161    unsigned Spacing;
6162    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6163    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6164    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6165    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6166    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6167    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6168    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6169    TmpInst.addOperand(Inst.getOperand(1)); // lane
6170    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6171    TmpInst.addOperand(Inst.getOperand(5));
6172    Inst = TmpInst;
6173    return true;
6174  }
6175
6176  case ARM::VLD2LNdWB_fixed_Asm_8:
6177  case ARM::VLD2LNdWB_fixed_Asm_16:
6178  case ARM::VLD2LNdWB_fixed_Asm_32:
6179  case ARM::VLD2LNqWB_fixed_Asm_16:
6180  case ARM::VLD2LNqWB_fixed_Asm_32: {
6181    MCInst TmpInst;
6182    // Shuffle the operands around so the lane index operand is in the
6183    // right place.
6184    unsigned Spacing;
6185    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6186    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6187    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6188                                            Spacing));
6189    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6190    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6191    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6192    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6193    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6194    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6195                                            Spacing));
6196    TmpInst.addOperand(Inst.getOperand(1)); // lane
6197    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6198    TmpInst.addOperand(Inst.getOperand(5));
6199    Inst = TmpInst;
6200    return true;
6201  }
6202
6203  case ARM::VLD3LNdWB_fixed_Asm_8:
6204  case ARM::VLD3LNdWB_fixed_Asm_16:
6205  case ARM::VLD3LNdWB_fixed_Asm_32:
6206  case ARM::VLD3LNqWB_fixed_Asm_16:
6207  case ARM::VLD3LNqWB_fixed_Asm_32: {
6208    MCInst TmpInst;
6209    // Shuffle the operands around so the lane index operand is in the
6210    // right place.
6211    unsigned Spacing;
6212    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6213    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6214    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6215                                            Spacing));
6216    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6217                                            Spacing * 2));
6218    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6219    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6220    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6221    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6222    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6223    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6224                                            Spacing));
6225    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6226                                            Spacing * 2));
6227    TmpInst.addOperand(Inst.getOperand(1)); // lane
6228    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6229    TmpInst.addOperand(Inst.getOperand(5));
6230    Inst = TmpInst;
6231    return true;
6232  }
6233
6234  case ARM::VLD4LNdWB_fixed_Asm_8:
6235  case ARM::VLD4LNdWB_fixed_Asm_16:
6236  case ARM::VLD4LNdWB_fixed_Asm_32:
6237  case ARM::VLD4LNqWB_fixed_Asm_16:
6238  case ARM::VLD4LNqWB_fixed_Asm_32: {
6239    MCInst TmpInst;
6240    // Shuffle the operands around so the lane index operand is in the
6241    // right place.
6242    unsigned Spacing;
6243    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6244    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6245    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6246                                            Spacing));
6247    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6248                                            Spacing * 2));
6249    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6250                                            Spacing * 3));
6251    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6252    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6253    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6254    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6255    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6256    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6257                                            Spacing));
6258    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6259                                            Spacing * 2));
6260    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6261                                            Spacing * 3));
6262    TmpInst.addOperand(Inst.getOperand(1)); // lane
6263    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6264    TmpInst.addOperand(Inst.getOperand(5));
6265    Inst = TmpInst;
6266    return true;
6267  }
6268
6269  case ARM::VLD1LNdAsm_8:
6270  case ARM::VLD1LNdAsm_16:
6271  case ARM::VLD1LNdAsm_32: {
6272    MCInst TmpInst;
6273    // Shuffle the operands around so the lane index operand is in the
6274    // right place.
6275    unsigned Spacing;
6276    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6277    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6278    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6279    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6280    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6281    TmpInst.addOperand(Inst.getOperand(1)); // lane
6282    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6283    TmpInst.addOperand(Inst.getOperand(5));
6284    Inst = TmpInst;
6285    return true;
6286  }
6287
6288  case ARM::VLD2LNdAsm_8:
6289  case ARM::VLD2LNdAsm_16:
6290  case ARM::VLD2LNdAsm_32:
6291  case ARM::VLD2LNqAsm_16:
6292  case ARM::VLD2LNqAsm_32: {
6293    MCInst TmpInst;
6294    // Shuffle the operands around so the lane index operand is in the
6295    // right place.
6296    unsigned Spacing;
6297    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6298    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6299    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6300                                            Spacing));
6301    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6302    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6303    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6304    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6305                                            Spacing));
6306    TmpInst.addOperand(Inst.getOperand(1)); // lane
6307    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6308    TmpInst.addOperand(Inst.getOperand(5));
6309    Inst = TmpInst;
6310    return true;
6311  }
6312
6313  case ARM::VLD3LNdAsm_8:
6314  case ARM::VLD3LNdAsm_16:
6315  case ARM::VLD3LNdAsm_32:
6316  case ARM::VLD3LNqAsm_16:
6317  case ARM::VLD3LNqAsm_32: {
6318    MCInst TmpInst;
6319    // Shuffle the operands around so the lane index operand is in the
6320    // right place.
6321    unsigned Spacing;
6322    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6323    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6324    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6325                                            Spacing));
6326    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6327                                            Spacing * 2));
6328    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6329    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6330    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6331    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6332                                            Spacing));
6333    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6334                                            Spacing * 2));
6335    TmpInst.addOperand(Inst.getOperand(1)); // lane
6336    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6337    TmpInst.addOperand(Inst.getOperand(5));
6338    Inst = TmpInst;
6339    return true;
6340  }
6341
6342  case ARM::VLD4LNdAsm_8:
6343  case ARM::VLD4LNdAsm_16:
6344  case ARM::VLD4LNdAsm_32:
6345  case ARM::VLD4LNqAsm_16:
6346  case ARM::VLD4LNqAsm_32: {
6347    MCInst TmpInst;
6348    // Shuffle the operands around so the lane index operand is in the
6349    // right place.
6350    unsigned Spacing;
6351    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6352    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6353    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6354                                            Spacing));
6355    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6356                                            Spacing * 2));
6357    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6358                                            Spacing * 3));
6359    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6360    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6361    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6362    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6363                                            Spacing));
6364    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6365                                            Spacing * 2));
6366    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6367                                            Spacing * 3));
6368    TmpInst.addOperand(Inst.getOperand(1)); // lane
6369    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6370    TmpInst.addOperand(Inst.getOperand(5));
6371    Inst = TmpInst;
6372    return true;
6373  }
6374
6375  // VLD3DUP single 3-element structure to all lanes instructions.
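  // e.g. "vld3.8 {d0[], d1[], d2[]}, [r4]": load one 3-element structure and
  // replicate it to all lanes of d0-d2.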
6376  case ARM::VLD3DUPdAsm_8:
6377  case ARM::VLD3DUPdAsm_16:
6378  case ARM::VLD3DUPdAsm_32:
6379  case ARM::VLD3DUPqAsm_8:
6380  case ARM::VLD3DUPqAsm_16:
6381  case ARM::VLD3DUPqAsm_32: {
6382    MCInst TmpInst;
6383    unsigned Spacing;
6384    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6385    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6386    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6387                                            Spacing));
6388    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6389                                            Spacing * 2));
6390    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6391    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6392    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6393    TmpInst.addOperand(Inst.getOperand(4));
6394    Inst = TmpInst;
6395    return true;
6396  }
6397
6398  case ARM::VLD3DUPdWB_fixed_Asm_8:
6399  case ARM::VLD3DUPdWB_fixed_Asm_16:
6400  case ARM::VLD3DUPdWB_fixed_Asm_32:
6401  case ARM::VLD3DUPqWB_fixed_Asm_8:
6402  case ARM::VLD3DUPqWB_fixed_Asm_16:
6403  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6404    MCInst TmpInst;
6405    unsigned Spacing;
6406    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6407    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6408    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6409                                            Spacing));
6410    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6411                                            Spacing * 2));
6412    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6413    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6414    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6415    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6416    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6417    TmpInst.addOperand(Inst.getOperand(4));
6418    Inst = TmpInst;
6419    return true;
6420  }
6421
6422  case ARM::VLD3DUPdWB_register_Asm_8:
6423  case ARM::VLD3DUPdWB_register_Asm_16:
6424  case ARM::VLD3DUPdWB_register_Asm_32:
6425  case ARM::VLD3DUPqWB_register_Asm_8:
6426  case ARM::VLD3DUPqWB_register_Asm_16:
6427  case ARM::VLD3DUPqWB_register_Asm_32: {
6428    MCInst TmpInst;
6429    unsigned Spacing;
6430    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6431    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6432    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6433                                            Spacing));
6434    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6435                                            Spacing * 2));
6436    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6437    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6438    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6439    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6440    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6441    TmpInst.addOperand(Inst.getOperand(5));
6442    Inst = TmpInst;
6443    return true;
6444  }
6445
6446  // VLD3 multiple 3-element structure instructions.
6447  case ARM::VLD3dAsm_8:
6448  case ARM::VLD3dAsm_16:
6449  case ARM::VLD3dAsm_32:
6450  case ARM::VLD3qAsm_8:
6451  case ARM::VLD3qAsm_16:
6452  case ARM::VLD3qAsm_32: {
6453    MCInst TmpInst;
6454    unsigned Spacing;
6455    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6456    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6457    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6458                                            Spacing));
6459    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6460                                            Spacing * 2));
6461    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6462    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6463    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6464    TmpInst.addOperand(Inst.getOperand(4));
6465    Inst = TmpInst;
6466    return true;
6467  }
6468
6469  case ARM::VLD3dWB_fixed_Asm_8:
6470  case ARM::VLD3dWB_fixed_Asm_16:
6471  case ARM::VLD3dWB_fixed_Asm_32:
6472  case ARM::VLD3qWB_fixed_Asm_8:
6473  case ARM::VLD3qWB_fixed_Asm_16:
6474  case ARM::VLD3qWB_fixed_Asm_32: {
6475    MCInst TmpInst;
6476    unsigned Spacing;
6477    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6478    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6479    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6480                                            Spacing));
6481    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6482                                            Spacing * 2));
6483    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6484    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6485    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6486    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6487    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6488    TmpInst.addOperand(Inst.getOperand(4));
6489    Inst = TmpInst;
6490    return true;
6491  }
6492
6493  case ARM::VLD3dWB_register_Asm_8:
6494  case ARM::VLD3dWB_register_Asm_16:
6495  case ARM::VLD3dWB_register_Asm_32:
6496  case ARM::VLD3qWB_register_Asm_8:
6497  case ARM::VLD3qWB_register_Asm_16:
6498  case ARM::VLD3qWB_register_Asm_32: {
6499    MCInst TmpInst;
6500    unsigned Spacing;
6501    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6502    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6503    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6504                                            Spacing));
6505    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6506                                            Spacing * 2));
6507    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6508    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6509    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6510    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6511    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6512    TmpInst.addOperand(Inst.getOperand(5));
6513    Inst = TmpInst;
6514    return true;
6515  }
6516
6517  // VLD4DUP single 4-element structure to all lanes instructions.
6518  case ARM::VLD4DUPdAsm_8:
6519  case ARM::VLD4DUPdAsm_16:
6520  case ARM::VLD4DUPdAsm_32:
6521  case ARM::VLD4DUPqAsm_8:
6522  case ARM::VLD4DUPqAsm_16:
6523  case ARM::VLD4DUPqAsm_32: {
6524    MCInst TmpInst;
6525    unsigned Spacing;
6526    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6527    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6528    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6529                                            Spacing));
6530    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6531                                            Spacing * 2));
6532    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6533                                            Spacing * 3));
6534    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6535    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6536    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6537    TmpInst.addOperand(Inst.getOperand(4));
6538    Inst = TmpInst;
6539    return true;
6540  }
6541
6542  case ARM::VLD4DUPdWB_fixed_Asm_8:
6543  case ARM::VLD4DUPdWB_fixed_Asm_16:
6544  case ARM::VLD4DUPdWB_fixed_Asm_32:
6545  case ARM::VLD4DUPqWB_fixed_Asm_8:
6546  case ARM::VLD4DUPqWB_fixed_Asm_16:
6547  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6548    MCInst TmpInst;
6549    unsigned Spacing;
6550    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6551    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6552    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6553                                            Spacing));
6554    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6555                                            Spacing * 2));
6556    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6557                                            Spacing * 3));
6558    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6559    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6560    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6561    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6562    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6563    TmpInst.addOperand(Inst.getOperand(4));
6564    Inst = TmpInst;
6565    return true;
6566  }
6567
6568  case ARM::VLD4DUPdWB_register_Asm_8:
6569  case ARM::VLD4DUPdWB_register_Asm_16:
6570  case ARM::VLD4DUPdWB_register_Asm_32:
6571  case ARM::VLD4DUPqWB_register_Asm_8:
6572  case ARM::VLD4DUPqWB_register_Asm_16:
6573  case ARM::VLD4DUPqWB_register_Asm_32: {
6574    MCInst TmpInst;
6575    unsigned Spacing;
6576    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6577    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6578    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6579                                            Spacing));
6580    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6581                                            Spacing * 2));
6582    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6583                                            Spacing * 3));
6584    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6585    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6586    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6587    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6588    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6589    TmpInst.addOperand(Inst.getOperand(5));
6590    Inst = TmpInst;
6591    return true;
6592  }
6593
6594  // VLD4 multiple 4-element structure instructions.
6595  case ARM::VLD4dAsm_8:
6596  case ARM::VLD4dAsm_16:
6597  case ARM::VLD4dAsm_32:
6598  case ARM::VLD4qAsm_8:
6599  case ARM::VLD4qAsm_16:
6600  case ARM::VLD4qAsm_32: {
6601    MCInst TmpInst;
6602    unsigned Spacing;
6603    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6604    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6605    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6606                                            Spacing));
6607    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6608                                            Spacing * 2));
6609    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6610                                            Spacing * 3));
6611    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6612    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6613    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6614    TmpInst.addOperand(Inst.getOperand(4));
6615    Inst = TmpInst;
6616    return true;
6617  }
6618
6619  case ARM::VLD4dWB_fixed_Asm_8:
6620  case ARM::VLD4dWB_fixed_Asm_16:
6621  case ARM::VLD4dWB_fixed_Asm_32:
6622  case ARM::VLD4qWB_fixed_Asm_8:
6623  case ARM::VLD4qWB_fixed_Asm_16:
6624  case ARM::VLD4qWB_fixed_Asm_32: {
6625    MCInst TmpInst;
6626    unsigned Spacing;
6627    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6628    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6629    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6630                                            Spacing));
6631    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6632                                            Spacing * 2));
6633    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6634                                            Spacing * 3));
6635    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6636    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6637    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6638    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6639    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6640    TmpInst.addOperand(Inst.getOperand(4));
6641    Inst = TmpInst;
6642    return true;
6643  }
6644
6645  case ARM::VLD4dWB_register_Asm_8:
6646  case ARM::VLD4dWB_register_Asm_16:
6647  case ARM::VLD4dWB_register_Asm_32:
6648  case ARM::VLD4qWB_register_Asm_8:
6649  case ARM::VLD4qWB_register_Asm_16:
6650  case ARM::VLD4qWB_register_Asm_32: {
6651    MCInst TmpInst;
6652    unsigned Spacing;
6653    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6654    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6655    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6656                                            Spacing));
6657    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6658                                            Spacing * 2));
6659    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6660                                            Spacing * 3));
6661    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6662    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6663    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6664    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6665    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6666    TmpInst.addOperand(Inst.getOperand(5));
6667    Inst = TmpInst;
6668    return true;
6669  }
6670
6671  // VST3 multiple 3-element structure instructions.
6672  case ARM::VST3dAsm_8:
6673  case ARM::VST3dAsm_16:
6674  case ARM::VST3dAsm_32:
6675  case ARM::VST3qAsm_8:
6676  case ARM::VST3qAsm_16:
6677  case ARM::VST3qAsm_32: {
6678    MCInst TmpInst;
6679    unsigned Spacing;
6680    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6681    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6682    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6683    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6684    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6685                                            Spacing));
6686    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6687                                            Spacing * 2));
6688    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6689    TmpInst.addOperand(Inst.getOperand(4));
6690    Inst = TmpInst;
6691    return true;
6692  }
6693
6694  case ARM::VST3dWB_fixed_Asm_8:
6695  case ARM::VST3dWB_fixed_Asm_16:
6696  case ARM::VST3dWB_fixed_Asm_32:
6697  case ARM::VST3qWB_fixed_Asm_8:
6698  case ARM::VST3qWB_fixed_Asm_16:
6699  case ARM::VST3qWB_fixed_Asm_32: {
6700    MCInst TmpInst;
6701    unsigned Spacing;
6702    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6703    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6704    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6705    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6706    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6707    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6708    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6709                                            Spacing));
6710    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6711                                            Spacing * 2));
6712    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6713    TmpInst.addOperand(Inst.getOperand(4));
6714    Inst = TmpInst;
6715    return true;
6716  }
6717
6718  case ARM::VST3dWB_register_Asm_8:
6719  case ARM::VST3dWB_register_Asm_16:
6720  case ARM::VST3dWB_register_Asm_32:
6721  case ARM::VST3qWB_register_Asm_8:
6722  case ARM::VST3qWB_register_Asm_16:
6723  case ARM::VST3qWB_register_Asm_32: {
6724    MCInst TmpInst;
6725    unsigned Spacing;
6726    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6727    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6728    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6729    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6730    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6731    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6732    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6733                                            Spacing));
6734    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6735                                            Spacing * 2));
6736    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6737    TmpInst.addOperand(Inst.getOperand(5));
6738    Inst = TmpInst;
6739    return true;
6740  }
6741
6742  // VST4 multiple 4-element structure instructions.
6743  case ARM::VST4dAsm_8:
6744  case ARM::VST4dAsm_16:
6745  case ARM::VST4dAsm_32:
6746  case ARM::VST4qAsm_8:
6747  case ARM::VST4qAsm_16:
6748  case ARM::VST4qAsm_32: {
6749    MCInst TmpInst;
6750    unsigned Spacing;
6751    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6752    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6753    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6754    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6755    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6756                                            Spacing));
6757    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6758                                            Spacing * 2));
6759    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6760                                            Spacing * 3));
6761    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6762    TmpInst.addOperand(Inst.getOperand(4));
6763    Inst = TmpInst;
6764    return true;
6765  }
6766
6767  case ARM::VST4dWB_fixed_Asm_8:
6768  case ARM::VST4dWB_fixed_Asm_16:
6769  case ARM::VST4dWB_fixed_Asm_32:
6770  case ARM::VST4qWB_fixed_Asm_8:
6771  case ARM::VST4qWB_fixed_Asm_16:
6772  case ARM::VST4qWB_fixed_Asm_32: {
6773    MCInst TmpInst;
6774    unsigned Spacing;
6775    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6776    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6777    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6778    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6779    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6780    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6781    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6782                                            Spacing));
6783    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6784                                            Spacing * 2));
6785    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6786                                            Spacing * 3));
6787    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6788    TmpInst.addOperand(Inst.getOperand(4));
6789    Inst = TmpInst;
6790    return true;
6791  }
6792
6793  case ARM::VST4dWB_register_Asm_8:
6794  case ARM::VST4dWB_register_Asm_16:
6795  case ARM::VST4dWB_register_Asm_32:
6796  case ARM::VST4qWB_register_Asm_8:
6797  case ARM::VST4qWB_register_Asm_16:
6798  case ARM::VST4qWB_register_Asm_32: {
6799    MCInst TmpInst;
6800    unsigned Spacing;
6801    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6802    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6803    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6804    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6805    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6806    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6807    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6808                                            Spacing));
6809    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6810                                            Spacing * 2));
6811    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6812                                            Spacing * 3));
6813    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6814    TmpInst.addOperand(Inst.getOperand(5));
6815    Inst = TmpInst;
6816    return true;
6817  }
6818
6819  // Handle encoding choice for the shift-immediate instructions.
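  // For example, outside an IT block 'lsls r2, r2, #3' (low registers,
  // Rd == Rm, flags set, no '.w' suffix) can use the 16-bit tLSLri
  // encoding instead of the 32-bit t2LSLri form.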
6820  case ARM::t2LSLri:
6821  case ARM::t2LSRri:
6822  case ARM::t2ASRri: {
6823    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6824        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6825        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6826        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6827         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6828      unsigned NewOpc;
6829      switch (Inst.getOpcode()) {
6830      default: llvm_unreachable("unexpected opcode");
6831      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6832      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6833      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6834      }
6835      // The Thumb1 operands aren't in the same order. Awesome, eh?
6836      MCInst TmpInst;
6837      TmpInst.setOpcode(NewOpc);
6838      TmpInst.addOperand(Inst.getOperand(0));
6839      TmpInst.addOperand(Inst.getOperand(5));
6840      TmpInst.addOperand(Inst.getOperand(1));
6841      TmpInst.addOperand(Inst.getOperand(2));
6842      TmpInst.addOperand(Inst.getOperand(3));
6843      TmpInst.addOperand(Inst.getOperand(4));
6844      Inst = TmpInst;
6845      return true;
6846    }
6847    return false;
6848  }
6849
6850  // Handle the Thumb2 mode MOV complex aliases.
6851  case ARM::t2MOVsr:
6852  case ARM::t2MOVSsr: {
6853    // Which instruction to expand to depends on the CCOut operand and,
6854    // when the register operands are low registers, whether we're in an
6855    // IT block.
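    // For example, 'movs r0, r0, lsl r1' outside an IT block performs the
    // same operation as the 16-bit 'lsls r0, r1' (tLSLrr), so the narrow
    // encoding can be used when the constraints below are met.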
6856    bool isNarrow = false;
6857    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6858        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6859        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6860        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6861        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6862      isNarrow = true;
6863    MCInst TmpInst;
6864    unsigned newOpc;
6865    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6866    default: llvm_unreachable("unexpected opcode!");
6867    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6868    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6869    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6870    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6871    }
6872    TmpInst.setOpcode(newOpc);
6873    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6874    if (isNarrow)
6875      TmpInst.addOperand(MCOperand::CreateReg(
6876          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6877    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6878    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6879    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6880    TmpInst.addOperand(Inst.getOperand(5));
6881    if (!isNarrow)
6882      TmpInst.addOperand(MCOperand::CreateReg(
6883          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6884    Inst = TmpInst;
6885    return true;
6886  }
6887  case ARM::t2MOVsi:
6888  case ARM::t2MOVSsi: {
6889    // Which instruction to expand to depends on the CCOut operand and,
6890    // when the register operands are low registers, whether we're in an
6891    // IT block.
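    // For example, 'mov r0, r1, lsl #4' inside an IT block can use the
    // 16-bit tLSLri encoding, while an rrx shift always requires the
    // 32-bit t2RRX encoding.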
6892    bool isNarrow = false;
6893    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6894        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6895        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6896      isNarrow = true;
6897    MCInst TmpInst;
6898    unsigned newOpc;
6899    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6900    default: llvm_unreachable("unexpected opcode!");
6901    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6902    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6903    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6904    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6905    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6906    }
6907    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6908    if (Amount == 32) Amount = 0;
6909    TmpInst.setOpcode(newOpc);
6910    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6911    if (isNarrow)
6912      TmpInst.addOperand(MCOperand::CreateReg(
6913          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6914    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6915    if (newOpc != ARM::t2RRX)
6916      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6917    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6918    TmpInst.addOperand(Inst.getOperand(4));
6919    if (!isNarrow)
6920      TmpInst.addOperand(MCOperand::CreateReg(
6921          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6922    Inst = TmpInst;
6923    return true;
6924  }
6925  // Handle the ARM mode MOV complex aliases.
6926  case ARM::ASRr:
6927  case ARM::LSRr:
6928  case ARM::LSLr:
6929  case ARM::RORr: {
6930    ARM_AM::ShiftOpc ShiftTy;
6931    switch(Inst.getOpcode()) {
6932    default: llvm_unreachable("unexpected opcode!");
6933    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6934    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6935    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6936    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6937    }
6938    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6939    MCInst TmpInst;
6940    TmpInst.setOpcode(ARM::MOVsr);
6941    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6942    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6943    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6944    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6945    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6946    TmpInst.addOperand(Inst.getOperand(4));
6947    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6948    Inst = TmpInst;
6949    return true;
6950  }
6951  case ARM::ASRi:
6952  case ARM::LSRi:
6953  case ARM::LSLi:
6954  case ARM::RORi: {
6955    ARM_AM::ShiftOpc ShiftTy;
6956    switch(Inst.getOpcode()) {
6957    default: llvm_unreachable("unexpected opcode!");
6958    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6959    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6960    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6961    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6962    }
6963    // A shift by zero is a plain MOVr, not a MOVsi.
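    // For example, 'lsl r0, r1, #0' is simply 'mov r0, r1'.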
6964    unsigned Amt = Inst.getOperand(2).getImm();
6965    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6966    // A shift by 32 should be encoded as 0 when permitted
6967    if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
6968      Amt = 0;
6969    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6970    MCInst TmpInst;
6971    TmpInst.setOpcode(Opc);
6972    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6973    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6974    if (Opc == ARM::MOVsi)
6975      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6976    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6977    TmpInst.addOperand(Inst.getOperand(4));
6978    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6979    Inst = TmpInst;
6980    return true;
6981  }
6982  case ARM::RRXi: {
6983    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6984    MCInst TmpInst;
6985    TmpInst.setOpcode(ARM::MOVsi);
6986    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6987    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6988    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6989    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6990    TmpInst.addOperand(Inst.getOperand(3));
6991    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6992    Inst = TmpInst;
6993    return true;
6994  }
6995  case ARM::t2LDMIA_UPD: {
6996    // If this is a load of a single register, then we should use
6997    // a post-indexed LDR instruction instead, per the ARM ARM.
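    // For example, 'ldmia r0!, {r1}' is emitted as 'ldr r1, [r0], #4'.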
6998    if (Inst.getNumOperands() != 5)
6999      return false;
7000    MCInst TmpInst;
7001    TmpInst.setOpcode(ARM::t2LDR_POST);
7002    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7003    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7004    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7005    TmpInst.addOperand(MCOperand::CreateImm(4));
7006    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7007    TmpInst.addOperand(Inst.getOperand(3));
7008    Inst = TmpInst;
7009    return true;
7010  }
7011  case ARM::t2STMDB_UPD: {
7012    // If this is a store of a single register, then we should use
7013    // a pre-indexed STR instruction instead, per the ARM ARM.
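    // For example, 'stmdb r0!, {r1}' is emitted as 'str r1, [r0, #-4]!'.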
7014    if (Inst.getNumOperands() != 5)
7015      return false;
7016    MCInst TmpInst;
7017    TmpInst.setOpcode(ARM::t2STR_PRE);
7018    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7019    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7020    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7021    TmpInst.addOperand(MCOperand::CreateImm(-4));
7022    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7023    TmpInst.addOperand(Inst.getOperand(3));
7024    Inst = TmpInst;
7025    return true;
7026  }
7027  case ARM::LDMIA_UPD:
7028    // If this is a load of a single register via a 'pop', then we should use
7029    // a post-indexed LDR instruction instead, per the ARM ARM.
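    // For example, 'pop {r0}' is emitted as 'ldr r0, [sp], #4'.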
7030    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
7031        Inst.getNumOperands() == 5) {
7032      MCInst TmpInst;
7033      TmpInst.setOpcode(ARM::LDR_POST_IMM);
7034      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7035      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7036      TmpInst.addOperand(Inst.getOperand(1)); // Rn
7037      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
7038      TmpInst.addOperand(MCOperand::CreateImm(4));
7039      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7040      TmpInst.addOperand(Inst.getOperand(3));
7041      Inst = TmpInst;
7042      return true;
7043    }
7044    break;
7045  case ARM::STMDB_UPD:
7046    // If this is a store of a single register via a 'push', then we should use
7047    // a pre-indexed STR instruction instead, per the ARM ARM.
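    // For example, 'push {r0}' is emitted as 'str r0, [sp, #-4]!'.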
7048    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
7049        Inst.getNumOperands() == 5) {
7050      MCInst TmpInst;
7051      TmpInst.setOpcode(ARM::STR_PRE_IMM);
7052      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7053      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7054      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7055      TmpInst.addOperand(MCOperand::CreateImm(-4));
7056      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7057      TmpInst.addOperand(Inst.getOperand(3));
7058      Inst = TmpInst;
7059    }
7060    break;
7061  case ARM::t2ADDri12:
7062    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7063    // mnemonic was used (not "addw"), encoding T3 is preferred.
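    // For example, 'add r0, r1, #16' can use the modified-immediate T3
    // encoding, whereas 'addw r0, r1, #16' keeps the 12-bit immediate
    // encoding T4.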
7064    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
7065        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7066      break;
7067    Inst.setOpcode(ARM::t2ADDri);
7068    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7069    break;
7070  case ARM::t2SUBri12:
7071    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7072    // mnemonic was used (not "subw"), encoding T3 is preferred.
7073    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
7074        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7075      break;
7076    Inst.setOpcode(ARM::t2SUBri);
7077    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7078    break;
7079  case ARM::tADDi8:
7080    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7081    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7082    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7083    // to encoding T1 if <Rd> is omitted."
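    // For example, 'adds r1, r1, #3' (Rd specified) prefers tADDi3, while
    // 'adds r1, #3' (Rd omitted) keeps tADDi8.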
7084    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7085      Inst.setOpcode(ARM::tADDi3);
7086      return true;
7087    }
7088    break;
7089  case ARM::tSUBi8:
7090    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
7091    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7092    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7093    // to encoding T1 if <Rd> is omitted."
7094    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7095      Inst.setOpcode(ARM::tSUBi3);
7096      return true;
7097    }
7098    break;
7099  case ARM::t2ADDri:
7100  case ARM::t2SUBri: {
7101    // If the destination and first source operand are the same, and
7102    // the flags are compatible with the current IT status, use encoding T2
7103    // instead of T3. For compatibility with the system 'as'. Make sure the
7104    // wide encoding wasn't explicit.
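    // For example, 'adds r0, r0, #200' outside an IT block can be narrowed
    // to tADDi8, unless the wide encoding was requested with a '.w' suffix.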
7105    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7106        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7107        (unsigned)Inst.getOperand(2).getImm() > 255 ||
7108        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7109        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7110        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7111         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7112      break;
7113    MCInst TmpInst;
7114    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7115                      ARM::tADDi8 : ARM::tSUBi8);
7116    TmpInst.addOperand(Inst.getOperand(0));
7117    TmpInst.addOperand(Inst.getOperand(5));
7118    TmpInst.addOperand(Inst.getOperand(0));
7119    TmpInst.addOperand(Inst.getOperand(2));
7120    TmpInst.addOperand(Inst.getOperand(3));
7121    TmpInst.addOperand(Inst.getOperand(4));
7122    Inst = TmpInst;
7123    return true;
7124  }
7125  case ARM::t2ADDrr: {
7126    // If the destination and first source operand are the same, and
7127    // there's no setting of the flags, use encoding T2 instead of T3.
7128    // Note that this is only for ADD, not SUB. This mirrors the system
7129    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
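    // For example, 'add r0, r0, r8' (no flag setting) can use the 16-bit
    // tADDhirr encoding.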
7130    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7131        Inst.getOperand(5).getReg() != 0 ||
7132        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7133         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7134      break;
7135    MCInst TmpInst;
7136    TmpInst.setOpcode(ARM::tADDhirr);
7137    TmpInst.addOperand(Inst.getOperand(0));
7138    TmpInst.addOperand(Inst.getOperand(0));
7139    TmpInst.addOperand(Inst.getOperand(2));
7140    TmpInst.addOperand(Inst.getOperand(3));
7141    TmpInst.addOperand(Inst.getOperand(4));
7142    Inst = TmpInst;
7143    return true;
7144  }
7145  case ARM::tADDrSP: {
7146    // If the non-SP source operand and the destination operand are not the
7147    // same, we need to use the 32-bit encoding if it's available.
7148    if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7149      Inst.setOpcode(ARM::t2ADDrr);
7150      Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7151      return true;
7152    }
7153    break;
7154  }
7155  case ARM::tB:
7156    // A Thumb conditional branch outside of an IT block is a tBcc.
7157    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7158      Inst.setOpcode(ARM::tBcc);
7159      return true;
7160    }
7161    break;
7162  case ARM::t2B:
7163    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7164    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7165      Inst.setOpcode(ARM::t2Bcc);
7166      return true;
7167    }
7168    break;
7169  case ARM::t2Bcc:
7170    // If the conditional is AL or we're in an IT block, we really want t2B.
7171    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7172      Inst.setOpcode(ARM::t2B);
7173      return true;
7174    }
7175    break;
7176  case ARM::tBcc:
7177    // If the conditional is AL, we really want tB.
7178    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7179      Inst.setOpcode(ARM::tB);
7180      return true;
7181    }
7182    break;
7183  case ARM::tLDMIA: {
7184    // If the register list contains any high registers, or if the writeback
7185    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7186    // instead if we're in Thumb2. Otherwise, this should have generated
7187    // an error in validateInstruction().
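    // For example, 'ldmia r4, {r8, r9}' must use the 32-bit t2LDMIA
    // encoding because the register list contains high registers.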
7188    unsigned Rn = Inst.getOperand(0).getReg();
7189    bool hasWritebackToken =
7190      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7191       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7192    bool listContainsBase;
7193    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7194        (!listContainsBase && !hasWritebackToken) ||
7195        (listContainsBase && hasWritebackToken)) {
7196      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7197      assert (isThumbTwo());
7198      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7199      // If we're switching to the updating version, we need to insert
7200      // the writeback tied operand.
7201      if (hasWritebackToken)
7202        Inst.insert(Inst.begin(),
7203                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7204      return true;
7205    }
7206    break;
7207  }
7208  case ARM::tSTMIA_UPD: {
7209    // If the register list contains any high registers, we need to use
7210    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7211    // should have generated an error in validateInstruction().
7212    unsigned Rn = Inst.getOperand(0).getReg();
7213    bool listContainsBase;
7214    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7215      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7216      assert (isThumbTwo());
7217      Inst.setOpcode(ARM::t2STMIA_UPD);
7218      return true;
7219    }
7220    break;
7221  }
7222  case ARM::tPOP: {
7223    bool listContainsBase;
7224    // If the register list contains any high registers, we need to use
7225    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7226    // should have generated an error in validateInstruction().
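    // For example, 'pop {r8}' becomes a t2LDMIA_UPD of sp, which the
    // t2LDMIA_UPD case above then turns into a post-indexed LDR.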
7227    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7228      return false;
7229    assert (isThumbTwo());
7230    Inst.setOpcode(ARM::t2LDMIA_UPD);
7231    // Add the base register and writeback operands.
7232    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7233    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7234    return true;
7235  }
7236  case ARM::tPUSH: {
7237    bool listContainsBase;
7238    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7239      return false;
7240    assert (isThumbTwo());
7241    Inst.setOpcode(ARM::t2STMDB_UPD);
7242    // Add the base register and writeback operands.
7243    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7244    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7245    return true;
7246  }
7247  case ARM::t2MOVi: {
7248    // If we can use the 16-bit encoding and the user didn't explicitly
7249    // request the 32-bit variant, transform it here.
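    // For example, 'movs r0, #42' outside an IT block can use the 16-bit
    // tMOVi8 encoding.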
7250    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7251        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7252        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7253         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7254        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7255        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7256         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7257      // The operands aren't in the same order for tMOVi8...
7258      MCInst TmpInst;
7259      TmpInst.setOpcode(ARM::tMOVi8);
7260      TmpInst.addOperand(Inst.getOperand(0));
7261      TmpInst.addOperand(Inst.getOperand(4));
7262      TmpInst.addOperand(Inst.getOperand(1));
7263      TmpInst.addOperand(Inst.getOperand(2));
7264      TmpInst.addOperand(Inst.getOperand(3));
7265      Inst = TmpInst;
7266      return true;
7267    }
7268    break;
7269  }
7270  case ARM::t2MOVr: {
7271    // If we can use the 16-bit encoding and the user didn't explicitly
7272    // request the 32-bit variant, transform it here.
7273    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7274        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7275        Inst.getOperand(2).getImm() == ARMCC::AL &&
7276        Inst.getOperand(4).getReg() == ARM::CPSR &&
7277        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7278         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7279      // The operands aren't the same for tMOV[S]r... (no cc_out)
7280      MCInst TmpInst;
7281      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7282      TmpInst.addOperand(Inst.getOperand(0));
7283      TmpInst.addOperand(Inst.getOperand(1));
7284      TmpInst.addOperand(Inst.getOperand(2));
7285      TmpInst.addOperand(Inst.getOperand(3));
7286      Inst = TmpInst;
7287      return true;
7288    }
7289    break;
7290  }
7291  case ARM::t2SXTH:
7292  case ARM::t2SXTB:
7293  case ARM::t2UXTH:
7294  case ARM::t2UXTB: {
7295    // If we can use the 16-bit encoding and the user didn't explicitly
7296    // request the 32-bit variant, transform it here.
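    // For example, 'uxtb r0, r1' (no rotation) can use the 16-bit tUXTB
    // encoding.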
7297    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7298        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7299        Inst.getOperand(2).getImm() == 0 &&
7300        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7301         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7302      unsigned NewOpc;
7303      switch (Inst.getOpcode()) {
7304      default: llvm_unreachable("Illegal opcode!");
7305      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7306      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7307      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7308      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7309      }
7310      // The operands aren't the same for thumb1 (no rotate operand).
7311      MCInst TmpInst;
7312      TmpInst.setOpcode(NewOpc);
7313      TmpInst.addOperand(Inst.getOperand(0));
7314      TmpInst.addOperand(Inst.getOperand(1));
7315      TmpInst.addOperand(Inst.getOperand(3));
7316      TmpInst.addOperand(Inst.getOperand(4));
7317      Inst = TmpInst;
7318      return true;
7319    }
7320    break;
7321  }
7322  case ARM::MOVsi: {
7323    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7324    // rrx shifts and asr/lsr of #32 are encoded as 0
7325    if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7326      return false;
7327    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7328      // Shifting by zero is accepted as a vanilla 'MOVr'
7329      MCInst TmpInst;
7330      TmpInst.setOpcode(ARM::MOVr);
7331      TmpInst.addOperand(Inst.getOperand(0));
7332      TmpInst.addOperand(Inst.getOperand(1));
7333      TmpInst.addOperand(Inst.getOperand(3));
7334      TmpInst.addOperand(Inst.getOperand(4));
7335      TmpInst.addOperand(Inst.getOperand(5));
7336      Inst = TmpInst;
7337      return true;
7338    }
7339    return false;
7340  }
7341  case ARM::ANDrsi:
7342  case ARM::ORRrsi:
7343  case ARM::EORrsi:
7344  case ARM::BICrsi:
7345  case ARM::SUBrsi:
7346  case ARM::ADDrsi: {
7347    unsigned newOpc;
7348    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7349    if (SOpc == ARM_AM::rrx) return false;
7350    switch (Inst.getOpcode()) {
7351    default: llvm_unreachable("unexpected opcode!");
7352    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7353    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7354    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7355    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7356    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7357    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7358    }
7359    // If the shift is by zero, use the non-shifted instruction definition.
7360    // The exception is for right shifts, where 0 == 32
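    // For example, 'and r0, r1, r2, lsl #0' is simply 'and r0, r1, r2'.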
7361    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7362        !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7363      MCInst TmpInst;
7364      TmpInst.setOpcode(newOpc);
7365      TmpInst.addOperand(Inst.getOperand(0));
7366      TmpInst.addOperand(Inst.getOperand(1));
7367      TmpInst.addOperand(Inst.getOperand(2));
7368      TmpInst.addOperand(Inst.getOperand(4));
7369      TmpInst.addOperand(Inst.getOperand(5));
7370      TmpInst.addOperand(Inst.getOperand(6));
7371      Inst = TmpInst;
7372      return true;
7373    }
7374    return false;
7375  }
7376  case ARM::ITasm:
7377  case ARM::t2IT: {
7378    // In the encoded mask, a bit equal to the low bit of the condition
7379    // code means 't' and its inverse means 'e', for all but the first
7380    // condition. Internally we always use 1 to mean 't', so XOR-toggle
7381    // the mask's condition bits when the condition code's low bit is 0.
7382    MCOperand &MO = Inst.getOperand(1);
7383    unsigned Mask = MO.getImm();
7384    unsigned OrigMask = Mask;
7385    unsigned TZ = CountTrailingZeros_32(Mask);
7386    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7387      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7388      for (unsigned i = 3; i != TZ; --i)
7389        Mask ^= 1 << i;
7390    }
7391    MO.setImm(Mask);
7392
7393    // Set up the IT block state according to the IT instruction we just
7394    // matched.
7395    assert(!inITBlock() && "nested IT blocks?!");
7396    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7397    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7398    ITState.CurPosition = 0;
7399    ITState.FirstCond = true;
7400    break;
7401  }
7402  case ARM::t2LSLrr:
7403  case ARM::t2LSRrr:
7404  case ARM::t2ASRrr:
7405  case ARM::t2SBCrr:
7406  case ARM::t2RORrr:
7407  case ARM::t2BICrr:
7408  {
7409    // Assemblers should use the narrow encodings of these instructions when permissible.
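    // For example, 'lsls r0, r0, r1' outside an IT block can use the
    // 16-bit tLSLrr encoding.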
7410    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7411         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7412        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7413        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7414         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7415        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7416         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7417      unsigned NewOpc;
7418      switch (Inst.getOpcode()) {
7419        default: llvm_unreachable("unexpected opcode");
7420        case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7421        case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7422        case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7423        case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7424        case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7425        case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7426      }
7427      MCInst TmpInst;
7428      TmpInst.setOpcode(NewOpc);
7429      TmpInst.addOperand(Inst.getOperand(0));
7430      TmpInst.addOperand(Inst.getOperand(5));
7431      TmpInst.addOperand(Inst.getOperand(1));
7432      TmpInst.addOperand(Inst.getOperand(2));
7433      TmpInst.addOperand(Inst.getOperand(3));
7434      TmpInst.addOperand(Inst.getOperand(4));
7435      Inst = TmpInst;
7436      return true;
7437    }
7438    return false;
7439  }
7440  case ARM::t2ANDrr:
7441  case ARM::t2EORrr:
7442  case ARM::t2ADCrr:
7443  case ARM::t2ORRrr:
7444  {
7445    // Assemblers should use the narrow encodings of these instructions when permissible.
7446    // These instructions are special in that they are commutable, so shorter encodings
7447    // are available more often.
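    // For example, 'ands r0, r1, r0' outside an IT block can still use the
    // 16-bit tAND encoding by swapping the source operands.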
7448    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7449         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7450        (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7451         Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7452        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7453         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7454        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7455         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7456      unsigned NewOpc;
7457      switch (Inst.getOpcode()) {
7458        default: llvm_unreachable("unexpected opcode");
7459        case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7460        case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7461        case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7462        case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7463      }
7464      MCInst TmpInst;
7465      TmpInst.setOpcode(NewOpc);
7466      TmpInst.addOperand(Inst.getOperand(0));
7467      TmpInst.addOperand(Inst.getOperand(5));
7468      if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7469        TmpInst.addOperand(Inst.getOperand(1));
7470        TmpInst.addOperand(Inst.getOperand(2));
7471      } else {
7472        TmpInst.addOperand(Inst.getOperand(2));
7473        TmpInst.addOperand(Inst.getOperand(1));
7474      }
7475      TmpInst.addOperand(Inst.getOperand(3));
7476      TmpInst.addOperand(Inst.getOperand(4));
7477      Inst = TmpInst;
7478      return true;
7479    }
7480    return false;
7481  }
7482  }
7483  return false;
7484}
7485
7486unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7487  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7488  // suffix depending on whether they're in an IT block or not.
7489  unsigned Opc = Inst.getOpcode();
7490  const MCInstrDesc &MCID = getInstDesc(Opc);
7491  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7492    assert(MCID.hasOptionalDef() &&
7493           "optionally flag setting instruction missing optional def operand");
7494    assert(MCID.NumOperands == Inst.getNumOperands() &&
7495           "operand count mismatch!");
7496    // Find the optional-def operand (cc_out).
7497    unsigned OpNo;
7498    for (OpNo = 0;
7499         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7500         ++OpNo)
7501      ;
7502    // If we're parsing Thumb1, reject it completely.
7503    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7504      return Match_MnemonicFail;
7505    // If we're parsing Thumb2, which form is legal depends on whether we're
7506    // in an IT block.
7507    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7508        !inITBlock())
7509      return Match_RequiresITBlock;
7510    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7511        inITBlock())
7512      return Match_RequiresNotITBlock;
7513  }
7514  // Some high-register supporting Thumb1 encodings only allow both registers
7515  // to be from r0-r7 when in Thumb2.
7516  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7517           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7518           isARMLowRegister(Inst.getOperand(2).getReg()))
7519    return Match_RequiresThumb2;
7520  // Others only require ARMv6 or later.
7521  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7522           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7523           isARMLowRegister(Inst.getOperand(1).getReg()))
7524    return Match_RequiresV6;
7525  return Match_Success;
7526}
7527
7528static const char *getSubtargetFeatureName(unsigned Val);
7529bool ARMAsmParser::
7530MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
7531                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7532                        MCStreamer &Out, unsigned &ErrorInfo,
7533                        bool MatchingInlineAsm) {
7534  MCInst Inst;
7535  unsigned MatchResult;
7536
7537  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
7538                                     MatchingInlineAsm);
7539  switch (MatchResult) {
7540  default: break;
7541  case Match_Success:
7542    // Context sensitive operand constraints aren't handled by the matcher,
7543    // so check them here.
7544    if (validateInstruction(Inst, Operands)) {
7545      // Still progress the IT block, otherwise one wrong condition causes
7546      // nasty cascading errors.
7547      forwardITPosition();
7548      return true;
7549    }
7550
7551    // Some instructions need post-processing to, for example, tweak which
7552    // encoding is selected. Loop on it while changes happen so the
7553    // individual transformations can chain off each other. E.g.,
7554    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7555    while (processInstruction(Inst, Operands))
7556      ;
7557
7558    // Only move forward at the very end so that everything in validate
7559    // and process gets a consistent answer about whether we're in an IT
7560    // block.
7561    forwardITPosition();
7562
7563    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7564    // doesn't actually encode.
7565    if (Inst.getOpcode() == ARM::ITasm)
7566      return false;
7567
7568    Inst.setLoc(IDLoc);
7569    Out.EmitInstruction(Inst);
7570    return false;
7571  case Match_MissingFeature: {
7572    assert(ErrorInfo && "Unknown missing feature!");
7573    // Special case the error message for the very common case where only
7574    // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7575    std::string Msg = "instruction requires:";
7576    unsigned Mask = 1;
7577    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7578      if (ErrorInfo & Mask) {
7579        Msg += " ";
7580        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7581      }
7582      Mask <<= 1;
7583    }
7584    return Error(IDLoc, Msg);
7585  }
7586  case Match_InvalidOperand: {
7587    SMLoc ErrorLoc = IDLoc;
7588    if (ErrorInfo != ~0U) {
7589      if (ErrorInfo >= Operands.size())
7590        return Error(IDLoc, "too few operands for instruction");
7591
7592      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7593      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7594    }
7595
7596    return Error(ErrorLoc, "invalid operand for instruction");
7597  }
7598  case Match_MnemonicFail:
7599    return Error(IDLoc, "invalid instruction",
7600                 ((ARMOperand*)Operands[0])->getLocRange());
7601  case Match_RequiresNotITBlock:
7602    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7603  case Match_RequiresITBlock:
7604    return Error(IDLoc, "instruction only valid inside IT block");
7605  case Match_RequiresV6:
7606    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7607  case Match_RequiresThumb2:
7608    return Error(IDLoc, "instruction variant requires Thumb2");
7609  case Match_ImmRange0_15: {
7610    SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7611    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7612    return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
7613  }
7614  }
7615
7616  llvm_unreachable("Implement any new match types added!");
7617}
7618
7619/// ParseDirective parses the ARM-specific directives
7620bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7621  StringRef IDVal = DirectiveID.getIdentifier();
7622  if (IDVal == ".word")
7623    return parseDirectiveWord(4, DirectiveID.getLoc());
7624  else if (IDVal == ".thumb")
7625    return parseDirectiveThumb(DirectiveID.getLoc());
7626  else if (IDVal == ".arm")
7627    return parseDirectiveARM(DirectiveID.getLoc());
7628  else if (IDVal == ".thumb_func")
7629    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7630  else if (IDVal == ".code")
7631    return parseDirectiveCode(DirectiveID.getLoc());
7632  else if (IDVal == ".syntax")
7633    return parseDirectiveSyntax(DirectiveID.getLoc());
7634  else if (IDVal == ".unreq")
7635    return parseDirectiveUnreq(DirectiveID.getLoc());
7636  else if (IDVal == ".arch")
7637    return parseDirectiveArch(DirectiveID.getLoc());
7638  else if (IDVal == ".eabi_attribute")
7639    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7640  return true;
7641}
7642
7643/// parseDirectiveWord
7644///  ::= .word [ expression (, expression)* ]
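///  e.g., '.word 0x11223344, sym+4' emits two 32-bit values.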
7645bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7646  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7647    for (;;) {
7648      const MCExpr *Value;
7649      if (getParser().ParseExpression(Value))
7650        return true;
7651
7652      getParser().getStreamer().EmitValue(Value, Size);
7653
7654      if (getLexer().is(AsmToken::EndOfStatement))
7655        break;
7656
7657      // FIXME: Improve diagnostic.
7658      if (getLexer().isNot(AsmToken::Comma))
7659        return Error(L, "unexpected token in directive");
7660      Parser.Lex();
7661    }
7662  }
7663
7664  Parser.Lex();
7665  return false;
7666}
7667
7668/// parseDirectiveThumb
7669///  ::= .thumb
7670bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7671  if (getLexer().isNot(AsmToken::EndOfStatement))
7672    return Error(L, "unexpected token in directive");
7673  Parser.Lex();
7674
7675  if (!isThumb())
7676    SwitchMode();
7677  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7678  return false;
7679}
7680
7681/// parseDirectiveARM
7682///  ::= .arm
7683bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7684  if (getLexer().isNot(AsmToken::EndOfStatement))
7685    return Error(L, "unexpected token in directive");
7686  Parser.Lex();
7687
7688  if (isThumb())
7689    SwitchMode();
7690  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7691  return false;
7692}
7693
7694/// parseDirectiveThumbFunc
7695///  ::= .thumb_func symbol_name
7696bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7697  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7698  bool isMachO = MAI.hasSubsectionsViaSymbols();
7699  StringRef Name;
7700  bool needFuncName = true;
7701
7702  // Darwin asm has an (optional) function name after the .thumb_func
7703  // directive; ELF doesn't.
7704  if (isMachO) {
7705    const AsmToken &Tok = Parser.getTok();
7706    if (Tok.isNot(AsmToken::EndOfStatement)) {
7707      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7708        return Error(L, "unexpected token in .thumb_func directive");
7709      Name = Tok.getIdentifier();
7710      Parser.Lex(); // Consume the identifier token.
7711      needFuncName = false;
7712    }
7713  }
7714
7715  if (getLexer().isNot(AsmToken::EndOfStatement))
7716    return Error(L, "unexpected token in directive");
7717
7718  // Eat the end of statement and any blank lines that follow.
7719  while (getLexer().is(AsmToken::EndOfStatement))
7720    Parser.Lex();
7721
7722  // FIXME: assuming function name will be the line following .thumb_func
7723  // We really should be checking the next symbol definition even if there's
7724  // stuff in between.
7725  if (needFuncName) {
7726    Name = Parser.getTok().getIdentifier();
7727  }
7728
7729  // Mark symbol as a thumb symbol.
7730  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7731  getParser().getStreamer().EmitThumbFunc(Func);
7732  return false;
7733}
7734
7735/// parseDirectiveSyntax
7736///  ::= .syntax unified | divided
7737bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7738  const AsmToken &Tok = Parser.getTok();
7739  if (Tok.isNot(AsmToken::Identifier))
7740    return Error(L, "unexpected token in .syntax directive");
7741  StringRef Mode = Tok.getString();
7742  if (Mode == "unified" || Mode == "UNIFIED")
7743    Parser.Lex();
7744  else if (Mode == "divided" || Mode == "DIVIDED")
7745    return Error(L, "'.syntax divided' arm asssembly not supported");
7746  else
7747    return Error(L, "unrecognized syntax mode in .syntax directive");
7748
7749  if (getLexer().isNot(AsmToken::EndOfStatement))
7750    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7751  Parser.Lex();
7752
7753  // TODO tell the MC streamer the mode
7754  // getParser().getStreamer().Emit???();
7755  return false;
7756}
7757
7758/// parseDirectiveCode
7759///  ::= .code 16 | 32
7760bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7761  const AsmToken &Tok = Parser.getTok();
7762  if (Tok.isNot(AsmToken::Integer))
7763    return Error(L, "unexpected token in .code directive");
7764  int64_t Val = Parser.getTok().getIntVal();
7765  if (Val == 16)
7766    Parser.Lex();
7767  else if (Val == 32)
7768    Parser.Lex();
7769  else
7770    return Error(L, "invalid operand to .code directive");
7771
7772  if (getLexer().isNot(AsmToken::EndOfStatement))
7773    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7774  Parser.Lex();
7775
7776  if (Val == 16) {
7777    if (!isThumb())
7778      SwitchMode();
7779    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7780  } else {
7781    if (isThumb())
7782      SwitchMode();
7783    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7784  }
7785
7786  return false;
7787}
7788
7789/// parseDirectiveReq
7790///  ::= name .req registername
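///  e.g., after 'acc .req r4', 'acc' may be used wherever r4 is accepted.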
7791bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7792  Parser.Lex(); // Eat the '.req' token.
7793  unsigned Reg;
7794  SMLoc SRegLoc, ERegLoc;
7795  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7796    Parser.EatToEndOfStatement();
7797    return Error(SRegLoc, "register name expected");
7798  }
7799
7800  // Shouldn't be anything else.
7801  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7802    Parser.EatToEndOfStatement();
7803    return Error(Parser.getTok().getLoc(),
7804                 "unexpected input in .req directive.");
7805  }
7806
7807  Parser.Lex(); // Consume the EndOfStatement
7808
7809  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7810    return Error(SRegLoc, "redefinition of '" + Name +
7811                          "' does not match original.");
7812
7813  return false;
7814}
7815
7816/// parseDirectiveUnreq
7817///  ::= .unreq registername
7818bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7819  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7820    Parser.EatToEndOfStatement();
7821    return Error(L, "unexpected input in .unreq directive.");
7822  }
7823  RegisterReqs.erase(Parser.getTok().getIdentifier());
7824  Parser.Lex(); // Eat the identifier.
7825  return false;
7826}
7827
7828/// parseDirectiveArch
7829///  ::= .arch token
7830bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7831  return true;
7832}
7833
7834/// parseDirectiveEabiAttr
7835///  ::= .eabi_attribute int, int
7836bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7837  return true;
7838}
7839
7840/// Force static initialization.
7841extern "C" void LLVMInitializeARMAsmParser() {
7842  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7843  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7844}
7845
7846#define GET_REGISTER_MATCHER
7847#define GET_SUBTARGET_FEATURE_NAME
7848#define GET_MATCHER_IMPLEMENTATION
7849#include "ARMGenAsmMatcher.inc"
7850