ARMAsmParser.cpp revision 8abe7e33641fccfa70a7e335939e83dfbf654fe8
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for the instructions in the
54                              // block, starting at the first 1 (from lsb):
55                              //   '1'  condition as indicated in IT (then).
56                              //   '0'  inverse of the condition (else).
57                              // The count of instructions in the IT block
58                              // is 4 - trailingzeros(mask).
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so it needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
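  // Informal worked example of the bookkeeping above: a mask of 0b0100 has
  // two trailing zeros, so it describes a 4 - 2 = 2 instruction IT block,
  // and forwardITPosition() retires the block once CurPosition reaches
  // 5 - 2 = 3.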
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
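  // Illustrative (not exhaustive) examples of the split: "addseq" yields the
  // base mnemonic "add" with CarrySetting set and an "eq" predication code;
  // "cpsie" yields "cps" with a processor IMod suffix; "itte" yields "it"
  // with "te" returned as the ITMask.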
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
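  // The asymmetric ranges mirror the assembly forms these helpers parse:
  // "pkhbt Rd, Rn, Rm, lsl #imm" takes imm in [0, 31], while
  // "pkhtb Rd, Rn, Rm, asr #imm" takes imm in [1, 32].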
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_MemBarrierOpt,
274    k_Memory,
275    k_PostIndexRegister,
276    k_MSRMask,
277    k_ProcIFlags,
278    k_VectorIndex,
279    k_Register,
280    k_RegisterList,
281    k_DPRRegisterList,
282    k_SPRRegisterList,
283    k_VectorList,
284    k_VectorListAllLanes,
285    k_VectorListIndexed,
286    k_ShiftedRegister,
287    k_ShiftedImmediate,
288    k_ShifterImmediate,
289    k_RotateImmediate,
290    k_BitfieldDescriptor,
291    k_Token
292  } Kind;
293
294  SMLoc StartLoc, EndLoc;
295  SmallVector<unsigned, 8> Registers;
296
297  union {
298    struct {
299      ARMCC::CondCodes Val;
300    } CC;
301
302    struct {
303      unsigned Val;
304    } Cop;
305
306    struct {
307      unsigned Val;
308    } CoprocOption;
309
310    struct {
311      unsigned Mask:4;
312    } ITMask;
313
314    struct {
315      ARM_MB::MemBOpt Val;
316    } MBOpt;
317
318    struct {
319      ARM_PROC::IFlags Val;
320    } IFlags;
321
322    struct {
323      unsigned Val;
324    } MMask;
325
326    struct {
327      const char *Data;
328      unsigned Length;
329    } Tok;
330
331    struct {
332      unsigned RegNum;
333    } Reg;
334
335    // A vector register list is a sequential list of 1 to 4 registers.
336    struct {
337      unsigned RegNum;
338      unsigned Count;
339      unsigned LaneIndex;
340      bool isDoubleSpaced;
341    } VectorList;
342
343    struct {
344      unsigned Val;
345    } VectorIndex;
346
347    struct {
348      const MCExpr *Val;
349    } Imm;
350
351    /// Combined record for all forms of ARM address expressions.
352    struct {
353      unsigned BaseRegNum;
354      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
355      // was specified.
356      const MCConstantExpr *OffsetImm;  // Offset immediate value
357      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
358      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
359      unsigned ShiftImm;        // shift for OffsetReg.
360      unsigned Alignment;       // 0 = no alignment specified
361                                // n = alignment in bytes (2, 4, 8, 16, or 32)
362      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
363    } Memory;
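    // Informal examples of how source syntax maps onto this record:
    // "[r0, #-8]" fills in BaseRegNum and OffsetImm; "[r1, r2, lsl #2]"
    // fills in BaseRegNum, OffsetRegNum, ShiftType and ShiftImm; an
    // alignment specifier such as "[r3:128]" sets Alignment to 16 (bytes).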
364
365    struct {
366      unsigned RegNum;
367      bool isAdd;
368      ARM_AM::ShiftOpc ShiftTy;
369      unsigned ShiftImm;
370    } PostIdxReg;
371
372    struct {
373      bool isASR;
374      unsigned Imm;
375    } ShifterImm;
376    struct {
377      ARM_AM::ShiftOpc ShiftTy;
378      unsigned SrcReg;
379      unsigned ShiftReg;
380      unsigned ShiftImm;
381    } RegShiftedReg;
382    struct {
383      ARM_AM::ShiftOpc ShiftTy;
384      unsigned SrcReg;
385      unsigned ShiftImm;
386    } RegShiftedImm;
387    struct {
388      unsigned Imm;
389    } RotImm;
390    struct {
391      unsigned LSB;
392      unsigned Width;
393    } Bitfield;
394  };
395
396  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
397public:
398  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
399    Kind = o.Kind;
400    StartLoc = o.StartLoc;
401    EndLoc = o.EndLoc;
402    switch (Kind) {
403    case k_CondCode:
404      CC = o.CC;
405      break;
406    case k_ITCondMask:
407      ITMask = o.ITMask;
408      break;
409    case k_Token:
410      Tok = o.Tok;
411      break;
412    case k_CCOut:
413    case k_Register:
414      Reg = o.Reg;
415      break;
416    case k_RegisterList:
417    case k_DPRRegisterList:
418    case k_SPRRegisterList:
419      Registers = o.Registers;
420      break;
421    case k_VectorList:
422    case k_VectorListAllLanes:
423    case k_VectorListIndexed:
424      VectorList = o.VectorList;
425      break;
426    case k_CoprocNum:
427    case k_CoprocReg:
428      Cop = o.Cop;
429      break;
430    case k_CoprocOption:
431      CoprocOption = o.CoprocOption;
432      break;
433    case k_Immediate:
434      Imm = o.Imm;
435      break;
436    case k_MemBarrierOpt:
437      MBOpt = o.MBOpt;
438      break;
439    case k_Memory:
440      Memory = o.Memory;
441      break;
442    case k_PostIndexRegister:
443      PostIdxReg = o.PostIdxReg;
444      break;
445    case k_MSRMask:
446      MMask = o.MMask;
447      break;
448    case k_ProcIFlags:
449      IFlags = o.IFlags;
450      break;
451    case k_ShifterImmediate:
452      ShifterImm = o.ShifterImm;
453      break;
454    case k_ShiftedRegister:
455      RegShiftedReg = o.RegShiftedReg;
456      break;
457    case k_ShiftedImmediate:
458      RegShiftedImm = o.RegShiftedImm;
459      break;
460    case k_RotateImmediate:
461      RotImm = o.RotImm;
462      break;
463    case k_BitfieldDescriptor:
464      Bitfield = o.Bitfield;
465      break;
466    case k_VectorIndex:
467      VectorIndex = o.VectorIndex;
468      break;
469    }
470  }
471
472  /// getStartLoc - Get the location of the first token of this operand.
473  SMLoc getStartLoc() const { return StartLoc; }
474  /// getEndLoc - Get the location of the last token of this operand.
475  SMLoc getEndLoc() const { return EndLoc; }
476
477  ARMCC::CondCodes getCondCode() const {
478    assert(Kind == k_CondCode && "Invalid access!");
479    return CC.Val;
480  }
481
482  unsigned getCoproc() const {
483    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
484    return Cop.Val;
485  }
486
487  StringRef getToken() const {
488    assert(Kind == k_Token && "Invalid access!");
489    return StringRef(Tok.Data, Tok.Length);
490  }
491
492  unsigned getReg() const {
493    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
494    return Reg.RegNum;
495  }
496
497  const SmallVectorImpl<unsigned> &getRegList() const {
498    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
499            Kind == k_SPRRegisterList) && "Invalid access!");
500    return Registers;
501  }
502
503  const MCExpr *getImm() const {
504    assert(isImm() && "Invalid access!");
505    return Imm.Val;
506  }
507
508  unsigned getVectorIndex() const {
509    assert(Kind == k_VectorIndex && "Invalid access!");
510    return VectorIndex.Val;
511  }
512
513  ARM_MB::MemBOpt getMemBarrierOpt() const {
514    assert(Kind == k_MemBarrierOpt && "Invalid access!");
515    return MBOpt.Val;
516  }
517
518  ARM_PROC::IFlags getProcIFlags() const {
519    assert(Kind == k_ProcIFlags && "Invalid access!");
520    return IFlags.Val;
521  }
522
523  unsigned getMSRMask() const {
524    assert(Kind == k_MSRMask && "Invalid access!");
525    return MMask.Val;
526  }
527
528  bool isCoprocNum() const { return Kind == k_CoprocNum; }
529  bool isCoprocReg() const { return Kind == k_CoprocReg; }
530  bool isCoprocOption() const { return Kind == k_CoprocOption; }
531  bool isCondCode() const { return Kind == k_CondCode; }
532  bool isCCOut() const { return Kind == k_CCOut; }
533  bool isITMask() const { return Kind == k_ITCondMask; }
534  bool isITCondCode() const { return Kind == k_CondCode; }
535  bool isImm() const { return Kind == k_Immediate; }
536  bool isFPImm() const {
537    if (!isImm()) return false;
538    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
539    if (!CE) return false;
540    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
541    return Val != -1;
542  }
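  // Informally: getFP32Imm returns -1 unless the value fits the 8-bit VFP
  // immediate encoding, so e.g. "vmov.f32 s0, #1.0" is accepted here while
  // an arbitrary constant such as 1.3 is not.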
543  bool isFBits16() const {
544    if (!isImm()) return false;
545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
546    if (!CE) return false;
547    int64_t Value = CE->getValue();
548    return Value >= 0 && Value <= 16;
549  }
550  bool isFBits32() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 1 && Value <= 32;
556  }
557  bool isImm8s4() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
563  }
564  bool isImm0_1020s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
570  }
571  bool isImm0_508s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
577  }
578  bool isImm0_255() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return Value >= 0 && Value < 256;
584  }
585  bool isImm0_1() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 2;
591  }
592  bool isImm0_3() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 4;
598  }
599  bool isImm0_7() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 16;
612  }
613  bool isImm0_31() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 64;
626  }
627  bool isImm8() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value == 8;
633  }
634  bool isImm16() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 16;
640  }
641  bool isImm32() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 32;
647  }
648  bool isShrImm8() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value > 0 && Value <= 8;
654  }
655  bool isShrImm16() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 16;
661  }
662  bool isShrImm32() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 32;
668  }
669  bool isShrImm64() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 64;
675  }
676  bool isImm1_7() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value < 8;
682  }
683  bool isImm1_15() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 16;
689  }
690  bool isImm1_31() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 32;
696  }
697  bool isImm1_16() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 17;
703  }
704  bool isImm1_32() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 33;
710  }
711  bool isImm0_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value >= 0 && Value < 33;
717  }
718  bool isImm0_65535() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 65536;
724  }
725  bool isImm0_65535Expr() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    // If it's not a constant expression, it'll generate a fixup and be
729    // handled later.
730    if (!CE) return true;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 65536;
733  }
734  bool isImm24bit() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value <= 0xffffff;
740  }
741  bool isImmThumbSR() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value > 0 && Value < 33;
747  }
748  bool isPKHLSLImm() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value >= 0 && Value < 32;
754  }
755  bool isPKHASRImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value > 0 && Value <= 32;
761  }
762  bool isARMSOImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return ARM_AM::getSOImmVal(Value) != -1;
768  }
769  bool isARMSOImmNot() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(~Value) != -1;
775  }
776  bool isARMSOImmNeg() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(-Value) != -1;
782  }
783  bool isT2SOImm() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getT2SOImmVal(Value) != -1;
789  }
790  bool isT2SOImmNot() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(~Value) != -1;
796  }
797  bool isT2SOImmNeg() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(-Value) != -1;
803  }
804  bool isSetEndImm() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value == 1 || Value == 0;
810  }
811  bool isReg() const { return Kind == k_Register; }
812  bool isRegList() const { return Kind == k_RegisterList; }
813  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
814  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
815  bool isToken() const { return Kind == k_Token; }
816  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
817  bool isMemory() const { return Kind == k_Memory; }
818  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
819  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
820  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
821  bool isRotImm() const { return Kind == k_RotateImmediate; }
822  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
823  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
824  bool isPostIdxReg() const {
825    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
826  }
827  bool isMemNoOffset(bool alignOK = false) const {
828    if (!isMemory())
829      return false;
830    // No offset of any kind.
831    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
832     (alignOK || Memory.Alignment == 0);
833  }
834  bool isMemPCRelImm12() const {
835    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
836      return false;
837    // Base register must be PC.
838    if (Memory.BaseRegNum != ARM::PC)
839      return false;
840    // Immediate offset in range [-4095, 4095].
841    if (!Memory.OffsetImm) return true;
842    int64_t Val = Memory.OffsetImm->getValue();
843    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
844  }
845  bool isAlignedMemory() const {
846    return isMemNoOffset(true);
847  }
848  bool isAddrMode2() const {
849    if (!isMemory() || Memory.Alignment != 0) return false;
850    // Check for register offset.
851    if (Memory.OffsetRegNum) return true;
852    // Immediate offset in range [-4095, 4095].
853    if (!Memory.OffsetImm) return true;
854    int64_t Val = Memory.OffsetImm->getValue();
855    return Val > -4096 && Val < 4096;
856  }
857  bool isAM2OffsetImm() const {
858    if (!isImm()) return false;
859    // Immediate offset in range [-4095, 4095].
860    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
861    if (!CE) return false;
862    int64_t Val = CE->getValue();
863    return Val > -4096 && Val < 4096;
864  }
865  bool isAddrMode3() const {
866    // If we have an immediate that's not a constant, treat it as a label
867    // reference needing a fixup. If it is a constant, it's something else
868    // and we reject it.
869    if (isImm() && !isa<MCConstantExpr>(getImm()))
870      return true;
871    if (!isMemory() || Memory.Alignment != 0) return false;
872    // No shifts are legal for AM3.
873    if (Memory.ShiftType != ARM_AM::no_shift) return false;
874    // Check for register offset.
875    if (Memory.OffsetRegNum) return true;
876    // Immediate offset in range [-255, 255].
877    if (!Memory.OffsetImm) return true;
878    int64_t Val = Memory.OffsetImm->getValue();
879    return Val > -256 && Val < 256;
880  }
881  bool isAM3Offset() const {
882    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
883      return false;
884    if (Kind == k_PostIndexRegister)
885      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
886    // Immediate offset in range [-255, 255].
887    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
888    if (!CE) return false;
889    int64_t Val = CE->getValue();
890    // Special case, #-0 is INT32_MIN.
891    return (Val > -256 && Val < 256) || Val == INT32_MIN;
892  }
893  bool isAddrMode5() const {
894    // If we have an immediate that's not a constant, treat it as a label
895    // reference needing a fixup. If it is a constant, it's something else
896    // and we reject it.
897    if (isImm() && !isa<MCConstantExpr>(getImm()))
898      return true;
899    if (!isMemory() || Memory.Alignment != 0) return false;
900    // Check for register offset.
901    if (Memory.OffsetRegNum) return false;
902    // Immediate offset in range [-1020, 1020] and a multiple of 4.
903    if (!Memory.OffsetImm) return true;
904    int64_t Val = Memory.OffsetImm->getValue();
905    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
906      Val == INT32_MIN;
907  }
908  bool isMemTBB() const {
909    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
910        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
911      return false;
912    return true;
913  }
914  bool isMemTBH() const {
915    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
916        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
917        Memory.Alignment != 0)
918      return false;
919    return true;
920  }
921  bool isMemRegOffset() const {
922    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
923      return false;
924    return true;
925  }
926  bool isT2MemRegOffset() const {
927    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
928        Memory.Alignment != 0)
929      return false;
930    // Only lsl #{0, 1, 2, 3} allowed.
931    if (Memory.ShiftType == ARM_AM::no_shift)
932      return true;
933    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
934      return false;
935    return true;
936  }
937  bool isMemThumbRR() const {
938    // Thumb reg+reg addressing is simple. Just two registers, a base and
939    // an offset. No shifts, negations or any other complicating factors.
940    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
941        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
942      return false;
943    return isARMLowRegister(Memory.BaseRegNum) &&
944      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
945  }
946  bool isMemThumbRIs4() const {
947    if (!isMemory() || Memory.OffsetRegNum != 0 ||
948        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
949      return false;
950    // Immediate offset, multiple of 4 in range [0, 124].
951    if (!Memory.OffsetImm) return true;
952    int64_t Val = Memory.OffsetImm->getValue();
953    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
954  }
955  bool isMemThumbRIs2() const {
956    if (!isMemory() || Memory.OffsetRegNum != 0 ||
957        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
958      return false;
959    // Immediate offset, multiple of 2 in range [0, 62].
960    if (!Memory.OffsetImm) return true;
961    int64_t Val = Memory.OffsetImm->getValue();
962    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
963  }
964  bool isMemThumbRIs1() const {
965    if (!isMemory() || Memory.OffsetRegNum != 0 ||
966        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
967      return false;
968    // Immediate offset in range [0, 31].
969    if (!Memory.OffsetImm) return true;
970    int64_t Val = Memory.OffsetImm->getValue();
971    return Val >= 0 && Val <= 31;
972  }
973  bool isMemThumbSPI() const {
974    if (!isMemory() || Memory.OffsetRegNum != 0 ||
975        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
976      return false;
977    // Immediate offset, multiple of 4 in range [0, 1020].
978    if (!Memory.OffsetImm) return true;
979    int64_t Val = Memory.OffsetImm->getValue();
980    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
981  }
982  bool isMemImm8s4Offset() const {
983    // If we have an immediate that's not a constant, treat it as a label
984    // reference needing a fixup. If it is a constant, it's something else
985    // and we reject it.
986    if (isImm() && !isa<MCConstantExpr>(getImm()))
987      return true;
988    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
989      return false;
990    // Immediate offset a multiple of 4 in range [-1020, 1020].
991    if (!Memory.OffsetImm) return true;
992    int64_t Val = Memory.OffsetImm->getValue();
993    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
994  }
995  bool isMemImm0_1020s4Offset() const {
996    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
997      return false;
998    // Immediate offset a multiple of 4 in range [0, 1020].
999    if (!Memory.OffsetImm) return true;
1000    int64_t Val = Memory.OffsetImm->getValue();
1001    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1002  }
1003  bool isMemImm8Offset() const {
1004    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1005      return false;
1006    // Base reg of PC isn't allowed for these encodings.
1007    if (Memory.BaseRegNum == ARM::PC) return false;
1008    // Immediate offset in range [-255, 255].
1009    if (!Memory.OffsetImm) return true;
1010    int64_t Val = Memory.OffsetImm->getValue();
1011    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1012  }
1013  bool isMemPosImm8Offset() const {
1014    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1015      return false;
1016    // Immediate offset in range [0, 255].
1017    if (!Memory.OffsetImm) return true;
1018    int64_t Val = Memory.OffsetImm->getValue();
1019    return Val >= 0 && Val < 256;
1020  }
1021  bool isMemNegImm8Offset() const {
1022    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1023      return false;
1024    // Base reg of PC isn't allowed for these encodings.
1025    if (Memory.BaseRegNum == ARM::PC) return false;
1026    // Immediate offset in range [-255, -1].
1027    if (!Memory.OffsetImm) return false;
1028    int64_t Val = Memory.OffsetImm->getValue();
1029    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1030  }
1031  bool isMemUImm12Offset() const {
1032    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1033      return false;
1034    // Immediate offset in range [0, 4095].
1035    if (!Memory.OffsetImm) return true;
1036    int64_t Val = Memory.OffsetImm->getValue();
1037    return (Val >= 0 && Val < 4096);
1038  }
1039  bool isMemImm12Offset() const {
1040    // If we have an immediate that's not a constant, treat it as a label
1041    // reference needing a fixup. If it is a constant, it's something else
1042    // and we reject it.
1043    if (isImm() && !isa<MCConstantExpr>(getImm()))
1044      return true;
1045
1046    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1047      return false;
1048    // Immediate offset in range [-4095, 4095].
1049    if (!Memory.OffsetImm) return true;
1050    int64_t Val = Memory.OffsetImm->getValue();
1051    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1052  }
1053  bool isPostIdxImm8() const {
1054    if (!isImm()) return false;
1055    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1056    if (!CE) return false;
1057    int64_t Val = CE->getValue();
1058    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1059  }
1060  bool isPostIdxImm8s4() const {
1061    if (!isImm()) return false;
1062    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1063    if (!CE) return false;
1064    int64_t Val = CE->getValue();
1065    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1066      (Val == INT32_MIN);
1067  }
1068
1069  bool isMSRMask() const { return Kind == k_MSRMask; }
1070  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1071
1072  // NEON operands.
1073  bool isSingleSpacedVectorList() const {
1074    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1075  }
1076  bool isDoubleSpacedVectorList() const {
1077    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1078  }
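  // Informally: a single-spaced list corresponds to source such as
  // "{d0, d1}" (consecutive D registers), while a double-spaced list
  // corresponds to "{d0, d2}" (every other D register), as used by some
  // VLDn/VSTn variants.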
1079  bool isVecListOneD() const {
1080    if (!isSingleSpacedVectorList()) return false;
1081    return VectorList.Count == 1;
1082  }
1083
1084  bool isVecListTwoD() const {
1085    if (!isSingleSpacedVectorList()) return false;
1086    return VectorList.Count == 2;
1087  }
1088
1089  bool isVecListThreeD() const {
1090    if (!isSingleSpacedVectorList()) return false;
1091    return VectorList.Count == 3;
1092  }
1093
1094  bool isVecListFourD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 4;
1097  }
1098
1099  bool isVecListTwoQ() const {
1100    if (!isDoubleSpacedVectorList()) return false;
1101    return VectorList.Count == 2;
1102  }
1103
1104  bool isVecListThreeQ() const {
1105    if (!isDoubleSpacedVectorList()) return false;
1106    return VectorList.Count == 3;
1107  }
1108
1109  bool isVecListFourQ() const {
1110    if (!isDoubleSpacedVectorList()) return false;
1111    return VectorList.Count == 4;
1112  }
1113
1114  bool isSingleSpacedVectorAllLanes() const {
1115    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1116  }
1117  bool isDoubleSpacedVectorAllLanes() const {
1118    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1119  }
1120  bool isVecListOneDAllLanes() const {
1121    if (!isSingleSpacedVectorAllLanes()) return false;
1122    return VectorList.Count == 1;
1123  }
1124
1125  bool isVecListTwoDAllLanes() const {
1126    if (!isSingleSpacedVectorAllLanes()) return false;
1127    return VectorList.Count == 2;
1128  }
1129
1130  bool isVecListTwoQAllLanes() const {
1131    if (!isDoubleSpacedVectorAllLanes()) return false;
1132    return VectorList.Count == 2;
1133  }
1134
1135  bool isSingleSpacedVectorIndexed() const {
1136    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1137  }
1138  bool isDoubleSpacedVectorIndexed() const {
1139    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1140  }
1141  bool isVecListOneDByteIndexed() const {
1142    if (!isSingleSpacedVectorIndexed()) return false;
1143    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1144  }
1145
1146  bool isVecListOneDHWordIndexed() const {
1147    if (!isSingleSpacedVectorIndexed()) return false;
1148    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1149  }
1150
1151  bool isVecListOneDWordIndexed() const {
1152    if (!isSingleSpacedVectorIndexed()) return false;
1153    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1154  }
1155
1156  bool isVecListTwoDByteIndexed() const {
1157    if (!isSingleSpacedVectorIndexed()) return false;
1158    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1159  }
1160
1161  bool isVecListTwoDHWordIndexed() const {
1162    if (!isSingleSpacedVectorIndexed()) return false;
1163    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1164  }
1165
1166  bool isVecListTwoQWordIndexed() const {
1167    if (!isDoubleSpacedVectorIndexed()) return false;
1168    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1169  }
1170
1171  bool isVecListTwoQHWordIndexed() const {
1172    if (!isDoubleSpacedVectorIndexed()) return false;
1173    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1174  }
1175
1176  bool isVecListTwoDWordIndexed() const {
1177    if (!isSingleSpacedVectorIndexed()) return false;
1178    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1179  }
1180
1181  bool isVecListThreeDByteIndexed() const {
1182    if (!isSingleSpacedVectorIndexed()) return false;
1183    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1184  }
1185
1186  bool isVecListThreeDHWordIndexed() const {
1187    if (!isSingleSpacedVectorIndexed()) return false;
1188    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1189  }
1190
1191  bool isVecListThreeQWordIndexed() const {
1192    if (!isDoubleSpacedVectorIndexed()) return false;
1193    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1194  }
1195
1196  bool isVecListThreeQHWordIndexed() const {
1197    if (!isDoubleSpacedVectorIndexed()) return false;
1198    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1199  }
1200
1201  bool isVecListThreeDWordIndexed() const {
1202    if (!isSingleSpacedVectorIndexed()) return false;
1203    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1204  }
1205
1206  bool isVectorIndex8() const {
1207    if (Kind != k_VectorIndex) return false;
1208    return VectorIndex.Val < 8;
1209  }
1210  bool isVectorIndex16() const {
1211    if (Kind != k_VectorIndex) return false;
1212    return VectorIndex.Val < 4;
1213  }
1214  bool isVectorIndex32() const {
1215    if (Kind != k_VectorIndex) return false;
1216    return VectorIndex.Val < 2;
1217  }
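  // These bounds mirror the lane counts of a 64-bit D register: 8 byte
  // lanes, 4 halfword lanes, or 2 word lanes; e.g. "vmov.32 d0[1], r0" uses
  // a word lane index of 1.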
1218
1219  bool isNEONi8splat() const {
1220    if (!isImm()) return false;
1221    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1222    // Must be a constant.
1223    if (!CE) return false;
1224    int64_t Value = CE->getValue();
1225    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1226    // value.
1227    return Value >= 0 && Value < 256;
1228  }
1229
1230  bool isNEONi16splat() const {
1231    if (!isImm()) return false;
1232    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1233    // Must be a constant.
1234    if (!CE) return false;
1235    int64_t Value = CE->getValue();
1236    // i16 value in the range [0,255] or [0x0100, 0xff00]
1237    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1238  }
1239
1240  bool isNEONi32splat() const {
1241    if (!isImm()) return false;
1242    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1243    // Must be a constant.
1244    if (!CE) return false;
1245    int64_t Value = CE->getValue();
1246    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1247    return (Value >= 0 && Value < 256) ||
1248      (Value >= 0x0100 && Value <= 0xff00) ||
1249      (Value >= 0x010000 && Value <= 0xff0000) ||
1250      (Value >= 0x01000000 && Value <= 0xff000000);
1251  }
1252
1253  bool isNEONi32vmov() const {
1254    if (!isImm()) return false;
1255    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1256    // Must be a constant.
1257    if (!CE) return false;
1258    int64_t Value = CE->getValue();
1259    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1260    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1261    return (Value >= 0 && Value < 256) ||
1262      (Value >= 0x0100 && Value <= 0xff00) ||
1263      (Value >= 0x010000 && Value <= 0xff0000) ||
1264      (Value >= 0x01000000 && Value <= 0xff000000) ||
1265      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1266      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1267  }
1268  bool isNEONi32vmovNeg() const {
1269    if (!isImm()) return false;
1270    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1271    // Must be a constant.
1272    if (!CE) return false;
1273    int64_t Value = ~CE->getValue();
1274    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1275    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1276    return (Value >= 0 && Value < 256) ||
1277      (Value >= 0x0100 && Value <= 0xff00) ||
1278      (Value >= 0x010000 && Value <= 0xff0000) ||
1279      (Value >= 0x01000000 && Value <= 0xff000000) ||
1280      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1281      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1282  }
1283
1284  bool isNEONi64splat() const {
1285    if (!isImm()) return false;
1286    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1287    // Must be a constant.
1288    if (!CE) return false;
1289    uint64_t Value = CE->getValue();
1290    // i64 value with each byte being either 0 or 0xff.
1291    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1292      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1293    return true;
1294  }
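  // Worked example for the check above: 0x00ff00ff00ff00ff passes (every
  // byte is 0x00 or 0xff), while 0x0102030405060708 is rejected at its
  // first byte.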
1295
1296  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1297    // Add as immediates when possible.  Null MCExpr = 0.
1298    if (Expr == 0)
1299      Inst.addOperand(MCOperand::CreateImm(0));
1300    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1301      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1302    else
1303      Inst.addOperand(MCOperand::CreateExpr(Expr));
1304  }
1305
1306  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1307    assert(N == 2 && "Invalid number of operands!");
1308    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1309    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1310    Inst.addOperand(MCOperand::CreateReg(RegNum));
1311  }
1312
1313  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1314    assert(N == 1 && "Invalid number of operands!");
1315    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1316  }
1317
1318  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1319    assert(N == 1 && "Invalid number of operands!");
1320    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1321  }
1322
1323  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1324    assert(N == 1 && "Invalid number of operands!");
1325    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1326  }
1327
1328  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1329    assert(N == 1 && "Invalid number of operands!");
1330    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1331  }
1332
1333  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1334    assert(N == 1 && "Invalid number of operands!");
1335    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1336  }
1337
1338  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1339    assert(N == 1 && "Invalid number of operands!");
1340    Inst.addOperand(MCOperand::CreateReg(getReg()));
1341  }
1342
1343  void addRegOperands(MCInst &Inst, unsigned N) const {
1344    assert(N == 1 && "Invalid number of operands!");
1345    Inst.addOperand(MCOperand::CreateReg(getReg()));
1346  }
1347
1348  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1349    assert(N == 3 && "Invalid number of operands!");
1350    assert(isRegShiftedReg() &&
1351           "addRegShiftedRegOperands() on non RegShiftedReg!");
1352    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1353    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1354    Inst.addOperand(MCOperand::CreateImm(
1355      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1356  }
1357
1358  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1359    assert(N == 2 && "Invalid number of operands!");
1360    assert(isRegShiftedImm() &&
1361           "addRegShiftedImmOperands() on non RegShiftedImm!");
1362    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1363    Inst.addOperand(MCOperand::CreateImm(
1364      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1365  }
1366
1367  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1368    assert(N == 1 && "Invalid number of operands!");
1369    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1370                                         ShifterImm.Imm));
1371  }
1372
1373  void addRegListOperands(MCInst &Inst, unsigned N) const {
1374    assert(N == 1 && "Invalid number of operands!");
1375    const SmallVectorImpl<unsigned> &RegList = getRegList();
1376    for (SmallVectorImpl<unsigned>::const_iterator
1377           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1378      Inst.addOperand(MCOperand::CreateReg(*I));
1379  }
1380
1381  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1382    addRegListOperands(Inst, N);
1383  }
1384
1385  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1386    addRegListOperands(Inst, N);
1387  }
1388
1389  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1390    assert(N == 1 && "Invalid number of operands!");
1391    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1392    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1393  }
1394
1395  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1396    assert(N == 1 && "Invalid number of operands!");
1397    // Munge the lsb/width into a bitfield mask.
1398    unsigned lsb = Bitfield.LSB;
1399    unsigned width = Bitfield.Width;
1400    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1401    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1402                      (32 - (lsb + width)));
1403    Inst.addOperand(MCOperand::CreateImm(Mask));
1404  }
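  // Worked example for the mask computation above: lsb = 8, width = 4 gives
  // ~((0xffffffff >> 8) << 28 >> 20) = ~0x00000f00 = 0xfffff0ff, i.e. bits
  // [11:8] clear and all other bits set.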
1405
1406  void addImmOperands(MCInst &Inst, unsigned N) const {
1407    assert(N == 1 && "Invalid number of operands!");
1408    addExpr(Inst, getImm());
1409  }
1410
1411  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1412    assert(N == 1 && "Invalid number of operands!");
1413    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1414    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1415  }
1416
1417  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1418    assert(N == 1 && "Invalid number of operands!");
1419    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1420    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1421  }
1422
1423  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1424    assert(N == 1 && "Invalid number of operands!");
1425    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1426    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1427    Inst.addOperand(MCOperand::CreateImm(Val));
1428  }
1429
1430  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1431    assert(N == 1 && "Invalid number of operands!");
1432    // FIXME: We really want to scale the value here, but the LDRD/STRD
1433    // instructions don't encode operands that way yet.
1434    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1435    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1436  }
1437
1438  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1439    assert(N == 1 && "Invalid number of operands!");
1440    // The immediate is scaled by four in the encoding and is stored
1441    // in the MCInst as such. Lop off the low two bits here.
1442    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1443    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1444  }
1445
1446  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1447    assert(N == 1 && "Invalid number of operands!");
1448    // The immediate is scaled by four in the encoding and is stored
1449    // in the MCInst as such. Lop off the low two bits here.
1450    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1451    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1452  }
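  // For instance, a source operand of #508 (as in the Thumb "sub sp, #508")
  // is stored in the MCInst as 508 / 4 = 127. Informal illustration only.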
1453
1454  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1455    assert(N == 1 && "Invalid number of operands!");
1456    // The constant encodes as the immediate-1, and we store in the instruction
1457    // the bits as encoded, so subtract off one here.
1458    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1459    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1460  }
1461
1462  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1463    assert(N == 1 && "Invalid number of operands!");
1464    // The constant encodes as the immediate-1, and we store in the instruction
1465    // the bits as encoded, so subtract off one here.
1466    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1467    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1468  }
1469
1470  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1471    assert(N == 1 && "Invalid number of operands!");
1472    // The constant encodes as the immediate, except for 32, which encodes as
1473    // zero.
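    // For example, a shift amount of #32 (as in "lsrs r0, r1, #32") is added
    // as 0.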
1474    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1475    unsigned Imm = CE->getValue();
1476    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1477  }
1478
1479  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1480    assert(N == 1 && "Invalid number of operands!");
1481    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1482    // the instruction as well.
1483    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1484    int Val = CE->getValue();
1485    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1486  }
1487
1488  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1489    assert(N == 1 && "Invalid number of operands!");
1490    // The operand is actually a t2_so_imm, but we have its bitwise
1491    // negation in the assembly source, so twiddle it here.
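    // For example, a source immediate of 0xfffffff0 is added here as
    // ~0xfffffff0 == 0xf.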
1492    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1493    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1494  }
1495
1496  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1497    assert(N == 1 && "Invalid number of operands!");
1498    // The operand is actually a t2_so_imm, but we have its
1499    // negation in the assembly source, so twiddle it here.
1500    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1501    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1502  }
1503
1504  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1505    assert(N == 1 && "Invalid number of operands!");
1506    // The operand is actually a so_imm, but we have its bitwise
1507    // negation in the assembly source, so twiddle it here.
1508    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1509    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1510  }
1511
1512  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1513    assert(N == 1 && "Invalid number of operands!");
1514    // The operand is actually a so_imm, but we have its
1515    // negation in the assembly source, so twiddle it here.
1516    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1517    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1518  }
1519
1520  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1521    assert(N == 1 && "Invalid number of operands!");
1522    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1523  }
1524
1525  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1526    assert(N == 1 && "Invalid number of operands!");
1527    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1528  }
1529
1530  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1531    assert(N == 1 && "Invalid number of operands!");
1532    int32_t Imm = Memory.OffsetImm->getValue();
1533    // FIXME: Handle #-0
1534    if (Imm == INT32_MIN) Imm = 0;
1535    Inst.addOperand(MCOperand::CreateImm(Imm));
1536  }
1537
1538  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1539    assert(N == 2 && "Invalid number of operands!");
1540    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1541    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1542  }
1543
1544  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1545    assert(N == 3 && "Invalid number of operands!");
1546    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1547    if (!Memory.OffsetRegNum) {
1548      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1549      // Special case for #-0
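      // (INT32_MIN appears to be the placeholder the operand parser uses for
      // a parsed "#-0".)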
1550      if (Val == INT32_MIN) Val = 0;
1551      if (Val < 0) Val = -Val;
1552      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1553    } else {
1554      // For register offset, we encode the shift type and negation flag
1555      // here.
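      // For illustration, an operand such as "[r0, -r1, lsl #2]" arrives here
      // with isNegative set, ShiftImm == 2 and ShiftType == lsl.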
1556      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1557                              Memory.ShiftImm, Memory.ShiftType);
1558    }
1559    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1560    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1561    Inst.addOperand(MCOperand::CreateImm(Val));
1562  }
1563
1564  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1565    assert(N == 2 && "Invalid number of operands!");
1566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1567    assert(CE && "non-constant AM2OffsetImm operand!");
1568    int32_t Val = CE->getValue();
1569    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1570    // Special case for #-0
1571    if (Val == INT32_MIN) Val = 0;
1572    if (Val < 0) Val = -Val;
1573    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1574    Inst.addOperand(MCOperand::CreateReg(0));
1575    Inst.addOperand(MCOperand::CreateImm(Val));
1576  }
1577
1578  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1579    assert(N == 3 && "Invalid number of operands!");
1580    // If we have an immediate that's not a constant, treat it as a label
1581    // reference needing a fixup. If it is a constant, it's something else
1582    // and we reject it.
1583    if (isImm()) {
1584      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1585      Inst.addOperand(MCOperand::CreateReg(0));
1586      Inst.addOperand(MCOperand::CreateImm(0));
1587      return;
1588    }
1589
1590    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1591    if (!Memory.OffsetRegNum) {
1592      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1593      // Special case for #-0
1594      if (Val == INT32_MIN) Val = 0;
1595      if (Val < 0) Val = -Val;
1596      Val = ARM_AM::getAM3Opc(AddSub, Val);
1597    } else {
1598      // For register offset, we just encode the negation (add/sub) flag
1599      // here; addrmode3 has no shift to encode.
1600      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1601    }
1602    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1603    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1604    Inst.addOperand(MCOperand::CreateImm(Val));
1605  }
1606
1607  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1608    assert(N == 2 && "Invalid number of operands!");
1609    if (Kind == k_PostIndexRegister) {
1610      int32_t Val =
1611        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1612      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1613      Inst.addOperand(MCOperand::CreateImm(Val));
1614      return;
1615    }
1616
1617    // Constant offset.
1618    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1619    int32_t Val = CE->getValue();
1620    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1621    // Special case for #-0
1622    if (Val == INT32_MIN) Val = 0;
1623    if (Val < 0) Val = -Val;
1624    Val = ARM_AM::getAM3Opc(AddSub, Val);
1625    Inst.addOperand(MCOperand::CreateReg(0));
1626    Inst.addOperand(MCOperand::CreateImm(Val));
1627  }
1628
1629  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1630    assert(N == 2 && "Invalid number of operands!");
1631    // If we have an immediate that's not a constant, treat it as a label
1632    // reference needing a fixup. If it is a constant, it's something else
1633    // and we reject it.
1634    if (isImm()) {
1635      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1636      Inst.addOperand(MCOperand::CreateImm(0));
1637      return;
1638    }
1639
1640    // The lower two bits are always zero and as such are not encoded.
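    // For example, "vldr d0, [r1, #-8]" arrives with an offset of -8, so Val
    // becomes -2 and is packed below as (sub, 2).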
1641    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1642    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1643    // Special case for #-0
1644    if (Val == INT32_MIN) Val = 0;
1645    if (Val < 0) Val = -Val;
1646    Val = ARM_AM::getAM5Opc(AddSub, Val);
1647    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1648    Inst.addOperand(MCOperand::CreateImm(Val));
1649  }
1650
1651  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1652    assert(N == 2 && "Invalid number of operands!");
1653    // If we have an immediate that's not a constant, treat it as a label
1654    // reference needing a fixup. If it is a constant, it's something else
1655    // and we reject it.
1656    if (isImm()) {
1657      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1658      Inst.addOperand(MCOperand::CreateImm(0));
1659      return;
1660    }
1661
1662    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1663    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1664    Inst.addOperand(MCOperand::CreateImm(Val));
1665  }
1666
1667  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1668    assert(N == 2 && "Invalid number of operands!");
1669    // The lower two bits are always zero and as such are not encoded.
1670    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1671    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1672    Inst.addOperand(MCOperand::CreateImm(Val));
1673  }
1674
1675  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1676    assert(N == 2 && "Invalid number of operands!");
1677    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1678    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1679    Inst.addOperand(MCOperand::CreateImm(Val));
1680  }
1681
1682  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1683    addMemImm8OffsetOperands(Inst, N);
1684  }
1685
1686  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1687    addMemImm8OffsetOperands(Inst, N);
1688  }
1689
1690  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1691    assert(N == 2 && "Invalid number of operands!");
1692    // If this is an immediate, it's a label reference.
1693    if (isImm()) {
1694      addExpr(Inst, getImm());
1695      Inst.addOperand(MCOperand::CreateImm(0));
1696      return;
1697    }
1698
1699    // Otherwise, it's a normal memory reg+offset.
1700    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1701    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1702    Inst.addOperand(MCOperand::CreateImm(Val));
1703  }
1704
1705  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1706    assert(N == 2 && "Invalid number of operands!");
1707    // If this is an immediate, it's a label reference.
1708    if (isImm()) {
1709      addExpr(Inst, getImm());
1710      Inst.addOperand(MCOperand::CreateImm(0));
1711      return;
1712    }
1713
1714    // Otherwise, it's a normal memory reg+offset.
1715    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1716    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1717    Inst.addOperand(MCOperand::CreateImm(Val));
1718  }
1719
1720  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1721    assert(N == 2 && "Invalid number of operands!");
1722    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1723    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1724  }
1725
1726  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1727    assert(N == 2 && "Invalid number of operands!");
1728    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1729    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1730  }
1731
1732  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1733    assert(N == 3 && "Invalid number of operands!");
1734    unsigned Val =
1735      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1736                        Memory.ShiftImm, Memory.ShiftType);
1737    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1738    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1739    Inst.addOperand(MCOperand::CreateImm(Val));
1740  }
1741
1742  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1743    assert(N == 3 && "Invalid number of operands!");
1744    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1745    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1746    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1747  }
1748
1749  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1750    assert(N == 2 && "Invalid number of operands!");
1751    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1752    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1753  }
1754
1755  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1756    assert(N == 2 && "Invalid number of operands!");
1757    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1758    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1759    Inst.addOperand(MCOperand::CreateImm(Val));
1760  }
1761
1762  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1763    assert(N == 2 && "Invalid number of operands!");
1764    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1765    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1766    Inst.addOperand(MCOperand::CreateImm(Val));
1767  }
1768
1769  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1770    assert(N == 2 && "Invalid number of operands!");
1771    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1772    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1773    Inst.addOperand(MCOperand::CreateImm(Val));
1774  }
1775
1776  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1777    assert(N == 2 && "Invalid number of operands!");
1778    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1779    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1780    Inst.addOperand(MCOperand::CreateImm(Val));
1781  }
1782
1783  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1784    assert(N == 1 && "Invalid number of operands!");
1785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1786    assert(CE && "non-constant post-idx-imm8 operand!");
1787    int Imm = CE->getValue();
1788    bool isAdd = Imm >= 0;
1789    if (Imm == INT32_MIN) Imm = 0;
1790    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1791    Inst.addOperand(MCOperand::CreateImm(Imm));
1792  }
1793
1794  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1795    assert(N == 1 && "Invalid number of operands!");
1796    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1797    assert(CE && "non-constant post-idx-imm8s4 operand!");
1798    int Imm = CE->getValue();
1799    bool isAdd = Imm >= 0;
1800    if (Imm == INT32_MIN) Imm = 0;
1801    // Immediate is scaled by 4.
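    // For example, a source offset of #-16 yields isAdd == false and an
    // encoded value of 16 / 4 == 4.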
1802    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1803    Inst.addOperand(MCOperand::CreateImm(Imm));
1804  }
1805
1806  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1807    assert(N == 2 && "Invalid number of operands!");
1808    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1809    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1810  }
1811
1812  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1813    assert(N == 2 && "Invalid number of operands!");
1814    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1815    // The sign, shift type, and shift amount are encoded in a single operand
1816    // using the AM2 encoding helpers.
1817    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1818    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1819                                     PostIdxReg.ShiftTy);
1820    Inst.addOperand(MCOperand::CreateImm(Imm));
1821  }
1822
1823  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1824    assert(N == 1 && "Invalid number of operands!");
1825    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1826  }
1827
1828  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1829    assert(N == 1 && "Invalid number of operands!");
1830    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1831  }
1832
1833  void addVecListOperands(MCInst &Inst, unsigned N) const {
1834    assert(N == 1 && "Invalid number of operands!");
1835    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1836  }
1837
1838  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1839    assert(N == 2 && "Invalid number of operands!");
1840    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1841    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1842  }
1843
1844  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1845    assert(N == 1 && "Invalid number of operands!");
1846    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1847  }
1848
1849  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1850    assert(N == 1 && "Invalid number of operands!");
1851    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1852  }
1853
1854  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1855    assert(N == 1 && "Invalid number of operands!");
1856    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1857  }
1858
1859  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1860    assert(N == 1 && "Invalid number of operands!");
1861    // The immediate encodes the type of constant as well as the value.
1862    // Mask in that this is an i8 splat.
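    // For example, a constant of 0x42 is added as 0x42 | 0xe00 == 0xe42.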
1863    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1864    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1865  }
1866
1867  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1868    assert(N == 1 && "Invalid number of operands!");
1869    // The immediate encodes the type of constant as well as the value.
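    // For example, 0x37 is added as 0x37 | 0x800 == 0x837, while 0x1200 is
    // added as (0x1200 >> 8) | 0xa00 == 0xa12.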
1870    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1871    unsigned Value = CE->getValue();
1872    if (Value >= 256)
1873      Value = (Value >> 8) | 0xa00;
1874    else
1875      Value |= 0x800;
1876    Inst.addOperand(MCOperand::CreateImm(Value));
1877  }
1878
1879  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1880    assert(N == 1 && "Invalid number of operands!");
1881    // The immediate encodes the type of constant as well as the value.
1882    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1883    unsigned Value = CE->getValue();
1884    if (Value >= 256 && Value <= 0xff00)
1885      Value = (Value >> 8) | 0x200;
1886    else if (Value > 0xffff && Value <= 0xff0000)
1887      Value = (Value >> 16) | 0x400;
1888    else if (Value > 0xffffff)
1889      Value = (Value >> 24) | 0x600;
1890    Inst.addOperand(MCOperand::CreateImm(Value));
1891  }
1892
1893  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1894    assert(N == 1 && "Invalid number of operands!");
1895    // The immediate encodes the type of constant as well as the value.
1896    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1897    unsigned Value = CE->getValue();
1898    if (Value >= 256 && Value <= 0xffff)
1899      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1900    else if (Value > 0xffff && Value <= 0xffffff)
1901      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1902    else if (Value > 0xffffff)
1903      Value = (Value >> 24) | 0x600;
1904    Inst.addOperand(MCOperand::CreateImm(Value));
1905  }
1906
1907  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1908    assert(N == 1 && "Invalid number of operands!");
1909    // The immediate encodes the type of constant as well as the value.
1910    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1911    unsigned Value = ~CE->getValue();
1912    if (Value >= 256 && Value <= 0xffff)
1913      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1914    else if (Value > 0xffff && Value <= 0xffffff)
1915      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1916    else if (Value > 0xffffff)
1917      Value = (Value >> 24) | 0x600;
1918    Inst.addOperand(MCOperand::CreateImm(Value));
1919  }
1920
1921  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1922    assert(N == 1 && "Invalid number of operands!");
1923    // The immediate encodes the type of constant as well as the value.
1924    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1925    uint64_t Value = CE->getValue();
1926    unsigned Imm = 0;
1927    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1928      Imm |= (Value & 1) << i;
1929    }
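    // For illustration, a value of 0x00ff00ff00ff00ff collects bit 0 of each
    // byte into Imm == 0b01010101, so the operand added is 0x55 | 0x1e00.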
1930    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1931  }
1932
1933  virtual void print(raw_ostream &OS) const;
1934
1935  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1936    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1937    Op->ITMask.Mask = Mask;
1938    Op->StartLoc = S;
1939    Op->EndLoc = S;
1940    return Op;
1941  }
1942
1943  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1944    ARMOperand *Op = new ARMOperand(k_CondCode);
1945    Op->CC.Val = CC;
1946    Op->StartLoc = S;
1947    Op->EndLoc = S;
1948    return Op;
1949  }
1950
1951  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1952    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1953    Op->Cop.Val = CopVal;
1954    Op->StartLoc = S;
1955    Op->EndLoc = S;
1956    return Op;
1957  }
1958
1959  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1960    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1961    Op->Cop.Val = CopVal;
1962    Op->StartLoc = S;
1963    Op->EndLoc = S;
1964    return Op;
1965  }
1966
1967  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1968    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1969    Op->Cop.Val = Val;
1970    Op->StartLoc = S;
1971    Op->EndLoc = E;
1972    return Op;
1973  }
1974
1975  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1976    ARMOperand *Op = new ARMOperand(k_CCOut);
1977    Op->Reg.RegNum = RegNum;
1978    Op->StartLoc = S;
1979    Op->EndLoc = S;
1980    return Op;
1981  }
1982
1983  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1984    ARMOperand *Op = new ARMOperand(k_Token);
1985    Op->Tok.Data = Str.data();
1986    Op->Tok.Length = Str.size();
1987    Op->StartLoc = S;
1988    Op->EndLoc = S;
1989    return Op;
1990  }
1991
1992  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1993    ARMOperand *Op = new ARMOperand(k_Register);
1994    Op->Reg.RegNum = RegNum;
1995    Op->StartLoc = S;
1996    Op->EndLoc = E;
1997    return Op;
1998  }
1999
2000  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2001                                           unsigned SrcReg,
2002                                           unsigned ShiftReg,
2003                                           unsigned ShiftImm,
2004                                           SMLoc S, SMLoc E) {
2005    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2006    Op->RegShiftedReg.ShiftTy = ShTy;
2007    Op->RegShiftedReg.SrcReg = SrcReg;
2008    Op->RegShiftedReg.ShiftReg = ShiftReg;
2009    Op->RegShiftedReg.ShiftImm = ShiftImm;
2010    Op->StartLoc = S;
2011    Op->EndLoc = E;
2012    return Op;
2013  }
2014
2015  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2016                                            unsigned SrcReg,
2017                                            unsigned ShiftImm,
2018                                            SMLoc S, SMLoc E) {
2019    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2020    Op->RegShiftedImm.ShiftTy = ShTy;
2021    Op->RegShiftedImm.SrcReg = SrcReg;
2022    Op->RegShiftedImm.ShiftImm = ShiftImm;
2023    Op->StartLoc = S;
2024    Op->EndLoc = E;
2025    return Op;
2026  }
2027
2028  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2029                                   SMLoc S, SMLoc E) {
2030    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2031    Op->ShifterImm.isASR = isASR;
2032    Op->ShifterImm.Imm = Imm;
2033    Op->StartLoc = S;
2034    Op->EndLoc = E;
2035    return Op;
2036  }
2037
2038  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2039    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2040    Op->RotImm.Imm = Imm;
2041    Op->StartLoc = S;
2042    Op->EndLoc = E;
2043    return Op;
2044  }
2045
2046  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2047                                    SMLoc S, SMLoc E) {
2048    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2049    Op->Bitfield.LSB = LSB;
2050    Op->Bitfield.Width = Width;
2051    Op->StartLoc = S;
2052    Op->EndLoc = E;
2053    return Op;
2054  }
2055
2056  static ARMOperand *
2057  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2058                SMLoc StartLoc, SMLoc EndLoc) {
2059    KindTy Kind = k_RegisterList;
2060
2061    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2062      Kind = k_DPRRegisterList;
2063    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2064             contains(Regs.front().first))
2065      Kind = k_SPRRegisterList;
2066
2067    ARMOperand *Op = new ARMOperand(Kind);
2068    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2069           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2070      Op->Registers.push_back(I->first);
2071    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2072    Op->StartLoc = StartLoc;
2073    Op->EndLoc = EndLoc;
2074    return Op;
2075  }
2076
2077  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2078                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2079    ARMOperand *Op = new ARMOperand(k_VectorList);
2080    Op->VectorList.RegNum = RegNum;
2081    Op->VectorList.Count = Count;
2082    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2083    Op->StartLoc = S;
2084    Op->EndLoc = E;
2085    return Op;
2086  }
2087
2088  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2089                                              bool isDoubleSpaced,
2090                                              SMLoc S, SMLoc E) {
2091    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2092    Op->VectorList.RegNum = RegNum;
2093    Op->VectorList.Count = Count;
2094    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2095    Op->StartLoc = S;
2096    Op->EndLoc = E;
2097    return Op;
2098  }
2099
2100  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2101                                             unsigned Index,
2102                                             bool isDoubleSpaced,
2103                                             SMLoc S, SMLoc E) {
2104    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2105    Op->VectorList.RegNum = RegNum;
2106    Op->VectorList.Count = Count;
2107    Op->VectorList.LaneIndex = Index;
2108    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2109    Op->StartLoc = S;
2110    Op->EndLoc = E;
2111    return Op;
2112  }
2113
2114  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2115                                       MCContext &Ctx) {
2116    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2117    Op->VectorIndex.Val = Idx;
2118    Op->StartLoc = S;
2119    Op->EndLoc = E;
2120    return Op;
2121  }
2122
2123  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2124    ARMOperand *Op = new ARMOperand(k_Immediate);
2125    Op->Imm.Val = Val;
2126    Op->StartLoc = S;
2127    Op->EndLoc = E;
2128    return Op;
2129  }
2130
2131  static ARMOperand *CreateMem(unsigned BaseRegNum,
2132                               const MCConstantExpr *OffsetImm,
2133                               unsigned OffsetRegNum,
2134                               ARM_AM::ShiftOpc ShiftType,
2135                               unsigned ShiftImm,
2136                               unsigned Alignment,
2137                               bool isNegative,
2138                               SMLoc S, SMLoc E) {
2139    ARMOperand *Op = new ARMOperand(k_Memory);
2140    Op->Memory.BaseRegNum = BaseRegNum;
2141    Op->Memory.OffsetImm = OffsetImm;
2142    Op->Memory.OffsetRegNum = OffsetRegNum;
2143    Op->Memory.ShiftType = ShiftType;
2144    Op->Memory.ShiftImm = ShiftImm;
2145    Op->Memory.Alignment = Alignment;
2146    Op->Memory.isNegative = isNegative;
2147    Op->StartLoc = S;
2148    Op->EndLoc = E;
2149    return Op;
2150  }
2151
2152  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2153                                      ARM_AM::ShiftOpc ShiftTy,
2154                                      unsigned ShiftImm,
2155                                      SMLoc S, SMLoc E) {
2156    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2157    Op->PostIdxReg.RegNum = RegNum;
2158    Op->PostIdxReg.isAdd = isAdd;
2159    Op->PostIdxReg.ShiftTy = ShiftTy;
2160    Op->PostIdxReg.ShiftImm = ShiftImm;
2161    Op->StartLoc = S;
2162    Op->EndLoc = E;
2163    return Op;
2164  }
2165
2166  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2167    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2168    Op->MBOpt.Val = Opt;
2169    Op->StartLoc = S;
2170    Op->EndLoc = S;
2171    return Op;
2172  }
2173
2174  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2175    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2176    Op->IFlags.Val = IFlags;
2177    Op->StartLoc = S;
2178    Op->EndLoc = S;
2179    return Op;
2180  }
2181
2182  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2183    ARMOperand *Op = new ARMOperand(k_MSRMask);
2184    Op->MMask.Val = MMask;
2185    Op->StartLoc = S;
2186    Op->EndLoc = S;
2187    return Op;
2188  }
2189};
2190
2191} // end anonymous namespace.
2192
2193void ARMOperand::print(raw_ostream &OS) const {
2194  switch (Kind) {
2195  case k_CondCode:
2196    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2197    break;
2198  case k_CCOut:
2199    OS << "<ccout " << getReg() << ">";
2200    break;
2201  case k_ITCondMask: {
2202    static const char *MaskStr[] = {
2203      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2204      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2205    };
2206    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2207    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2208    break;
2209  }
2210  case k_CoprocNum:
2211    OS << "<coprocessor number: " << getCoproc() << ">";
2212    break;
2213  case k_CoprocReg:
2214    OS << "<coprocessor register: " << getCoproc() << ">";
2215    break;
2216  case k_CoprocOption:
2217    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2218    break;
2219  case k_MSRMask:
2220    OS << "<mask: " << getMSRMask() << ">";
2221    break;
2222  case k_Immediate:
2223    getImm()->print(OS);
2224    break;
2225  case k_MemBarrierOpt:
2226    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2227    break;
2228  case k_Memory:
2229    OS << "<memory "
2230       << " base:" << Memory.BaseRegNum;
2231    OS << ">";
2232    break;
2233  case k_PostIndexRegister:
2234    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2235       << PostIdxReg.RegNum;
2236    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2237      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2238         << PostIdxReg.ShiftImm;
2239    OS << ">";
2240    break;
2241  case k_ProcIFlags: {
2242    OS << "<ARM_PROC::";
2243    unsigned IFlags = getProcIFlags();
2244    for (int i=2; i >= 0; --i)
2245      if (IFlags & (1 << i))
2246        OS << ARM_PROC::IFlagsToString(1 << i);
2247    OS << ">";
2248    break;
2249  }
2250  case k_Register:
2251    OS << "<register " << getReg() << ">";
2252    break;
2253  case k_ShifterImmediate:
2254    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2255       << " #" << ShifterImm.Imm << ">";
2256    break;
2257  case k_ShiftedRegister:
2258    OS << "<so_reg_reg "
2259       << RegShiftedReg.SrcReg << " "
2260       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2261       << " " << RegShiftedReg.ShiftReg << ">";
2262    break;
2263  case k_ShiftedImmediate:
2264    OS << "<so_reg_imm "
2265       << RegShiftedImm.SrcReg << " "
2266       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2267       << " #" << RegShiftedImm.ShiftImm << ">";
2268    break;
2269  case k_RotateImmediate:
2270    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2271    break;
2272  case k_BitfieldDescriptor:
2273    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2274       << ", width: " << Bitfield.Width << ">";
2275    break;
2276  case k_RegisterList:
2277  case k_DPRRegisterList:
2278  case k_SPRRegisterList: {
2279    OS << "<register_list ";
2280
2281    const SmallVectorImpl<unsigned> &RegList = getRegList();
2282    for (SmallVectorImpl<unsigned>::const_iterator
2283           I = RegList.begin(), E = RegList.end(); I != E; ) {
2284      OS << *I;
2285      if (++I < E) OS << ", ";
2286    }
2287
2288    OS << ">";
2289    break;
2290  }
2291  case k_VectorList:
2292    OS << "<vector_list " << VectorList.Count << " * "
2293       << VectorList.RegNum << ">";
2294    break;
2295  case k_VectorListAllLanes:
2296    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2297       << VectorList.RegNum << ">";
2298    break;
2299  case k_VectorListIndexed:
2300    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2301       << VectorList.Count << " * " << VectorList.RegNum << ">";
2302    break;
2303  case k_Token:
2304    OS << "'" << getToken() << "'";
2305    break;
2306  case k_VectorIndex:
2307    OS << "<vectorindex " << getVectorIndex() << ">";
2308    break;
2309  }
2310}
2311
2312/// @name Auto-generated Match Functions
2313/// {
2314
2315static unsigned MatchRegisterName(StringRef Name);
2316
2317/// }
2318
2319bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2320                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2321  StartLoc = Parser.getTok().getLoc();
2322  RegNo = tryParseRegister();
2323  EndLoc = Parser.getTok().getLoc();
2324
2325  return (RegNo == (unsigned)-1);
2326}
2327
2328/// Try to parse a register name.  The token must be an Identifier when called,
2329/// and if it is a register name the token is eaten and the register number is
2330/// returned.  Otherwise return -1.
2331///
2332int ARMAsmParser::tryParseRegister() {
2333  const AsmToken &Tok = Parser.getTok();
2334  if (Tok.isNot(AsmToken::Identifier)) return -1;
2335
2336  std::string lowerCase = Tok.getString().lower();
2337  unsigned RegNum = MatchRegisterName(lowerCase);
2338  if (!RegNum) {
2339    RegNum = StringSwitch<unsigned>(lowerCase)
2340      .Case("r13", ARM::SP)
2341      .Case("r14", ARM::LR)
2342      .Case("r15", ARM::PC)
2343      .Case("ip", ARM::R12)
2344      // Additional register name aliases for 'gas' compatibility.
2345      .Case("a1", ARM::R0)
2346      .Case("a2", ARM::R1)
2347      .Case("a3", ARM::R2)
2348      .Case("a4", ARM::R3)
2349      .Case("v1", ARM::R4)
2350      .Case("v2", ARM::R5)
2351      .Case("v3", ARM::R6)
2352      .Case("v4", ARM::R7)
2353      .Case("v5", ARM::R8)
2354      .Case("v6", ARM::R9)
2355      .Case("v7", ARM::R10)
2356      .Case("v8", ARM::R11)
2357      .Case("sb", ARM::R9)
2358      .Case("sl", ARM::R10)
2359      .Case("fp", ARM::R11)
2360      .Default(0);
2361  }
2362  if (!RegNum) {
2363    // Check for aliases registered via .req. Canonicalize to lower case.
2364    // That's more consistent since register names are case insensitive, and
2365    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2366    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2367    // If no match, return failure.
2368    if (Entry == RegisterReqs.end())
2369      return -1;
2370    Parser.Lex(); // Eat identifier token.
2371    return Entry->getValue();
2372  }
2373
2374  Parser.Lex(); // Eat identifier token.
2375
2376  return RegNum;
2377}
2378
2379// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2380// If a recoverable error occurs, return 1. If an irrecoverable error
2381// occurs, return -1. An irrecoverable error is one where tokens have been
2382// consumed in the process of trying to parse the shifter (i.e., when it is
2383// indeed a shifter operand, but malformed).
2384int ARMAsmParser::tryParseShiftRegister(
2385                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2386  SMLoc S = Parser.getTok().getLoc();
2387  const AsmToken &Tok = Parser.getTok();
2388  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2389
2390  std::string lowerCase = Tok.getString().lower();
2391  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2392      .Case("asl", ARM_AM::lsl)
2393      .Case("lsl", ARM_AM::lsl)
2394      .Case("lsr", ARM_AM::lsr)
2395      .Case("asr", ARM_AM::asr)
2396      .Case("ror", ARM_AM::ror)
2397      .Case("rrx", ARM_AM::rrx)
2398      .Default(ARM_AM::no_shift);
2399
2400  if (ShiftTy == ARM_AM::no_shift)
2401    return 1;
2402
2403  Parser.Lex(); // Eat the operator.
2404
2405  // The source register for the shift has already been added to the
2406  // operand list, so we need to pop it off and combine it into the shifted
2407  // register operand instead.
2408  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2409  if (!PrevOp->isReg())
2410    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2411  int SrcReg = PrevOp->getReg();
2412  int64_t Imm = 0;
2413  int ShiftReg = 0;
2414  if (ShiftTy == ARM_AM::rrx) {
2415    // RRX doesn't have an explicit shift amount. The encoder expects
2416    // the shift register to be the same as the source register. Seems odd,
2417    // but OK.
2418    ShiftReg = SrcReg;
2419  } else {
2420    // Figure out if this is shifted by a constant or a register (for non-RRX).
2421    if (Parser.getTok().is(AsmToken::Hash) ||
2422        Parser.getTok().is(AsmToken::Dollar)) {
2423      Parser.Lex(); // Eat hash.
2424      SMLoc ImmLoc = Parser.getTok().getLoc();
2425      const MCExpr *ShiftExpr = 0;
2426      if (getParser().ParseExpression(ShiftExpr)) {
2427        Error(ImmLoc, "invalid immediate shift value");
2428        return -1;
2429      }
2430      // The expression must be evaluatable as an immediate.
2431      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2432      if (!CE) {
2433        Error(ImmLoc, "invalid immediate shift value");
2434        return -1;
2435      }
2436      // Range check the immediate.
2437      // lsl, ror: 0 <= imm <= 31
2438      // lsr, asr: 0 <= imm <= 32
2439      Imm = CE->getValue();
2440      if (Imm < 0 ||
2441          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2442          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2443        Error(ImmLoc, "immediate shift value out of range");
2444        return -1;
2445      }
2446      // shift by zero is a nop. Always send it through as lsl.
2447      // ('as' compatibility)
2448      if (Imm == 0)
2449        ShiftTy = ARM_AM::lsl;
2450    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2451      ShiftReg = tryParseRegister();
2452      SMLoc L = Parser.getTok().getLoc();
2453      if (ShiftReg == -1) {
2454        Error(L, "expected immediate or register in shift operand");
2455        return -1;
2456      }
2457    } else {
2458      Error(Parser.getTok().getLoc(),
2459            "expected immediate or register in shift operand");
2460      return -1;
2461    }
2462  }
2463
2464  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2465    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2466                                                         ShiftReg, Imm,
2467                                               S, Parser.getTok().getLoc()));
2468  else
2469    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2470                                               S, Parser.getTok().getLoc()));
2471
2472  return 0;
2473}
2474
2475
2476/// Try to parse a register name.  The token must be an Identifier when called.
2477/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2478/// if there is a "writeback". 'true' if it's not a register.
2479///
2480/// TODO this is likely to change to allow different register types and or to
2481/// parse for a specific register type.
2482bool ARMAsmParser::
2483tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2484  SMLoc S = Parser.getTok().getLoc();
2485  int RegNo = tryParseRegister();
2486  if (RegNo == -1)
2487    return true;
2488
2489  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2490
2491  const AsmToken &ExclaimTok = Parser.getTok();
2492  if (ExclaimTok.is(AsmToken::Exclaim)) {
2493    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2494                                               ExclaimTok.getLoc()));
2495    Parser.Lex(); // Eat exclaim token
2496    return false;
2497  }
2498
2499  // Also check for an index operand. This is only legal for vector registers,
2500  // but that'll get caught OK in operand matching, so we don't need to
2501  // explicitly filter everything else out here.
2502  if (Parser.getTok().is(AsmToken::LBrac)) {
2503    SMLoc SIdx = Parser.getTok().getLoc();
2504    Parser.Lex(); // Eat left bracket token.
2505
2506    const MCExpr *ImmVal;
2507    if (getParser().ParseExpression(ImmVal))
2508      return true;
2509    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2510    if (!MCE) {
2511      TokError("immediate value expected for vector index");
2512      return true;
2513    }
2514
2515    SMLoc E = Parser.getTok().getLoc();
2516    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2517      Error(E, "']' expected");
2518      return true;
2519    }
2520
2521    Parser.Lex(); // Eat right bracket token.
2522
2523    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2524                                                     SIdx, E,
2525                                                     getContext()));
2526  }
2527
2528  return false;
2529}
2530
2531/// MatchCoprocessorOperandName - Try to parse an coprocessor related
2532/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2533/// "c5", ...
2534static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2535  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2536  // but efficient.
2537  switch (Name.size()) {
2538  default: return -1;
2539  case 2:
2540    if (Name[0] != CoprocOp)
2541      return -1;
2542    switch (Name[1]) {
2543    default:  return -1;
2544    case '0': return 0;
2545    case '1': return 1;
2546    case '2': return 2;
2547    case '3': return 3;
2548    case '4': return 4;
2549    case '5': return 5;
2550    case '6': return 6;
2551    case '7': return 7;
2552    case '8': return 8;
2553    case '9': return 9;
2554    }
2555  case 3:
2556    if (Name[0] != CoprocOp || Name[1] != '1')
2557      return -1;
2558    switch (Name[2]) {
2559    default:  return -1;
2560    case '0': return 10;
2561    case '1': return 11;
2562    case '2': return 12;
2563    case '3': return 13;
2564    case '4': return 14;
2565    case '5': return 15;
2566    }
2567  }
2568}
2569
2570/// parseITCondCode - Try to parse a condition code for an IT instruction.
2571ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2572parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2573  SMLoc S = Parser.getTok().getLoc();
2574  const AsmToken &Tok = Parser.getTok();
2575  if (!Tok.is(AsmToken::Identifier))
2576    return MatchOperand_NoMatch;
2577  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2578    .Case("eq", ARMCC::EQ)
2579    .Case("ne", ARMCC::NE)
2580    .Case("hs", ARMCC::HS)
2581    .Case("cs", ARMCC::HS)
2582    .Case("lo", ARMCC::LO)
2583    .Case("cc", ARMCC::LO)
2584    .Case("mi", ARMCC::MI)
2585    .Case("pl", ARMCC::PL)
2586    .Case("vs", ARMCC::VS)
2587    .Case("vc", ARMCC::VC)
2588    .Case("hi", ARMCC::HI)
2589    .Case("ls", ARMCC::LS)
2590    .Case("ge", ARMCC::GE)
2591    .Case("lt", ARMCC::LT)
2592    .Case("gt", ARMCC::GT)
2593    .Case("le", ARMCC::LE)
2594    .Case("al", ARMCC::AL)
2595    .Default(~0U);
2596  if (CC == ~0U)
2597    return MatchOperand_NoMatch;
2598  Parser.Lex(); // Eat the token.
2599
2600  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2601
2602  return MatchOperand_Success;
2603}
2604
2605/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2606/// token must be an Identifier when called, and if it is a coprocessor
2607/// number, the token is eaten and the operand is added to the operand list.
2608ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2609parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2610  SMLoc S = Parser.getTok().getLoc();
2611  const AsmToken &Tok = Parser.getTok();
2612  if (Tok.isNot(AsmToken::Identifier))
2613    return MatchOperand_NoMatch;
2614
2615  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2616  if (Num == -1)
2617    return MatchOperand_NoMatch;
2618
2619  Parser.Lex(); // Eat identifier token.
2620  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2621  return MatchOperand_Success;
2622}
2623
2624/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2625/// token must be an Identifier when called, and if it is a coprocessor
2626/// number, the token is eaten and the operand is added to the operand list.
2627ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2628parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2629  SMLoc S = Parser.getTok().getLoc();
2630  const AsmToken &Tok = Parser.getTok();
2631  if (Tok.isNot(AsmToken::Identifier))
2632    return MatchOperand_NoMatch;
2633
2634  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2635  if (Reg == -1)
2636    return MatchOperand_NoMatch;
2637
2638  Parser.Lex(); // Eat identifier token.
2639  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2640  return MatchOperand_Success;
2641}
2642
2643/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2644/// coproc_option : '{' imm0_255 '}'
2645ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2646parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2647  SMLoc S = Parser.getTok().getLoc();
2648
2649  // If this isn't a '{', this isn't a coprocessor immediate operand.
2650  if (Parser.getTok().isNot(AsmToken::LCurly))
2651    return MatchOperand_NoMatch;
2652  Parser.Lex(); // Eat the '{'
2653
2654  const MCExpr *Expr;
2655  SMLoc Loc = Parser.getTok().getLoc();
2656  if (getParser().ParseExpression(Expr)) {
2657    Error(Loc, "illegal expression");
2658    return MatchOperand_ParseFail;
2659  }
2660  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2661  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2662    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2663    return MatchOperand_ParseFail;
2664  }
2665  int Val = CE->getValue();
2666
2667  // Check for and consume the closing '}'
2668  if (Parser.getTok().isNot(AsmToken::RCurly))
2669    return MatchOperand_ParseFail;
2670  SMLoc E = Parser.getTok().getLoc();
2671  Parser.Lex(); // Eat the '}'
2672
2673  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2674  return MatchOperand_Success;
2675}
2676
2677// For register list parsing, we need to map from raw GPR register numbering
2678// to the enumeration values. The enumeration values aren't sorted by
2679// register number due to our using "sp", "lr" and "pc" as canonical names.
2680static unsigned getNextRegister(unsigned Reg) {
2681  // If this is a GPR, we need to do it manually, otherwise we can rely
2682  // on the sort ordering of the enumeration since the other reg-classes
2683  // are sane.
2684  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2685    return Reg + 1;
2686  switch(Reg) {
2687  default: llvm_unreachable("Invalid GPR number!");
2688  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2689  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2690  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2691  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2692  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2693  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2694  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2695  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2696  }
2697}
2698
2699// Return the low-subreg of a given Q register.
2700static unsigned getDRegFromQReg(unsigned QReg) {
2701  switch (QReg) {
2702  default: llvm_unreachable("expected a Q register!");
2703  case ARM::Q0:  return ARM::D0;
2704  case ARM::Q1:  return ARM::D2;
2705  case ARM::Q2:  return ARM::D4;
2706  case ARM::Q3:  return ARM::D6;
2707  case ARM::Q4:  return ARM::D8;
2708  case ARM::Q5:  return ARM::D10;
2709  case ARM::Q6:  return ARM::D12;
2710  case ARM::Q7:  return ARM::D14;
2711  case ARM::Q8:  return ARM::D16;
2712  case ARM::Q9:  return ARM::D18;
2713  case ARM::Q10: return ARM::D20;
2714  case ARM::Q11: return ARM::D22;
2715  case ARM::Q12: return ARM::D24;
2716  case ARM::Q13: return ARM::D26;
2717  case ARM::Q14: return ARM::D28;
2718  case ARM::Q15: return ARM::D30;
2719  }
2720}
2721
2722/// Parse a register list.
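/// e.g. "{r0, r1, r4-r6}" or "{d0-d3}". As an extension, a Q register such as
/// "q1" is also accepted and treated as its two D sub-registers.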
2723bool ARMAsmParser::
2724parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2725  assert(Parser.getTok().is(AsmToken::LCurly) &&
2726         "Token is not a Left Curly Brace");
2727  SMLoc S = Parser.getTok().getLoc();
2728  Parser.Lex(); // Eat '{' token.
2729  SMLoc RegLoc = Parser.getTok().getLoc();
2730
2731  // Check the first register in the list to see what register class
2732  // this is a list of.
2733  int Reg = tryParseRegister();
2734  if (Reg == -1)
2735    return Error(RegLoc, "register expected");
2736
2737  // The reglist instructions have at most 16 registers, so reserve
2738  // space for that many.
2739  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2740
2741  // Allow Q regs and just interpret them as the two D sub-registers.
2742  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2743    Reg = getDRegFromQReg(Reg);
2744    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2745    ++Reg;
2746  }
2747  const MCRegisterClass *RC;
2748  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2749    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2750  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2751    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2752  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2753    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2754  else
2755    return Error(RegLoc, "invalid register in register list");
2756
2757  // Store the register.
2758  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2759
2760  // This starts immediately after the first register token in the list,
2761  // so we can see either a comma or a minus (range separator) as a legal
2762  // next token.
2763  while (Parser.getTok().is(AsmToken::Comma) ||
2764         Parser.getTok().is(AsmToken::Minus)) {
2765    if (Parser.getTok().is(AsmToken::Minus)) {
2766      Parser.Lex(); // Eat the minus.
2767      SMLoc EndLoc = Parser.getTok().getLoc();
2768      int EndReg = tryParseRegister();
2769      if (EndReg == -1)
2770        return Error(EndLoc, "register expected");
2771      // Allow Q regs and just interpret them as the two D sub-registers.
2772      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2773        EndReg = getDRegFromQReg(EndReg) + 1;
2774      // If the register is the same as the start reg, there's nothing
2775      // more to do.
2776      if (Reg == EndReg)
2777        continue;
2778      // The register must be in the same register class as the first.
2779      if (!RC->contains(EndReg))
2780        return Error(EndLoc, "invalid register in register list");
2781      // Ranges must go from low to high.
2782      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2783        return Error(EndLoc, "bad range in register list");
2784
2785      // Add all the registers in the range to the register list.
2786      while (Reg != EndReg) {
2787        Reg = getNextRegister(Reg);
2788        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2789      }
2790      continue;
2791    }
2792    Parser.Lex(); // Eat the comma.
2793    RegLoc = Parser.getTok().getLoc();
2794    int OldReg = Reg;
2795    const AsmToken RegTok = Parser.getTok();
2796    Reg = tryParseRegister();
2797    if (Reg == -1)
2798      return Error(RegLoc, "register expected");
2799    // Allow Q regs and just interpret them as the two D sub-registers.
2800    bool isQReg = false;
2801    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2802      Reg = getDRegFromQReg(Reg);
2803      isQReg = true;
2804    }
2805    // The register must be in the same register class as the first.
2806    if (!RC->contains(Reg))
2807      return Error(RegLoc, "invalid register in register list");
2808    // List must be monotonically increasing.
2809    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2810      return Error(RegLoc, "register list not in ascending order");
2811    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2812      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2813              ") in register list");
2814      continue;
2815    }
2816    // VFP register lists must also be contiguous.
2817    // It's OK to use the enumeration values directly here rather, as the
2818    // VFP register classes have the enum sorted properly.
2819    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2820        Reg != OldReg + 1)
2821      return Error(RegLoc, "non-contiguous register range");
2822    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2823    if (isQReg)
2824      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2825  }
2826
2827  SMLoc E = Parser.getTok().getLoc();
2828  if (Parser.getTok().isNot(AsmToken::RCurly))
2829    return Error(E, "'}' expected");
2830  Parser.Lex(); // Eat '}' token.
2831
2832  // Push the register list operand.
2833  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2834
2835  // The ARM system instruction variants for LDM/STM have a '^' token here.
2836  if (Parser.getTok().is(AsmToken::Caret)) {
2837    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2838    Parser.Lex(); // Eat '^' token.
2839  }
2840
2841  return false;
2842}
2843
2844// Helper function to parse the lane index for vector lists.
2845ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2846parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2847  Index = 0; // Always return a defined index value.
2848  if (Parser.getTok().is(AsmToken::LBrac)) {
2849    Parser.Lex(); // Eat the '['.
2850    if (Parser.getTok().is(AsmToken::RBrac)) {
2851      // "Dn[]" is the 'all lanes' syntax.
2852      LaneKind = AllLanes;
2853      Parser.Lex(); // Eat the ']'.
2854      return MatchOperand_Success;
2855    }
2856    const MCExpr *LaneIndex;
2857    SMLoc Loc = Parser.getTok().getLoc();
2858    if (getParser().ParseExpression(LaneIndex)) {
2859      Error(Loc, "illegal expression");
2860      return MatchOperand_ParseFail;
2861    }
2862    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2863    if (!CE) {
2864      Error(Loc, "lane index must be empty or an integer");
2865      return MatchOperand_ParseFail;
2866    }
2867    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2868      Error(Parser.getTok().getLoc(), "']' expected");
2869      return MatchOperand_ParseFail;
2870    }
2871    Parser.Lex(); // Eat the ']'.
2872    int64_t Val = CE->getValue();
2873
2874    // FIXME: Make this range check context sensitive for .8, .16, .32.
2875    if (Val < 0 || Val > 7) {
2876      Error(Parser.getTok().getLoc(), "lane index out of range");
2877      return MatchOperand_ParseFail;
2878    }
2879    Index = Val;
2880    LaneKind = IndexedLane;
2881    return MatchOperand_Success;
2882  }
2883  LaneKind = NoLanes;
2884  return MatchOperand_Success;
2885}
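
// The three lane-suffix shapes distinguished above (illustrative):
//   d0       -> NoLanes
//   d0[]     -> AllLanes
//   d0[2]    -> IndexedLane with Index == 2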
2886
2887/// parseVectorList - Parse a vector register list.
2888ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2889parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2890  VectorLaneTy LaneKind;
2891  unsigned LaneIndex;
2892  SMLoc S = Parser.getTok().getLoc();
2893  // As an extension (to match gas), support a plain D register or Q register
2894  // (without enclosing curly braces) as a single or double entry list,
2895  // respectively.
2896  if (Parser.getTok().is(AsmToken::Identifier)) {
2897    int Reg = tryParseRegister();
2898    if (Reg == -1)
2899      return MatchOperand_NoMatch;
2900    SMLoc E = Parser.getTok().getLoc();
2901    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2902      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2903      if (Res != MatchOperand_Success)
2904        return Res;
2905      switch (LaneKind) {
2906      case NoLanes:
2907        E = Parser.getTok().getLoc();
2908        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2909        break;
2910      case AllLanes:
2911        E = Parser.getTok().getLoc();
2912        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2913                                                                S, E));
2914        break;
2915      case IndexedLane:
2916        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2917                                                               LaneIndex,
2918                                                               false, S, E));
2919        break;
2920      }
2921      return MatchOperand_Success;
2922    }
2923    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2924      Reg = getDRegFromQReg(Reg);
2925      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2926      if (Res != MatchOperand_Success)
2927        return Res;
2928      switch (LaneKind) {
2929      case NoLanes:
2930        E = Parser.getTok().getLoc();
2931        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2932        break;
2933      case AllLanes:
2934        E = Parser.getTok().getLoc();
2935        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2936                                                                S, E));
2937        break;
2938      case IndexedLane:
2939        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2940                                                               LaneIndex,
2941                                                               false, S, E));
2942        break;
2943      }
2944      return MatchOperand_Success;
2945    }
2946    Error(S, "vector register expected");
2947    return MatchOperand_ParseFail;
2948  }
2949
2950  if (Parser.getTok().isNot(AsmToken::LCurly))
2951    return MatchOperand_NoMatch;
2952
2953  Parser.Lex(); // Eat '{' token.
2954  SMLoc RegLoc = Parser.getTok().getLoc();
2955
2956  int Reg = tryParseRegister();
2957  if (Reg == -1) {
2958    Error(RegLoc, "register expected");
2959    return MatchOperand_ParseFail;
2960  }
2961  unsigned Count = 1;
2962  int Spacing = 0;
2963  unsigned FirstReg = Reg;
2964  // The list is of D registers, but we also allow Q regs and just interpret
2965  // them as the two D sub-registers.
2966  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2967    FirstReg = Reg = getDRegFromQReg(Reg);
2968    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2969                 // it's ambiguous with four-register single spaced.
2970    ++Reg;
2971    ++Count;
2972  }
2973  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2974    return MatchOperand_ParseFail;
2975
2976  while (Parser.getTok().is(AsmToken::Comma) ||
2977         Parser.getTok().is(AsmToken::Minus)) {
2978    if (Parser.getTok().is(AsmToken::Minus)) {
2979      if (!Spacing)
2980        Spacing = 1; // Register range implies a single spaced list.
2981      else if (Spacing == 2) {
2982        Error(Parser.getTok().getLoc(),
2983              "sequential registers in double spaced list");
2984        return MatchOperand_ParseFail;
2985      }
2986      Parser.Lex(); // Eat the minus.
2987      SMLoc EndLoc = Parser.getTok().getLoc();
2988      int EndReg = tryParseRegister();
2989      if (EndReg == -1) {
2990        Error(EndLoc, "register expected");
2991        return MatchOperand_ParseFail;
2992      }
2993      // Allow Q regs and just interpret them as the two D sub-registers.
2994      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2995        EndReg = getDRegFromQReg(EndReg) + 1;
2996      // If the register is the same as the start reg, there's nothing
2997      // more to do.
2998      if (Reg == EndReg)
2999        continue;
3000      // The register must be in the same register class as the first.
3001      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3002        Error(EndLoc, "invalid register in register list");
3003        return MatchOperand_ParseFail;
3004      }
3005      // Ranges must go from low to high.
3006      if (Reg > EndReg) {
3007        Error(EndLoc, "bad range in register list");
3008        return MatchOperand_ParseFail;
3009      }
3010      // Parse the lane specifier if present.
3011      VectorLaneTy NextLaneKind;
3012      unsigned NextLaneIndex;
3013      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3014        return MatchOperand_ParseFail;
3015      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3016        Error(EndLoc, "mismatched lane index in register list");
3017        return MatchOperand_ParseFail;
3018      }
3019      EndLoc = Parser.getTok().getLoc();
3020
3021      // Add all the registers in the range to the register list.
3022      Count += EndReg - Reg;
3023      Reg = EndReg;
3024      continue;
3025    }
3026    Parser.Lex(); // Eat the comma.
3027    RegLoc = Parser.getTok().getLoc();
3028    int OldReg = Reg;
3029    Reg = tryParseRegister();
3030    if (Reg == -1) {
3031      Error(RegLoc, "register expected");
3032      return MatchOperand_ParseFail;
3033    }
3034    // Vector register lists must be contiguous.
3035    // It's OK to use the enumeration values directly here, as the
3036    // VFP register classes have the enum sorted properly.
3037    //
3038    // The list is of D registers, but we also allow Q regs and just interpret
3039    // them as the two D sub-registers.
3040    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3041      if (!Spacing)
3042        Spacing = 1; // Register range implies a single spaced list.
3043      else if (Spacing == 2) {
3044        Error(RegLoc,
3045            "invalid register in double-spaced list (must be 'D' register)");
3046        return MatchOperand_ParseFail;
3047      }
3048      Reg = getDRegFromQReg(Reg);
3049      if (Reg != OldReg + 1) {
3050        Error(RegLoc, "non-contiguous register range");
3051        return MatchOperand_ParseFail;
3052      }
3053      ++Reg;
3054      Count += 2;
3055      // Parse the lane specifier if present.
3056      VectorLaneTy NextLaneKind;
3057      unsigned NextLaneIndex;
3058      SMLoc EndLoc = Parser.getTok().getLoc();
3059      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3060        return MatchOperand_ParseFail;
3061      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3062        Error(EndLoc, "mismatched lane index in register list");
3063        return MatchOperand_ParseFail;
3064      }
3065      continue;
3066    }
3067    // Normal D register.
3068    // Figure out the register spacing (single or double) of the list if
3069    // we don't know it already.
3070    if (!Spacing)
3071      Spacing = 1 + (Reg == OldReg + 2);
3072
3073    // Just check that it's contiguous and keep going.
3074    if (Reg != OldReg + Spacing) {
3075      Error(RegLoc, "non-contiguous register range");
3076      return MatchOperand_ParseFail;
3077    }
3078    ++Count;
3079    // Parse the lane specifier if present.
3080    VectorLaneTy NextLaneKind;
3081    unsigned NextLaneIndex;
3082    SMLoc EndLoc = Parser.getTok().getLoc();
3083    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3084      return MatchOperand_ParseFail;
3085    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3086      Error(EndLoc, "mismatched lane index in register list");
3087      return MatchOperand_ParseFail;
3088    }
3089  }
3090
3091  SMLoc E = Parser.getTok().getLoc();
3092  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3093    Error(E, "'}' expected");
3094    return MatchOperand_ParseFail;
3095  }
3096  Parser.Lex(); // Eat '}' token.
3097
3098  switch (LaneKind) {
3099  case NoLanes:
3100    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3101                                                    (Spacing == 2), S, E));
3102    break;
3103  case AllLanes:
3104    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3105                                                            (Spacing == 2),
3106                                                            S, E));
3107    break;
3108  case IndexedLane:
3109    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3110                                                           LaneIndex,
3111                                                           (Spacing == 2),
3112                                                           S, E));
3113    break;
3114  }
3115  return MatchOperand_Success;
3116}
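
// Some representative NEON list spellings handled above (illustrative):
//   {d0, d1, d2, d3}     @ single-spaced list
//   {d0-d3}              @ range form of the same list
//   {d0, d2, d4, d6}     @ double-spaced list
//   {d0[], d1[]}         @ all-lanes list
//   {d0[1], d1[1]}       @ indexed list; lane indices must match
//   q0                   @ gas-style extension, equivalent to {d0, d1}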
3117
3118/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3119ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3120parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3121  SMLoc S = Parser.getTok().getLoc();
3122  const AsmToken &Tok = Parser.getTok();
3123  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3124  StringRef OptStr = Tok.getString();
3125
3126  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3127    .Case("sy",    ARM_MB::SY)
3128    .Case("st",    ARM_MB::ST)
3129    .Case("sh",    ARM_MB::ISH)
3130    .Case("ish",   ARM_MB::ISH)
3131    .Case("shst",  ARM_MB::ISHST)
3132    .Case("ishst", ARM_MB::ISHST)
3133    .Case("nsh",   ARM_MB::NSH)
3134    .Case("un",    ARM_MB::NSH)
3135    .Case("nshst", ARM_MB::NSHST)
3136    .Case("unst",  ARM_MB::NSHST)
3137    .Case("osh",   ARM_MB::OSH)
3138    .Case("oshst", ARM_MB::OSHST)
3139    .Default(~0U);
3140
3141  if (Opt == ~0U)
3142    return MatchOperand_NoMatch;
3143
3144  Parser.Lex(); // Eat identifier token.
3145  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3146  return MatchOperand_Success;
3147}
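
// Example barrier operands mapped by the StringSwitch above (illustrative):
//   dmb ish       @ ARM_MB::ISH ("sh" is accepted as a legacy alias)
//   dsb sy        @ ARM_MB::SY
//   dmb oshst     @ ARM_MB::OSHST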
3148
3149/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3150ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3151parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3152  SMLoc S = Parser.getTok().getLoc();
3153  const AsmToken &Tok = Parser.getTok();
3154  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3155  StringRef IFlagsStr = Tok.getString();
3156
3157  // An iflags string of "none" is interpreted to mean that none of the AIF
3158  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3159  unsigned IFlags = 0;
3160  if (IFlagsStr != "none") {
3161    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3162      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3163        .Case("a", ARM_PROC::A)
3164        .Case("i", ARM_PROC::I)
3165        .Case("f", ARM_PROC::F)
3166        .Default(~0U);
3167
3168      // If some specific iflag is already set, it means that some letter is
3169      // present more than once, which is not acceptable.
3170      if (Flag == ~0U || (IFlags & Flag))
3171        return MatchOperand_NoMatch;
3172
3173      IFlags |= Flag;
3174    }
3175  }
3176
3177  Parser.Lex(); // Eat identifier token.
3178  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3179  return MatchOperand_Success;
3180}
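
// Example CPS iflags operands accepted here (illustrative):
//   cpsie if      @ IFlags == I|F
//   cpsid aif     @ IFlags == A|I|F
//   cpsie none    @ parses with IFlags == 0 (see the comment above)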
3181
3182/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3183ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3184parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3185  SMLoc S = Parser.getTok().getLoc();
3186  const AsmToken &Tok = Parser.getTok();
3187  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3188  StringRef Mask = Tok.getString();
3189
3190  if (isMClass()) {
3191    // See ARMv6-M 10.1.1
3192    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3193      .Case("apsr", 0)
3194      .Case("iapsr", 1)
3195      .Case("eapsr", 2)
3196      .Case("xpsr", 3)
3197      .Case("ipsr", 5)
3198      .Case("epsr", 6)
3199      .Case("iepsr", 7)
3200      .Case("msp", 8)
3201      .Case("psp", 9)
3202      .Case("primask", 16)
3203      .Case("basepri", 17)
3204      .Case("basepri_max", 18)
3205      .Case("faultmask", 19)
3206      .Case("control", 20)
3207      .Default(~0U);
3208
3209    if (FlagsVal == ~0U)
3210      return MatchOperand_NoMatch;
3211
3212    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3213      // basepri, basepri_max and faultmask only valid for V7m.
3214      return MatchOperand_NoMatch;
3215
3216    Parser.Lex(); // Eat identifier token.
3217    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3218    return MatchOperand_Success;
3219  }
3220
3221  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3222  size_t Start = 0, Next = Mask.find('_');
3223  StringRef Flags = "";
3224  std::string SpecReg = Mask.slice(Start, Next).lower();
3225  if (Next != StringRef::npos)
3226    Flags = Mask.slice(Next+1, Mask.size());
3227
3228  // FlagsVal contains the complete mask:
3229  // 3-0: Mask
3230  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3231  unsigned FlagsVal = 0;
3232
3233  if (SpecReg == "apsr") {
3234    FlagsVal = StringSwitch<unsigned>(Flags)
3235    .Case("nzcvq",  0x8) // same as CPSR_f
3236    .Case("g",      0x4) // same as CPSR_s
3237    .Case("nzcvqg", 0xc) // same as CPSR_fs
3238    .Default(~0U);
3239
3240    if (FlagsVal == ~0U) {
3241      if (!Flags.empty())
3242        return MatchOperand_NoMatch;
3243      else
3244        FlagsVal = 8; // No flag
3245    }
3246  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3247    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3248      Flags = "fc";
3249    for (int i = 0, e = Flags.size(); i != e; ++i) {
3250      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3251      .Case("c", 1)
3252      .Case("x", 2)
3253      .Case("s", 4)
3254      .Case("f", 8)
3255      .Default(~0U);
3256
3257      // If some specific flag is already set, it means that some letter is
3258      // present more than once, which is not acceptable.
3259      if (FlagsVal == ~0U || (FlagsVal & Flag))
3260        return MatchOperand_NoMatch;
3261      FlagsVal |= Flag;
3262    }
3263  } else // No match for special register.
3264    return MatchOperand_NoMatch;
3265
3266  // Special register without flags is NOT equivalent to "fc" flags.
3267  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3268  // two lines would enable gas compatibility at the expense of breaking
3269  // round-tripping.
3270  //
3271  // if (!FlagsVal)
3272  //  FlagsVal = 0x9;
3273
3274  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3275  if (SpecReg == "spsr")
3276    FlagsVal |= 16;
3277
3278  Parser.Lex(); // Eat identifier token.
3279  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3280  return MatchOperand_Success;
3281}
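
// Example MSR mask operands and the FlagsVal produced above (illustrative):
//   msr apsr_nzcvq, r0     @ 0x8 (same as cpsr_f)
//   msr cpsr_fc, r1        @ 0x9
//   msr spsr_fsxc, r2      @ 0xf with bit 4 set for SPSR
//   msr primask, r0        @ M-class encoding 16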
3282
3283ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3284parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3285            int Low, int High) {
3286  const AsmToken &Tok = Parser.getTok();
3287  if (Tok.isNot(AsmToken::Identifier)) {
3288    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3289    return MatchOperand_ParseFail;
3290  }
3291  StringRef ShiftName = Tok.getString();
3292  std::string LowerOp = Op.lower();
3293  std::string UpperOp = Op.upper();
3294  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3295    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3296    return MatchOperand_ParseFail;
3297  }
3298  Parser.Lex(); // Eat shift type token.
3299
3300  // There must be a '#' and a shift amount.
3301  if (Parser.getTok().isNot(AsmToken::Hash) &&
3302      Parser.getTok().isNot(AsmToken::Dollar)) {
3303    Error(Parser.getTok().getLoc(), "'#' expected");
3304    return MatchOperand_ParseFail;
3305  }
3306  Parser.Lex(); // Eat hash token.
3307
3308  const MCExpr *ShiftAmount;
3309  SMLoc Loc = Parser.getTok().getLoc();
3310  if (getParser().ParseExpression(ShiftAmount)) {
3311    Error(Loc, "illegal expression");
3312    return MatchOperand_ParseFail;
3313  }
3314  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3315  if (!CE) {
3316    Error(Loc, "constant expression expected");
3317    return MatchOperand_ParseFail;
3318  }
3319  int Val = CE->getValue();
3320  if (Val < Low || Val > High) {
3321    Error(Loc, "immediate value out of range");
3322    return MatchOperand_ParseFail;
3323  }
3324
3325  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3326
3327  return MatchOperand_Success;
3328}
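
// Example PKH shift operands parsed here (illustrative); the caller supplies
// Op and the [Low,High] range, e.g. 'lsl' [0,31] for PKHBT and 'asr' [1,32]
// for PKHTB:
//   pkhbt r0, r1, r2, lsl #8
//   pkhtb r0, r1, r2, asr #16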
3329
3330ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3331parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3332  const AsmToken &Tok = Parser.getTok();
3333  SMLoc S = Tok.getLoc();
3334  if (Tok.isNot(AsmToken::Identifier)) {
3335    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3336    return MatchOperand_ParseFail;
3337  }
3338  int Val = StringSwitch<int>(Tok.getString())
3339    .Case("be", 1)
3340    .Case("le", 0)
3341    .Default(-1);
3342  Parser.Lex(); // Eat the token.
3343
3344  if (Val == -1) {
3345    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3346    return MatchOperand_ParseFail;
3347  }
3348  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3349                                                                  getContext()),
3350                                           S, Parser.getTok().getLoc()));
3351  return MatchOperand_Success;
3352}
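
// Example SETEND operands (illustrative): 'be' encodes as 1 and 'le' as 0.
//   setend be
//   setend le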
3353
3354/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3355/// instructions. Legal values are:
3356///     lsl #n  'n' in [0,31]
3357///     asr #n  'n' in [1,32]
3358///             n == 32 encoded as n == 0.
3359ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3360parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3361  const AsmToken &Tok = Parser.getTok();
3362  SMLoc S = Tok.getLoc();
3363  if (Tok.isNot(AsmToken::Identifier)) {
3364    Error(S, "shift operator 'asr' or 'lsl' expected");
3365    return MatchOperand_ParseFail;
3366  }
3367  StringRef ShiftName = Tok.getString();
3368  bool isASR;
3369  if (ShiftName == "lsl" || ShiftName == "LSL")
3370    isASR = false;
3371  else if (ShiftName == "asr" || ShiftName == "ASR")
3372    isASR = true;
3373  else {
3374    Error(S, "shift operator 'asr' or 'lsl' expected");
3375    return MatchOperand_ParseFail;
3376  }
3377  Parser.Lex(); // Eat the operator.
3378
3379  // A '#' and a shift amount.
3380  if (Parser.getTok().isNot(AsmToken::Hash) &&
3381      Parser.getTok().isNot(AsmToken::Dollar)) {
3382    Error(Parser.getTok().getLoc(), "'#' expected");
3383    return MatchOperand_ParseFail;
3384  }
3385  Parser.Lex(); // Eat hash token.
3386
3387  const MCExpr *ShiftAmount;
3388  SMLoc E = Parser.getTok().getLoc();
3389  if (getParser().ParseExpression(ShiftAmount)) {
3390    Error(E, "malformed shift expression");
3391    return MatchOperand_ParseFail;
3392  }
3393  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3394  if (!CE) {
3395    Error(E, "shift amount must be an immediate");
3396    return MatchOperand_ParseFail;
3397  }
3398
3399  int64_t Val = CE->getValue();
3400  if (isASR) {
3401    // Shift amount must be in [1,32]
3402    if (Val < 1 || Val > 32) {
3403      Error(E, "'asr' shift amount must be in range [1,32]");
3404      return MatchOperand_ParseFail;
3405    }
3406    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3407    if (isThumb() && Val == 32) {
3408      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3409      return MatchOperand_ParseFail;
3410    }
3411    if (Val == 32) Val = 0;
3412  } else {
3413    // Shift amount must be in [0,31]
3414    if (Val < 0 || Val > 31) {
3415      Error(E, "'lsl' shift amount must be in range [0,31]");
3416      return MatchOperand_ParseFail;
3417    }
3418  }
3419
3420  E = Parser.getTok().getLoc();
3421  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3422
3423  return MatchOperand_Success;
3424}
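
// Example saturating shift operands accepted here (illustrative):
//   ssat r0, #8, r1, lsl #4
//   usat r0, #7, r1, asr #32     @ ARM mode only; encoded as asr #0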
3425
3426/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3427/// of instructions. Legal values are:
3428///     ror #n  'n' in {0, 8, 16, 24}
3429ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3430parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3431  const AsmToken &Tok = Parser.getTok();
3432  SMLoc S = Tok.getLoc();
3433  if (Tok.isNot(AsmToken::Identifier))
3434    return MatchOperand_NoMatch;
3435  StringRef ShiftName = Tok.getString();
3436  if (ShiftName != "ror" && ShiftName != "ROR")
3437    return MatchOperand_NoMatch;
3438  Parser.Lex(); // Eat the operator.
3439
3440  // A '#' and a rotate amount.
3441  if (Parser.getTok().isNot(AsmToken::Hash) &&
3442      Parser.getTok().isNot(AsmToken::Dollar)) {
3443    Error(Parser.getTok().getLoc(), "'#' expected");
3444    return MatchOperand_ParseFail;
3445  }
3446  Parser.Lex(); // Eat hash token.
3447
3448  const MCExpr *ShiftAmount;
3449  SMLoc E = Parser.getTok().getLoc();
3450  if (getParser().ParseExpression(ShiftAmount)) {
3451    Error(E, "malformed rotate expression");
3452    return MatchOperand_ParseFail;
3453  }
3454  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3455  if (!CE) {
3456    Error(E, "rotate amount must be an immediate");
3457    return MatchOperand_ParseFail;
3458  }
3459
3460  int64_t Val = CE->getValue();
3461  // Rotate amount must be in {0, 8, 16, 24}; accepting 0 is an undocumented
3462  // extension, as zero is normally represented in asm by omitting the rotate
3463  // operand entirely.
3464  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3465    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3466    return MatchOperand_ParseFail;
3467  }
3468
3469  E = Parser.getTok().getLoc();
3470  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3471
3472  return MatchOperand_Success;
3473}
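
// Example extend-with-rotate operands parsed here (illustrative):
//   sxtb r0, r1, ror #16
//   uxth r2, r3, ror #8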
3474
3475ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3476parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3477  SMLoc S = Parser.getTok().getLoc();
3478  // The bitfield descriptor is really two operands, the LSB and the width.
3479  if (Parser.getTok().isNot(AsmToken::Hash) &&
3480      Parser.getTok().isNot(AsmToken::Dollar)) {
3481    Error(Parser.getTok().getLoc(), "'#' expected");
3482    return MatchOperand_ParseFail;
3483  }
3484  Parser.Lex(); // Eat hash token.
3485
3486  const MCExpr *LSBExpr;
3487  SMLoc E = Parser.getTok().getLoc();
3488  if (getParser().ParseExpression(LSBExpr)) {
3489    Error(E, "malformed immediate expression");
3490    return MatchOperand_ParseFail;
3491  }
3492  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3493  if (!CE) {
3494    Error(E, "'lsb' operand must be an immediate");
3495    return MatchOperand_ParseFail;
3496  }
3497
3498  int64_t LSB = CE->getValue();
3499  // The LSB must be in the range [0,31]
3500  if (LSB < 0 || LSB > 31) {
3501    Error(E, "'lsb' operand must be in the range [0,31]");
3502    return MatchOperand_ParseFail;
3503  }
3504  E = Parser.getTok().getLoc();
3505
3506  // Expect another immediate operand.
3507  if (Parser.getTok().isNot(AsmToken::Comma)) {
3508    Error(Parser.getTok().getLoc(), "too few operands");
3509    return MatchOperand_ParseFail;
3510  }
3511  Parser.Lex(); // Eat the comma.
3512  if (Parser.getTok().isNot(AsmToken::Hash) &&
3513      Parser.getTok().isNot(AsmToken::Dollar)) {
3514    Error(Parser.getTok().getLoc(), "'#' expected");
3515    return MatchOperand_ParseFail;
3516  }
3517  Parser.Lex(); // Eat hash token.
3518
3519  const MCExpr *WidthExpr;
3520  if (getParser().ParseExpression(WidthExpr)) {
3521    Error(E, "malformed immediate expression");
3522    return MatchOperand_ParseFail;
3523  }
3524  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3525  if (!CE) {
3526    Error(E, "'width' operand must be an immediate");
3527    return MatchOperand_ParseFail;
3528  }
3529
3530  int64_t Width = CE->getValue();
3531  // The width must be in the range [1,32-lsb]
3532  if (Width < 1 || Width > 32 - LSB) {
3533    Error(E, "'width' operand must be in the range [1,32-lsb]");
3534    return MatchOperand_ParseFail;
3535  }
3536  E = Parser.getTok().getLoc();
3537
3538  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3539
3540  return MatchOperand_Success;
3541}
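
// Example bitfield (lsb, width) operand pairs parsed here (illustrative):
//   bfi  r0, r1, #4, #8      @ lsb == 4, width == 8
//   sbfx r2, r3, #0, #16     @ width must satisfy 1 <= width <= 32 - lsb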
3542
3543ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3544parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3545  // Check for a post-index addressing register operand. Specifically:
3546  // postidx_reg := '+' register {, shift}
3547  //              | '-' register {, shift}
3548  //              | register {, shift}
3549
3550  // This method must return MatchOperand_NoMatch without consuming any tokens
3551  // in the case where there is no match, as other alternatives are handled
3552  // by other parse methods.
3553  AsmToken Tok = Parser.getTok();
3554  SMLoc S = Tok.getLoc();
3555  bool haveEaten = false;
3556  bool isAdd = true;
3557  int Reg = -1;
3558  if (Tok.is(AsmToken::Plus)) {
3559    Parser.Lex(); // Eat the '+' token.
3560    haveEaten = true;
3561  } else if (Tok.is(AsmToken::Minus)) {
3562    Parser.Lex(); // Eat the '-' token.
3563    isAdd = false;
3564    haveEaten = true;
3565  }
3566  if (Parser.getTok().is(AsmToken::Identifier))
3567    Reg = tryParseRegister();
3568  if (Reg == -1) {
3569    if (!haveEaten)
3570      return MatchOperand_NoMatch;
3571    Error(Parser.getTok().getLoc(), "register expected");
3572    return MatchOperand_ParseFail;
3573  }
3574  SMLoc E = Parser.getTok().getLoc();
3575
3576  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3577  unsigned ShiftImm = 0;
3578  if (Parser.getTok().is(AsmToken::Comma)) {
3579    Parser.Lex(); // Eat the ','.
3580    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3581      return MatchOperand_ParseFail;
3582  }
3583
3584  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3585                                                  ShiftImm, S, E));
3586
3587  return MatchOperand_Success;
3588}
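
// Example post-indexed register offsets matched here (illustrative):
//   ldr r0, [r1], r2             @ plain register
//   ldr r0, [r1], -r2            @ subtracted register
//   str r0, [r1], r2, lsl #2     @ register with shift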
3589
3590ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3591parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3592  // Check for a post-index addressing mode 3 offset operand. Specifically:
3593  // am3offset := '+' register
3594  //              | '-' register
3595  //              | register
3596  //              | # imm
3597  //              | # + imm
3598  //              | # - imm
3599
3600  // This method must return MatchOperand_NoMatch without consuming any tokens
3601  // in the case where there is no match, as other alternatives are handled
3602  // by other parse methods.
3603  AsmToken Tok = Parser.getTok();
3604  SMLoc S = Tok.getLoc();
3605
3606  // Do immediates first, as we always parse those if we have a '#'.
3607  if (Parser.getTok().is(AsmToken::Hash) ||
3608      Parser.getTok().is(AsmToken::Dollar)) {
3609    Parser.Lex(); // Eat the '#'.
3610    // Explicitly look for a '-', as we need to encode negative zero
3611    // differently.
3612    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3613    const MCExpr *Offset;
3614    if (getParser().ParseExpression(Offset))
3615      return MatchOperand_ParseFail;
3616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3617    if (!CE) {
3618      Error(S, "constant expression expected");
3619      return MatchOperand_ParseFail;
3620    }
3621    SMLoc E = Tok.getLoc();
3622    // Negative zero is encoded as the flag value INT32_MIN.
3623    int32_t Val = CE->getValue();
3624    if (isNegative && Val == 0)
3625      Val = INT32_MIN;
3626
3627    Operands.push_back(
3628      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3629
3630    return MatchOperand_Success;
3631  }
3632
3633
3634  bool haveEaten = false;
3635  bool isAdd = true;
3636  int Reg = -1;
3637  if (Tok.is(AsmToken::Plus)) {
3638    Parser.Lex(); // Eat the '+' token.
3639    haveEaten = true;
3640  } else if (Tok.is(AsmToken::Minus)) {
3641    Parser.Lex(); // Eat the '-' token.
3642    isAdd = false;
3643    haveEaten = true;
3644  }
3645  if (Parser.getTok().is(AsmToken::Identifier))
3646    Reg = tryParseRegister();
3647  if (Reg == -1) {
3648    if (!haveEaten)
3649      return MatchOperand_NoMatch;
3650    Error(Parser.getTok().getLoc(), "register expected");
3651    return MatchOperand_ParseFail;
3652  }
3653  SMLoc E = Parser.getTok().getLoc();
3654
3655  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3656                                                  0, S, E));
3657
3658  return MatchOperand_Success;
3659}
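
// Example addrmode3 offsets matched here (illustrative):
//   ldrh  r0, [r1], #4       @ immediate offset
//   ldrh  r0, [r1], #-2      @ negative immediate; "#-0" becomes INT32_MIN
//   ldrsb r0, [r1], -r2      @ register offset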
3660
3661/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3662/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3663/// when they refer to multiple MIOperands inside a single one.
3664bool ARMAsmParser::
3665cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3666             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3667  // Rt, Rt2
3668  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3669  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3670  // Create a writeback register dummy placeholder.
3671  Inst.addOperand(MCOperand::CreateReg(0));
3672  // addr
3673  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3674  // pred
3675  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3676  return true;
3677}
3678
3679/// cvtT2StrdPre - Convert parsed operands to MCInst.
3680/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3681/// when they refer to multiple MIOperands inside a single one.
3682bool ARMAsmParser::
3683cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3684             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3685  // Create a writeback register dummy placeholder.
3686  Inst.addOperand(MCOperand::CreateReg(0));
3687  // Rt, Rt2
3688  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3689  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3690  // addr
3691  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3692  // pred
3693  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3694  return true;
3695}
3696
3697/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3698/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3699/// when they refer to multiple MIOperands inside a single one.
3700bool ARMAsmParser::
3701cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3702                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3703  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3704
3705  // Create a writeback register dummy placeholder.
3706  Inst.addOperand(MCOperand::CreateImm(0));
3707
3708  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3709  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3710  return true;
3711}
3712
3713/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3714/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3715/// when they refer to multiple MIOperands inside a single one.
3716bool ARMAsmParser::
3717cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3718                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3719  // Create a writeback register dummy placeholder.
3720  Inst.addOperand(MCOperand::CreateImm(0));
3721  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3722  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3723  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3724  return true;
3725}
3726
3727/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3728/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3729/// when they refer to multiple MIOperands inside a single one.
3730bool ARMAsmParser::
3731cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3732                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3733  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3734
3735  // Create a writeback register dummy placeholder.
3736  Inst.addOperand(MCOperand::CreateImm(0));
3737
3738  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3739  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3740  return true;
3741}
3742
3743/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3744/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3745/// when they refer to multiple MIOperands inside a single one.
3746bool ARMAsmParser::
3747cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3748                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3749  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3750
3751  // Create a writeback register dummy placeholder.
3752  Inst.addOperand(MCOperand::CreateImm(0));
3753
3754  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3755  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3756  return true;
3757}
3758
3759
3760/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3761/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3762/// when they refer to multiple MIOperands inside a single one.
3763bool ARMAsmParser::
3764cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3765                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3766  // Create a writeback register dummy placeholder.
3767  Inst.addOperand(MCOperand::CreateImm(0));
3768  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3769  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3770  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3771  return true;
3772}
3773
3774/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3775/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3776/// when they refer to multiple MIOperands inside a single one.
3777bool ARMAsmParser::
3778cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3779                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3780  // Create a writeback register dummy placeholder.
3781  Inst.addOperand(MCOperand::CreateImm(0));
3782  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3783  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3784  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3785  return true;
3786}
3787
3788/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3789/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3790/// when they refer to multiple MIOperands inside a single one.
3791bool ARMAsmParser::
3792cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3793                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3794  // Create a writeback register dummy placeholder.
3795  Inst.addOperand(MCOperand::CreateImm(0));
3796  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3797  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3798  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3799  return true;
3800}
3801
3802/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3803/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3804/// when they refer to multiple MIOperands inside a single one.
3805bool ARMAsmParser::
3806cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3807                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3808  // Rt
3809  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3810  // Create a writeback register dummy placeholder.
3811  Inst.addOperand(MCOperand::CreateImm(0));
3812  // addr
3813  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3814  // offset
3815  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3816  // pred
3817  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3818  return true;
3819}
3820
3821/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3822/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3823/// when they refer to multiple MIOperands inside a single one.
3824bool ARMAsmParser::
3825cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3826                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3827  // Rt
3828  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3829  // Create a writeback register dummy placeholder.
3830  Inst.addOperand(MCOperand::CreateImm(0));
3831  // addr
3832  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3833  // offset
3834  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3835  // pred
3836  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3837  return true;
3838}
3839
3840/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3841/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3842/// when they refer to multiple MIOperands inside a single one.
3843bool ARMAsmParser::
3844cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3845                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3846  // Create a writeback register dummy placeholder.
3847  Inst.addOperand(MCOperand::CreateImm(0));
3848  // Rt
3849  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3850  // addr
3851  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3852  // offset
3853  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3854  // pred
3855  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3856  return true;
3857}
3858
3859/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3860/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3861/// when they refer to multiple MIOperands inside a single one.
3862bool ARMAsmParser::
3863cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3864                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3865  // Create a writeback register dummy placeholder.
3866  Inst.addOperand(MCOperand::CreateImm(0));
3867  // Rt
3868  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3869  // addr
3870  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3871  // offset
3872  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3873  // pred
3874  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3875  return true;
3876}
3877
3878/// cvtLdrdPre - Convert parsed operands to MCInst.
3879/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3880/// when they refer to multiple MIOperands inside a single one.
3881bool ARMAsmParser::
3882cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3883           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3884  // Rt, Rt2
3885  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3886  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3887  // Create a writeback register dummy placeholder.
3888  Inst.addOperand(MCOperand::CreateImm(0));
3889  // addr
3890  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3891  // pred
3892  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3893  return true;
3894}
3895
3896/// cvtStrdPre - Convert parsed operands to MCInst.
3897/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3898/// when they refer to multiple MIOperands inside a single one.
3899bool ARMAsmParser::
3900cvtStrdPre(MCInst &Inst, unsigned Opcode,
3901           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3902  // Create a writeback register dummy placeholder.
3903  Inst.addOperand(MCOperand::CreateImm(0));
3904  // Rt, Rt2
3905  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3906  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3907  // addr
3908  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3909  // pred
3910  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3911  return true;
3912}
3913
3914/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3915/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3916/// when they refer to multiple MIOperands inside a single one.
3917bool ARMAsmParser::
3918cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3919                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3920  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3921  // Create a writeback register dummy placeholder.
3922  Inst.addOperand(MCOperand::CreateImm(0));
3923  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3924  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3925  return true;
3926}
3927
3928/// cvtThumbMultiply - Convert parsed operands to MCInst.
3929/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3930/// when they refer to multiple MIOperands inside a single one.
3931bool ARMAsmParser::
3932cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3933           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3934  // The second source operand must be the same register as the destination
3935  // operand.
3936  if (Operands.size() == 6 &&
3937      (((ARMOperand*)Operands[3])->getReg() !=
3938       ((ARMOperand*)Operands[5])->getReg()) &&
3939      (((ARMOperand*)Operands[3])->getReg() !=
3940       ((ARMOperand*)Operands[4])->getReg())) {
3941    Error(Operands[3]->getStartLoc(),
3942          "destination register must match source register");
3943    return false;
3944  }
3945  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3946  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3947  // If we have a three-operand form, make sure to set Rn to be the operand
3948  // that isn't the same as Rd.
3949  unsigned RegOp = 4;
3950  if (Operands.size() == 6 &&
3951      ((ARMOperand*)Operands[4])->getReg() ==
3952        ((ARMOperand*)Operands[3])->getReg())
3953    RegOp = 5;
3954  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3955  Inst.addOperand(Inst.getOperand(0));
3956  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3957
3958  return true;
3959}
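
// For the 16-bit Thumb multiply handled above, the destination must match one
// of the source registers (illustrative):
//   muls r0, r1, r0     @ accepted: Rd equals the second source
//   muls r0, r1, r2     @ rejected by the check above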
3960
3961bool ARMAsmParser::
3962cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3963              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3964  // Vd
3965  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3966  // Create a writeback register dummy placeholder.
3967  Inst.addOperand(MCOperand::CreateImm(0));
3968  // Vn
3969  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3970  // pred
3971  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3972  return true;
3973}
3974
3975bool ARMAsmParser::
3976cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3977                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3978  // Vd
3979  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3980  // Create a writeback register dummy placeholder.
3981  Inst.addOperand(MCOperand::CreateImm(0));
3982  // Vn
3983  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3984  // Vm
3985  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3986  // pred
3987  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3988  return true;
3989}
3990
3991bool ARMAsmParser::
3992cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3993              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3994  // Create a writeback register dummy placeholder.
3995  Inst.addOperand(MCOperand::CreateImm(0));
3996  // Vn
3997  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3998  // Vt
3999  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4000  // pred
4001  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4002  return true;
4003}
4004
4005bool ARMAsmParser::
4006cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4007                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4008  // Create a writeback register dummy placeholder.
4009  Inst.addOperand(MCOperand::CreateImm(0));
4010  // Vn
4011  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4012  // Vm
4013  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4014  // Vt
4015  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4016  // pred
4017  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4018  return true;
4019}
4020
4021/// Parse an ARM memory expression. Return false if successful; otherwise
4022/// return true and report an error. The first token must be a '[' when called.
4023bool ARMAsmParser::
4024parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4025  SMLoc S, E;
4026  assert(Parser.getTok().is(AsmToken::LBrac) &&
4027         "Token is not a Left Bracket");
4028  S = Parser.getTok().getLoc();
4029  Parser.Lex(); // Eat left bracket token.
4030
4031  const AsmToken &BaseRegTok = Parser.getTok();
4032  int BaseRegNum = tryParseRegister();
4033  if (BaseRegNum == -1)
4034    return Error(BaseRegTok.getLoc(), "register expected");
4035
4036  // The next token must either be a comma or a closing bracket.
4037  const AsmToken &Tok = Parser.getTok();
4038  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4039    return Error(Tok.getLoc(), "malformed memory operand");
4040
4041  if (Tok.is(AsmToken::RBrac)) {
4042    E = Tok.getLoc();
4043    Parser.Lex(); // Eat right bracket token.
4044
4045    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4046                                             0, 0, false, S, E));
4047
4048    // If there's a pre-indexing writeback marker, '!', just add it as a token
4049    // operand. It's rather odd, but syntactically valid.
4050    if (Parser.getTok().is(AsmToken::Exclaim)) {
4051      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4052      Parser.Lex(); // Eat the '!'.
4053    }
4054
4055    return false;
4056  }
4057
4058  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4059  Parser.Lex(); // Eat the comma.
4060
4061  // If we have a ':', it's an alignment specifier.
4062  if (Parser.getTok().is(AsmToken::Colon)) {
4063    Parser.Lex(); // Eat the ':'.
4064    E = Parser.getTok().getLoc();
4065
4066    const MCExpr *Expr;
4067    if (getParser().ParseExpression(Expr))
4068     return true;
4069
4070    // The expression has to be a constant. Memory references with relocations
4071    // don't come through here, as they use the <label> forms of the relevant
4072    // instructions.
4073    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4074    if (!CE)
4075      return Error (E, "constant expression expected");
4076
4077    unsigned Align = 0;
4078    switch (CE->getValue()) {
4079    default:
4080      return Error(E,
4081                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4082    case 16:  Align = 2; break;
4083    case 32:  Align = 4; break;
4084    case 64:  Align = 8; break;
4085    case 128: Align = 16; break;
4086    case 256: Align = 32; break;
4087    }
4088
4089    // Now we should have the closing ']'
4090    E = Parser.getTok().getLoc();
4091    if (Parser.getTok().isNot(AsmToken::RBrac))
4092      return Error(E, "']' expected");
4093    Parser.Lex(); // Eat right bracket token.
4094
4095    // Don't worry about range checking the value here. That's handled by
4096    // the is*() predicates.
4097    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4098                                             ARM_AM::no_shift, 0, Align,
4099                                             false, S, E));
4100
4101    // If there's a pre-indexing writeback marker, '!', just add it as a token
4102    // operand.
4103    if (Parser.getTok().is(AsmToken::Exclaim)) {
4104      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4105      Parser.Lex(); // Eat the '!'.
4106    }
4107
4108    return false;
4109  }
4110
4111  // If we have a '#', it's an immediate offset, else assume it's a register
4112  // offset. Be friendly and also accept a plain integer (without a leading
4113  // hash) for gas compatibility.
4114  if (Parser.getTok().is(AsmToken::Hash) ||
4115      Parser.getTok().is(AsmToken::Dollar) ||
4116      Parser.getTok().is(AsmToken::Integer)) {
4117    if (Parser.getTok().isNot(AsmToken::Integer))
4118      Parser.Lex(); // Eat the '#'.
4119    E = Parser.getTok().getLoc();
4120
4121    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4122    const MCExpr *Offset;
4123    if (getParser().ParseExpression(Offset))
4124     return true;
4125
4126    // The expression has to be a constant. Memory references with relocations
4127    // don't come through here, as they use the <label> forms of the relevant
4128    // instructions.
4129    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4130    if (!CE)
4131      return Error (E, "constant expression expected");
4132
4133    // If the constant was #-0, represent it as INT32_MIN.
4134    int32_t Val = CE->getValue();
4135    if (isNegative && Val == 0)
4136      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4137
4138    // Now we should have the closing ']'
4139    E = Parser.getTok().getLoc();
4140    if (Parser.getTok().isNot(AsmToken::RBrac))
4141      return Error(E, "']' expected");
4142    Parser.Lex(); // Eat right bracket token.
4143
4144    // Don't worry about range checking the value here. That's handled by
4145    // the is*() predicates.
4146    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4147                                             ARM_AM::no_shift, 0, 0,
4148                                             false, S, E));
4149
4150    // If there's a pre-indexing writeback marker, '!', just add it as a token
4151    // operand.
4152    if (Parser.getTok().is(AsmToken::Exclaim)) {
4153      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4154      Parser.Lex(); // Eat the '!'.
4155    }
4156
4157    return false;
4158  }
4159
4160  // The register offset is optionally preceded by a '+' or '-'
4161  bool isNegative = false;
4162  if (Parser.getTok().is(AsmToken::Minus)) {
4163    isNegative = true;
4164    Parser.Lex(); // Eat the '-'.
4165  } else if (Parser.getTok().is(AsmToken::Plus)) {
4166    // Nothing to do.
4167    Parser.Lex(); // Eat the '+'.
4168  }
4169
4170  E = Parser.getTok().getLoc();
4171  int OffsetRegNum = tryParseRegister();
4172  if (OffsetRegNum == -1)
4173    return Error(E, "register expected");
4174
4175  // If there's a shift operator, handle it.
4176  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4177  unsigned ShiftImm = 0;
4178  if (Parser.getTok().is(AsmToken::Comma)) {
4179    Parser.Lex(); // Eat the ','.
4180    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4181      return true;
4182  }
4183
4184  // Now we should have the closing ']'
4185  E = Parser.getTok().getLoc();
4186  if (Parser.getTok().isNot(AsmToken::RBrac))
4187    return Error(E, "']' expected");
4188  Parser.Lex(); // Eat right bracket token.
4189
4190  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4191                                           ShiftType, ShiftImm, 0, isNegative,
4192                                           S, E));
4193
4194  // If there's a pre-indexing writeback marker, '!', just add it as a token
4195  // operand.
4196  if (Parser.getTok().is(AsmToken::Exclaim)) {
4197    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4198    Parser.Lex(); // Eat the '!'.
4199  }
4200
4201  return false;
4202}
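
// A few memory operand shapes this routine parses (illustrative):
//   [r0]                   @ no offset
//   [r0, #4]               @ immediate offset
//   [r0, #-8]!             @ pre-indexed; the '!' is added as a token operand
//   [r0, r1, lsl #2]       @ register offset with shift
//   [r0, :128]             @ alignment specifier, in bits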
4203
4204/// parseMemRegOffsetShift - Parse one of these two forms:
4205///   ( lsl | lsr | asr | ror ) , # shift_amount
4206///   rrx
4207/// Returns false if it successfully parses a shift; returns true on error.
4208bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4209                                          unsigned &Amount) {
4210  SMLoc Loc = Parser.getTok().getLoc();
4211  const AsmToken &Tok = Parser.getTok();
4212  if (Tok.isNot(AsmToken::Identifier))
4213    return true;
4214  StringRef ShiftName = Tok.getString();
4215  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4216      ShiftName == "asl" || ShiftName == "ASL")
4217    St = ARM_AM::lsl;
4218  else if (ShiftName == "lsr" || ShiftName == "LSR")
4219    St = ARM_AM::lsr;
4220  else if (ShiftName == "asr" || ShiftName == "ASR")
4221    St = ARM_AM::asr;
4222  else if (ShiftName == "ror" || ShiftName == "ROR")
4223    St = ARM_AM::ror;
4224  else if (ShiftName == "rrx" || ShiftName == "RRX")
4225    St = ARM_AM::rrx;
4226  else
4227    return Error(Loc, "illegal shift operator");
4228  Parser.Lex(); // Eat shift type token.
4229
4230  // rrx stands alone.
4231  Amount = 0;
4232  if (St != ARM_AM::rrx) {
4233    Loc = Parser.getTok().getLoc();
4234    // A '#' and a shift amount.
4235    const AsmToken &HashTok = Parser.getTok();
4236    if (HashTok.isNot(AsmToken::Hash) &&
4237        HashTok.isNot(AsmToken::Dollar))
4238      return Error(HashTok.getLoc(), "'#' expected");
4239    Parser.Lex(); // Eat hash token.
4240
4241    const MCExpr *Expr;
4242    if (getParser().ParseExpression(Expr))
4243      return true;
4244    // Range check the immediate.
4245    // lsl, ror: 0 <= imm <= 31
4246    // lsr, asr: 0 <= imm <= 32
4247    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4248    if (!CE)
4249      return Error(Loc, "shift amount must be an immediate");
4250    int64_t Imm = CE->getValue();
4251    if (Imm < 0 ||
4252        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4253        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4254      return Error(Loc, "immediate shift value out of range");
4255    Amount = Imm;
4256  }
4257
4258  return false;
4259}
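
// Example shift suffixes parsed by this helper (illustrative):
//   , lsl #2
//   , asr #32      @ lsr/asr allow amounts up to 32
//   , rrx          @ rrx takes no amount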
4260
4261/// parseFPImm - Parse a floating point immediate expression operand.
4262ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4263parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4264  // Anything that can accept a floating point constant as an operand
4265  // needs to go through here, as the regular ParseExpression is
4266  // integer only.
4267  //
4268  // This routine still creates a generic Immediate operand, containing
4269  // a bitcast of the 64-bit floating point value. The various operands
4270  // that accept floats can check whether the value is valid for them
4271  // via the standard is*() predicates.
4272
4273  SMLoc S = Parser.getTok().getLoc();
4274
4275  if (Parser.getTok().isNot(AsmToken::Hash) &&
4276      Parser.getTok().isNot(AsmToken::Dollar))
4277    return MatchOperand_NoMatch;
4278
4279  // Disambiguate the VMOV forms that can accept an FP immediate.
4280  // vmov.f32 <sreg>, #imm
4281  // vmov.f64 <dreg>, #imm
4282  // vmov.f32 <dreg>, #imm  @ vector f32x2
4283  // vmov.f32 <qreg>, #imm  @ vector f32x4
4284  //
4285  // There are also the NEON VMOV instructions which expect an
4286  // integer constant. Make sure we don't try to parse an FPImm
4287  // for these:
4288  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4289  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4290  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4291                           TyOp->getToken() != ".f64"))
4292    return MatchOperand_NoMatch;
4293
4294  Parser.Lex(); // Eat the '#'.
4295
4296  // Handle negation, as that still comes through as a separate token.
4297  bool isNegative = false;
4298  if (Parser.getTok().is(AsmToken::Minus)) {
4299    isNegative = true;
4300    Parser.Lex();
4301  }
4302  const AsmToken &Tok = Parser.getTok();
4303  SMLoc Loc = Tok.getLoc();
4304  if (Tok.is(AsmToken::Real)) {
4305    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4306    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4307    // If we had a '-' in front, toggle the sign bit.
4308    IntVal ^= (uint64_t)isNegative << 31;
4309    Parser.Lex(); // Eat the token.
4310    Operands.push_back(ARMOperand::CreateImm(
4311          MCConstantExpr::Create(IntVal, getContext()),
4312          S, Parser.getTok().getLoc()));
4313    return MatchOperand_Success;
4314  }
4315  // Also handle plain integers. Instructions which allow floating point
4316  // immediates also allow a raw encoded 8-bit value.
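  // For example, "vmov.f32 s0, #112" takes this path: the 8-bit encoded
  // value is decoded via ARM_AM::getFPImmFloat() and the bit pattern of the
  // resulting double is stored as the immediate.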
4317  if (Tok.is(AsmToken::Integer)) {
4318    int64_t Val = Tok.getIntVal();
4319    Parser.Lex(); // Eat the token.
4320    if (Val > 255 || Val < 0) {
4321      Error(Loc, "encoded floating point value out of range");
4322      return MatchOperand_ParseFail;
4323    }
4324    double RealVal = ARM_AM::getFPImmFloat(Val);
4325    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4326    Operands.push_back(ARMOperand::CreateImm(
4327        MCConstantExpr::Create(Val, getContext()), S,
4328        Parser.getTok().getLoc()));
4329    return MatchOperand_Success;
4330  }
4331
4332  Error(Loc, "invalid floating point immediate");
4333  return MatchOperand_ParseFail;
4334}
4335
4336/// Parse an ARM instruction operand.  For now this parses the operand regardless
4337/// of the mnemonic.
4338bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4339                                StringRef Mnemonic) {
4340  SMLoc S, E;
4341
4342  // Check if the current operand has a custom associated parser, if so, try to
4343  // custom parse the operand, or fallback to the general approach.
4344  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4345  if (ResTy == MatchOperand_Success)
4346    return false;
4347  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4348  // there was a match, but an error occurred, in which case, just return that
4349  // the operand parsing failed.
4350  if (ResTy == MatchOperand_ParseFail)
4351    return true;
4352
4353  switch (getLexer().getKind()) {
4354  default:
4355    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4356    return true;
4357  case AsmToken::Identifier: {
4358    if (!tryParseRegisterWithWriteBack(Operands))
4359      return false;
4360    int Res = tryParseShiftRegister(Operands);
4361    if (Res == 0) // success
4362      return false;
4363    else if (Res == -1) // irrecoverable error
4364      return true;
4365    // If this is VMRS, check for the apsr_nzcv operand.
4366    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4367      S = Parser.getTok().getLoc();
4368      Parser.Lex();
4369      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4370      return false;
4371    }
4372
4373    // Fall through for the Identifier case that is not a register or a
4374    // special name.
4375  }
4376  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4377  case AsmToken::Integer: // things like 1f and 2b as branch targets
4378  case AsmToken::String:  // quoted label names.
4379  case AsmToken::Dot: {   // . as a branch target
4380    // This was not a register so parse other operands that start with an
4381    // identifier (like labels) as expressions and create them as immediates.
4382    const MCExpr *IdVal;
4383    S = Parser.getTok().getLoc();
4384    if (getParser().ParseExpression(IdVal))
4385      return true;
4386    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4387    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4388    return false;
4389  }
4390  case AsmToken::LBrac:
4391    return parseMemory(Operands);
4392  case AsmToken::LCurly:
4393    return parseRegisterList(Operands);
4394  case AsmToken::Dollar:
4395  case AsmToken::Hash: {
4396    // #42 -> immediate.
4397    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4398    S = Parser.getTok().getLoc();
4399    Parser.Lex();
4400    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4401    const MCExpr *ImmVal;
4402    if (getParser().ParseExpression(ImmVal))
4403      return true;
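    // ParseExpression has already folded any leading '-' into the value, so
    // a literal "#-0" comes back as 0. Encode it as INT32_MIN so that "#-0"
    // remains distinguishable from "#0" downstream (the sign can matter for
    // some addressing modes).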
4404    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4405    if (CE) {
4406      int32_t Val = CE->getValue();
4407      if (isNegative && Val == 0)
4408        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4409    }
4410    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4411    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4412    return false;
4413  }
4414  case AsmToken::Colon: {
4415    // ":lower16:" and ":upper16:" expression prefixes
4416    // FIXME: Check it's an expression prefix,
4417    // e.g. (FOO - :lower16:BAR) isn't legal.
4418    ARMMCExpr::VariantKind RefKind;
4419    if (parsePrefix(RefKind))
4420      return true;
4421
4422    const MCExpr *SubExprVal;
4423    if (getParser().ParseExpression(SubExprVal))
4424      return true;
4425
4426    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4427                                                   getContext());
4428    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4429    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4430    return false;
4431  }
4432  }
4433}
4434
4435// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4436//  :lower16: and :upper16:.
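// For example:
//   movw r0, #:lower16:_symbol
//   movt r0, #:upper16:_symbol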
4437bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4438  RefKind = ARMMCExpr::VK_ARM_None;
4439
4440  // :lower16: and :upper16: modifiers
4441  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4442  Parser.Lex(); // Eat ':'
4443
4444  if (getLexer().isNot(AsmToken::Identifier)) {
4445    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4446    return true;
4447  }
4448
4449  StringRef IDVal = Parser.getTok().getIdentifier();
4450  if (IDVal == "lower16") {
4451    RefKind = ARMMCExpr::VK_ARM_LO16;
4452  } else if (IDVal == "upper16") {
4453    RefKind = ARMMCExpr::VK_ARM_HI16;
4454  } else {
4455    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4456    return true;
4457  }
4458  Parser.Lex();
4459
4460  if (getLexer().isNot(AsmToken::Colon)) {
4461    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4462    return true;
4463  }
4464  Parser.Lex(); // Eat the last ':'
4465  return false;
4466}
4467
4468/// \brief Given a mnemonic, split out possible predication code and carry
4469/// setting letters to form a canonical mnemonic and flags.
4470//
4471// FIXME: Would be nice to autogen this.
4472// FIXME: This is a bit of a maze of special cases.
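// For example, "addseq" splits into the canonical mnemonic "add" with
// CarrySetting = true and PredicationCode = ARMCC::EQ, and the trailing
// condition mask of an IT mnemonic (e.g. the "tt" of "ittt") is returned
// in ITMask.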
4473StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4474                                      unsigned &PredicationCode,
4475                                      bool &CarrySetting,
4476                                      unsigned &ProcessorIMod,
4477                                      StringRef &ITMask) {
4478  PredicationCode = ARMCC::AL;
4479  CarrySetting = false;
4480  ProcessorIMod = 0;
4481
4482  // Ignore some mnemonics we know aren't predicated forms.
4483  //
4484  // FIXME: Would be nice to autogen this.
4485  if ((Mnemonic == "movs" && isThumb()) ||
4486      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4487      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4488      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4489      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4490      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4491      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4492      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4493      Mnemonic == "fmuls")
4494    return Mnemonic;
4495
4496  // First, split out any predication code. Ignore mnemonics we know aren't
4497  // predicated but whose carry-set 's' makes them end in a condition code.
4498  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4499      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4500      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4501      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4502    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4503      .Case("eq", ARMCC::EQ)
4504      .Case("ne", ARMCC::NE)
4505      .Case("hs", ARMCC::HS)
4506      .Case("cs", ARMCC::HS)
4507      .Case("lo", ARMCC::LO)
4508      .Case("cc", ARMCC::LO)
4509      .Case("mi", ARMCC::MI)
4510      .Case("pl", ARMCC::PL)
4511      .Case("vs", ARMCC::VS)
4512      .Case("vc", ARMCC::VC)
4513      .Case("hi", ARMCC::HI)
4514      .Case("ls", ARMCC::LS)
4515      .Case("ge", ARMCC::GE)
4516      .Case("lt", ARMCC::LT)
4517      .Case("gt", ARMCC::GT)
4518      .Case("le", ARMCC::LE)
4519      .Case("al", ARMCC::AL)
4520      .Default(~0U);
4521    if (CC != ~0U) {
4522      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4523      PredicationCode = CC;
4524    }
4525  }
4526
4527  // Next, determine if we have a carry setting bit. We explicitly ignore all
4528  // the instructions we know end in 's'.
4529  if (Mnemonic.endswith("s") &&
4530      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4531        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4532        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4533        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4534        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4535        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4536        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4537        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
4538        (Mnemonic == "movs" && isThumb()))) {
4539    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4540    CarrySetting = true;
4541  }
4542
4543  // The "cps" instruction can have a interrupt mode operand which is glued into
4544  // the mnemonic. Check if this is the case, split it and parse the imod op
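  // (e.g. "cpsie" becomes mnemonic "cps" with ProcessorIMod = ARM_PROC::IE).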
4545  if (Mnemonic.startswith("cps")) {
4546    // Split out any imod code.
4547    unsigned IMod =
4548      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4549      .Case("ie", ARM_PROC::IE)
4550      .Case("id", ARM_PROC::ID)
4551      .Default(~0U);
4552    if (IMod != ~0U) {
4553      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4554      ProcessorIMod = IMod;
4555    }
4556  }
4557
4558  // The "it" instruction has the condition mask on the end of the mnemonic.
4559  if (Mnemonic.startswith("it")) {
4560    ITMask = Mnemonic.slice(2, Mnemonic.size());
4561    Mnemonic = Mnemonic.slice(0, 2);
4562  }
4563
4564  return Mnemonic;
4565}
4566
4567/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4568/// inclusion of carry set or predication code operands.
4569//
4570// FIXME: It would be nice to autogen this.
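// For example, "add" can take both an 's' suffix and a condition code,
// while "cps" takes neither.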
4571void ARMAsmParser::
4572getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4573                      bool &CanAcceptPredicationCode) {
4574  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4575      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4576      Mnemonic == "add" || Mnemonic == "adc" ||
4577      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4578      Mnemonic == "orr" || Mnemonic == "mvn" ||
4579      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4580      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4581      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4582                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4583                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4584    CanAcceptCarrySet = true;
4585  } else
4586    CanAcceptCarrySet = false;
4587
4588  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4589      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4590      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4591      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4592      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4593      (Mnemonic == "clrex" && !isThumb()) ||
4594      (Mnemonic == "nop" && isThumbOne()) ||
4595      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4596        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4597        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4598      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4599       !isThumb()) ||
4600      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4601    CanAcceptPredicationCode = false;
4602  } else
4603    CanAcceptPredicationCode = true;
4604
4605  if (isThumb()) {
4606    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4607        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4608      CanAcceptPredicationCode = false;
4609  }
4610}
4611
4612bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4613                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4614  // FIXME: This is all horribly hacky. We really need a better way to deal
4615  // with optional operands like this in the matcher table.
4616
4617  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4618  // another does not. Specifically, the MOVW instruction does not. So we
4619  // special case it here and remove the defaulted (non-setting) cc_out
4620  // operand if that's the instruction we're trying to match.
4621  //
4622  // We do this as post-processing of the explicit operands rather than just
4623  // conditionally adding the cc_out in the first place because we need
4624  // to check the type of the parsed immediate operand.
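  // For example, "mov r9, #0xabcd" can only be MOVW (0xabcd cannot be
  // encoded as an ARM so_imm), so the defaulted cc_out is dropped here.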
4625  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4626      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4627      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4628      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4629    return true;
4630
4631  // Register-register 'add' for thumb does not have a cc_out operand
4632  // when there are only two register operands.
4633  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4634      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4635      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4636      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4637    return true;
4638  // Register-register 'add' for thumb does not have a cc_out operand
4639  // when it's an ADD Rdm, SP, {Rdm|#imm0_1020s4} instruction. We do
4640  // have to check the immediate range here since Thumb2 has a variant
4641  // that can handle a different range and has a cc_out operand.
4642  if (((isThumb() && Mnemonic == "add") ||
4643       (isThumbTwo() && Mnemonic == "sub")) &&
4644      Operands.size() == 6 &&
4645      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4646      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4647      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4648      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4649      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4650       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4651    return true;
4652  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4653  // imm0_4095 variant. That's the least-preferred variant when
4654  // selecting via the generic "add" mnemonic, so to know that we
4655  // should remove the cc_out operand, we have to explicitly check that
4656  // it's not one of the other variants. Ugh.
4657  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4658      Operands.size() == 6 &&
4659      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4660      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4661      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4662    // Nest conditions rather than one big 'if' statement for readability.
4663    //
4664    // If either register is a high reg, it's either one of the SP
4665    // variants (handled above) or a 32-bit encoding, so we just
4666    // check against T3. If the second register is the PC, this is an
4667    // alternate form of ADR, which uses encoding T4, so check for that too.
4668    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4669         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4670        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4671        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4672      return false;
4673    // If both registers are low, we're in an IT block, and the immediate is
4674    // in range, we should use encoding T1 instead, which has a cc_out.
4675    if (inITBlock() &&
4676        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4677        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4678        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4679      return false;
4680
4681    // Otherwise, we use encoding T4, which does not have a cc_out
4682    // operand.
4683    return true;
4684  }
4685
4686  // The thumb2 multiply instruction doesn't have a CCOut register, so
4687  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4688  // use the 16-bit encoding or not.
4689  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4690      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4691      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4692      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4693      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4694      // If the registers aren't low regs, the destination reg isn't the
4695      // same as one of the source regs, or the cc_out operand is zero
4696      // outside of an IT block, we have to use the 32-bit encoding, so
4697      // remove the cc_out operand.
4698      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4699       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4700       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4701       !inITBlock() ||
4702       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4703        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4704        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4705        static_cast<ARMOperand*>(Operands[4])->getReg())))
4706    return true;
4707
4708  // Also check the 'mul' syntax variant that doesn't specify an explicit
4709  // destination register.
4710  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4711      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4712      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4713      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4714      // If the registers aren't low regs  or the cc_out operand is zero
4715      // outside of an IT block, we have to use the 32-bit encoding, so
4716      // remove the cc_out operand.
4717      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4718       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4719       !inITBlock()))
4720    return true;
4721
4722
4723
4724  // Register-register 'add/sub' for thumb does not have a cc_out operand
4725  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4726  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4727  // right, this will result in better diagnostics (which operand is off)
4728  // anyway.
4729  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4730      (Operands.size() == 5 || Operands.size() == 6) &&
4731      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4732      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4733      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4734    return true;
4735
4736  return false;
4737}
4738
4739static bool isDataTypeToken(StringRef Tok) {
4740  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4741    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4742    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4743    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4744    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4745    Tok == ".f" || Tok == ".d";
4746}
4747
4748// FIXME: This bit should probably be handled via an explicit match class
4749// in the .td files that matches the suffix instead of having it be
4750// a literal string token the way it is now.
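// For example, "vldm.32 r0!, {d0-d3}" is parsed exactly like plain
// "vldm r0!, {d0-d3}"; the ".32" suffix is ignored by ParseInstruction().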
4751static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4752  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4753}
4754
4755static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4756/// Parse an ARM instruction mnemonic followed by its operands.
4757bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4758                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4759  // Apply mnemonic aliases before doing anything else, as the destination
4760  // mnemonic may include suffixes and we want to handle them normally.
4761  // The generic tblgen'erated code does this later, at the start of
4762  // MatchInstructionImpl(), but that's too late for aliases that include
4763  // any sort of suffix.
4764  unsigned AvailableFeatures = getAvailableFeatures();
4765  applyMnemonicAliases(Name, AvailableFeatures);
4766
4767  // First check for the ARM-specific .req directive.
4768  if (Parser.getTok().is(AsmToken::Identifier) &&
4769      Parser.getTok().getIdentifier() == ".req") {
4770    parseDirectiveReq(Name, NameLoc);
4771    // We always return 'error' for this, as we're done with this
4772    // statement and don't need to match the instruction.
4773    return true;
4774  }
4775
4776  // Create the leading tokens for the mnemonic, split by '.' characters.
4777  size_t Start = 0, Next = Name.find('.');
4778  StringRef Mnemonic = Name.slice(Start, Next);
4779
4780  // Split out the predication code and carry setting flag from the mnemonic.
4781  unsigned PredicationCode;
4782  unsigned ProcessorIMod;
4783  bool CarrySetting;
4784  StringRef ITMask;
4785  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4786                           ProcessorIMod, ITMask);
4787
4788  // In Thumb1, only the branch (B) instruction can be predicated.
4789  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4790    Parser.EatToEndOfStatement();
4791    return Error(NameLoc, "conditional execution not supported in Thumb1");
4792  }
4793
4794  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4795
4796  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4797  // is the mask as it will be for the IT encoding if the conditional
4798  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4799  // where the conditional bit0 is zero, the instruction post-processing
4800  // will adjust the mask accordingly.
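  // For example, "itt" (ITMask "t") yields 0b1100 and "itte" (ITMask "te")
  // yields 0b1010 from the loop below.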
4801  if (Mnemonic == "it") {
4802    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4803    if (ITMask.size() > 3) {
4804      Parser.EatToEndOfStatement();
4805      return Error(Loc, "too many conditions on IT instruction");
4806    }
4807    unsigned Mask = 8;
4808    for (unsigned i = ITMask.size(); i != 0; --i) {
4809      char pos = ITMask[i - 1];
4810      if (pos != 't' && pos != 'e') {
4811        Parser.EatToEndOfStatement();
4812        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4813      }
4814      Mask >>= 1;
4815      if (ITMask[i - 1] == 't')
4816        Mask |= 8;
4817    }
4818    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4819  }
4820
4821  // FIXME: This is all a pretty gross hack. We should automatically handle
4822  // optional operands like this via tblgen.
4823
4824  // Next, add the CCOut and ConditionCode operands, if needed.
4825  //
4826  // For mnemonics which can ever incorporate a carry setting bit or predication
4827  // code, our matching model involves us always generating CCOut and
4828  // ConditionCode operands to match the mnemonic "as written" and then we let
4829  // the matcher deal with finding the right instruction or generating an
4830  // appropriate error.
4831  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4832  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4833
4834  // If we had a carry-set on an instruction that can't do that, issue an
4835  // error.
4836  if (!CanAcceptCarrySet && CarrySetting) {
4837    Parser.EatToEndOfStatement();
4838    return Error(NameLoc, "instruction '" + Mnemonic +
4839                 "' can not set flags, but 's' suffix specified");
4840  }
4841  // If we had a predication code on an instruction that can't do that, issue an
4842  // error.
4843  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4844    Parser.EatToEndOfStatement();
4845    return Error(NameLoc, "instruction '" + Mnemonic +
4846                 "' is not predicable, but condition code specified");
4847  }
4848
4849  // Add the carry setting operand, if necessary.
4850  if (CanAcceptCarrySet) {
4851    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4852    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4853                                               Loc));
4854  }
4855
4856  // Add the predication code operand, if necessary.
4857  if (CanAcceptPredicationCode) {
4858    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4859                                      CarrySetting);
4860    Operands.push_back(ARMOperand::CreateCondCode(
4861                         ARMCC::CondCodes(PredicationCode), Loc));
4862  }
4863
4864  // Add the processor imod operand, if necessary.
4865  if (ProcessorIMod) {
4866    Operands.push_back(ARMOperand::CreateImm(
4867          MCConstantExpr::Create(ProcessorIMod, getContext()),
4868                                 NameLoc, NameLoc));
4869  }
4870
4871  // Add the remaining tokens in the mnemonic.
4872  while (Next != StringRef::npos) {
4873    Start = Next;
4874    Next = Name.find('.', Start + 1);
4875    StringRef ExtraToken = Name.slice(Start, Next);
4876
4877    // Some NEON instructions have an optional datatype suffix that is
4878    // completely ignored. Check for that.
4879    if (isDataTypeToken(ExtraToken) &&
4880        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4881      continue;
4882
4883    if (ExtraToken != ".n") {
4884      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4885      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4886    }
4887  }
4888
4889  // Read the remaining operands.
4890  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4891    // Read the first operand.
4892    if (parseOperand(Operands, Mnemonic)) {
4893      Parser.EatToEndOfStatement();
4894      return true;
4895    }
4896
4897    while (getLexer().is(AsmToken::Comma)) {
4898      Parser.Lex();  // Eat the comma.
4899
4900      // Parse and remember the operand.
4901      if (parseOperand(Operands, Mnemonic)) {
4902        Parser.EatToEndOfStatement();
4903        return true;
4904      }
4905    }
4906  }
4907
4908  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4909    SMLoc Loc = getLexer().getLoc();
4910    Parser.EatToEndOfStatement();
4911    return Error(Loc, "unexpected token in argument list");
4912  }
4913
4914  Parser.Lex(); // Consume the EndOfStatement
4915
4916  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4917  // do and don't have a cc_out optional-def operand. With some spot-checks
4918  // of the operand list, we can figure out which variant we're trying to
4919  // parse and adjust accordingly before actually matching. We shouldn't ever
4920  // try to remove a cc_out operand that was explicitly set on the
4921  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4922  // table-driven matcher doesn't fit well with the ARM instruction set.
4923  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4924    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4925    Operands.erase(Operands.begin() + 1);
4926    delete Op;
4927  }
4928
4929  // ARM mode 'blx' needs special handling, as the register operand version
4930  // is predicable, but the label operand version is not. So, we can't rely
4931  // on the Mnemonic based checking to correctly figure out when to put
4932  // a k_CondCode operand in the list. If we're trying to match the label
4933  // version, remove the k_CondCode operand here.
4934  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4935      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4936    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4937    Operands.erase(Operands.begin() + 1);
4938    delete Op;
4939  }
4940
4941  // The vector-compare-to-zero instructions have a literal token "#0" at
4942  // the end that comes through here as an immediate operand. Convert it to a
4943  // token to play nicely with the matcher.
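  // For example, "vceq.i32 d0, d1, #0" arrives with "#0" parsed as an
  // immediate; it is rewritten to the "#0" token the matcher expects.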
4944  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4945      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4946      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4947    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4948    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4949    if (CE && CE->getValue() == 0) {
4950      Operands.erase(Operands.begin() + 5);
4951      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4952      delete Op;
4953    }
4954  }
4955  // VCMP{E} does the same thing, but with a different operand count.
4956  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4957      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4958    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4959    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4960    if (CE && CE->getValue() == 0) {
4961      Operands.erase(Operands.begin() + 4);
4962      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4963      delete Op;
4964    }
4965  }
4966  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4967  // end. Convert it to a token here. Take care not to convert those
4968  // that should hit the Thumb2 encoding.
4969  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4970      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4971      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4972      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4973    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4974    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4975    if (CE && CE->getValue() == 0 &&
4976        (isThumbOne() ||
4977         // The cc_out operand matches the IT block.
4978         ((inITBlock() != CarrySetting) &&
4979         // Neither register operand is a high register.
4980         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4981          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4982      Operands.erase(Operands.begin() + 5);
4983      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4984      delete Op;
4985    }
4986  }
4987
4988  return false;
4989}
4990
4991// Validate context-sensitive operand constraints.
4992
4993// Return 'true' if the register list contains a non-low GPR register other
4994// than the optional HiReg, 'false' otherwise. If Reg is in the register
4995// list, set 'containsReg' to true.
4996static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4997                                 unsigned HiReg, bool &containsReg) {
4998  containsReg = false;
4999  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5000    unsigned OpReg = Inst.getOperand(i).getReg();
5001    if (OpReg == Reg)
5002      containsReg = true;
5003    // Anything other than a low register isn't legal here.
5004    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5005      return true;
5006  }
5007  return false;
5008}
5009
5010// Check if the specified register is in the register list of the inst,
5011// starting at the indicated operand number.
5012static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5013  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5014    unsigned OpReg = Inst.getOperand(i).getReg();
5015    if (OpReg == Reg)
5016      return true;
5017  }
5018  return false;
5019}
5020
5021// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5022// the ARMInsts array) instead. Getting that here requires awkward
5023// API changes, though. Better way?
5024namespace llvm {
5025extern const MCInstrDesc ARMInsts[];
5026}
5027static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5028  return ARMInsts[Opcode];
5029}
5030
5031// FIXME: We would really like to be able to tablegen'erate this.
5032bool ARMAsmParser::
5033validateInstruction(MCInst &Inst,
5034                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5035  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5036  SMLoc Loc = Operands[0]->getStartLoc();
5037  // Check the IT block state first.
5038  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
5039  // being allowed in IT blocks, but not being predicable.  It just always
5040  // executes.
5041  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
5042    unsigned bit = 1;
5043    if (ITState.FirstCond)
5044      ITState.FirstCond = false;
5045    else
5046      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5047    // The instruction must be predicable.
5048    if (!MCID.isPredicable())
5049      return Error(Loc, "instructions in IT block must be predicable");
5050    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5051    unsigned ITCond = bit ? ITState.Cond :
5052      ARMCC::getOppositeCondition(ITState.Cond);
5053    if (Cond != ITCond) {
5054      // Find the condition code Operand to get its SMLoc information.
5055      SMLoc CondLoc;
5056      for (unsigned i = 1; i < Operands.size(); ++i)
5057        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5058          CondLoc = Operands[i]->getStartLoc();
5059      return Error(CondLoc, "incorrect condition in IT block; got '" +
5060                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5061                   "', but expected '" +
5062                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5063    }
5064  // Check for non-'al' condition codes outside of the IT block.
5065  } else if (isThumbTwo() && MCID.isPredicable() &&
5066             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5067             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5068             Inst.getOpcode() != ARM::t2B)
5069    return Error(Loc, "predicated instructions must be in IT block");
5070
5071  switch (Inst.getOpcode()) {
5072  case ARM::LDRD:
5073  case ARM::LDRD_PRE:
5074  case ARM::LDRD_POST:
5075  case ARM::LDREXD: {
5076    // Rt2 must be Rt + 1.
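    // For example, "ldrd r0, r1, [r2]" is accepted, but "ldrd r0, r2, [r3]"
    // is rejected because r2 is not r0 + 1.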
5077    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5078    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5079    if (Rt2 != Rt + 1)
5080      return Error(Operands[3]->getStartLoc(),
5081                   "destination operands must be sequential");
5082    return false;
5083  }
5084  case ARM::STRD: {
5085    // Rt2 must be Rt + 1.
5086    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5087    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5088    if (Rt2 != Rt + 1)
5089      return Error(Operands[3]->getStartLoc(),
5090                   "source operands must be sequential");
5091    return false;
5092  }
5093  case ARM::STRD_PRE:
5094  case ARM::STRD_POST:
5095  case ARM::STREXD: {
5096    // Rt2 must be Rt + 1.
5097    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5098    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5099    if (Rt2 != Rt + 1)
5100      return Error(Operands[3]->getStartLoc(),
5101                   "source operands must be sequential");
5102    return false;
5103  }
5104  case ARM::SBFX:
5105  case ARM::UBFX: {
5106    // width must be in range [1, 32-lsb]
5107    unsigned lsb = Inst.getOperand(2).getImm();
5108    unsigned widthm1 = Inst.getOperand(3).getImm();
5109    if (widthm1 >= 32 - lsb)
5110      return Error(Operands[5]->getStartLoc(),
5111                   "bitfield width must be in range [1,32-lsb]");
5112    return false;
5113  }
5114  case ARM::tLDMIA: {
5115    // If we're parsing Thumb2, the .w variant is available and handles
5116    // most cases that are normally illegal for a Thumb1 LDM
5117    // instruction. We'll make the transformation in processInstruction()
5118    // if necessary.
5119    //
5120    // Thumb LDM instructions are writeback iff the base register is not
5121    // in the register list.
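    // For example, "ldmia r0!, {r1, r2}" requires the '!' since r0 is not in
    // the list, while "ldmia r0, {r0, r1}" must omit it.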
5122    unsigned Rn = Inst.getOperand(0).getReg();
5123    bool hasWritebackToken =
5124      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5125       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5126    bool listContainsBase;
5127    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5128      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5129                   "registers must be in range r0-r7");
5130    // If we should have writeback, then there should be a '!' token.
5131    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5132      return Error(Operands[2]->getStartLoc(),
5133                   "writeback operator '!' expected");
5134    // If we should not have writeback, there must not be a '!'. This is
5135    // true even for the 32-bit wide encodings.
5136    if (listContainsBase && hasWritebackToken)
5137      return Error(Operands[3]->getStartLoc(),
5138                   "writeback operator '!' not allowed when base register "
5139                   "in register list");
5140
5141    break;
5142  }
5143  case ARM::t2LDMIA_UPD: {
5144    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5145      return Error(Operands[4]->getStartLoc(),
5146                   "writeback operator '!' not allowed when base register "
5147                   "in register list");
5148    break;
5149  }
5150  // Like ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5151  // so only issue a diagnostic for Thumb1. The instructions will be
5152  // switched to the t2 encodings in processInstruction() if necessary.
5153  case ARM::tPOP: {
5154    bool listContainsBase;
5155    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5156        !isThumbTwo())
5157      return Error(Operands[2]->getStartLoc(),
5158                   "registers must be in range r0-r7 or pc");
5159    break;
5160  }
5161  case ARM::tPUSH: {
5162    bool listContainsBase;
5163    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5164        !isThumbTwo())
5165      return Error(Operands[2]->getStartLoc(),
5166                   "registers must be in range r0-r7 or lr");
5167    break;
5168  }
5169  case ARM::tSTMIA_UPD: {
5170    bool listContainsBase;
5171    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5172      return Error(Operands[4]->getStartLoc(),
5173                   "registers must be in range r0-r7");
5174    break;
5175  }
5176  }
5177
5178  return false;
5179}
5180
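// 'Spacing' is the register stride used when the pseudo is expanded in
// processInstruction(): 1 for the d-register forms and 2 for the q-register
// forms, since the remaining list operands are synthesized there as
// Vd + Spacing, Vd + 2 * Spacing, etc.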
5181static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5182  switch(Opc) {
5183  default: assert(0 && "unexpected opcode!");
5184  // VST1LN
5185  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5186  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5187  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5188  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5189  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5190  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5191  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5192  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5193  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5194
5195  // VST2LN
5196  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5197  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5198  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5199  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5200  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5201
5202  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5203  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5204  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5205  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5206  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5207
5208  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5209  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5210  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5211  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5212  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5213
5214  // VST3LN
5215  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5216  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5217  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5218  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5219  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5220  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5221  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5222  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5223  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5224  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5225  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5226  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5227  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5228  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5229  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5230
5231  // VST3
5232  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5233  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5234  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5235  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5236  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5237  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5238  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5239  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5240  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5241  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5242  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5243  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5244  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5245  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5246  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5247  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5248  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5249  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5250  }
5251}
5252
5253static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5254  switch(Opc) {
5255  default: assert(0 && "unexpected opcode!");
5256  // VLD1LN
5257  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5258  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5259  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5260  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5261  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5262  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5263  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5264  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5265  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5266
5267  // VLD2LN
5268  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5269  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5270  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5271  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5272  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5273  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5274  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5275  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5276  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5277  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5278  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5279  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5280  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5281  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5282  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5283
5284  // VLD3LN
5285  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5286  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5287  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5288  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
5289  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5290  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5291  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5292  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5293  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5294  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5295  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5296  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5297  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5298  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5299  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5300
5301  // VLD3
5302  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5303  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5304  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5305  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5306  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5307  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5308  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5309  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5310  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5311  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5312  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5313  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5314  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5315  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5316  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5317  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5318  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5319  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5320
5321  // VLD4
5322  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5323  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5324  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5325  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5326  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5327  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5328  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5329  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5330  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5331  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5332  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5333  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5334  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5335  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5336  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5337  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5338  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5339  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5340  }
5341}
5342
5343bool ARMAsmParser::
5344processInstruction(MCInst &Inst,
5345                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5346  switch (Inst.getOpcode()) {
5347  // Aliases for alternate PC+imm syntax of LDR instructions.
5348  case ARM::t2LDRpcrel:
5349    Inst.setOpcode(ARM::t2LDRpci);
5350    return true;
5351  case ARM::t2LDRBpcrel:
5352    Inst.setOpcode(ARM::t2LDRBpci);
5353    return true;
5354  case ARM::t2LDRHpcrel:
5355    Inst.setOpcode(ARM::t2LDRHpci);
5356    return true;
5357  case ARM::t2LDRSBpcrel:
5358    Inst.setOpcode(ARM::t2LDRSBpci);
5359    return true;
5360  case ARM::t2LDRSHpcrel:
5361    Inst.setOpcode(ARM::t2LDRSHpci);
5362    return true;
5363  // Handle NEON VST complex aliases.
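  // Each pseudo below is rewritten into the real instruction, e.g. a
  // "vst1.8 {d0[2]}, [r1], r2" alias becomes VST1LNd8_UPD with the
  // writeback and lane operands reordered to match the real encoding.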
5364  case ARM::VST1LNdWB_register_Asm_8:
5365  case ARM::VST1LNdWB_register_Asm_16:
5366  case ARM::VST1LNdWB_register_Asm_32: {
5367    MCInst TmpInst;
5368    // Shuffle the operands around so the lane index operand is in the
5369    // right place.
5370    unsigned Spacing;
5371    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5372    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5373    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5374    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5375    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5376    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5377    TmpInst.addOperand(Inst.getOperand(1)); // lane
5378    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5379    TmpInst.addOperand(Inst.getOperand(6));
5380    Inst = TmpInst;
5381    return true;
5382  }
5383
5384  case ARM::VST2LNdWB_register_Asm_8:
5385  case ARM::VST2LNdWB_register_Asm_16:
5386  case ARM::VST2LNdWB_register_Asm_32:
5387  case ARM::VST2LNqWB_register_Asm_16:
5388  case ARM::VST2LNqWB_register_Asm_32: {
5389    MCInst TmpInst;
5390    // Shuffle the operands around so the lane index operand is in the
5391    // right place.
5392    unsigned Spacing;
5393    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5394    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5395    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5396    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5397    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5398    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5399    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5400                                            Spacing));
5401    TmpInst.addOperand(Inst.getOperand(1)); // lane
5402    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5403    TmpInst.addOperand(Inst.getOperand(6));
5404    Inst = TmpInst;
5405    return true;
5406  }
5407
5408  case ARM::VST3LNdWB_register_Asm_8:
5409  case ARM::VST3LNdWB_register_Asm_16:
5410  case ARM::VST3LNdWB_register_Asm_32:
5411  case ARM::VST3LNqWB_register_Asm_16:
5412  case ARM::VST3LNqWB_register_Asm_32: {
5413    MCInst TmpInst;
5414    // Shuffle the operands around so the lane index operand is in the
5415    // right place.
5416    unsigned Spacing;
5417    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5418    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5419    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5420    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5421    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5422    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5423    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5424                                            Spacing));
5425    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5426                                            Spacing * 2));
5427    TmpInst.addOperand(Inst.getOperand(1)); // lane
5428    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5429    TmpInst.addOperand(Inst.getOperand(6));
5430    Inst = TmpInst;
5431    return true;
5432  }
5433
5434  case ARM::VST1LNdWB_fixed_Asm_8:
5435  case ARM::VST1LNdWB_fixed_Asm_16:
5436  case ARM::VST1LNdWB_fixed_Asm_32: {
5437    MCInst TmpInst;
5438    // Shuffle the operands around so the lane index operand is in the
5439    // right place.
5440    unsigned Spacing;
5441    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5442    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5443    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5444    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5445    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5446    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5447    TmpInst.addOperand(Inst.getOperand(1)); // lane
5448    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5449    TmpInst.addOperand(Inst.getOperand(5));
5450    Inst = TmpInst;
5451    return true;
5452  }
5453
5454  case ARM::VST2LNdWB_fixed_Asm_8:
5455  case ARM::VST2LNdWB_fixed_Asm_16:
5456  case ARM::VST2LNdWB_fixed_Asm_32:
5457  case ARM::VST2LNqWB_fixed_Asm_16:
5458  case ARM::VST2LNqWB_fixed_Asm_32: {
5459    MCInst TmpInst;
5460    // Shuffle the operands around so the lane index operand is in the
5461    // right place.
5462    unsigned Spacing;
5463    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5464    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5465    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5466    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5467    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5468    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5469    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5470                                            Spacing));
5471    TmpInst.addOperand(Inst.getOperand(1)); // lane
5472    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5473    TmpInst.addOperand(Inst.getOperand(5));
5474    Inst = TmpInst;
5475    return true;
5476  }
5477
5478  case ARM::VST3LNdWB_fixed_Asm_8:
5479  case ARM::VST3LNdWB_fixed_Asm_16:
5480  case ARM::VST3LNdWB_fixed_Asm_32:
5481  case ARM::VST3LNqWB_fixed_Asm_16:
5482  case ARM::VST3LNqWB_fixed_Asm_32: {
5483    MCInst TmpInst;
5484    // Shuffle the operands around so the lane index operand is in the
5485    // right place.
5486    unsigned Spacing;
5487    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5488    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5489    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5490    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5491    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5492    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5493    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5494                                            Spacing));
5495    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5496                                            Spacing * 2));
5497    TmpInst.addOperand(Inst.getOperand(1)); // lane
5498    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5499    TmpInst.addOperand(Inst.getOperand(5));
5500    Inst = TmpInst;
5501    return true;
5502  }
5503
5504  case ARM::VST1LNdAsm_8:
5505  case ARM::VST1LNdAsm_16:
5506  case ARM::VST1LNdAsm_32: {
5507    MCInst TmpInst;
5508    // Shuffle the operands around so the lane index operand is in the
5509    // right place.
5510    unsigned Spacing;
5511    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5512    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5513    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5514    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5515    TmpInst.addOperand(Inst.getOperand(1)); // lane
5516    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5517    TmpInst.addOperand(Inst.getOperand(5));
5518    Inst = TmpInst;
5519    return true;
5520  }
5521
5522  case ARM::VST2LNdAsm_8:
5523  case ARM::VST2LNdAsm_16:
5524  case ARM::VST2LNdAsm_32:
5525  case ARM::VST2LNqAsm_16:
5526  case ARM::VST2LNqAsm_32: {
5527    MCInst TmpInst;
5528    // Shuffle the operands around so the lane index operand is in the
5529    // right place.
5530    unsigned Spacing;
5531    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5532    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5533    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5534    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5535    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5536                                            Spacing));
5537    TmpInst.addOperand(Inst.getOperand(1)); // lane
5538    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5539    TmpInst.addOperand(Inst.getOperand(5));
5540    Inst = TmpInst;
5541    return true;
5542  }
5543
5544  case ARM::VST3LNdAsm_8:
5545  case ARM::VST3LNdAsm_16:
5546  case ARM::VST3LNdAsm_32:
5547  case ARM::VST3LNqAsm_16:
5548  case ARM::VST3LNqAsm_32: {
5549    MCInst TmpInst;
5550    // Shuffle the operands around so the lane index operand is in the
5551    // right place.
5552    unsigned Spacing;
5553    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5554    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5555    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5556    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5557    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5558                                            Spacing));
5559    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5560                                            Spacing * 2));
5561    TmpInst.addOperand(Inst.getOperand(1)); // lane
5562    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5563    TmpInst.addOperand(Inst.getOperand(5));
5564    Inst = TmpInst;
5565    return true;
5566  }
5567
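  // In all of these lane alias expansions, 'Spacing' (set by getRealVSTOpcode
  // and getRealVLDOpcode) is the register stride of the list: 1 for a
  // consecutive 'd' list such as {d0[1], d1[1], d2[1]} and 2 for an even/odd
  // 'q'-style list such as {d0[1], d2[1], d4[1]}. That is why the extra list
  // registers are formed as Vd + Spacing and Vd + Spacing * 2 above and below.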
5568  // Handle NEON VLD complex aliases.
5569  case ARM::VLD1LNdWB_register_Asm_8:
5570  case ARM::VLD1LNdWB_register_Asm_16:
5571  case ARM::VLD1LNdWB_register_Asm_32: {
5572    MCInst TmpInst;
5573    // Shuffle the operands around so the lane index operand is in the
5574    // right place.
5575    unsigned Spacing;
5576    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5577    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5578    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5579    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5580    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5581    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5582    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5583    TmpInst.addOperand(Inst.getOperand(1)); // lane
5584    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5585    TmpInst.addOperand(Inst.getOperand(6));
5586    Inst = TmpInst;
5587    return true;
5588  }
5589
5590  case ARM::VLD2LNdWB_register_Asm_8:
5591  case ARM::VLD2LNdWB_register_Asm_16:
5592  case ARM::VLD2LNdWB_register_Asm_32:
5593  case ARM::VLD2LNqWB_register_Asm_16:
5594  case ARM::VLD2LNqWB_register_Asm_32: {
5595    MCInst TmpInst;
5596    // Shuffle the operands around so the lane index operand is in the
5597    // right place.
5598    unsigned Spacing;
5599    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5600    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5601    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5602                                            Spacing));
5603    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5604    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5605    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5606    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5607    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5608    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5609                                            Spacing));
5610    TmpInst.addOperand(Inst.getOperand(1)); // lane
5611    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5612    TmpInst.addOperand(Inst.getOperand(6));
5613    Inst = TmpInst;
5614    return true;
5615  }
5616
5617  case ARM::VLD3LNdWB_register_Asm_8:
5618  case ARM::VLD3LNdWB_register_Asm_16:
5619  case ARM::VLD3LNdWB_register_Asm_32:
5620  case ARM::VLD3LNqWB_register_Asm_16:
5621  case ARM::VLD3LNqWB_register_Asm_32: {
5622    MCInst TmpInst;
5623    // Shuffle the operands around so the lane index operand is in the
5624    // right place.
5625    unsigned Spacing;
5626    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5627    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5628    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5629                                            Spacing));
5630    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5631                                            Spacing * 2));
5632    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5633    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5634    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5635    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5636    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5637    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5638                                            Spacing));
5639    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5640                                            Spacing * 2));
5641    TmpInst.addOperand(Inst.getOperand(1)); // lane
5642    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5643    TmpInst.addOperand(Inst.getOperand(6));
5644    Inst = TmpInst;
5645    return true;
5646  }
5647
5648  case ARM::VLD1LNdWB_fixed_Asm_8:
5649  case ARM::VLD1LNdWB_fixed_Asm_16:
5650  case ARM::VLD1LNdWB_fixed_Asm_32: {
5651    MCInst TmpInst;
5652    // Shuffle the operands around so the lane index operand is in the
5653    // right place.
5654    unsigned Spacing;
5655    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5656    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5657    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5658    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5659    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5660    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5661    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5662    TmpInst.addOperand(Inst.getOperand(1)); // lane
5663    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5664    TmpInst.addOperand(Inst.getOperand(5));
5665    Inst = TmpInst;
5666    return true;
5667  }
5668
5669  case ARM::VLD2LNdWB_fixed_Asm_8:
5670  case ARM::VLD2LNdWB_fixed_Asm_16:
5671  case ARM::VLD2LNdWB_fixed_Asm_32:
5672  case ARM::VLD2LNqWB_fixed_Asm_16:
5673  case ARM::VLD2LNqWB_fixed_Asm_32: {
5674    MCInst TmpInst;
5675    // Shuffle the operands around so the lane index operand is in the
5676    // right place.
5677    unsigned Spacing;
5678    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5679    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5680    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5681                                            Spacing));
5682    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5683    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5684    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5685    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5686    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5687    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5688                                            Spacing));
5689    TmpInst.addOperand(Inst.getOperand(1)); // lane
5690    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5691    TmpInst.addOperand(Inst.getOperand(5));
5692    Inst = TmpInst;
5693    return true;
5694  }
5695
5696  case ARM::VLD3LNdWB_fixed_Asm_8:
5697  case ARM::VLD3LNdWB_fixed_Asm_16:
5698  case ARM::VLD3LNdWB_fixed_Asm_32:
5699  case ARM::VLD3LNqWB_fixed_Asm_16:
5700  case ARM::VLD3LNqWB_fixed_Asm_32: {
5701    MCInst TmpInst;
5702    // Shuffle the operands around so the lane index operand is in the
5703    // right place.
5704    unsigned Spacing;
5705    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5706    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5707    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5708                                            Spacing));
5709    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5710                                            Spacing * 2));
5711    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5712    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5713    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5714    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5715    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5716    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5717                                            Spacing));
5718    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5719                                            Spacing * 2));
5720    TmpInst.addOperand(Inst.getOperand(1)); // lane
5721    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5722    TmpInst.addOperand(Inst.getOperand(5));
5723    Inst = TmpInst;
5724    return true;
5725  }
5726
5727  case ARM::VLD1LNdAsm_8:
5728  case ARM::VLD1LNdAsm_16:
5729  case ARM::VLD1LNdAsm_32: {
5730    MCInst TmpInst;
5731    // Shuffle the operands around so the lane index operand is in the
5732    // right place.
5733    unsigned Spacing;
5734    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5735    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5736    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5737    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5738    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5739    TmpInst.addOperand(Inst.getOperand(1)); // lane
5740    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5741    TmpInst.addOperand(Inst.getOperand(5));
5742    Inst = TmpInst;
5743    return true;
5744  }
5745
5746  case ARM::VLD2LNdAsm_8:
5747  case ARM::VLD2LNdAsm_16:
5748  case ARM::VLD2LNdAsm_32:
5749  case ARM::VLD2LNqAsm_16:
5750  case ARM::VLD2LNqAsm_32: {
5751    MCInst TmpInst;
5752    // Shuffle the operands around so the lane index operand is in the
5753    // right place.
5754    unsigned Spacing;
5755    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5756    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5757    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5758                                            Spacing));
5759    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5760    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5761    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5762    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5763                                            Spacing));
5764    TmpInst.addOperand(Inst.getOperand(1)); // lane
5765    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5766    TmpInst.addOperand(Inst.getOperand(5));
5767    Inst = TmpInst;
5768    return true;
5769  }
5770
5771  case ARM::VLD3LNdAsm_8:
5772  case ARM::VLD3LNdAsm_16:
5773  case ARM::VLD3LNdAsm_32:
5774  case ARM::VLD3LNqAsm_16:
5775  case ARM::VLD3LNqAsm_32: {
5776    MCInst TmpInst;
5777    // Shuffle the operands around so the lane index operand is in the
5778    // right place.
5779    unsigned Spacing;
5780    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5781    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5782    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5783                                            Spacing));
5784    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5785                                            Spacing * 2));
5786    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5787    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5788    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5789    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5790                                            Spacing));
5791    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5792                                            Spacing * 2));
5793    TmpInst.addOperand(Inst.getOperand(1)); // lane
5794    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5795    TmpInst.addOperand(Inst.getOperand(5));
5796    Inst = TmpInst;
5797    return true;
5798  }
5799
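  // For the lane loads above, the destination registers are also added a
  // second time as tied source operands ('Tied operand src (== Vd)'): a lane
  // load only replaces a single element, so the real instruction reads the
  // old register contents in order to preserve the untouched lanes.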
5800  // VLD3 multiple 3-element structure instructions.
5801  case ARM::VLD3dAsm_8:
5802  case ARM::VLD3dAsm_16:
5803  case ARM::VLD3dAsm_32:
5804  case ARM::VLD3qAsm_8:
5805  case ARM::VLD3qAsm_16:
5806  case ARM::VLD3qAsm_32: {
5807    MCInst TmpInst;
5808    unsigned Spacing;
5809    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5810    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5811    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5812                                            Spacing));
5813    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5814                                            Spacing * 2));
5815    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5816    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5817    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5818    TmpInst.addOperand(Inst.getOperand(4));
5819    Inst = TmpInst;
5820    return true;
5821  }
5822
5823  case ARM::VLD3dWB_fixed_Asm_8:
5824  case ARM::VLD3dWB_fixed_Asm_16:
5825  case ARM::VLD3dWB_fixed_Asm_32:
5826  case ARM::VLD3qWB_fixed_Asm_8:
5827  case ARM::VLD3qWB_fixed_Asm_16:
5828  case ARM::VLD3qWB_fixed_Asm_32: {
5829    MCInst TmpInst;
5830    unsigned Spacing;
5831    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5832    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5833    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5834                                            Spacing));
5835    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5836                                            Spacing * 2));
5837    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5838    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
5839    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5840    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5841    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5842    TmpInst.addOperand(Inst.getOperand(4));
5843    Inst = TmpInst;
5844    return true;
5845  }
5846
5847  case ARM::VLD3dWB_register_Asm_8:
5848  case ARM::VLD3dWB_register_Asm_16:
5849  case ARM::VLD3dWB_register_Asm_32:
5850  case ARM::VLD3qWB_register_Asm_8:
5851  case ARM::VLD3qWB_register_Asm_16:
5852  case ARM::VLD3qWB_register_Asm_32: {
5853    MCInst TmpInst;
5854    unsigned Spacing;
5855    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5856    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5857    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5858                                            Spacing));
5859    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5860                                            Spacing * 2));
5861    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5862    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
5863    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5864    TmpInst.addOperand(Inst.getOperand(3)); // Rm
5865    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5866    TmpInst.addOperand(Inst.getOperand(5));
5867    Inst = TmpInst;
5868    return true;
5869  }
5870
5871  // VLD4 multiple 4-element structure instructions.
5872  case ARM::VLD4dAsm_8:
5873  case ARM::VLD4dAsm_16:
5874  case ARM::VLD4dAsm_32:
5875  case ARM::VLD4qAsm_8:
5876  case ARM::VLD4qAsm_16:
5877  case ARM::VLD4qAsm_32: {
5878    MCInst TmpInst;
5879    unsigned Spacing;
5880    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5881    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5882    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5883                                            Spacing));
5884    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5885                                            Spacing * 2));
5886    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5887                                            Spacing * 3));
5888    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5889    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5890    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5891    TmpInst.addOperand(Inst.getOperand(4));
5892    Inst = TmpInst;
5893    return true;
5894  }
5895
5896  case ARM::VLD4dWB_fixed_Asm_8:
5897  case ARM::VLD4dWB_fixed_Asm_16:
5898  case ARM::VLD4dWB_fixed_Asm_32:
5899  case ARM::VLD4qWB_fixed_Asm_8:
5900  case ARM::VLD4qWB_fixed_Asm_16:
5901  case ARM::VLD4qWB_fixed_Asm_32: {
5902    MCInst TmpInst;
5903    unsigned Spacing;
5904    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5905    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5906    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5907                                            Spacing));
5908    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5909                                            Spacing * 2));
5910    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5911                                            Spacing * 3));
5912    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5913    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
5914    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5915    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5916    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5917    TmpInst.addOperand(Inst.getOperand(4));
5918    Inst = TmpInst;
5919    return true;
5920  }
5921
5922  case ARM::VLD4dWB_register_Asm_8:
5923  case ARM::VLD4dWB_register_Asm_16:
5924  case ARM::VLD4dWB_register_Asm_32:
5925  case ARM::VLD4qWB_register_Asm_8:
5926  case ARM::VLD4qWB_register_Asm_16:
5927  case ARM::VLD4qWB_register_Asm_32: {
5928    MCInst TmpInst;
5929    unsigned Spacing;
5930    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5931    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5932    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5933                                            Spacing));
5934    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5935                                            Spacing * 2));
5936    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5937                                            Spacing * 3));
5938    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5939    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
5940    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5941    TmpInst.addOperand(Inst.getOperand(3)); // Rm
5942    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5943    TmpInst.addOperand(Inst.getOperand(5));
5944    Inst = TmpInst;
5945    return true;
5946  }
5947
5948  // VST3 multiple 3-element structure instructions.
5949  case ARM::VST3dAsm_8:
5950  case ARM::VST3dAsm_16:
5951  case ARM::VST3dAsm_32:
5952  case ARM::VST3qAsm_8:
5953  case ARM::VST3qAsm_16:
5954  case ARM::VST3qAsm_32: {
5955    MCInst TmpInst;
5956    unsigned Spacing;
5957    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5958    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5959    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5960    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5961    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5962                                            Spacing));
5963    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5964                                            Spacing * 2));
5965    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5966    TmpInst.addOperand(Inst.getOperand(4));
5967    Inst = TmpInst;
5968    return true;
5969  }
5970
5971  case ARM::VST3dWB_fixed_Asm_8:
5972  case ARM::VST3dWB_fixed_Asm_16:
5973  case ARM::VST3dWB_fixed_Asm_32:
5974  case ARM::VST3qWB_fixed_Asm_8:
5975  case ARM::VST3qWB_fixed_Asm_16:
5976  case ARM::VST3qWB_fixed_Asm_32: {
5977    MCInst TmpInst;
5978    unsigned Spacing;
5979    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5980    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5981    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
5982    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5983    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5984    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5985    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5986                                            Spacing));
5987    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5988                                            Spacing * 2));
5989    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5990    TmpInst.addOperand(Inst.getOperand(4));
5991    Inst = TmpInst;
5992    return true;
5993  }
5994
5995  case ARM::VST3dWB_register_Asm_8:
5996  case ARM::VST3dWB_register_Asm_16:
5997  case ARM::VST3dWB_register_Asm_32:
5998  case ARM::VST3qWB_register_Asm_8:
5999  case ARM::VST3qWB_register_Asm_16:
6000  case ARM::VST3qWB_register_Asm_32: {
6001    MCInst TmpInst;
6002    unsigned Spacing;
6003    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6004    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6005    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6006    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6007    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6008    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6009    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6010                                            Spacing));
6011    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6012                                            Spacing * 2));
6013    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6014    TmpInst.addOperand(Inst.getOperand(5));
6015    Inst = TmpInst;
6016    return true;
6017  }
6018
6019  // Handle the Thumb2 mode MOV complex aliases.
6020  case ARM::t2MOVsr:
6021  case ARM::t2MOVSsr: {
6022    // Which instruction to expand to depends on whether the CCOut operand is
6023    // set, whether the register operands are low registers, and whether we're
6024    // in an IT block.
6025    bool isNarrow = false;
6026    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6027        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6028        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6029        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6030        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6031      isNarrow = true;
6032    MCInst TmpInst;
6033    unsigned newOpc;
6034    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6035    default: llvm_unreachable("unexpected opcode!");
6036    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6037    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6038    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6039    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6040    }
6041    TmpInst.setOpcode(newOpc);
6042    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6043    if (isNarrow)
6044      TmpInst.addOperand(MCOperand::CreateReg(
6045          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6046    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6047    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6048    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6049    TmpInst.addOperand(Inst.getOperand(5));
6050    if (!isNarrow)
6051      TmpInst.addOperand(MCOperand::CreateReg(
6052          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6053    Inst = TmpInst;
6054    return true;
6055  }
6056  case ARM::t2MOVsi:
6057  case ARM::t2MOVSsi: {
6058    // Which instruction to expand to depends on whether the CCOut operand is
6059    // set, whether the register operands are low registers, and whether we're
6060    // in an IT block.
6061    bool isNarrow = false;
6062    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6063        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6064        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6065      isNarrow = true;
6066    MCInst TmpInst;
6067    unsigned newOpc;
6068    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6069    default: llvm_unreachable("unexpected opcode!");
6070    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6071    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6072    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6073    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6074    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6075    }
6076    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6077    if (Amount == 32) Amount = 0; // asr/lsr #32 is encoded as 0.
6078    TmpInst.setOpcode(newOpc);
6079    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6080    if (isNarrow)
6081      TmpInst.addOperand(MCOperand::CreateReg(
6082          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6083    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6084    if (newOpc != ARM::t2RRX)
6085      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6086    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6087    TmpInst.addOperand(Inst.getOperand(4));
6088    if (!isNarrow)
6089      TmpInst.addOperand(MCOperand::CreateReg(
6090          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6091    Inst = TmpInst;
6092    return true;
6093  }
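  // For both MOV-shift forms above, the narrow 16-bit shift encodings are
  // only usable when the registers are low registers and the flag-setting
  // behaviour matches the IT-block context: e.g. 'lsls r0, r0, r1' outside an
  // IT block and 'lsleq r0, r0, r1' inside one can both be encoded in 16 bits,
  // while a non-flag-setting 'lsl r0, r0, r1' outside an IT block cannot.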
6094  // Handle the ARM mode MOV complex aliases.
6095  case ARM::ASRr:
6096  case ARM::LSRr:
6097  case ARM::LSLr:
6098  case ARM::RORr: {
6099    ARM_AM::ShiftOpc ShiftTy;
6100    switch(Inst.getOpcode()) {
6101    default: llvm_unreachable("unexpected opcode!");
6102    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6103    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6104    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6105    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6106    }
6107    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6108    MCInst TmpInst;
6109    TmpInst.setOpcode(ARM::MOVsr);
6110    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6111    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6112    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6113    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6114    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6115    TmpInst.addOperand(Inst.getOperand(4));
6116    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6117    Inst = TmpInst;
6118    return true;
6119  }
6120  case ARM::ASRi:
6121  case ARM::LSRi:
6122  case ARM::LSLi:
6123  case ARM::RORi: {
6124    ARM_AM::ShiftOpc ShiftTy;
6125    switch(Inst.getOpcode()) {
6126    default: llvm_unreachable("unexpected opcode!");
6127    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6128    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6129    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6130    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6131    }
6132    // A shift by zero is a plain MOVr, not a MOVsi.
6133    unsigned Amt = Inst.getOperand(2).getImm();
6134    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6135    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6136    MCInst TmpInst;
6137    TmpInst.setOpcode(Opc);
6138    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6139    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6140    if (Opc == ARM::MOVsi)
6141      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6142    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6143    TmpInst.addOperand(Inst.getOperand(4));
6144    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6145    Inst = TmpInst;
6146    return true;
6147  }
6148  case ARM::RRXi: {
6149    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6150    MCInst TmpInst;
6151    TmpInst.setOpcode(ARM::MOVsi);
6152    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6153    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6154    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6155    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6156    TmpInst.addOperand(Inst.getOperand(3));
6157    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6158    Inst = TmpInst;
6159    return true;
6160  }
6161  case ARM::t2LDMIA_UPD: {
6162    // If this is a load of a single register, then we should use
6163    // a post-indexed LDR instruction instead, per the ARM ARM.
6164    if (Inst.getNumOperands() != 5)
6165      return false;
6166    MCInst TmpInst;
6167    TmpInst.setOpcode(ARM::t2LDR_POST);
6168    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6169    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6170    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6171    TmpInst.addOperand(MCOperand::CreateImm(4));
6172    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6173    TmpInst.addOperand(Inst.getOperand(3));
6174    Inst = TmpInst;
6175    return true;
6176  }
6177  case ARM::t2STMDB_UPD: {
6178    // If this is a store of a single register, then we should use
6179    // a pre-indexed STR instruction instead, per the ARM ARM.
6180    if (Inst.getNumOperands() != 5)
6181      return false;
6182    MCInst TmpInst;
6183    TmpInst.setOpcode(ARM::t2STR_PRE);
6184    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6185    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6186    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6187    TmpInst.addOperand(MCOperand::CreateImm(-4));
6188    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6189    TmpInst.addOperand(Inst.getOperand(3));
6190    Inst = TmpInst;
6191    return true;
6192  }
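  // e.g. a 'pop {r8}' that has already been rewritten into a single-register
  // t2LDMIA_UPD (see the tPOP case below) is emitted by the case above as
  // 'ldr r8, [sp], #4'; a single-register t2STMDB_UPD is likewise emitted as
  // a pre-indexed 'str Rt, [Rn, #-4]!'.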
6193  case ARM::LDMIA_UPD:
6194    // If this is a load of a single register via a 'pop', then we should use
6195    // a post-indexed LDR instruction instead, per the ARM ARM.
6196    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6197        Inst.getNumOperands() == 5) {
6198      MCInst TmpInst;
6199      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6200      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6201      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6202      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6203      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6204      TmpInst.addOperand(MCOperand::CreateImm(4));
6205      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6206      TmpInst.addOperand(Inst.getOperand(3));
6207      Inst = TmpInst;
6208      return true;
6209    }
6210    break;
6211  case ARM::STMDB_UPD:
6212    // If this is a store of a single register via a 'push', then we should use
6213    // a pre-indexed STR instruction instead, per the ARM ARM.
6214    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6215        Inst.getNumOperands() == 5) {
6216      MCInst TmpInst;
6217      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6218      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6219      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6220      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6221      TmpInst.addOperand(MCOperand::CreateImm(-4));
6222      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6223      TmpInst.addOperand(Inst.getOperand(3));
6224      Inst = TmpInst;
6225    }
6226    break;
6227  case ARM::t2ADDri12:
6228    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6229    // mnemonic was used (not "addw"), encoding T3 is preferred.
6230    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6231        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6232      break;
6233    Inst.setOpcode(ARM::t2ADDri);
6234    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6235    break;
6236  case ARM::t2SUBri12:
6237    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6238    // mnemonic was used (not "subw"), encoding T3 is preferred.
6239    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6240        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6241      break;
6242    Inst.setOpcode(ARM::t2SUBri);
6243    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6244    break;
6245  case ARM::tADDi8:
6246    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6247    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6248    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6249    // to encoding T1 if <Rd> is omitted."
6250    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6251      Inst.setOpcode(ARM::tADDi3);
6252      return true;
6253    }
6254    break;
6255  case ARM::tSUBi8:
6256    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6257    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6258    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6259    // to encoding T1 if <Rd> is omitted."
6260    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6261      Inst.setOpcode(ARM::tSUBi3);
6262      return true;
6263    }
6264    break;
6265  case ARM::t2ADDrr: {
6266    // If the destination and first source operand are the same, and
6267    // there's no setting of the flags, use encoding T2 instead of T3.
6268    // Note that this is only for ADD, not SUB. This mirrors the system
6269    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
6270    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6271        Inst.getOperand(5).getReg() != 0 ||
6272        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6273         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6274      break;
6275    MCInst TmpInst;
6276    TmpInst.setOpcode(ARM::tADDhirr);
6277    TmpInst.addOperand(Inst.getOperand(0));
6278    TmpInst.addOperand(Inst.getOperand(0));
6279    TmpInst.addOperand(Inst.getOperand(2));
6280    TmpInst.addOperand(Inst.getOperand(3));
6281    TmpInst.addOperand(Inst.getOperand(4));
6282    Inst = TmpInst;
6283    return true;
6284  }
6285  case ARM::tB:
6286    // A Thumb conditional branch outside of an IT block is a tBcc.
6287    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6288      Inst.setOpcode(ARM::tBcc);
6289      return true;
6290    }
6291    break;
6292  case ARM::t2B:
6293    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6294    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6295      Inst.setOpcode(ARM::t2Bcc);
6296      return true;
6297    }
6298    break;
6299  case ARM::t2Bcc:
6300    // If the conditional is AL or we're in an IT block, we really want t2B.
6301    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6302      Inst.setOpcode(ARM::t2B);
6303      return true;
6304    }
6305    break;
6306  case ARM::tBcc:
6307    // If the conditional is AL, we really want tB.
6308    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6309      Inst.setOpcode(ARM::tB);
6310      return true;
6311    }
6312    break;
6313  case ARM::tLDMIA: {
6314    // If the register list contains any high registers, or if the writeback
6315    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6316    // instead if we're in Thumb2. Otherwise, this should have generated
6317    // an error in validateInstruction().
6318    unsigned Rn = Inst.getOperand(0).getReg();
6319    bool hasWritebackToken =
6320      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6321       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6322    bool listContainsBase;
6323    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6324        (!listContainsBase && !hasWritebackToken) ||
6325        (listContainsBase && hasWritebackToken)) {
6326      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6327      assert (isThumbTwo());
6328      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6329      // If we're switching to the updating version, we need to insert
6330      // the writeback tied operand.
6331      if (hasWritebackToken)
6332        Inst.insert(Inst.begin(),
6333                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6334      return true;
6335    }
6336    break;
6337  }
6338  case ARM::tSTMIA_UPD: {
6339    // If the register list contains any high registers, we need to use
6340    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6341    // should have generated an error in validateInstruction().
6342    unsigned Rn = Inst.getOperand(0).getReg();
6343    bool listContainsBase;
6344    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6345      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6346      assert (isThumbTwo());
6347      Inst.setOpcode(ARM::t2STMIA_UPD);
6348      return true;
6349    }
6350    break;
6351  }
6352  case ARM::tPOP: {
6353    bool listContainsBase;
6354    // If the register list contains any high registers, we need to use
6355    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6356    // should have generated an error in validateInstruction().
6357    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6358      return false;
6359    assert (isThumbTwo());
6360    Inst.setOpcode(ARM::t2LDMIA_UPD);
6361    // Add the base register and writeback operands.
6362    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6363    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6364    return true;
6365  }
6366  case ARM::tPUSH: {
6367    bool listContainsBase;
6368    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6369      return false;
6370    assert (isThumbTwo());
6371    Inst.setOpcode(ARM::t2STMDB_UPD);
6372    // Add the base register and writeback operands.
6373    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6374    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6375    return true;
6376  }
6377  case ARM::t2MOVi: {
6378    // If we can use the 16-bit encoding and the user didn't explicitly
6379    // request the 32-bit variant, transform it here.
6380    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6381        Inst.getOperand(1).getImm() <= 255 &&
6382        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6383         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6384        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6385        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6386         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6387      // The operands aren't in the same order for tMOVi8...
6388      MCInst TmpInst;
6389      TmpInst.setOpcode(ARM::tMOVi8);
6390      TmpInst.addOperand(Inst.getOperand(0));
6391      TmpInst.addOperand(Inst.getOperand(4));
6392      TmpInst.addOperand(Inst.getOperand(1));
6393      TmpInst.addOperand(Inst.getOperand(2));
6394      TmpInst.addOperand(Inst.getOperand(3));
6395      Inst = TmpInst;
6396      return true;
6397    }
6398    break;
6399  }
6400  case ARM::t2MOVr: {
6401    // If we can use the 16-bit encoding and the user didn't explicitly
6402    // request the 32-bit variant, transform it here.
6403    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6404        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6405        Inst.getOperand(2).getImm() == ARMCC::AL &&
6406        Inst.getOperand(4).getReg() == ARM::CPSR &&
6407        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6408         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6409      // The operands aren't the same for tMOV[S]r... (no cc_out)
6410      MCInst TmpInst;
6411      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6412      TmpInst.addOperand(Inst.getOperand(0));
6413      TmpInst.addOperand(Inst.getOperand(1));
6414      TmpInst.addOperand(Inst.getOperand(2));
6415      TmpInst.addOperand(Inst.getOperand(3));
6416      Inst = TmpInst;
6417      return true;
6418    }
6419    break;
6420  }
6421  case ARM::t2SXTH:
6422  case ARM::t2SXTB:
6423  case ARM::t2UXTH:
6424  case ARM::t2UXTB: {
6425    // If we can use the 16-bit encoding and the user didn't explicitly
6426    // request the 32-bit variant, transform it here.
6427    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6428        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6429        Inst.getOperand(2).getImm() == 0 &&
6430        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6431         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6432      unsigned NewOpc;
6433      switch (Inst.getOpcode()) {
6434      default: llvm_unreachable("Illegal opcode!");
6435      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6436      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6437      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6438      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6439      }
6440      // The operands aren't the same for thumb1 (no rotate operand).
6441      MCInst TmpInst;
6442      TmpInst.setOpcode(NewOpc);
6443      TmpInst.addOperand(Inst.getOperand(0));
6444      TmpInst.addOperand(Inst.getOperand(1));
6445      TmpInst.addOperand(Inst.getOperand(3));
6446      TmpInst.addOperand(Inst.getOperand(4));
6447      Inst = TmpInst;
6448      return true;
6449    }
6450    break;
6451  }
6452  case ARM::MOVsi: {
6453    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6454    if (SOpc == ARM_AM::rrx) return false;
6455    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6456      // Shifting by zero is accepted as a vanilla 'MOVr'
6457      MCInst TmpInst;
6458      TmpInst.setOpcode(ARM::MOVr);
6459      TmpInst.addOperand(Inst.getOperand(0));
6460      TmpInst.addOperand(Inst.getOperand(1));
6461      TmpInst.addOperand(Inst.getOperand(3));
6462      TmpInst.addOperand(Inst.getOperand(4));
6463      TmpInst.addOperand(Inst.getOperand(5));
6464      Inst = TmpInst;
6465      return true;
6466    }
6467    return false;
6468  }
6469  case ARM::ANDrsi:
6470  case ARM::ORRrsi:
6471  case ARM::EORrsi:
6472  case ARM::BICrsi:
6473  case ARM::SUBrsi:
6474  case ARM::ADDrsi: {
6475    unsigned newOpc;
6476    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6477    if (SOpc == ARM_AM::rrx) return false;
6478    switch (Inst.getOpcode()) {
6479    default: llvm_unreachable("unexpected opcode!");
6480    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6481    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6482    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6483    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6484    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6485    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6486    }
6487    // If the shift is by zero, use the non-shifted instruction definition.
6488    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6489      MCInst TmpInst;
6490      TmpInst.setOpcode(newOpc);
6491      TmpInst.addOperand(Inst.getOperand(0));
6492      TmpInst.addOperand(Inst.getOperand(1));
6493      TmpInst.addOperand(Inst.getOperand(2));
6494      TmpInst.addOperand(Inst.getOperand(4));
6495      TmpInst.addOperand(Inst.getOperand(5));
6496      TmpInst.addOperand(Inst.getOperand(6));
6497      Inst = TmpInst;
6498      return true;
6499    }
6500    return false;
6501  }
6502  case ARM::t2IT: {
6503    // In the encoding, each mask bit for the conditions after the first is
6504    // interpreted relative to the low bit of the condition code: a bit equal
6505    // to that low bit means 't'. We always parse 1 as 't', so XOR-toggle the
6506    // bits if the low bit of the condition code is zero. The encoding also
6507    // expects the low bit of the condition to appear as bit 4 of the mask
6508    // operand, so OR that in if needed.
6509    MCOperand &MO = Inst.getOperand(1);
6510    unsigned Mask = MO.getImm();
6511    unsigned OrigMask = Mask;
6512    unsigned TZ = CountTrailingZeros_32(Mask);
6513    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6514      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6515      for (unsigned i = 3; i != TZ; --i)
6516        Mask ^= 1 << i;
6517    } else
6518      Mask |= 0x10;
6519    MO.setImm(Mask);
6520
6521    // Set up the IT block state according to the IT instruction we just
6522    // matched.
6523    assert(!inITBlock() && "nested IT blocks?!");
6524    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6525    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6526    ITState.CurPosition = 0;
6527    ITState.FirstCond = true;
6528    break;
6529  }
6530  }
6531  return false;
6532}
6533
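// Illustrative restatement of the IT mask adjustment performed in the t2IT
// case of processInstruction above. This is a hypothetical standalone helper,
// not used by the parser; it assumes the same conventions as that code: the
// parsed mask uses 1 for 't', carries a terminating 1 bit, and the encoded
// mask holds the low bit of the first condition in bit 4.
static unsigned encodeITMaskSketch(unsigned FirstCond, unsigned ParsedMask) {
  unsigned Mask = ParsedMask;
  unsigned TZ = CountTrailingZeros_32(Mask);
  assert(Mask && TZ <= 3 && "illegal IT mask value!");
  if ((FirstCond & 1) == 0) {
    // Low bit of the condition is 0: flip each 't'/'e' bit above the
    // terminating 1, since the encoding reads those bits relative to the
    // condition's low bit.
    for (unsigned i = 3; i != TZ; --i)
      Mask ^= 1 << i;
  } else {
    // Low bit of the condition is 1: record it as bit 4 of the mask operand.
    Mask |= 0x10;
  }
  return Mask;
}
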
6534unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6535  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6536  // suffix depending on whether they're in an IT block or not.
6537  unsigned Opc = Inst.getOpcode();
6538  const MCInstrDesc &MCID = getInstDesc(Opc);
6539  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6540    assert(MCID.hasOptionalDef() &&
6541           "optionally flag setting instruction missing optional def operand");
6542    assert(MCID.NumOperands == Inst.getNumOperands() &&
6543           "operand count mismatch!");
6544    // Find the optional-def operand (cc_out).
6545    unsigned OpNo;
6546    for (OpNo = 0;
6547         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
6548         ++OpNo)
6549      ;
6550    // If we're parsing Thumb1, reject it completely.
6551    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6552      return Match_MnemonicFail;
6553    // If we're parsing Thumb2, which form is legal depends on whether we're
6554    // in an IT block.
6555    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6556        !inITBlock())
6557      return Match_RequiresITBlock;
6558    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6559        inITBlock())
6560      return Match_RequiresNotITBlock;
6561  }
6562  // Some Thumb1 encodings that support high registers only allow both
6563  // registers to be from r0-r7 when Thumb2 is available.
6564  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6565           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6566           isARMLowRegister(Inst.getOperand(2).getReg()))
6567    return Match_RequiresThumb2;
6568  // Others only require ARMv6 or later.
6569  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6570           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6571           isARMLowRegister(Inst.getOperand(1).getReg()))
6572    return Match_RequiresV6;
6573  return Match_Success;
6574}
6575
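// A minimal sketch of the cc_out rule enforced in checkTargetMatchPredicate
// above (hypothetical helper, shown for illustration only): in Thumb1 these
// optionally flag-setting 16-bit arithmetic instructions must set flags; in
// Thumb2 they must set flags exactly when they are outside an IT block.
static bool thumbArithCCOutIsLegalSketch(bool IsThumbTwo, bool SetsFlags,
                                         bool InITBlock) {
  if (!IsThumbTwo)
    return SetsFlags; // Thumb1 only has the flag-setting forms.
  return SetsFlags == !InITBlock;
}
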
6576bool ARMAsmParser::
6577MatchAndEmitInstruction(SMLoc IDLoc,
6578                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
6579                        MCStreamer &Out) {
6580  MCInst Inst;
6581  unsigned ErrorInfo;
6582  unsigned MatchResult;
6583  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6584  switch (MatchResult) {
6585  default: break;
6586  case Match_Success:
6587    // Context-sensitive operand constraints aren't handled by the matcher,
6588    // so check them here.
6589    if (validateInstruction(Inst, Operands)) {
6590      // Still progress the IT block, otherwise one wrong condition causes
6591      // nasty cascading errors.
6592      forwardITPosition();
6593      return true;
6594    }
6595
6596    // Some instructions need post-processing to, for example, tweak which
6597    // encoding is selected. Loop on it while changes happen so the
6598    // individual transformations can chain off each other. E.g.,
6599    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
6600    while (processInstruction(Inst, Operands))
6601      ;
6602
6603    // Only move forward at the very end so that everything in validate
6604    // and process gets a consistent answer about whether we're in an IT
6605    // block.
6606    forwardITPosition();
6607
6608    Out.EmitInstruction(Inst);
6609    return false;
6610  case Match_MissingFeature:
6611    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6612    return true;
6613  case Match_InvalidOperand: {
6614    SMLoc ErrorLoc = IDLoc;
6615    if (ErrorInfo != ~0U) {
6616      if (ErrorInfo >= Operands.size())
6617        return Error(IDLoc, "too few operands for instruction");
6618
6619      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6620      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6621    }
6622
6623    return Error(ErrorLoc, "invalid operand for instruction");
6624  }
6625  case Match_MnemonicFail:
6626    return Error(IDLoc, "invalid instruction");
6627  case Match_ConversionFail:
6628    // The converter function will have already emitted a diagnostic.
6629    return true;
6630  case Match_RequiresNotITBlock:
6631    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6632  case Match_RequiresITBlock:
6633    return Error(IDLoc, "instruction only valid inside IT block");
6634  case Match_RequiresV6:
6635    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6636  case Match_RequiresThumb2:
6637    return Error(IDLoc, "instruction variant requires Thumb2");
6638  }
6639
6640  llvm_unreachable("Implement any new match types added!");
6641}
6642
6643/// parseDirective parses the ARM-specific directives.
6644bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6645  StringRef IDVal = DirectiveID.getIdentifier();
6646  if (IDVal == ".word")
6647    return parseDirectiveWord(4, DirectiveID.getLoc());
6648  else if (IDVal == ".thumb")
6649    return parseDirectiveThumb(DirectiveID.getLoc());
6650  else if (IDVal == ".arm")
6651    return parseDirectiveARM(DirectiveID.getLoc());
6652  else if (IDVal == ".thumb_func")
6653    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6654  else if (IDVal == ".code")
6655    return parseDirectiveCode(DirectiveID.getLoc());
6656  else if (IDVal == ".syntax")
6657    return parseDirectiveSyntax(DirectiveID.getLoc());
6658  else if (IDVal == ".unreq")
6659    return parseDirectiveUnreq(DirectiveID.getLoc());
6660  else if (IDVal == ".arch")
6661    return parseDirectiveArch(DirectiveID.getLoc());
6662  else if (IDVal == ".eabi_attribute")
6663    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6664  return true;
6665}
6666
6667/// parseDirectiveWord
6668///  ::= .word [ expression (, expression)* ]
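///  e.g. '.word 0x11223344, label+4' emits two 4-byte values.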
6669bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6670  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6671    for (;;) {
6672      const MCExpr *Value;
6673      if (getParser().ParseExpression(Value))
6674        return true;
6675
6676      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6677
6678      if (getLexer().is(AsmToken::EndOfStatement))
6679        break;
6680
6681      // FIXME: Improve diagnostic.
6682      if (getLexer().isNot(AsmToken::Comma))
6683        return Error(L, "unexpected token in directive");
6684      Parser.Lex();
6685    }
6686  }
6687
6688  Parser.Lex();
6689  return false;
6690}
6691
6692/// parseDirectiveThumb
6693///  ::= .thumb
6694bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6695  if (getLexer().isNot(AsmToken::EndOfStatement))
6696    return Error(L, "unexpected token in directive");
6697  Parser.Lex();
6698
6699  if (!isThumb())
6700    SwitchMode();
6701  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6702  return false;
6703}
6704
6705/// parseDirectiveARM
6706///  ::= .arm
6707bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6708  if (getLexer().isNot(AsmToken::EndOfStatement))
6709    return Error(L, "unexpected token in directive");
6710  Parser.Lex();
6711
6712  if (isThumb())
6713    SwitchMode();
6714  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6715  return false;
6716}
6717
6718/// parseDirectiveThumbFunc
6719///  ::= .thumb_func symbol_name
6720bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6721  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6722  bool isMachO = MAI.hasSubsectionsViaSymbols();
6723  StringRef Name;
6724  bool needFuncName = true;
6725
6726  // Darwin asm has an (optional) function name after the .thumb_func
6727  // directive; ELF asm doesn't.
6728  if (isMachO) {
6729    const AsmToken &Tok = Parser.getTok();
6730    if (Tok.isNot(AsmToken::EndOfStatement)) {
6731      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6732        return Error(L, "unexpected token in .thumb_func directive");
6733      Name = Tok.getIdentifier();
6734      Parser.Lex(); // Consume the identifier token.
6735      needFuncName = false;
6736    }
6737  }
6738
6739  if (getLexer().isNot(AsmToken::EndOfStatement))
6740    return Error(L, "unexpected token in directive");
6741
6742  // Eat the end of statement and any blank lines that follow.
6743  while (getLexer().is(AsmToken::EndOfStatement))
6744    Parser.Lex();
6745
6746  // FIXME: assuming function name will be the line following .thumb_func
6747  // We really should be checking the next symbol definition even if there's
6748  // stuff in between.
6749  if (needFuncName) {
6750    Name = Parser.getTok().getIdentifier();
6751  }
6752
6753  // Mark symbol as a thumb symbol.
6754  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6755  getParser().getStreamer().EmitThumbFunc(Func);
6756  return false;
6757}
6758
6759/// parseDirectiveSyntax
6760///  ::= .syntax unified | divided
6761bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6762  const AsmToken &Tok = Parser.getTok();
6763  if (Tok.isNot(AsmToken::Identifier))
6764    return Error(L, "unexpected token in .syntax directive");
6765  StringRef Mode = Tok.getString();
6766  if (Mode == "unified" || Mode == "UNIFIED")
6767    Parser.Lex();
6768  else if (Mode == "divided" || Mode == "DIVIDED")
6769    return Error(L, "'.syntax divided' arm asssembly not supported");
6770  else
6771    return Error(L, "unrecognized syntax mode in .syntax directive");
6772
6773  if (getLexer().isNot(AsmToken::EndOfStatement))
6774    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6775  Parser.Lex();
6776
6777  // TODO tell the MC streamer the mode
6778  // getParser().getStreamer().Emit???();
6779  return false;
6780}
6781
6782/// parseDirectiveCode
6783///  ::= .code 16 | 32
6784bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6785  const AsmToken &Tok = Parser.getTok();
6786  if (Tok.isNot(AsmToken::Integer))
6787    return Error(L, "unexpected token in .code directive");
6788  int64_t Val = Parser.getTok().getIntVal();
6789  if (Val == 16)
6790    Parser.Lex();
6791  else if (Val == 32)
6792    Parser.Lex();
6793  else
6794    return Error(L, "invalid operand to .code directive");
6795
6796  if (getLexer().isNot(AsmToken::EndOfStatement))
6797    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6798  Parser.Lex();
6799
6800  if (Val == 16) {
6801    if (!isThumb())
6802      SwitchMode();
6803    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6804  } else {
6805    if (isThumb())
6806      SwitchMode();
6807    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6808  }
6809
6810  return false;
6811}
6812
6813/// parseDirectiveReq
6814///  ::= name .req registername
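///  e.g. 'fp .req r11' lets 'fp' be used wherever r11 would be accepted.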
6815bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6816  Parser.Lex(); // Eat the '.req' token.
6817  unsigned Reg;
6818  SMLoc SRegLoc, ERegLoc;
6819  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6820    Parser.EatToEndOfStatement();
6821    return Error(SRegLoc, "register name expected");
6822  }
6823
6824  // Shouldn't be anything else.
6825  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6826    Parser.EatToEndOfStatement();
6827    return Error(Parser.getTok().getLoc(),
6828                 "unexpected input in .req directive.");
6829  }
6830
6831  Parser.Lex(); // Consume the EndOfStatement
6832
6833  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6834    return Error(SRegLoc, "redefinition of '" + Name +
6835                          "' does not match original.");
6836
6837  return false;
6838}
6839
6840/// parseDirectiveUnreq
6841///  ::= .unreq registername
6842bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6843  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6844    Parser.EatToEndOfStatement();
6845    return Error(L, "unexpected input in .unreq directive.");
6846  }
6847  RegisterReqs.erase(Parser.getTok().getIdentifier());
6848  Parser.Lex(); // Eat the identifier.
6849  return false;
6850}
6851
6852/// parseDirectiveArch
6853///  ::= .arch token
6854bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
6855  return true;
6856}
6857
6858/// parseDirectiveEabiAttr
6859///  ::= .eabi_attribute int, int
6860bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
6861  return true;
6862}
6863
6864extern "C" void LLVMInitializeARMAsmLexer();
6865
6866/// Force static initialization.
6867extern "C" void LLVMInitializeARMAsmParser() {
6868  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6869  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6870  LLVMInitializeARMAsmLexer();
6871}
6872
6873#define GET_REGISTER_MATCHER
6874#define GET_MATCHER_IMPLEMENTATION
6875#include "ARMGenAsmMatcher.inc"
6876