ARMAsmParser.cpp revision bc2198133a1836598b54b943420748e75d5dea94
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
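                                // e.g. Mask == 0b0100 has two trailing zeros,
                                // so the IT block covers 4 - 2 = 2 instructions.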
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so it needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an IT block to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_MemBarrierOpt,
274    k_Memory,
275    k_PostIndexRegister,
276    k_MSRMask,
277    k_ProcIFlags,
278    k_VectorIndex,
279    k_Register,
280    k_RegisterList,
281    k_DPRRegisterList,
282    k_SPRRegisterList,
283    k_VectorList,
284    k_VectorListAllLanes,
285    k_VectorListIndexed,
286    k_ShiftedRegister,
287    k_ShiftedImmediate,
288    k_ShifterImmediate,
289    k_RotateImmediate,
290    k_BitfieldDescriptor,
291    k_Token
292  } Kind;
293
294  SMLoc StartLoc, EndLoc;
295  SmallVector<unsigned, 8> Registers;
296
297  union {
298    struct {
299      ARMCC::CondCodes Val;
300    } CC;
301
302    struct {
303      unsigned Val;
304    } Cop;
305
306    struct {
307      unsigned Val;
308    } CoprocOption;
309
310    struct {
311      unsigned Mask:4;
312    } ITMask;
313
314    struct {
315      ARM_MB::MemBOpt Val;
316    } MBOpt;
317
318    struct {
319      ARM_PROC::IFlags Val;
320    } IFlags;
321
322    struct {
323      unsigned Val;
324    } MMask;
325
326    struct {
327      const char *Data;
328      unsigned Length;
329    } Tok;
330
331    struct {
332      unsigned RegNum;
333    } Reg;
334
335    // A vector register list is a sequential list of 1 to 4 registers.
336    struct {
337      unsigned RegNum;
338      unsigned Count;
339      unsigned LaneIndex;
340      bool isDoubleSpaced;
341    } VectorList;
342
343    struct {
344      unsigned Val;
345    } VectorIndex;
346
347    struct {
348      const MCExpr *Val;
349    } Imm;
350
351    /// Combined record for all forms of ARM address expressions.
352    struct {
353      unsigned BaseRegNum;
354      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
355      // was specified.
356      const MCConstantExpr *OffsetImm;  // Offset immediate value
357      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
358      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
359      unsigned ShiftImm;        // shift for OffsetReg.
360      unsigned Alignment;       // 0 = no alignment specified
361                                // n = alignment in bytes (2, 4, 8, 16, or 32)
362      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
363    } Memory;
364
365    struct {
366      unsigned RegNum;
367      bool isAdd;
368      ARM_AM::ShiftOpc ShiftTy;
369      unsigned ShiftImm;
370    } PostIdxReg;
371
372    struct {
373      bool isASR;
374      unsigned Imm;
375    } ShifterImm;
376    struct {
377      ARM_AM::ShiftOpc ShiftTy;
378      unsigned SrcReg;
379      unsigned ShiftReg;
380      unsigned ShiftImm;
381    } RegShiftedReg;
382    struct {
383      ARM_AM::ShiftOpc ShiftTy;
384      unsigned SrcReg;
385      unsigned ShiftImm;
386    } RegShiftedImm;
387    struct {
388      unsigned Imm;
389    } RotImm;
390    struct {
391      unsigned LSB;
392      unsigned Width;
393    } Bitfield;
394  };
395
396  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
397public:
398  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
399    Kind = o.Kind;
400    StartLoc = o.StartLoc;
401    EndLoc = o.EndLoc;
402    switch (Kind) {
403    case k_CondCode:
404      CC = o.CC;
405      break;
406    case k_ITCondMask:
407      ITMask = o.ITMask;
408      break;
409    case k_Token:
410      Tok = o.Tok;
411      break;
412    case k_CCOut:
413    case k_Register:
414      Reg = o.Reg;
415      break;
416    case k_RegisterList:
417    case k_DPRRegisterList:
418    case k_SPRRegisterList:
419      Registers = o.Registers;
420      break;
421    case k_VectorList:
422    case k_VectorListAllLanes:
423    case k_VectorListIndexed:
424      VectorList = o.VectorList;
425      break;
426    case k_CoprocNum:
427    case k_CoprocReg:
428      Cop = o.Cop;
429      break;
430    case k_CoprocOption:
431      CoprocOption = o.CoprocOption;
432      break;
433    case k_Immediate:
434      Imm = o.Imm;
435      break;
436    case k_MemBarrierOpt:
437      MBOpt = o.MBOpt;
438      break;
439    case k_Memory:
440      Memory = o.Memory;
441      break;
442    case k_PostIndexRegister:
443      PostIdxReg = o.PostIdxReg;
444      break;
445    case k_MSRMask:
446      MMask = o.MMask;
447      break;
448    case k_ProcIFlags:
449      IFlags = o.IFlags;
450      break;
451    case k_ShifterImmediate:
452      ShifterImm = o.ShifterImm;
453      break;
454    case k_ShiftedRegister:
455      RegShiftedReg = o.RegShiftedReg;
456      break;
457    case k_ShiftedImmediate:
458      RegShiftedImm = o.RegShiftedImm;
459      break;
460    case k_RotateImmediate:
461      RotImm = o.RotImm;
462      break;
463    case k_BitfieldDescriptor:
464      Bitfield = o.Bitfield;
465      break;
466    case k_VectorIndex:
467      VectorIndex = o.VectorIndex;
468      break;
469    }
470  }
471
472  /// getStartLoc - Get the location of the first token of this operand.
473  SMLoc getStartLoc() const { return StartLoc; }
474  /// getEndLoc - Get the location of the last token of this operand.
475  SMLoc getEndLoc() const { return EndLoc; }
476
477  ARMCC::CondCodes getCondCode() const {
478    assert(Kind == k_CondCode && "Invalid access!");
479    return CC.Val;
480  }
481
482  unsigned getCoproc() const {
483    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
484    return Cop.Val;
485  }
486
487  StringRef getToken() const {
488    assert(Kind == k_Token && "Invalid access!");
489    return StringRef(Tok.Data, Tok.Length);
490  }
491
492  unsigned getReg() const {
493    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
494    return Reg.RegNum;
495  }
496
497  const SmallVectorImpl<unsigned> &getRegList() const {
498    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
499            Kind == k_SPRRegisterList) && "Invalid access!");
500    return Registers;
501  }
502
503  const MCExpr *getImm() const {
504    assert(isImm() && "Invalid access!");
505    return Imm.Val;
506  }
507
508  unsigned getVectorIndex() const {
509    assert(Kind == k_VectorIndex && "Invalid access!");
510    return VectorIndex.Val;
511  }
512
513  ARM_MB::MemBOpt getMemBarrierOpt() const {
514    assert(Kind == k_MemBarrierOpt && "Invalid access!");
515    return MBOpt.Val;
516  }
517
518  ARM_PROC::IFlags getProcIFlags() const {
519    assert(Kind == k_ProcIFlags && "Invalid access!");
520    return IFlags.Val;
521  }
522
523  unsigned getMSRMask() const {
524    assert(Kind == k_MSRMask && "Invalid access!");
525    return MMask.Val;
526  }
527
528  bool isCoprocNum() const { return Kind == k_CoprocNum; }
529  bool isCoprocReg() const { return Kind == k_CoprocReg; }
530  bool isCoprocOption() const { return Kind == k_CoprocOption; }
531  bool isCondCode() const { return Kind == k_CondCode; }
532  bool isCCOut() const { return Kind == k_CCOut; }
533  bool isITMask() const { return Kind == k_ITCondMask; }
534  bool isITCondCode() const { return Kind == k_CondCode; }
535  bool isImm() const { return Kind == k_Immediate; }
536  bool isFPImm() const {
537    if (!isImm()) return false;
538    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
539    if (!CE) return false;
540    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
541    return Val != -1;
542  }
543  bool isFBits16() const {
544    if (!isImm()) return false;
545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
546    if (!CE) return false;
547    int64_t Value = CE->getValue();
548    return Value >= 0 && Value <= 16;
549  }
550  bool isFBits32() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 1 && Value <= 32;
556  }
557  bool isImm8s4() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
563  }
564  bool isImm0_1020s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
570  }
571  bool isImm0_508s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
577  }
578  bool isImm0_255() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return Value >= 0 && Value < 256;
584  }
585  bool isImm0_1() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 2;
591  }
592  bool isImm0_3() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 4;
598  }
599  bool isImm0_7() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 16;
612  }
613  bool isImm0_31() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 64;
626  }
627  bool isImm8() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value == 8;
633  }
634  bool isImm16() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 16;
640  }
641  bool isImm32() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 32;
647  }
648  bool isShrImm8() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value > 0 && Value <= 8;
654  }
655  bool isShrImm16() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 16;
661  }
662  bool isShrImm32() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 32;
668  }
669  bool isShrImm64() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 64;
675  }
676  bool isImm1_7() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value < 8;
682  }
683  bool isImm1_15() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 16;
689  }
690  bool isImm1_31() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 32;
696  }
697  bool isImm1_16() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 17;
703  }
704  bool isImm1_32() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 33;
710  }
711  bool isImm0_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value >= 0 && Value < 33;
717  }
718  bool isImm0_65535() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 65536;
724  }
725  bool isImm0_65535Expr() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    // If it's not a constant expression, it'll generate a fixup and be
729    // handled later.
730    if (!CE) return true;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 65536;
733  }
734  bool isImm24bit() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value <= 0xffffff;
740  }
741  bool isImmThumbSR() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value > 0 && Value < 33;
747  }
748  bool isPKHLSLImm() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value >= 0 && Value < 32;
754  }
755  bool isPKHASRImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value > 0 && Value <= 32;
761  }
762  bool isARMSOImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return ARM_AM::getSOImmVal(Value) != -1;
768  }
769  bool isARMSOImmNot() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(~Value) != -1;
775  }
776  bool isARMSOImmNeg() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(-Value) != -1;
782  }
783  bool isT2SOImm() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getT2SOImmVal(Value) != -1;
789  }
790  bool isT2SOImmNot() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(~Value) != -1;
796  }
797  bool isT2SOImmNeg() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(-Value) != -1;
803  }
804  bool isSetEndImm() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value == 1 || Value == 0;
810  }
811  bool isReg() const { return Kind == k_Register; }
812  bool isRegList() const { return Kind == k_RegisterList; }
813  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
814  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
815  bool isToken() const { return Kind == k_Token; }
816  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
817  bool isMemory() const { return Kind == k_Memory; }
818  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
819  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
820  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
821  bool isRotImm() const { return Kind == k_RotateImmediate; }
822  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
823  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
824  bool isPostIdxReg() const {
825    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
826  }
827  bool isMemNoOffset(bool alignOK = false) const {
828    if (!isMemory())
829      return false;
830    // No offset of any kind.
831    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
832     (alignOK || Memory.Alignment == 0);
833  }
834  bool isMemPCRelImm12() const {
835    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
836      return false;
837    // Base register must be PC.
838    if (Memory.BaseRegNum != ARM::PC)
839      return false;
840    // Immediate offset in range [-4095, 4095].
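    // (A #-0 offset is represented as INT32_MIN, hence the extra check below.)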
841    if (!Memory.OffsetImm) return true;
842    int64_t Val = Memory.OffsetImm->getValue();
843    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
844  }
845  bool isAlignedMemory() const {
846    return isMemNoOffset(true);
847  }
848  bool isAddrMode2() const {
849    if (!isMemory() || Memory.Alignment != 0) return false;
850    // Check for register offset.
851    if (Memory.OffsetRegNum) return true;
852    // Immediate offset in range [-4095, 4095].
853    if (!Memory.OffsetImm) return true;
854    int64_t Val = Memory.OffsetImm->getValue();
855    return Val > -4096 && Val < 4096;
856  }
857  bool isAM2OffsetImm() const {
858    if (!isImm()) return false;
859    // Immediate offset in range [-4095, 4095].
860    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
861    if (!CE) return false;
862    int64_t Val = CE->getValue();
863    return Val > -4096 && Val < 4096;
864  }
865  bool isAddrMode3() const {
866    // If we have an immediate that's not a constant, treat it as a label
867    // reference needing a fixup. If it is a constant, it's something else
868    // and we reject it.
869    if (isImm() && !isa<MCConstantExpr>(getImm()))
870      return true;
871    if (!isMemory() || Memory.Alignment != 0) return false;
872    // No shifts are legal for AM3.
873    if (Memory.ShiftType != ARM_AM::no_shift) return false;
874    // Check for register offset.
875    if (Memory.OffsetRegNum) return true;
876    // Immediate offset in range [-255, 255].
877    if (!Memory.OffsetImm) return true;
878    int64_t Val = Memory.OffsetImm->getValue();
879    return Val > -256 && Val < 256;
880  }
881  bool isAM3Offset() const {
882    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
883      return false;
884    if (Kind == k_PostIndexRegister)
885      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
886    // Immediate offset in range [-255, 255].
887    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
888    if (!CE) return false;
889    int64_t Val = CE->getValue();
890    // Special case, #-0 is INT32_MIN.
891    return (Val > -256 && Val < 256) || Val == INT32_MIN;
892  }
893  bool isAddrMode5() const {
894    // If we have an immediate that's not a constant, treat it as a label
895    // reference needing a fixup. If it is a constant, it's something else
896    // and we reject it.
897    if (isImm() && !isa<MCConstantExpr>(getImm()))
898      return true;
899    if (!isMemory() || Memory.Alignment != 0) return false;
900    // Check for register offset.
901    if (Memory.OffsetRegNum) return false;
902    // Immediate offset in range [-1020, 1020] and a multiple of 4.
903    if (!Memory.OffsetImm) return true;
904    int64_t Val = Memory.OffsetImm->getValue();
905    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
906      Val == INT32_MIN;
907  }
908  bool isMemTBB() const {
909    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
910        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
911      return false;
912    return true;
913  }
914  bool isMemTBH() const {
915    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
916        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
917        Memory.Alignment != 0)
918      return false;
919    return true;
920  }
921  bool isMemRegOffset() const {
922    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
923      return false;
924    return true;
925  }
926  bool isT2MemRegOffset() const {
927    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
928        Memory.Alignment != 0)
929      return false;
930    // Only lsl #{0, 1, 2, 3} allowed.
931    if (Memory.ShiftType == ARM_AM::no_shift)
932      return true;
933    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
934      return false;
935    return true;
936  }
937  bool isMemThumbRR() const {
938    // Thumb reg+reg addressing is simple. Just two registers, a base and
939    // an offset. No shifts, negations or any other complicating factors.
940    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
941        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
942      return false;
943    return isARMLowRegister(Memory.BaseRegNum) &&
944      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
945  }
946  bool isMemThumbRIs4() const {
947    if (!isMemory() || Memory.OffsetRegNum != 0 ||
948        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
949      return false;
950    // Immediate offset, multiple of 4 in range [0, 124].
951    if (!Memory.OffsetImm) return true;
952    int64_t Val = Memory.OffsetImm->getValue();
953    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
954  }
955  bool isMemThumbRIs2() const {
956    if (!isMemory() || Memory.OffsetRegNum != 0 ||
957        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
958      return false;
959    // Immediate offset, multiple of 2 in range [0, 62].
960    if (!Memory.OffsetImm) return true;
961    int64_t Val = Memory.OffsetImm->getValue();
962    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
963  }
964  bool isMemThumbRIs1() const {
965    if (!isMemory() || Memory.OffsetRegNum != 0 ||
966        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
967      return false;
968    // Immediate offset in range [0, 31].
969    if (!Memory.OffsetImm) return true;
970    int64_t Val = Memory.OffsetImm->getValue();
971    return Val >= 0 && Val <= 31;
972  }
973  bool isMemThumbSPI() const {
974    if (!isMemory() || Memory.OffsetRegNum != 0 ||
975        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
976      return false;
977    // Immediate offset, multiple of 4 in range [0, 1020].
978    if (!Memory.OffsetImm) return true;
979    int64_t Val = Memory.OffsetImm->getValue();
980    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
981  }
982  bool isMemImm8s4Offset() const {
983    // If we have an immediate that's not a constant, treat it as a label
984    // reference needing a fixup. If it is a constant, it's something else
985    // and we reject it.
986    if (isImm() && !isa<MCConstantExpr>(getImm()))
987      return true;
988    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
989      return false;
990    // Immediate offset a multiple of 4 in range [-1020, 1020].
991    if (!Memory.OffsetImm) return true;
992    int64_t Val = Memory.OffsetImm->getValue();
993    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
994  }
995  bool isMemImm0_1020s4Offset() const {
996    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
997      return false;
998    // Immediate offset a multiple of 4 in range [0, 1020].
999    if (!Memory.OffsetImm) return true;
1000    int64_t Val = Memory.OffsetImm->getValue();
1001    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1002  }
1003  bool isMemImm8Offset() const {
1004    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1005      return false;
1006    // Base reg of PC isn't allowed for these encodings.
1007    if (Memory.BaseRegNum == ARM::PC) return false;
1008    // Immediate offset in range [-255, 255].
1009    if (!Memory.OffsetImm) return true;
1010    int64_t Val = Memory.OffsetImm->getValue();
1011    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1012  }
1013  bool isMemPosImm8Offset() const {
1014    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1015      return false;
1016    // Immediate offset in range [0, 255].
1017    if (!Memory.OffsetImm) return true;
1018    int64_t Val = Memory.OffsetImm->getValue();
1019    return Val >= 0 && Val < 256;
1020  }
1021  bool isMemNegImm8Offset() const {
1022    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1023      return false;
1024    // Base reg of PC isn't allowed for these encodings.
1025    if (Memory.BaseRegNum == ARM::PC) return false;
1026    // Immediate offset in range [-255, -1].
1027    if (!Memory.OffsetImm) return false;
1028    int64_t Val = Memory.OffsetImm->getValue();
1029    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1030  }
1031  bool isMemUImm12Offset() const {
1032    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1033      return false;
1034    // Immediate offset in range [0, 4095].
1035    if (!Memory.OffsetImm) return true;
1036    int64_t Val = Memory.OffsetImm->getValue();
1037    return (Val >= 0 && Val < 4096);
1038  }
1039  bool isMemImm12Offset() const {
1040    // If we have an immediate that's not a constant, treat it as a label
1041    // reference needing a fixup. If it is a constant, it's something else
1042    // and we reject it.
1043    if (isImm() && !isa<MCConstantExpr>(getImm()))
1044      return true;
1045
1046    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1047      return false;
1048    // Immediate offset in range [-4095, 4095].
1049    if (!Memory.OffsetImm) return true;
1050    int64_t Val = Memory.OffsetImm->getValue();
1051    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1052  }
1053  bool isPostIdxImm8() const {
1054    if (!isImm()) return false;
1055    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1056    if (!CE) return false;
1057    int64_t Val = CE->getValue();
1058    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1059  }
1060  bool isPostIdxImm8s4() const {
1061    if (!isImm()) return false;
1062    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1063    if (!CE) return false;
1064    int64_t Val = CE->getValue();
1065    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1066      (Val == INT32_MIN);
1067  }
1068
1069  bool isMSRMask() const { return Kind == k_MSRMask; }
1070  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1071
1072  // NEON operands.
1073  bool isSingleSpacedVectorList() const {
1074    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1075  }
1076  bool isDoubleSpacedVectorList() const {
1077    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1078  }
1079  bool isVecListOneD() const {
1080    if (!isSingleSpacedVectorList()) return false;
1081    return VectorList.Count == 1;
1082  }
1083
1084  bool isVecListTwoD() const {
1085    if (!isSingleSpacedVectorList()) return false;
1086    return VectorList.Count == 2;
1087  }
1088
1089  bool isVecListThreeD() const {
1090    if (!isSingleSpacedVectorList()) return false;
1091    return VectorList.Count == 3;
1092  }
1093
1094  bool isVecListFourD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 4;
1097  }
1098
1099  bool isVecListTwoQ() const {
1100    if (!isDoubleSpacedVectorList()) return false;
1101    return VectorList.Count == 2;
1102  }
1103
1104  bool isVecListThreeQ() const {
1105    if (!isDoubleSpacedVectorList()) return false;
1106    return VectorList.Count == 3;
1107  }
1108
1109  bool isVecListFourQ() const {
1110    if (!isDoubleSpacedVectorList()) return false;
1111    return VectorList.Count == 4;
1112  }
1113
1114  bool isSingleSpacedVectorAllLanes() const {
1115    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1116  }
1117  bool isDoubleSpacedVectorAllLanes() const {
1118    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1119  }
1120  bool isVecListOneDAllLanes() const {
1121    if (!isSingleSpacedVectorAllLanes()) return false;
1122    return VectorList.Count == 1;
1123  }
1124
1125  bool isVecListTwoDAllLanes() const {
1126    if (!isSingleSpacedVectorAllLanes()) return false;
1127    return VectorList.Count == 2;
1128  }
1129
1130  bool isVecListTwoQAllLanes() const {
1131    if (!isDoubleSpacedVectorAllLanes()) return false;
1132    return VectorList.Count == 2;
1133  }
1134
1135  bool isVecListThreeDAllLanes() const {
1136    if (!isSingleSpacedVectorAllLanes()) return false;
1137    return VectorList.Count == 3;
1138  }
1139
1140  bool isVecListThreeQAllLanes() const {
1141    if (!isDoubleSpacedVectorAllLanes()) return false;
1142    return VectorList.Count == 3;
1143  }
1144
1145  bool isVecListFourDAllLanes() const {
1146    if (!isSingleSpacedVectorAllLanes()) return false;
1147    return VectorList.Count == 4;
1148  }
1149
1150  bool isVecListFourQAllLanes() const {
1151    if (!isDoubleSpacedVectorAllLanes()) return false;
1152    return VectorList.Count == 4;
1153  }
1154
1155  bool isSingleSpacedVectorIndexed() const {
1156    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1157  }
1158  bool isDoubleSpacedVectorIndexed() const {
1159    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1160  }
1161  bool isVecListOneDByteIndexed() const {
1162    if (!isSingleSpacedVectorIndexed()) return false;
1163    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1164  }
1165
1166  bool isVecListOneDHWordIndexed() const {
1167    if (!isSingleSpacedVectorIndexed()) return false;
1168    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1169  }
1170
1171  bool isVecListOneDWordIndexed() const {
1172    if (!isSingleSpacedVectorIndexed()) return false;
1173    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1174  }
1175
1176  bool isVecListTwoDByteIndexed() const {
1177    if (!isSingleSpacedVectorIndexed()) return false;
1178    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1179  }
1180
1181  bool isVecListTwoDHWordIndexed() const {
1182    if (!isSingleSpacedVectorIndexed()) return false;
1183    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1184  }
1185
1186  bool isVecListTwoQWordIndexed() const {
1187    if (!isDoubleSpacedVectorIndexed()) return false;
1188    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1189  }
1190
1191  bool isVecListTwoQHWordIndexed() const {
1192    if (!isDoubleSpacedVectorIndexed()) return false;
1193    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1194  }
1195
1196  bool isVecListTwoDWordIndexed() const {
1197    if (!isSingleSpacedVectorIndexed()) return false;
1198    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1199  }
1200
1201  bool isVecListThreeDByteIndexed() const {
1202    if (!isSingleSpacedVectorIndexed()) return false;
1203    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1204  }
1205
1206  bool isVecListThreeDHWordIndexed() const {
1207    if (!isSingleSpacedVectorIndexed()) return false;
1208    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1209  }
1210
1211  bool isVecListThreeQWordIndexed() const {
1212    if (!isDoubleSpacedVectorIndexed()) return false;
1213    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1214  }
1215
1216  bool isVecListThreeQHWordIndexed() const {
1217    if (!isDoubleSpacedVectorIndexed()) return false;
1218    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1219  }
1220
1221  bool isVecListThreeDWordIndexed() const {
1222    if (!isSingleSpacedVectorIndexed()) return false;
1223    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1224  }
1225
1226  bool isVecListFourDByteIndexed() const {
1227    if (!isSingleSpacedVectorIndexed()) return false;
1228    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1229  }
1230
1231  bool isVecListFourDHWordIndexed() const {
1232    if (!isSingleSpacedVectorIndexed()) return false;
1233    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1234  }
1235
1236  bool isVecListFourQWordIndexed() const {
1237    if (!isDoubleSpacedVectorIndexed()) return false;
1238    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1239  }
1240
1241  bool isVecListFourQHWordIndexed() const {
1242    if (!isDoubleSpacedVectorIndexed()) return false;
1243    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1244  }
1245
1246  bool isVecListFourDWordIndexed() const {
1247    if (!isSingleSpacedVectorIndexed()) return false;
1248    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1249  }
1250
1251  bool isVectorIndex8() const {
1252    if (Kind != k_VectorIndex) return false;
1253    return VectorIndex.Val < 8;
1254  }
1255  bool isVectorIndex16() const {
1256    if (Kind != k_VectorIndex) return false;
1257    return VectorIndex.Val < 4;
1258  }
1259  bool isVectorIndex32() const {
1260    if (Kind != k_VectorIndex) return false;
1261    return VectorIndex.Val < 2;
1262  }
1263
1264  bool isNEONi8splat() const {
1265    if (!isImm()) return false;
1266    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1267    // Must be a constant.
1268    if (!CE) return false;
1269    int64_t Value = CE->getValue();
1270    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1271    // value.
1272    return Value >= 0 && Value < 256;
1273  }
1274
1275  bool isNEONi16splat() const {
1276    if (!isImm()) return false;
1277    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1278    // Must be a constant.
1279    if (!CE) return false;
1280    int64_t Value = CE->getValue();
1281    // i16 value in the range [0,255] or [0x0100, 0xff00]
1282    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1283  }
1284
1285  bool isNEONi32splat() const {
1286    if (!isImm()) return false;
1287    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1288    // Must be a constant.
1289    if (!CE) return false;
1290    int64_t Value = CE->getValue();
1291    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1292    return (Value >= 0 && Value < 256) ||
1293      (Value >= 0x0100 && Value <= 0xff00) ||
1294      (Value >= 0x010000 && Value <= 0xff0000) ||
1295      (Value >= 0x01000000 && Value <= 0xff000000);
1296  }
1297
1298  bool isNEONi32vmov() const {
1299    if (!isImm()) return false;
1300    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1301    // Must be a constant.
1302    if (!CE) return false;
1303    int64_t Value = CE->getValue();
1304    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1305    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1306    return (Value >= 0 && Value < 256) ||
1307      (Value >= 0x0100 && Value <= 0xff00) ||
1308      (Value >= 0x010000 && Value <= 0xff0000) ||
1309      (Value >= 0x01000000 && Value <= 0xff000000) ||
1310      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1311      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1312  }
1313  bool isNEONi32vmovNeg() const {
1314    if (!isImm()) return false;
1315    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1316    // Must be a constant.
1317    if (!CE) return false;
1318    int64_t Value = ~CE->getValue();
1319    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1320    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1321    return (Value >= 0 && Value < 256) ||
1322      (Value >= 0x0100 && Value <= 0xff00) ||
1323      (Value >= 0x010000 && Value <= 0xff0000) ||
1324      (Value >= 0x01000000 && Value <= 0xff000000) ||
1325      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1326      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1327  }
1328
1329  bool isNEONi64splat() const {
1330    if (!isImm()) return false;
1331    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1332    // Must be a constant.
1333    if (!CE) return false;
1334    uint64_t Value = CE->getValue();
1335    // i64 value with each byte being either 0 or 0xff.
1336    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1337      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1338    return true;
1339  }
1340
1341  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1342    // Add as immediates when possible.  Null MCExpr = 0.
1343    if (Expr == 0)
1344      Inst.addOperand(MCOperand::CreateImm(0));
1345    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1346      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1347    else
1348      Inst.addOperand(MCOperand::CreateExpr(Expr));
1349  }
1350
1351  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1352    assert(N == 2 && "Invalid number of operands!");
1353    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1354    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1355    Inst.addOperand(MCOperand::CreateReg(RegNum));
1356  }
1357
1358  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1359    assert(N == 1 && "Invalid number of operands!");
1360    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1361  }
1362
1363  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1364    assert(N == 1 && "Invalid number of operands!");
1365    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1366  }
1367
1368  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1369    assert(N == 1 && "Invalid number of operands!");
1370    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1371  }
1372
1373  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1374    assert(N == 1 && "Invalid number of operands!");
1375    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1376  }
1377
1378  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1379    assert(N == 1 && "Invalid number of operands!");
1380    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1381  }
1382
1383  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1384    assert(N == 1 && "Invalid number of operands!");
1385    Inst.addOperand(MCOperand::CreateReg(getReg()));
1386  }
1387
1388  void addRegOperands(MCInst &Inst, unsigned N) const {
1389    assert(N == 1 && "Invalid number of operands!");
1390    Inst.addOperand(MCOperand::CreateReg(getReg()));
1391  }
1392
1393  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1394    assert(N == 3 && "Invalid number of operands!");
1395    assert(isRegShiftedReg() &&
1396           "addRegShiftedRegOperands() on non RegShiftedReg!");
1397    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1398    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1399    Inst.addOperand(MCOperand::CreateImm(
1400      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1401  }
1402
1403  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1404    assert(N == 2 && "Invalid number of operands!");
1405    assert(isRegShiftedImm() &&
1406           "addRegShiftedImmOperands() on non RegShiftedImm!");
1407    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1408    Inst.addOperand(MCOperand::CreateImm(
1409      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1410  }
1411
1412  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1413    assert(N == 1 && "Invalid number of operands!");
1414    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1415                                         ShifterImm.Imm));
1416  }
1417
1418  void addRegListOperands(MCInst &Inst, unsigned N) const {
1419    assert(N == 1 && "Invalid number of operands!");
1420    const SmallVectorImpl<unsigned> &RegList = getRegList();
1421    for (SmallVectorImpl<unsigned>::const_iterator
1422           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1423      Inst.addOperand(MCOperand::CreateReg(*I));
1424  }
1425
1426  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1427    addRegListOperands(Inst, N);
1428  }
1429
1430  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1431    addRegListOperands(Inst, N);
1432  }
1433
1434  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1435    assert(N == 1 && "Invalid number of operands!");
1436    // Encoded as val>>3. The printer handles display as 8, 16, 24.
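    // e.g. a rotation of 16 is encoded here as 2.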
1437    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1438  }
1439
1440  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1441    assert(N == 1 && "Invalid number of operands!");
1442    // Munge the lsb/width into a bitfield mask.
1443    unsigned lsb = Bitfield.LSB;
1444    unsigned width = Bitfield.Width;
1445    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
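    // e.g. lsb == 8, width == 4 gives 0xfffff0ff (bits [11:8] cleared).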
1446    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1447                      (32 - (lsb + width)));
1448    Inst.addOperand(MCOperand::CreateImm(Mask));
1449  }
1450
1451  void addImmOperands(MCInst &Inst, unsigned N) const {
1452    assert(N == 1 && "Invalid number of operands!");
1453    addExpr(Inst, getImm());
1454  }
1455
1456  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1457    assert(N == 1 && "Invalid number of operands!");
1458    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1459    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1460  }
1461
1462  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1463    assert(N == 1 && "Invalid number of operands!");
1464    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1465    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1466  }
1467
1468  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1469    assert(N == 1 && "Invalid number of operands!");
1470    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1471    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1472    Inst.addOperand(MCOperand::CreateImm(Val));
1473  }
1474
1475  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1476    assert(N == 1 && "Invalid number of operands!");
1477    // FIXME: We really want to scale the value here, but the LDRD/STRD
1478    // instructions don't encode operands that way yet.
1479    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1480    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1481  }
1482
1483  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1484    assert(N == 1 && "Invalid number of operands!");
1485    // The immediate is scaled by four in the encoding and is stored
1486    // in the MCInst as such. Lop off the low two bits here.
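    // For example, #1020 in the source is stored as the immediate 255.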
1487    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1488    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1489  }
1490
1491  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1492    assert(N == 1 && "Invalid number of operands!");
1493    // The immediate is scaled by four in the encoding and is stored
1494    // in the MCInst as such. Lop off the low two bits here.
1495    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1496    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1497  }
1498
1499  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1500    assert(N == 1 && "Invalid number of operands!");
1501    // The constant encodes as the immediate-1, and we store in the instruction
1502    // the bits as encoded, so subtract off one here.
1503    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1504    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1505  }
1506
1507  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1508    assert(N == 1 && "Invalid number of operands!");
1509    // The constant encodes as the immediate-1, and we store in the instruction
1510    // the bits as encoded, so subtract off one here.
1511    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1512    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1513  }
1514
1515  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1516    assert(N == 1 && "Invalid number of operands!");
1517    // The constant encodes as the immediate, except for 32, which encodes as
1518    // zero.
1519    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1520    unsigned Imm = CE->getValue();
1521    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1522  }
1523
1524  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1525    assert(N == 1 && "Invalid number of operands!");
1526    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1527    // the instruction as well.
1528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1529    int Val = CE->getValue();
1530    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1531  }
1532
1533  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1534    assert(N == 1 && "Invalid number of operands!");
1535    // The operand is actually a t2_so_imm, but we have its bitwise
1536    // negation in the assembly source, so twiddle it here.
1537    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1538    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1539  }
1540
1541  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1542    assert(N == 1 && "Invalid number of operands!");
1543    // The operand is actually a t2_so_imm, but we have its
1544    // negation in the assembly source, so twiddle it here.
1545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1546    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1547  }
1548
1549  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1550    assert(N == 1 && "Invalid number of operands!");
1551    // The operand is actually a so_imm, but we have its bitwise
1552    // negation in the assembly source, so twiddle it here.
1553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1554    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1555  }
1556
1557  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1558    assert(N == 1 && "Invalid number of operands!");
1559    // The operand is actually a so_imm, but we have its
1560    // negation in the assembly source, so twiddle it here.
1561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1562    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1563  }
1564
1565  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1566    assert(N == 1 && "Invalid number of operands!");
1567    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1568  }
1569
1570  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1571    assert(N == 1 && "Invalid number of operands!");
1572    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1573  }
1574
1575  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1576    assert(N == 1 && "Invalid number of operands!");
1577    int32_t Imm = Memory.OffsetImm->getValue();
1578    // FIXME: Handle #-0
1579    if (Imm == INT32_MIN) Imm = 0;
1580    Inst.addOperand(MCOperand::CreateImm(Imm));
1581  }
1582
1583  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1584    assert(N == 2 && "Invalid number of operands!");
1585    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1586    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1587  }
1588
1589  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1590    assert(N == 3 && "Invalid number of operands!");
1591    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1592    if (!Memory.OffsetRegNum) {
1593      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1594      // Special case for #-0
1595      if (Val == INT32_MIN) Val = 0;
1596      if (Val < 0) Val = -Val;
1597      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1598    } else {
1599      // For register offset, we encode the shift type and negation flag
1600      // here.
1601      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1602                              Memory.ShiftImm, Memory.ShiftType);
1603    }
1604    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1605    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1606    Inst.addOperand(MCOperand::CreateImm(Val));
1607  }
1608
1609  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1610    assert(N == 2 && "Invalid number of operands!");
1611    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1612    assert(CE && "non-constant AM2OffsetImm operand!");
1613    int32_t Val = CE->getValue();
1614    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1615    // Special case for #-0
1616    if (Val == INT32_MIN) Val = 0;
1617    if (Val < 0) Val = -Val;
1618    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1619    Inst.addOperand(MCOperand::CreateReg(0));
1620    Inst.addOperand(MCOperand::CreateImm(Val));
1621  }
1622
1623  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1624    assert(N == 3 && "Invalid number of operands!");
1625    // If we have an immediate that's not a constant, treat it as a label
1626    // reference needing a fixup. If it is a constant, it's something else
1627    // and we reject it.
1628    if (isImm()) {
1629      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1630      Inst.addOperand(MCOperand::CreateReg(0));
1631      Inst.addOperand(MCOperand::CreateImm(0));
1632      return;
1633    }
1634
1635    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1636    if (!Memory.OffsetRegNum) {
1637      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1638      // Special case for #-0
1639      if (Val == INT32_MIN) Val = 0;
1640      if (Val < 0) Val = -Val;
1641      Val = ARM_AM::getAM3Opc(AddSub, Val);
1642    } else {
1643      // For register offset, we encode the shift type and negation flag
1644      // here.
1645      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1646    }
1647    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1648    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1649    Inst.addOperand(MCOperand::CreateImm(Val));
1650  }
1651
1652  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1653    assert(N == 2 && "Invalid number of operands!");
1654    if (Kind == k_PostIndexRegister) {
1655      int32_t Val =
1656        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1657      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1658      Inst.addOperand(MCOperand::CreateImm(Val));
1659      return;
1660    }
1661
1662    // Constant offset.
1663    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1664    int32_t Val = CE->getValue();
1665    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1666    // Special case for #-0
1667    if (Val == INT32_MIN) Val = 0;
1668    if (Val < 0) Val = -Val;
1669    Val = ARM_AM::getAM3Opc(AddSub, Val);
1670    Inst.addOperand(MCOperand::CreateReg(0));
1671    Inst.addOperand(MCOperand::CreateImm(Val));
1672  }
1673
1674  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1675    assert(N == 2 && "Invalid number of operands!");
1676    // If we have an immediate that's not a constant, treat it as a label
1677    // reference needing a fixup. If it is a constant, it's something else
1678    // and we reject it.
1679    if (isImm()) {
1680      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1681      Inst.addOperand(MCOperand::CreateImm(0));
1682      return;
1683    }
1684
1685    // The lower two bits are always zero and as such are not encoded.
1686    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1687    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1688    // Special case for #-0
1689    if (Val == INT32_MIN) Val = 0;
1690    if (Val < 0) Val = -Val;
1691    Val = ARM_AM::getAM5Opc(AddSub, Val);
1692    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1693    Inst.addOperand(MCOperand::CreateImm(Val));
1694  }
1695
1696  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1697    assert(N == 2 && "Invalid number of operands!");
1698    // If we have an immediate that's not a constant, treat it as a label
1699    // reference needing a fixup. If it is a constant, it's something else
1700    // and we reject it.
1701    if (isImm()) {
1702      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1703      Inst.addOperand(MCOperand::CreateImm(0));
1704      return;
1705    }
1706
1707    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1708    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1709    Inst.addOperand(MCOperand::CreateImm(Val));
1710  }
1711
1712  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1713    assert(N == 2 && "Invalid number of operands!");
1714    // The lower two bits are always zero and as such are not encoded.
1715    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1716    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1717    Inst.addOperand(MCOperand::CreateImm(Val));
1718  }
1719
1720  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1721    assert(N == 2 && "Invalid number of operands!");
1722    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1723    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1724    Inst.addOperand(MCOperand::CreateImm(Val));
1725  }
1726
1727  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1728    addMemImm8OffsetOperands(Inst, N);
1729  }
1730
1731  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1732    addMemImm8OffsetOperands(Inst, N);
1733  }
1734
1735  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1736    assert(N == 2 && "Invalid number of operands!");
1737    // If this is an immediate, it's a label reference.
1738    if (isImm()) {
1739      addExpr(Inst, getImm());
1740      Inst.addOperand(MCOperand::CreateImm(0));
1741      return;
1742    }
1743
1744    // Otherwise, it's a normal memory reg+offset.
1745    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1746    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1747    Inst.addOperand(MCOperand::CreateImm(Val));
1748  }
1749
1750  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1751    assert(N == 2 && "Invalid number of operands!");
1752    // If this is an immediate, it's a label reference.
1753    if (isImm()) {
1754      addExpr(Inst, getImm());
1755      Inst.addOperand(MCOperand::CreateImm(0));
1756      return;
1757    }
1758
1759    // Otherwise, it's a normal memory reg+offset.
1760    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1761    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1762    Inst.addOperand(MCOperand::CreateImm(Val));
1763  }
1764
1765  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1766    assert(N == 2 && "Invalid number of operands!");
1767    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1768    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1769  }
1770
1771  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1772    assert(N == 2 && "Invalid number of operands!");
1773    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1774    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1775  }
1776
1777  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1778    assert(N == 3 && "Invalid number of operands!");
1779    unsigned Val =
1780      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1781                        Memory.ShiftImm, Memory.ShiftType);
1782    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1783    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1784    Inst.addOperand(MCOperand::CreateImm(Val));
1785  }
1786
1787  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1788    assert(N == 3 && "Invalid number of operands!");
1789    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1790    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1791    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1792  }
1793
1794  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 2 && "Invalid number of operands!");
1796    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1797    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1798  }
1799
1800  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1801    assert(N == 2 && "Invalid number of operands!");
1802    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1803    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1804    Inst.addOperand(MCOperand::CreateImm(Val));
1805  }
1806
1807  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1808    assert(N == 2 && "Invalid number of operands!");
1809    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1810    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1811    Inst.addOperand(MCOperand::CreateImm(Val));
1812  }
1813
1814  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1815    assert(N == 2 && "Invalid number of operands!");
1816    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1817    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1818    Inst.addOperand(MCOperand::CreateImm(Val));
1819  }
1820
1821  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1822    assert(N == 2 && "Invalid number of operands!");
1823    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1824    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1825    Inst.addOperand(MCOperand::CreateImm(Val));
1826  }
1827
1828  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1829    assert(N == 1 && "Invalid number of operands!");
1830    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1831    assert(CE && "non-constant post-idx-imm8 operand!");
1832    int Imm = CE->getValue();
1833    bool isAdd = Imm >= 0;
1834    if (Imm == INT32_MIN) Imm = 0;
1835    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1836    Inst.addOperand(MCOperand::CreateImm(Imm));
1837  }
1838
1839  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1840    assert(N == 1 && "Invalid number of operands!");
1841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1842    assert(CE && "non-constant post-idx-imm8s4 operand!");
1843    int Imm = CE->getValue();
1844    bool isAdd = Imm >= 0;
1845    if (Imm == INT32_MIN) Imm = 0;
1846    // Immediate is scaled by 4.
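    // For example, #-8 becomes (8 / 4) = 2 with the add bit (bit 8) clear.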
1847    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1848    Inst.addOperand(MCOperand::CreateImm(Imm));
1849  }
1850
1851  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1852    assert(N == 2 && "Invalid number of operands!");
1853    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1854    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1855  }
1856
1857  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1858    assert(N == 2 && "Invalid number of operands!");
1859    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1860    // The sign, shift type, and shift amount are encoded in a single operand
1861    // using the AM2 encoding helpers.
1862    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1863    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1864                                     PostIdxReg.ShiftTy);
1865    Inst.addOperand(MCOperand::CreateImm(Imm));
1866  }
1867
1868  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1869    assert(N == 1 && "Invalid number of operands!");
1870    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1871  }
1872
1873  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1874    assert(N == 1 && "Invalid number of operands!");
1875    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1876  }
1877
1878  void addVecListOperands(MCInst &Inst, unsigned N) const {
1879    assert(N == 1 && "Invalid number of operands!");
1880    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1881  }
1882
1883  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1884    assert(N == 2 && "Invalid number of operands!");
1885    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1886    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1887  }
1888
1889  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1890    assert(N == 1 && "Invalid number of operands!");
1891    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1892  }
1893
1894  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1895    assert(N == 1 && "Invalid number of operands!");
1896    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1897  }
1898
1899  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1900    assert(N == 1 && "Invalid number of operands!");
1901    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1902  }
1903
1904  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1905    assert(N == 1 && "Invalid number of operands!");
1906    // The immediate encodes the type of constant as well as the value.
1907    // Mask in that this is an i8 splat.
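    // For example, a value of 0x42 is emitted as 0xe42.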
1908    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1909    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1910  }
1911
1912  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1913    assert(N == 1 && "Invalid number of operands!");
1914    // The immediate encodes the type of constant as well as the value.
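    // For example, 0x42 is emitted as 0x842 and 0x4200 as 0xa42.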
1915    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1916    unsigned Value = CE->getValue();
1917    if (Value >= 256)
1918      Value = (Value >> 8) | 0xa00;
1919    else
1920      Value |= 0x800;
1921    Inst.addOperand(MCOperand::CreateImm(Value));
1922  }
1923
1924  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1925    assert(N == 1 && "Invalid number of operands!");
1926    // The immediate encodes the type of constant as well as the value.
1927    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1928    unsigned Value = CE->getValue();
1929    if (Value >= 256 && Value <= 0xff00)
1930      Value = (Value >> 8) | 0x200;
1931    else if (Value > 0xffff && Value <= 0xff0000)
1932      Value = (Value >> 16) | 0x400;
1933    else if (Value > 0xffffff)
1934      Value = (Value >> 24) | 0x600;
1935    Inst.addOperand(MCOperand::CreateImm(Value));
1936  }
1937
1938  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1939    assert(N == 1 && "Invalid number of operands!");
1940    // The immediate encodes the type of constant as well as the value.
1941    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1942    unsigned Value = CE->getValue();
1943    if (Value >= 256 && Value <= 0xffff)
1944      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1945    else if (Value > 0xffff && Value <= 0xffffff)
1946      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1947    else if (Value > 0xffffff)
1948      Value = (Value >> 24) | 0x600;
1949    Inst.addOperand(MCOperand::CreateImm(Value));
1950  }
1951
1952  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1953    assert(N == 1 && "Invalid number of operands!");
1954    // The immediate encodes the type of constant as well as the value.
1955    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1956    unsigned Value = ~CE->getValue();
1957    if (Value >= 256 && Value <= 0xffff)
1958      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1959    else if (Value > 0xffff && Value <= 0xffffff)
1960      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1961    else if (Value > 0xffffff)
1962      Value = (Value >> 24) | 0x600;
1963    Inst.addOperand(MCOperand::CreateImm(Value));
1964  }
1965
1966  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1967    assert(N == 1 && "Invalid number of operands!");
1968    // The immediate encodes the type of constant as well as the value.
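    // Bit i of the encoded immediate is the low bit of byte i of the value;
    // for example, 0x00ff00ff00ff00ff is emitted as 0x55 | 0x1e00 = 0x1e55.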
1969    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1970    uint64_t Value = CE->getValue();
1971    unsigned Imm = 0;
1972    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1973      Imm |= (Value & 1) << i;
1974    }
1975    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1976  }
1977
1978  virtual void print(raw_ostream &OS) const;
1979
1980  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1981    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1982    Op->ITMask.Mask = Mask;
1983    Op->StartLoc = S;
1984    Op->EndLoc = S;
1985    return Op;
1986  }
1987
1988  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1989    ARMOperand *Op = new ARMOperand(k_CondCode);
1990    Op->CC.Val = CC;
1991    Op->StartLoc = S;
1992    Op->EndLoc = S;
1993    return Op;
1994  }
1995
1996  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1997    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1998    Op->Cop.Val = CopVal;
1999    Op->StartLoc = S;
2000    Op->EndLoc = S;
2001    return Op;
2002  }
2003
2004  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2005    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2006    Op->Cop.Val = CopVal;
2007    Op->StartLoc = S;
2008    Op->EndLoc = S;
2009    return Op;
2010  }
2011
2012  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2013    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2014    Op->Cop.Val = Val;
2015    Op->StartLoc = S;
2016    Op->EndLoc = E;
2017    return Op;
2018  }
2019
2020  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2021    ARMOperand *Op = new ARMOperand(k_CCOut);
2022    Op->Reg.RegNum = RegNum;
2023    Op->StartLoc = S;
2024    Op->EndLoc = S;
2025    return Op;
2026  }
2027
2028  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2029    ARMOperand *Op = new ARMOperand(k_Token);
2030    Op->Tok.Data = Str.data();
2031    Op->Tok.Length = Str.size();
2032    Op->StartLoc = S;
2033    Op->EndLoc = S;
2034    return Op;
2035  }
2036
2037  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2038    ARMOperand *Op = new ARMOperand(k_Register);
2039    Op->Reg.RegNum = RegNum;
2040    Op->StartLoc = S;
2041    Op->EndLoc = E;
2042    return Op;
2043  }
2044
2045  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2046                                           unsigned SrcReg,
2047                                           unsigned ShiftReg,
2048                                           unsigned ShiftImm,
2049                                           SMLoc S, SMLoc E) {
2050    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2051    Op->RegShiftedReg.ShiftTy = ShTy;
2052    Op->RegShiftedReg.SrcReg = SrcReg;
2053    Op->RegShiftedReg.ShiftReg = ShiftReg;
2054    Op->RegShiftedReg.ShiftImm = ShiftImm;
2055    Op->StartLoc = S;
2056    Op->EndLoc = E;
2057    return Op;
2058  }
2059
2060  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2061                                            unsigned SrcReg,
2062                                            unsigned ShiftImm,
2063                                            SMLoc S, SMLoc E) {
2064    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2065    Op->RegShiftedImm.ShiftTy = ShTy;
2066    Op->RegShiftedImm.SrcReg = SrcReg;
2067    Op->RegShiftedImm.ShiftImm = ShiftImm;
2068    Op->StartLoc = S;
2069    Op->EndLoc = E;
2070    return Op;
2071  }
2072
2073  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2074                                   SMLoc S, SMLoc E) {
2075    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2076    Op->ShifterImm.isASR = isASR;
2077    Op->ShifterImm.Imm = Imm;
2078    Op->StartLoc = S;
2079    Op->EndLoc = E;
2080    return Op;
2081  }
2082
2083  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2084    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2085    Op->RotImm.Imm = Imm;
2086    Op->StartLoc = S;
2087    Op->EndLoc = E;
2088    return Op;
2089  }
2090
2091  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2092                                    SMLoc S, SMLoc E) {
2093    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2094    Op->Bitfield.LSB = LSB;
2095    Op->Bitfield.Width = Width;
2096    Op->StartLoc = S;
2097    Op->EndLoc = E;
2098    return Op;
2099  }
2100
2101  static ARMOperand *
2102  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2103                SMLoc StartLoc, SMLoc EndLoc) {
2104    KindTy Kind = k_RegisterList;
2105
2106    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2107      Kind = k_DPRRegisterList;
2108    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2109             contains(Regs.front().first))
2110      Kind = k_SPRRegisterList;
2111
2112    ARMOperand *Op = new ARMOperand(Kind);
2113    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2114           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2115      Op->Registers.push_back(I->first);
2116    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2117    Op->StartLoc = StartLoc;
2118    Op->EndLoc = EndLoc;
2119    return Op;
2120  }
2121
2122  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2123                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2124    ARMOperand *Op = new ARMOperand(k_VectorList);
2125    Op->VectorList.RegNum = RegNum;
2126    Op->VectorList.Count = Count;
2127    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2128    Op->StartLoc = S;
2129    Op->EndLoc = E;
2130    return Op;
2131  }
2132
2133  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2134                                              bool isDoubleSpaced,
2135                                              SMLoc S, SMLoc E) {
2136    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2137    Op->VectorList.RegNum = RegNum;
2138    Op->VectorList.Count = Count;
2139    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2140    Op->StartLoc = S;
2141    Op->EndLoc = E;
2142    return Op;
2143  }
2144
2145  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2146                                             unsigned Index,
2147                                             bool isDoubleSpaced,
2148                                             SMLoc S, SMLoc E) {
2149    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2150    Op->VectorList.RegNum = RegNum;
2151    Op->VectorList.Count = Count;
2152    Op->VectorList.LaneIndex = Index;
2153    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2154    Op->StartLoc = S;
2155    Op->EndLoc = E;
2156    return Op;
2157  }
2158
2159  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2160                                       MCContext &Ctx) {
2161    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2162    Op->VectorIndex.Val = Idx;
2163    Op->StartLoc = S;
2164    Op->EndLoc = E;
2165    return Op;
2166  }
2167
2168  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2169    ARMOperand *Op = new ARMOperand(k_Immediate);
2170    Op->Imm.Val = Val;
2171    Op->StartLoc = S;
2172    Op->EndLoc = E;
2173    return Op;
2174  }
2175
2176  static ARMOperand *CreateMem(unsigned BaseRegNum,
2177                               const MCConstantExpr *OffsetImm,
2178                               unsigned OffsetRegNum,
2179                               ARM_AM::ShiftOpc ShiftType,
2180                               unsigned ShiftImm,
2181                               unsigned Alignment,
2182                               bool isNegative,
2183                               SMLoc S, SMLoc E) {
2184    ARMOperand *Op = new ARMOperand(k_Memory);
2185    Op->Memory.BaseRegNum = BaseRegNum;
2186    Op->Memory.OffsetImm = OffsetImm;
2187    Op->Memory.OffsetRegNum = OffsetRegNum;
2188    Op->Memory.ShiftType = ShiftType;
2189    Op->Memory.ShiftImm = ShiftImm;
2190    Op->Memory.Alignment = Alignment;
2191    Op->Memory.isNegative = isNegative;
2192    Op->StartLoc = S;
2193    Op->EndLoc = E;
2194    return Op;
2195  }
2196
2197  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2198                                      ARM_AM::ShiftOpc ShiftTy,
2199                                      unsigned ShiftImm,
2200                                      SMLoc S, SMLoc E) {
2201    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2202    Op->PostIdxReg.RegNum = RegNum;
2203    Op->PostIdxReg.isAdd = isAdd;
2204    Op->PostIdxReg.ShiftTy = ShiftTy;
2205    Op->PostIdxReg.ShiftImm = ShiftImm;
2206    Op->StartLoc = S;
2207    Op->EndLoc = E;
2208    return Op;
2209  }
2210
2211  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2212    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2213    Op->MBOpt.Val = Opt;
2214    Op->StartLoc = S;
2215    Op->EndLoc = S;
2216    return Op;
2217  }
2218
2219  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2220    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2221    Op->IFlags.Val = IFlags;
2222    Op->StartLoc = S;
2223    Op->EndLoc = S;
2224    return Op;
2225  }
2226
2227  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2228    ARMOperand *Op = new ARMOperand(k_MSRMask);
2229    Op->MMask.Val = MMask;
2230    Op->StartLoc = S;
2231    Op->EndLoc = S;
2232    return Op;
2233  }
2234};
2235
2236} // end anonymous namespace.
2237
2238void ARMOperand::print(raw_ostream &OS) const {
2239  switch (Kind) {
2240  case k_CondCode:
2241    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2242    break;
2243  case k_CCOut:
2244    OS << "<ccout " << getReg() << ">";
2245    break;
2246  case k_ITCondMask: {
2247    static const char *MaskStr[] = {
2248      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2249      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2250    };
2251    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2252    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2253    break;
2254  }
2255  case k_CoprocNum:
2256    OS << "<coprocessor number: " << getCoproc() << ">";
2257    break;
2258  case k_CoprocReg:
2259    OS << "<coprocessor register: " << getCoproc() << ">";
2260    break;
2261  case k_CoprocOption:
2262    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2263    break;
2264  case k_MSRMask:
2265    OS << "<mask: " << getMSRMask() << ">";
2266    break;
2267  case k_Immediate:
2268    getImm()->print(OS);
2269    break;
2270  case k_MemBarrierOpt:
2271    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2272    break;
2273  case k_Memory:
2274    OS << "<memory "
2275       << " base:" << Memory.BaseRegNum;
2276    OS << ">";
2277    break;
2278  case k_PostIndexRegister:
2279    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2280       << PostIdxReg.RegNum;
2281    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2282      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2283         << PostIdxReg.ShiftImm;
2284    OS << ">";
2285    break;
2286  case k_ProcIFlags: {
2287    OS << "<ARM_PROC::";
2288    unsigned IFlags = getProcIFlags();
2289    for (int i=2; i >= 0; --i)
2290      if (IFlags & (1 << i))
2291        OS << ARM_PROC::IFlagsToString(1 << i);
2292    OS << ">";
2293    break;
2294  }
2295  case k_Register:
2296    OS << "<register " << getReg() << ">";
2297    break;
2298  case k_ShifterImmediate:
2299    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2300       << " #" << ShifterImm.Imm << ">";
2301    break;
2302  case k_ShiftedRegister:
2303    OS << "<so_reg_reg "
2304       << RegShiftedReg.SrcReg << " "
2305       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2306       << " " << RegShiftedReg.ShiftReg << ">";
2307    break;
2308  case k_ShiftedImmediate:
2309    OS << "<so_reg_imm "
2310       << RegShiftedImm.SrcReg << " "
2311       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2312       << " #" << RegShiftedImm.ShiftImm << ">";
2313    break;
2314  case k_RotateImmediate:
2315    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2316    break;
2317  case k_BitfieldDescriptor:
2318    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2319       << ", width: " << Bitfield.Width << ">";
2320    break;
2321  case k_RegisterList:
2322  case k_DPRRegisterList:
2323  case k_SPRRegisterList: {
2324    OS << "<register_list ";
2325
2326    const SmallVectorImpl<unsigned> &RegList = getRegList();
2327    for (SmallVectorImpl<unsigned>::const_iterator
2328           I = RegList.begin(), E = RegList.end(); I != E; ) {
2329      OS << *I;
2330      if (++I < E) OS << ", ";
2331    }
2332
2333    OS << ">";
2334    break;
2335  }
2336  case k_VectorList:
2337    OS << "<vector_list " << VectorList.Count << " * "
2338       << VectorList.RegNum << ">";
2339    break;
2340  case k_VectorListAllLanes:
2341    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2342       << VectorList.RegNum << ">";
2343    break;
2344  case k_VectorListIndexed:
2345    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2346       << VectorList.Count << " * " << VectorList.RegNum << ">";
2347    break;
2348  case k_Token:
2349    OS << "'" << getToken() << "'";
2350    break;
2351  case k_VectorIndex:
2352    OS << "<vectorindex " << getVectorIndex() << ">";
2353    break;
2354  }
2355}
2356
2357/// @name Auto-generated Match Functions
2358/// {
2359
2360static unsigned MatchRegisterName(StringRef Name);
2361
2362/// }
2363
2364bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2365                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2366  StartLoc = Parser.getTok().getLoc();
2367  RegNo = tryParseRegister();
2368  EndLoc = Parser.getTok().getLoc();
2369
2370  return (RegNo == (unsigned)-1);
2371}
2372
2373/// Try to parse a register name.  The token must be an Identifier when called,
2374/// and if it is a register name the token is eaten and the register number is
2375/// returned.  Otherwise return -1.
2376///
2377int ARMAsmParser::tryParseRegister() {
2378  const AsmToken &Tok = Parser.getTok();
2379  if (Tok.isNot(AsmToken::Identifier)) return -1;
2380
2381  std::string lowerCase = Tok.getString().lower();
2382  unsigned RegNum = MatchRegisterName(lowerCase);
2383  if (!RegNum) {
2384    RegNum = StringSwitch<unsigned>(lowerCase)
2385      .Case("r13", ARM::SP)
2386      .Case("r14", ARM::LR)
2387      .Case("r15", ARM::PC)
2388      .Case("ip", ARM::R12)
2389      // Additional register name aliases for 'gas' compatibility.
2390      .Case("a1", ARM::R0)
2391      .Case("a2", ARM::R1)
2392      .Case("a3", ARM::R2)
2393      .Case("a4", ARM::R3)
2394      .Case("v1", ARM::R4)
2395      .Case("v2", ARM::R5)
2396      .Case("v3", ARM::R6)
2397      .Case("v4", ARM::R7)
2398      .Case("v5", ARM::R8)
2399      .Case("v6", ARM::R9)
2400      .Case("v7", ARM::R10)
2401      .Case("v8", ARM::R11)
2402      .Case("sb", ARM::R9)
2403      .Case("sl", ARM::R10)
2404      .Case("fp", ARM::R11)
2405      .Default(0);
2406  }
2407  if (!RegNum) {
2408    // Check for aliases registered via .req. Canonicalize to lower case.
2409    // That's more consistent since register names are case insensitive, and
2410    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2411    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2412    // If no match, return failure.
2413    if (Entry == RegisterReqs.end())
2414      return -1;
2415    Parser.Lex(); // Eat identifier token.
2416    return Entry->getValue();
2417  }
2418
2419  Parser.Lex(); // Eat identifier token.
2420
2421  return RegNum;
2422}
2423
2424// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2425// If a recoverable error occurs, return 1. If an irrecoverable error
2426// occurs, return -1. An irrecoverable error is one where tokens have been
2427// consumed in the process of trying to parse the shifter (i.e., when it is
2428// indeed a shifter operand, but malformed).
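// A shifter here is, e.g., the "lsl #3" in "add r0, r1, r2, lsl #3" or the
// "lsl r3" in "mov r0, r1, lsl r3" (illustrative examples).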
2429int ARMAsmParser::tryParseShiftRegister(
2430                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2431  SMLoc S = Parser.getTok().getLoc();
2432  const AsmToken &Tok = Parser.getTok();
2433  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2434
2435  std::string lowerCase = Tok.getString().lower();
2436  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2437      .Case("asl", ARM_AM::lsl)
2438      .Case("lsl", ARM_AM::lsl)
2439      .Case("lsr", ARM_AM::lsr)
2440      .Case("asr", ARM_AM::asr)
2441      .Case("ror", ARM_AM::ror)
2442      .Case("rrx", ARM_AM::rrx)
2443      .Default(ARM_AM::no_shift);
2444
2445  if (ShiftTy == ARM_AM::no_shift)
2446    return 1;
2447
2448  Parser.Lex(); // Eat the operator.
2449
2450  // The source register for the shift has already been added to the
2451  // operand list, so we need to pop it off and combine it into the shifted
2452  // register operand instead.
2453  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2454  if (!PrevOp->isReg())
2455    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2456  int SrcReg = PrevOp->getReg();
2457  int64_t Imm = 0;
2458  int ShiftReg = 0;
2459  if (ShiftTy == ARM_AM::rrx) {
2460    // RRX Doesn't have an explicit shift amount. The encoder expects
2461    // RRX doesn't have an explicit shift amount. The encoder expects
2462    // but OK.
2463    ShiftReg = SrcReg;
2464  } else {
2465    // Figure out if this is shifted by a constant or a register (for non-RRX).
2466    if (Parser.getTok().is(AsmToken::Hash) ||
2467        Parser.getTok().is(AsmToken::Dollar)) {
2468      Parser.Lex(); // Eat hash.
2469      SMLoc ImmLoc = Parser.getTok().getLoc();
2470      const MCExpr *ShiftExpr = 0;
2471      if (getParser().ParseExpression(ShiftExpr)) {
2472        Error(ImmLoc, "invalid immediate shift value");
2473        return -1;
2474      }
2475      // The expression must be evaluatable as an immediate.
2476      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2477      if (!CE) {
2478        Error(ImmLoc, "invalid immediate shift value");
2479        return -1;
2480      }
2481      // Range check the immediate.
2482      // lsl, ror: 0 <= imm <= 31
2483      // lsr, asr: 0 <= imm <= 32
2484      Imm = CE->getValue();
2485      if (Imm < 0 ||
2486          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2487          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2488        Error(ImmLoc, "immediate shift value out of range");
2489        return -1;
2490      }
2491      // shift by zero is a nop. Always send it through as lsl.
2492      // ('as' compatibility)
2493      if (Imm == 0)
2494        ShiftTy = ARM_AM::lsl;
2495    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2496      ShiftReg = tryParseRegister();
2497      SMLoc L = Parser.getTok().getLoc();
2498      if (ShiftReg == -1) {
2499        Error (L, "expected immediate or register in shift operand");
2500        return -1;
2501      }
2502    } else {
2503      Error (Parser.getTok().getLoc(),
2504                    "expected immediate or register in shift operand");
2505      return -1;
2506    }
2507  }
2508
2509  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2510    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2511                                                         ShiftReg, Imm,
2512                                               S, Parser.getTok().getLoc()));
2513  else
2514    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2515                                               S, Parser.getTok().getLoc()));
2516
2517  return 0;
2518}
2519
2520
2521/// Try to parse a register name.  The token must be an Identifier when called.
2522/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2523/// if there is a "writeback". Returns 'true' if it's not a register.
2524///
2525/// TODO this is likely to change to allow different register types and/or to
2526/// parse for a specific register type.
2527bool ARMAsmParser::
2528tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2529  SMLoc S = Parser.getTok().getLoc();
2530  int RegNo = tryParseRegister();
2531  if (RegNo == -1)
2532    return true;
2533
2534  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2535
2536  const AsmToken &ExclaimTok = Parser.getTok();
2537  if (ExclaimTok.is(AsmToken::Exclaim)) {
2538    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2539                                               ExclaimTok.getLoc()));
2540    Parser.Lex(); // Eat exclaim token
2541    return false;
2542  }
2543
2544  // Also check for an index operand. This is only legal for vector registers,
2545  // but that'll get caught OK in operand matching, so we don't need to
2546  // explicitly filter everything else out here.
2547  if (Parser.getTok().is(AsmToken::LBrac)) {
2548    SMLoc SIdx = Parser.getTok().getLoc();
2549    Parser.Lex(); // Eat left bracket token.
2550
2551    const MCExpr *ImmVal;
2552    if (getParser().ParseExpression(ImmVal))
2553      return true;
2554    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2555    if (!MCE)
2556      return TokError("immediate value expected for vector index");
2557
2558    SMLoc E = Parser.getTok().getLoc();
2559    if (Parser.getTok().isNot(AsmToken::RBrac))
2560      return Error(E, "']' expected");
2561
2562    Parser.Lex(); // Eat right bracket token.
2563
2564    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2565                                                     SIdx, E,
2566                                                     getContext()));
2567  }
2568
2569  return false;
2570}
2571
2572/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2573/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2574/// "c5", ...
2575static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2576  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2577  // but efficient.
2578  switch (Name.size()) {
2579  default: return -1;
2580  case 2:
2581    if (Name[0] != CoprocOp)
2582      return -1;
2583    switch (Name[1]) {
2584    default:  return -1;
2585    case '0': return 0;
2586    case '1': return 1;
2587    case '2': return 2;
2588    case '3': return 3;
2589    case '4': return 4;
2590    case '5': return 5;
2591    case '6': return 6;
2592    case '7': return 7;
2593    case '8': return 8;
2594    case '9': return 9;
2595    }
2596  case 3:
2597    if (Name[0] != CoprocOp || Name[1] != '1')
2598      return -1;
2599    switch (Name[2]) {
2600    default:  return -1;
2601    case '0': return 10;
2602    case '1': return 11;
2603    case '2': return 12;
2604    case '3': return 13;
2605    case '4': return 14;
2606    case '5': return 15;
2607    }
2608  }
2609}
2610
2611/// parseITCondCode - Try to parse a condition code for an IT instruction.
2612ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2613parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2614  SMLoc S = Parser.getTok().getLoc();
2615  const AsmToken &Tok = Parser.getTok();
2616  if (!Tok.is(AsmToken::Identifier))
2617    return MatchOperand_NoMatch;
2618  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2619    .Case("eq", ARMCC::EQ)
2620    .Case("ne", ARMCC::NE)
2621    .Case("hs", ARMCC::HS)
2622    .Case("cs", ARMCC::HS)
2623    .Case("lo", ARMCC::LO)
2624    .Case("cc", ARMCC::LO)
2625    .Case("mi", ARMCC::MI)
2626    .Case("pl", ARMCC::PL)
2627    .Case("vs", ARMCC::VS)
2628    .Case("vc", ARMCC::VC)
2629    .Case("hi", ARMCC::HI)
2630    .Case("ls", ARMCC::LS)
2631    .Case("ge", ARMCC::GE)
2632    .Case("lt", ARMCC::LT)
2633    .Case("gt", ARMCC::GT)
2634    .Case("le", ARMCC::LE)
2635    .Case("al", ARMCC::AL)
2636    .Default(~0U);
2637  if (CC == ~0U)
2638    return MatchOperand_NoMatch;
2639  Parser.Lex(); // Eat the token.
2640
2641  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2642
2643  return MatchOperand_Success;
2644}
2645
2646/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2647/// token must be an Identifier when called, and if it is a coprocessor
2648/// number, the token is eaten and the operand is added to the operand list.
2649ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2650parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2651  SMLoc S = Parser.getTok().getLoc();
2652  const AsmToken &Tok = Parser.getTok();
2653  if (Tok.isNot(AsmToken::Identifier))
2654    return MatchOperand_NoMatch;
2655
2656  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2657  if (Num == -1)
2658    return MatchOperand_NoMatch;
2659
2660  Parser.Lex(); // Eat identifier token.
2661  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2662  return MatchOperand_Success;
2663}
2664
2665/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2666/// token must be an Identifier when called, and if it is a coprocessor
2667/// register, the token is eaten and the operand is added to the operand list.
2668ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2669parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2670  SMLoc S = Parser.getTok().getLoc();
2671  const AsmToken &Tok = Parser.getTok();
2672  if (Tok.isNot(AsmToken::Identifier))
2673    return MatchOperand_NoMatch;
2674
2675  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2676  if (Reg == -1)
2677    return MatchOperand_NoMatch;
2678
2679  Parser.Lex(); // Eat identifier token.
2680  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2681  return MatchOperand_Success;
2682}
2683
2684/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2685/// coproc_option : '{' imm0_255 '}'
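/// For example, the "{ 8 }" in "ldc p0, c4, [r1], { 8 }" (illustrative).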
2686ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2687parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2688  SMLoc S = Parser.getTok().getLoc();
2689
2690  // If this isn't a '{', this isn't a coprocessor immediate operand.
2691  if (Parser.getTok().isNot(AsmToken::LCurly))
2692    return MatchOperand_NoMatch;
2693  Parser.Lex(); // Eat the '{'
2694
2695  const MCExpr *Expr;
2696  SMLoc Loc = Parser.getTok().getLoc();
2697  if (getParser().ParseExpression(Expr)) {
2698    Error(Loc, "illegal expression");
2699    return MatchOperand_ParseFail;
2700  }
2701  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2702  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2703    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2704    return MatchOperand_ParseFail;
2705  }
2706  int Val = CE->getValue();
2707
2708  // Check for and consume the closing '}'
2709  if (Parser.getTok().isNot(AsmToken::RCurly))
2710    return MatchOperand_ParseFail;
2711  SMLoc E = Parser.getTok().getLoc();
2712  Parser.Lex(); // Eat the '}'
2713
2714  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2715  return MatchOperand_Success;
2716}
2717
2718// For register list parsing, we need to map from raw GPR register numbering
2719// to the enumeration values. The enumeration values aren't sorted by
2720// register number due to our using "sp", "lr" and "pc" as canonical names.
2721static unsigned getNextRegister(unsigned Reg) {
2722  // If this is a GPR, we need to do it manually, otherwise we can rely
2723  // on the sort ordering of the enumeration since the other reg-classes
2724  // are sane.
2725  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2726    return Reg + 1;
2727  switch(Reg) {
2728  default: llvm_unreachable("Invalid GPR number!");
2729  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2730  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2731  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2732  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2733  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2734  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2735  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2736  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2737  }
2738}
2739
2740// Return the low-subreg of a given Q register.
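// Qn overlaps D(2n) and D(2n+1), so e.g. Q3 maps to D6.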
2741static unsigned getDRegFromQReg(unsigned QReg) {
2742  switch (QReg) {
2743  default: llvm_unreachable("expected a Q register!");
2744  case ARM::Q0:  return ARM::D0;
2745  case ARM::Q1:  return ARM::D2;
2746  case ARM::Q2:  return ARM::D4;
2747  case ARM::Q3:  return ARM::D6;
2748  case ARM::Q4:  return ARM::D8;
2749  case ARM::Q5:  return ARM::D10;
2750  case ARM::Q6:  return ARM::D12;
2751  case ARM::Q7:  return ARM::D14;
2752  case ARM::Q8:  return ARM::D16;
2753  case ARM::Q9:  return ARM::D18;
2754  case ARM::Q10: return ARM::D20;
2755  case ARM::Q11: return ARM::D22;
2756  case ARM::Q12: return ARM::D24;
2757  case ARM::Q13: return ARM::D26;
2758  case ARM::Q14: return ARM::D28;
2759  case ARM::Q15: return ARM::D30;
2760  }
2761}
2762
2763/// Parse a register list.
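/// e.g. "{r0, r1, r4-r7, lr}" or "{d0-d3}" (illustrative examples).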
2764bool ARMAsmParser::
2765parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2766  assert(Parser.getTok().is(AsmToken::LCurly) &&
2767         "Token is not a Left Curly Brace");
2768  SMLoc S = Parser.getTok().getLoc();
2769  Parser.Lex(); // Eat '{' token.
2770  SMLoc RegLoc = Parser.getTok().getLoc();
2771
2772  // Check the first register in the list to see what register class
2773  // this is a list of.
2774  int Reg = tryParseRegister();
2775  if (Reg == -1)
2776    return Error(RegLoc, "register expected");
2777
2778  // The reglist instructions have at most 16 registers, so reserve
2779  // space for that many.
2780  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2781
2782  // Allow Q regs and just interpret them as the two D sub-registers.
2783  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2784    Reg = getDRegFromQReg(Reg);
2785    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2786    ++Reg;
2787  }
2788  const MCRegisterClass *RC;
2789  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2790    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2791  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2792    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2793  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2794    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2795  else
2796    return Error(RegLoc, "invalid register in register list");
2797
2798  // Store the register.
2799  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2800
2801  // This starts immediately after the first register token in the list,
2802  // so we can see either a comma or a minus (range separator) as a legal
2803  // next token.
2804  while (Parser.getTok().is(AsmToken::Comma) ||
2805         Parser.getTok().is(AsmToken::Minus)) {
2806    if (Parser.getTok().is(AsmToken::Minus)) {
2807      Parser.Lex(); // Eat the minus.
2808      SMLoc EndLoc = Parser.getTok().getLoc();
2809      int EndReg = tryParseRegister();
2810      if (EndReg == -1)
2811        return Error(EndLoc, "register expected");
2812      // Allow Q regs and just interpret them as the two D sub-registers.
2813      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2814        EndReg = getDRegFromQReg(EndReg) + 1;
2815      // If the register is the same as the start reg, there's nothing
2816      // more to do.
2817      if (Reg == EndReg)
2818        continue;
2819      // The register must be in the same register class as the first.
2820      if (!RC->contains(EndReg))
2821        return Error(EndLoc, "invalid register in register list");
2822      // Ranges must go from low to high.
2823      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2824        return Error(EndLoc, "bad range in register list");
2825
2826      // Add all the registers in the range to the register list.
2827      while (Reg != EndReg) {
2828        Reg = getNextRegister(Reg);
2829        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2830      }
2831      continue;
2832    }
2833    Parser.Lex(); // Eat the comma.
2834    RegLoc = Parser.getTok().getLoc();
2835    int OldReg = Reg;
2836    const AsmToken RegTok = Parser.getTok();
2837    Reg = tryParseRegister();
2838    if (Reg == -1)
2839      return Error(RegLoc, "register expected");
2840    // Allow Q regs and just interpret them as the two D sub-registers.
2841    bool isQReg = false;
2842    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2843      Reg = getDRegFromQReg(Reg);
2844      isQReg = true;
2845    }
2846    // The register must be in the same register class as the first.
2847    if (!RC->contains(Reg))
2848      return Error(RegLoc, "invalid register in register list");
2849    // List must be monotonically increasing.
2850    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2851      return Error(RegLoc, "register list not in ascending order");
2852    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2853      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2854              ") in register list");
2855      continue;
2856    }
2857    // VFP register lists must also be contiguous.
2858    // It's OK to use the enumeration values directly here, as the
2859    // VFP register classes have the enum sorted properly.
2860    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2861        Reg != OldReg + 1)
2862      return Error(RegLoc, "non-contiguous register range");
2863    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2864    if (isQReg)
2865      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2866  }
2867
2868  SMLoc E = Parser.getTok().getLoc();
2869  if (Parser.getTok().isNot(AsmToken::RCurly))
2870    return Error(E, "'}' expected");
2871  Parser.Lex(); // Eat '}' token.
2872
2873  // Push the register list operand.
2874  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2875
2876  // The ARM system instruction variants for LDM/STM have a '^' token here.
2877  if (Parser.getTok().is(AsmToken::Caret)) {
2878    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2879    Parser.Lex(); // Eat '^' token.
2880  }
2881
2882  return false;
2883}
2884
2885// Helper function to parse the lane index for vector lists.
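// For illustration (editorial addition, not in the original source), the lane
// suffix forms recognized below are:
//   <none>  -> NoLanes      (e.g. "d0")
//   "[]"    -> AllLanes     (e.g. "d0[]")
//   "[1]"   -> IndexedLane  (e.g. "d0[1]", with Index = 1)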
2886ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2887parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2888  Index = 0; // Always return a defined index value.
2889  if (Parser.getTok().is(AsmToken::LBrac)) {
2890    Parser.Lex(); // Eat the '['.
2891    if (Parser.getTok().is(AsmToken::RBrac)) {
2892      // "Dn[]" is the 'all lanes' syntax.
2893      LaneKind = AllLanes;
2894      Parser.Lex(); // Eat the ']'.
2895      return MatchOperand_Success;
2896    }
2897    const MCExpr *LaneIndex;
2898    SMLoc Loc = Parser.getTok().getLoc();
2899    if (getParser().ParseExpression(LaneIndex)) {
2900      Error(Loc, "illegal expression");
2901      return MatchOperand_ParseFail;
2902    }
2903    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2904    if (!CE) {
2905      Error(Loc, "lane index must be empty or an integer");
2906      return MatchOperand_ParseFail;
2907    }
2908    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2909      Error(Parser.getTok().getLoc(), "']' expected");
2910      return MatchOperand_ParseFail;
2911    }
2912    Parser.Lex(); // Eat the ']'.
2913    int64_t Val = CE->getValue();
2914
2915    // FIXME: Make this range check context sensitive for .8, .16, .32.
2916    if (Val < 0 || Val > 7) {
2917      Error(Parser.getTok().getLoc(), "lane index out of range");
2918      return MatchOperand_ParseFail;
2919    }
2920    Index = Val;
2921    LaneKind = IndexedLane;
2922    return MatchOperand_Success;
2923  }
2924  LaneKind = NoLanes;
2925  return MatchOperand_Success;
2926}
2927
2928// parse a vector register list
2929ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2930parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2931  VectorLaneTy LaneKind;
2932  unsigned LaneIndex;
2933  SMLoc S = Parser.getTok().getLoc();
2934  // As an extension (to match gas), support a plain D register or Q register
2935  // (without enclosing curly braces) as a single or double entry list,
2936  // respectively.
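  // (Editorial example, not in the original source: a bare "d2" is treated as
  // "{d2}", and a bare "q3" as "{d6, d7}", since a Q register is simply a pair
  // of consecutive D registers.)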
2937  if (Parser.getTok().is(AsmToken::Identifier)) {
2938    int Reg = tryParseRegister();
2939    if (Reg == -1)
2940      return MatchOperand_NoMatch;
2941    SMLoc E = Parser.getTok().getLoc();
2942    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2943      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2944      if (Res != MatchOperand_Success)
2945        return Res;
2946      switch (LaneKind) {
2947      case NoLanes:
2948        E = Parser.getTok().getLoc();
2949        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2950        break;
2951      case AllLanes:
2952        E = Parser.getTok().getLoc();
2953        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2954                                                                S, E));
2955        break;
2956      case IndexedLane:
2957        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2958                                                               LaneIndex,
2959                                                               false, S, E));
2960        break;
2961      }
2962      return MatchOperand_Success;
2963    }
2964    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2965      Reg = getDRegFromQReg(Reg);
2966      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2967      if (Res != MatchOperand_Success)
2968        return Res;
2969      switch (LaneKind) {
2970      case NoLanes:
2971        E = Parser.getTok().getLoc();
2972        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2973        break;
2974      case AllLanes:
2975        E = Parser.getTok().getLoc();
2976        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2977                                                                S, E));
2978        break;
2979      case IndexedLane:
2980        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2981                                                               LaneIndex,
2982                                                               false, S, E));
2983        break;
2984      }
2985      return MatchOperand_Success;
2986    }
2987    Error(S, "vector register expected");
2988    return MatchOperand_ParseFail;
2989  }
2990
2991  if (Parser.getTok().isNot(AsmToken::LCurly))
2992    return MatchOperand_NoMatch;
2993
2994  Parser.Lex(); // Eat '{' token.
2995  SMLoc RegLoc = Parser.getTok().getLoc();
2996
2997  int Reg = tryParseRegister();
2998  if (Reg == -1) {
2999    Error(RegLoc, "register expected");
3000    return MatchOperand_ParseFail;
3001  }
3002  unsigned Count = 1;
3003  int Spacing = 0;
3004  unsigned FirstReg = Reg;
3005  // The list is of D registers, but we also allow Q regs and just interpret
3006  // them as the two D sub-registers.
3007  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3008    FirstReg = Reg = getDRegFromQReg(Reg);
3009    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3010                 // it's ambiguous with four-register single spaced.
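    // (Editorial example: "{d0, d2, d4, d6}" is a double-spaced list, whereas
    // a Q register such as "q0" always expands to single-spaced "{d0, d1}".)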
3011    ++Reg;
3012    ++Count;
3013  }
3014  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3015    return MatchOperand_ParseFail;
3016
3017  while (Parser.getTok().is(AsmToken::Comma) ||
3018         Parser.getTok().is(AsmToken::Minus)) {
3019    if (Parser.getTok().is(AsmToken::Minus)) {
3020      if (!Spacing)
3021        Spacing = 1; // Register range implies a single spaced list.
3022      else if (Spacing == 2) {
3023        Error(Parser.getTok().getLoc(),
3024              "sequential registers in double spaced list");
3025        return MatchOperand_ParseFail;
3026      }
3027      Parser.Lex(); // Eat the minus.
3028      SMLoc EndLoc = Parser.getTok().getLoc();
3029      int EndReg = tryParseRegister();
3030      if (EndReg == -1) {
3031        Error(EndLoc, "register expected");
3032        return MatchOperand_ParseFail;
3033      }
3034      // Allow Q regs and just interpret them as the two D sub-registers.
3035      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3036        EndReg = getDRegFromQReg(EndReg) + 1;
3037      // If the register is the same as the start reg, there's nothing
3038      // more to do.
3039      if (Reg == EndReg)
3040        continue;
3041      // The register must be in the same register class as the first.
3042      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3043        Error(EndLoc, "invalid register in register list");
3044        return MatchOperand_ParseFail;
3045      }
3046      // Ranges must go from low to high.
3047      if (Reg > EndReg) {
3048        Error(EndLoc, "bad range in register list");
3049        return MatchOperand_ParseFail;
3050      }
3051      // Parse the lane specifier if present.
3052      VectorLaneTy NextLaneKind;
3053      unsigned NextLaneIndex;
3054      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3055        return MatchOperand_ParseFail;
3056      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3057        Error(EndLoc, "mismatched lane index in register list");
3058        return MatchOperand_ParseFail;
3059      }
3060      EndLoc = Parser.getTok().getLoc();
3061
3062      // Add all the registers in the range to the register list.
3063      Count += EndReg - Reg;
3064      Reg = EndReg;
3065      continue;
3066    }
3067    Parser.Lex(); // Eat the comma.
3068    RegLoc = Parser.getTok().getLoc();
3069    int OldReg = Reg;
3070    Reg = tryParseRegister();
3071    if (Reg == -1) {
3072      Error(RegLoc, "register expected");
3073      return MatchOperand_ParseFail;
3074    }
3075    // Vector register lists must be contiguous.
3076    // It's OK to use the enumeration values directly here, as the
3077    // VFP register classes have the enum sorted properly.
3078    //
3079    // The list is of D registers, but we also allow Q regs and just interpret
3080    // them as the two D sub-registers.
3081    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3082      if (!Spacing)
3083        Spacing = 1; // Register range implies a single spaced list.
3084      else if (Spacing == 2) {
3085        Error(RegLoc,
3086              "invalid register in double-spaced list (must be 'D' register)");
3087        return MatchOperand_ParseFail;
3088      }
3089      Reg = getDRegFromQReg(Reg);
3090      if (Reg != OldReg + 1) {
3091        Error(RegLoc, "non-contiguous register range");
3092        return MatchOperand_ParseFail;
3093      }
3094      ++Reg;
3095      Count += 2;
3096      // Parse the lane specifier if present.
3097      VectorLaneTy NextLaneKind;
3098      unsigned NextLaneIndex;
3099      SMLoc EndLoc = Parser.getTok().getLoc();
3100      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3101        return MatchOperand_ParseFail;
3102      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3103        Error(EndLoc, "mismatched lane index in register list");
3104        return MatchOperand_ParseFail;
3105      }
3106      continue;
3107    }
3108    // Normal D register.
3109    // Figure out the register spacing (single or double) of the list if
3110    // we don't know it already.
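    // (Editorial example: in "{d0, d1, ...}" the second register yields
    // Spacing == 1, while in "{d0, d2, ...}" it yields Spacing == 2.)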
3111    if (!Spacing)
3112      Spacing = 1 + (Reg == OldReg + 2);
3113
3114    // Just check that it's contiguous and keep going.
3115    if (Reg != OldReg + Spacing) {
3116      Error(RegLoc, "non-contiguous register range");
3117      return MatchOperand_ParseFail;
3118    }
3119    ++Count;
3120    // Parse the lane specifier if present.
3121    VectorLaneTy NextLaneKind;
3122    unsigned NextLaneIndex;
3123    SMLoc EndLoc = Parser.getTok().getLoc();
3124    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3125      return MatchOperand_ParseFail;
3126    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3127      Error(EndLoc, "mismatched lane index in register list");
3128      return MatchOperand_ParseFail;
3129    }
3130  }
3131
3132  SMLoc E = Parser.getTok().getLoc();
3133  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3134    Error(E, "'}' expected");
3135    return MatchOperand_ParseFail;
3136  }
3137  Parser.Lex(); // Eat '}' token.
3138
3139  switch (LaneKind) {
3140  case NoLanes:
3141    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3142                                                    (Spacing == 2), S, E));
3143    break;
3144  case AllLanes:
3145    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3146                                                            (Spacing == 2),
3147                                                            S, E));
3148    break;
3149  case IndexedLane:
3150    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3151                                                           LaneIndex,
3152                                                           (Spacing == 2),
3153                                                           S, E));
3154    break;
3155  }
3156  return MatchOperand_Success;
3157}
3158
3159/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
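/// For example (editorial illustration): "dmb ish", "dsb sy", and "dmb oshst"
/// are accepted, as are the legacy option spellings "sh", "un", and "unst".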
3160ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3161parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3162  SMLoc S = Parser.getTok().getLoc();
3163  const AsmToken &Tok = Parser.getTok();
3164  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3165  StringRef OptStr = Tok.getString();
3166
3167  unsigned Opt = StringSwitch<unsigned>(OptStr)
3168    .Case("sy",    ARM_MB::SY)
3169    .Case("st",    ARM_MB::ST)
3170    .Case("sh",    ARM_MB::ISH)
3171    .Case("ish",   ARM_MB::ISH)
3172    .Case("shst",  ARM_MB::ISHST)
3173    .Case("ishst", ARM_MB::ISHST)
3174    .Case("nsh",   ARM_MB::NSH)
3175    .Case("un",    ARM_MB::NSH)
3176    .Case("nshst", ARM_MB::NSHST)
3177    .Case("unst",  ARM_MB::NSHST)
3178    .Case("osh",   ARM_MB::OSH)
3179    .Case("oshst", ARM_MB::OSHST)
3180    .Default(~0U);
3181
3182  if (Opt == ~0U)
3183    return MatchOperand_NoMatch;
3184
3185  Parser.Lex(); // Eat identifier token.
3186  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3187  return MatchOperand_Success;
3188}
3189
3190/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
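/// For example (editorial illustration): the "if" in "cpsie if" or the "aif"
/// in "cpsid aif"; the spelling "none" selects no AIF bits at all.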
3191ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3192parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3193  SMLoc S = Parser.getTok().getLoc();
3194  const AsmToken &Tok = Parser.getTok();
3195  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3196  StringRef IFlagsStr = Tok.getString();
3197
3198  // An iflags string of "none" is interpreted to mean that none of the AIF
3199  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3200  unsigned IFlags = 0;
3201  if (IFlagsStr != "none") {
3202    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3203      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3204        .Case("a", ARM_PROC::A)
3205        .Case("i", ARM_PROC::I)
3206        .Case("f", ARM_PROC::F)
3207        .Default(~0U);
3208
3209      // If some specific iflag is already set, it means that some letter is
3210      // present more than once, which is not acceptable.
3211      if (Flag == ~0U || (IFlags & Flag))
3212        return MatchOperand_NoMatch;
3213
3214      IFlags |= Flag;
3215    }
3216  }
3217
3218  Parser.Lex(); // Eat identifier token.
3219  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3220  return MatchOperand_Success;
3221}
3222
3223/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
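/// For example (editorial illustration): "msr cpsr_fc, r0" and
/// "msr apsr_nzcvq, r0" use the spec_reg/flags form handled below, while
/// M-class targets use names such as "primask" or "basepri" directly.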
3224ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3225parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3226  SMLoc S = Parser.getTok().getLoc();
3227  const AsmToken &Tok = Parser.getTok();
3228  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3229  StringRef Mask = Tok.getString();
3230
3231  if (isMClass()) {
3232    // See ARMv6-M 10.1.1
3233    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3234      .Case("apsr", 0)
3235      .Case("iapsr", 1)
3236      .Case("eapsr", 2)
3237      .Case("xpsr", 3)
3238      .Case("ipsr", 5)
3239      .Case("epsr", 6)
3240      .Case("iepsr", 7)
3241      .Case("msp", 8)
3242      .Case("psp", 9)
3243      .Case("primask", 16)
3244      .Case("basepri", 17)
3245      .Case("basepri_max", 18)
3246      .Case("faultmask", 19)
3247      .Case("control", 20)
3248      .Default(~0U);
3249
3250    if (FlagsVal == ~0U)
3251      return MatchOperand_NoMatch;
3252
3253    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3254      // basepri, basepri_max and faultmask are only valid for v7-M.
3255      return MatchOperand_NoMatch;
3256
3257    Parser.Lex(); // Eat identifier token.
3258    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3259    return MatchOperand_Success;
3260  }
3261
3262  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3263  size_t Start = 0, Next = Mask.find('_');
3264  StringRef Flags = "";
3265  std::string SpecReg = Mask.slice(Start, Next).lower();
3266  if (Next != StringRef::npos)
3267    Flags = Mask.slice(Next+1, Mask.size());
3268
3269  // FlagsVal contains the complete mask:
3270  // 3-0: Mask
3271  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3272  unsigned FlagsVal = 0;
3273
3274  if (SpecReg == "apsr") {
3275    FlagsVal = StringSwitch<unsigned>(Flags)
3276    .Case("nzcvq",  0x8) // same as CPSR_f
3277    .Case("g",      0x4) // same as CPSR_s
3278    .Case("nzcvqg", 0xc) // same as CPSR_fs
3279    .Default(~0U);
3280
3281    if (FlagsVal == ~0U) {
3282      if (!Flags.empty())
3283        return MatchOperand_NoMatch;
3284      else
3285        FlagsVal = 8; // No flag
3286    }
3287  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3288    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3289      Flags = "fc";
3290    for (int i = 0, e = Flags.size(); i != e; ++i) {
3291      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3292      .Case("c", 1)
3293      .Case("x", 2)
3294      .Case("s", 4)
3295      .Case("f", 8)
3296      .Default(~0U);
3297
3298      // If some specific flag is already set, it means that some letter is
3299      // present more than once, which is not acceptable.
3300      if (Flag == ~0U || (FlagsVal & Flag))
3301        return MatchOperand_NoMatch;
3302      FlagsVal |= Flag;
3303    }
3304  } else // No match for special register.
3305    return MatchOperand_NoMatch;
3306
3307  // Special register without flags is NOT equivalent to "fc" flags.
3308  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3309  // two lines would enable gas compatibility at the expense of breaking
3310  // round-tripping.
3311  //
3312  // if (!FlagsVal)
3313  //  FlagsVal = 0x9;
3314
3315  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3316  if (SpecReg == "spsr")
3317    FlagsVal |= 16;
3318
3319  Parser.Lex(); // Eat identifier token.
3320  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3321  return MatchOperand_Success;
3322}
3323
3324ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3325parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3326            int Low, int High) {
3327  const AsmToken &Tok = Parser.getTok();
3328  if (Tok.isNot(AsmToken::Identifier)) {
3329    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3330    return MatchOperand_ParseFail;
3331  }
3332  StringRef ShiftName = Tok.getString();
3333  std::string LowerOp = Op.lower();
3334  std::string UpperOp = Op.upper();
3335  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3336    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3337    return MatchOperand_ParseFail;
3338  }
3339  Parser.Lex(); // Eat shift type token.
3340
3341  // There must be a '#' and a shift amount.
3342  if (Parser.getTok().isNot(AsmToken::Hash) &&
3343      Parser.getTok().isNot(AsmToken::Dollar)) {
3344    Error(Parser.getTok().getLoc(), "'#' expected");
3345    return MatchOperand_ParseFail;
3346  }
3347  Parser.Lex(); // Eat hash token.
3348
3349  const MCExpr *ShiftAmount;
3350  SMLoc Loc = Parser.getTok().getLoc();
3351  if (getParser().ParseExpression(ShiftAmount)) {
3352    Error(Loc, "illegal expression");
3353    return MatchOperand_ParseFail;
3354  }
3355  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3356  if (!CE) {
3357    Error(Loc, "constant expression expected");
3358    return MatchOperand_ParseFail;
3359  }
3360  int Val = CE->getValue();
3361  if (Val < Low || Val > High) {
3362    Error(Loc, "immediate value out of range");
3363    return MatchOperand_ParseFail;
3364  }
3365
3366  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3367
3368  return MatchOperand_Success;
3369}
3370
3371ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3372parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3373  const AsmToken &Tok = Parser.getTok();
3374  SMLoc S = Tok.getLoc();
3375  if (Tok.isNot(AsmToken::Identifier)) {
3376    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3377    return MatchOperand_ParseFail;
3378  }
3379  int Val = StringSwitch<int>(Tok.getString())
3380    .Case("be", 1)
3381    .Case("le", 0)
3382    .Default(-1);
3383  Parser.Lex(); // Eat the token.
3384
3385  if (Val == -1) {
3386    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3387    return MatchOperand_ParseFail;
3388  }
3389  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3390                                                                  getContext()),
3391                                           S, Parser.getTok().getLoc()));
3392  return MatchOperand_Success;
3393}
3394
3395/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3396/// instructions. Legal values are:
3397///     lsl #n  'n' in [0,31]
3398///     asr #n  'n' in [1,32]
3399///             n == 32 encoded as n == 0.
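/// For example (editorial illustration): "ssat r0, #8, r1, lsl #4", or
/// "usat r0, #7, r1, asr #32" in ARM mode (encoded with a shift amount of 0
/// and rejected in Thumb mode, as checked below).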
3400ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3401parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3402  const AsmToken &Tok = Parser.getTok();
3403  SMLoc S = Tok.getLoc();
3404  if (Tok.isNot(AsmToken::Identifier)) {
3405    Error(S, "shift operator 'asr' or 'lsl' expected");
3406    return MatchOperand_ParseFail;
3407  }
3408  StringRef ShiftName = Tok.getString();
3409  bool isASR;
3410  if (ShiftName == "lsl" || ShiftName == "LSL")
3411    isASR = false;
3412  else if (ShiftName == "asr" || ShiftName == "ASR")
3413    isASR = true;
3414  else {
3415    Error(S, "shift operator 'asr' or 'lsl' expected");
3416    return MatchOperand_ParseFail;
3417  }
3418  Parser.Lex(); // Eat the operator.
3419
3420  // A '#' and a shift amount.
3421  if (Parser.getTok().isNot(AsmToken::Hash) &&
3422      Parser.getTok().isNot(AsmToken::Dollar)) {
3423    Error(Parser.getTok().getLoc(), "'#' expected");
3424    return MatchOperand_ParseFail;
3425  }
3426  Parser.Lex(); // Eat hash token.
3427
3428  const MCExpr *ShiftAmount;
3429  SMLoc E = Parser.getTok().getLoc();
3430  if (getParser().ParseExpression(ShiftAmount)) {
3431    Error(E, "malformed shift expression");
3432    return MatchOperand_ParseFail;
3433  }
3434  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3435  if (!CE) {
3436    Error(E, "shift amount must be an immediate");
3437    return MatchOperand_ParseFail;
3438  }
3439
3440  int64_t Val = CE->getValue();
3441  if (isASR) {
3442    // Shift amount must be in [1,32]
3443    if (Val < 1 || Val > 32) {
3444      Error(E, "'asr' shift amount must be in range [1,32]");
3445      return MatchOperand_ParseFail;
3446    }
3447    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3448    if (isThumb() && Val == 32) {
3449      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3450      return MatchOperand_ParseFail;
3451    }
3452    if (Val == 32) Val = 0;
3453  } else {
3454    // Shift amount must be in [0,31]
3455    if (Val < 0 || Val > 31) {
3456      Error(E, "'lsl' shift amount must be in range [0,31]");
3457      return MatchOperand_ParseFail;
3458    }
3459  }
3460
3461  E = Parser.getTok().getLoc();
3462  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3463
3464  return MatchOperand_Success;
3465}
3466
3467/// parseRotImm - Parse the rotate immediate operand for SXTB/UXTB family
3468/// of instructions. Legal values are:
3469///     ror #n  'n' in {0, 8, 16, 24}
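/// For example (editorial illustration): "sxtb r0, r1, ror #16" or
/// "uxtah r0, r1, r2, ror #8".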
3470ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3471parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3472  const AsmToken &Tok = Parser.getTok();
3473  SMLoc S = Tok.getLoc();
3474  if (Tok.isNot(AsmToken::Identifier))
3475    return MatchOperand_NoMatch;
3476  StringRef ShiftName = Tok.getString();
3477  if (ShiftName != "ror" && ShiftName != "ROR")
3478    return MatchOperand_NoMatch;
3479  Parser.Lex(); // Eat the operator.
3480
3481  // A '#' and a rotate amount.
3482  if (Parser.getTok().isNot(AsmToken::Hash) &&
3483      Parser.getTok().isNot(AsmToken::Dollar)) {
3484    Error(Parser.getTok().getLoc(), "'#' expected");
3485    return MatchOperand_ParseFail;
3486  }
3487  Parser.Lex(); // Eat hash token.
3488
3489  const MCExpr *ShiftAmount;
3490  SMLoc E = Parser.getTok().getLoc();
3491  if (getParser().ParseExpression(ShiftAmount)) {
3492    Error(E, "malformed rotate expression");
3493    return MatchOperand_ParseFail;
3494  }
3495  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3496  if (!CE) {
3497    Error(E, "rotate amount must be an immediate");
3498    return MatchOperand_ParseFail;
3499  }
3500
3501  int64_t Val = CE->getValue();
3502  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3503  // normally, zero is represented in asm by omitting the rotate operand
3504  // entirely.
3505  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3506    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3507    return MatchOperand_ParseFail;
3508  }
3509
3510  E = Parser.getTok().getLoc();
3511  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3512
3513  return MatchOperand_Success;
3514}
3515
3516ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3517parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3518  SMLoc S = Parser.getTok().getLoc();
3519  // The bitfield descriptor is really two operands, the LSB and the width.
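  // (Editorial example: in "bfi r0, r1, #8, #4" the lsb is 8 and the width
  // is 4.)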
3520  if (Parser.getTok().isNot(AsmToken::Hash) &&
3521      Parser.getTok().isNot(AsmToken::Dollar)) {
3522    Error(Parser.getTok().getLoc(), "'#' expected");
3523    return MatchOperand_ParseFail;
3524  }
3525  Parser.Lex(); // Eat hash token.
3526
3527  const MCExpr *LSBExpr;
3528  SMLoc E = Parser.getTok().getLoc();
3529  if (getParser().ParseExpression(LSBExpr)) {
3530    Error(E, "malformed immediate expression");
3531    return MatchOperand_ParseFail;
3532  }
3533  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3534  if (!CE) {
3535    Error(E, "'lsb' operand must be an immediate");
3536    return MatchOperand_ParseFail;
3537  }
3538
3539  int64_t LSB = CE->getValue();
3540  // The LSB must be in the range [0,31]
3541  if (LSB < 0 || LSB > 31) {
3542    Error(E, "'lsb' operand must be in the range [0,31]");
3543    return MatchOperand_ParseFail;
3544  }
3545  E = Parser.getTok().getLoc();
3546
3547  // Expect another immediate operand.
3548  if (Parser.getTok().isNot(AsmToken::Comma)) {
3549    Error(Parser.getTok().getLoc(), "too few operands");
3550    return MatchOperand_ParseFail;
3551  }
3552  Parser.Lex(); // Eat comma token.
3553  if (Parser.getTok().isNot(AsmToken::Hash) &&
3554      Parser.getTok().isNot(AsmToken::Dollar)) {
3555    Error(Parser.getTok().getLoc(), "'#' expected");
3556    return MatchOperand_ParseFail;
3557  }
3558  Parser.Lex(); // Eat hash token.
3559
3560  const MCExpr *WidthExpr;
3561  if (getParser().ParseExpression(WidthExpr)) {
3562    Error(E, "malformed immediate expression");
3563    return MatchOperand_ParseFail;
3564  }
3565  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3566  if (!CE) {
3567    Error(E, "'width' operand must be an immediate");
3568    return MatchOperand_ParseFail;
3569  }
3570
3571  int64_t Width = CE->getValue();
3572  // The width must be in the range [1,32-lsb]
3573  if (Width < 1 || Width > 32 - LSB) {
3574    Error(E, "'width' operand must be in the range [1,32-lsb]");
3575    return MatchOperand_ParseFail;
3576  }
3577  E = Parser.getTok().getLoc();
3578
3579  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3580
3581  return MatchOperand_Success;
3582}
3583
3584ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3585parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3586  // Check for a post-index addressing register operand. Specifically:
3587  // postidx_reg := '+' register {, shift}
3588  //              | '-' register {, shift}
3589  //              | register {, shift}
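  // For example (editorial illustration): the "r2", "-r2", or "r2, lsl #2" in
  // a post-indexed load such as "ldr r0, [r1], r2, lsl #2".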
3590
3591  // This method must return MatchOperand_NoMatch without consuming any tokens
3592  // in the case where there is no match, as other alternatives will be tried
3593  // by other parse methods.
3594  AsmToken Tok = Parser.getTok();
3595  SMLoc S = Tok.getLoc();
3596  bool haveEaten = false;
3597  bool isAdd = true;
3598  int Reg = -1;
3599  if (Tok.is(AsmToken::Plus)) {
3600    Parser.Lex(); // Eat the '+' token.
3601    haveEaten = true;
3602  } else if (Tok.is(AsmToken::Minus)) {
3603    Parser.Lex(); // Eat the '-' token.
3604    isAdd = false;
3605    haveEaten = true;
3606  }
3607  if (Parser.getTok().is(AsmToken::Identifier))
3608    Reg = tryParseRegister();
3609  if (Reg == -1) {
3610    if (!haveEaten)
3611      return MatchOperand_NoMatch;
3612    Error(Parser.getTok().getLoc(), "register expected");
3613    return MatchOperand_ParseFail;
3614  }
3615  SMLoc E = Parser.getTok().getLoc();
3616
3617  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3618  unsigned ShiftImm = 0;
3619  if (Parser.getTok().is(AsmToken::Comma)) {
3620    Parser.Lex(); // Eat the ','.
3621    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3622      return MatchOperand_ParseFail;
3623  }
3624
3625  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3626                                                  ShiftImm, S, E));
3627
3628  return MatchOperand_Success;
3629}
3630
3631ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3632parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3633  // Check for a post-index addressing register operand. Specifically:
3634  // am3offset := '+' register
3635  //              | '-' register
3636  //              | register
3637  //              | # imm
3638  //              | # + imm
3639  //              | # - imm
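  // For example (editorial illustration): the "#4", "#-4", "r2", or "-r2" in a
  // post-indexed access such as "ldrd r0, r1, [r2], #-8".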
3640
3641  // This method must return MatchOperand_NoMatch without consuming any tokens
3642  // in the case where there is no match, as other alternatives will be tried
3643  // by other parse methods.
3644  AsmToken Tok = Parser.getTok();
3645  SMLoc S = Tok.getLoc();
3646
3647  // Do immediates first, as we always parse those if we have a '#'.
3648  if (Parser.getTok().is(AsmToken::Hash) ||
3649      Parser.getTok().is(AsmToken::Dollar)) {
3650    Parser.Lex(); // Eat the '#'.
3651    // Explicitly look for a '-', as we need to encode negative zero
3652    // differently.
3653    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3654    const MCExpr *Offset;
3655    if (getParser().ParseExpression(Offset))
3656      return MatchOperand_ParseFail;
3657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3658    if (!CE) {
3659      Error(S, "constant expression expected");
3660      return MatchOperand_ParseFail;
3661    }
3662    SMLoc E = Tok.getLoc();
3663    // Negative zero is encoded as the flag value INT32_MIN.
3664    int32_t Val = CE->getValue();
3665    if (isNegative && Val == 0)
3666      Val = INT32_MIN;
3667
3668    Operands.push_back(
3669      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3670
3671    return MatchOperand_Success;
3672  }
3673
3674
3675  bool haveEaten = false;
3676  bool isAdd = true;
3677  int Reg = -1;
3678  if (Tok.is(AsmToken::Plus)) {
3679    Parser.Lex(); // Eat the '+' token.
3680    haveEaten = true;
3681  } else if (Tok.is(AsmToken::Minus)) {
3682    Parser.Lex(); // Eat the '-' token.
3683    isAdd = false;
3684    haveEaten = true;
3685  }
3686  if (Parser.getTok().is(AsmToken::Identifier))
3687    Reg = tryParseRegister();
3688  if (Reg == -1) {
3689    if (!haveEaten)
3690      return MatchOperand_NoMatch;
3691    Error(Parser.getTok().getLoc(), "register expected");
3692    return MatchOperand_ParseFail;
3693  }
3694  SMLoc E = Parser.getTok().getLoc();
3695
3696  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3697                                                  0, S, E));
3698
3699  return MatchOperand_Success;
3700}
3701
3702/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3703/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3704/// when they refer to multiple MIOperands inside a single one.
3705bool ARMAsmParser::
3706cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3707             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3708  // Rt, Rt2
3709  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3710  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3711  // Create a writeback register dummy placeholder.
3712  Inst.addOperand(MCOperand::CreateReg(0));
3713  // addr
3714  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3715  // pred
3716  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3717  return true;
3718}
3719
3720/// cvtT2StrdPre - Convert parsed operands to MCInst.
3721/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3722/// when they refer to multiple MIOperands inside a single one.
3723bool ARMAsmParser::
3724cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3725             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3726  // Create a writeback register dummy placeholder.
3727  Inst.addOperand(MCOperand::CreateReg(0));
3728  // Rt, Rt2
3729  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3730  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3731  // addr
3732  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3733  // pred
3734  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3735  return true;
3736}
3737
3738/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3739/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3740/// when they refer to multiple MIOperands inside a single one.
3741bool ARMAsmParser::
3742cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3743                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3744  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3745
3746  // Create a writeback register dummy placeholder.
3747  Inst.addOperand(MCOperand::CreateImm(0));
3748
3749  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3750  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3751  return true;
3752}
3753
3754/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3755/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3756/// when they refer to multiple MIOperands inside a single one.
3757bool ARMAsmParser::
3758cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3759                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3760  // Create a writeback register dummy placeholder.
3761  Inst.addOperand(MCOperand::CreateImm(0));
3762  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3763  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3764  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3765  return true;
3766}
3767
3768/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3769/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3770/// when they refer to multiple MIOperands inside a single one.
3771bool ARMAsmParser::
3772cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3773                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3774  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3775
3776  // Create a writeback register dummy placeholder.
3777  Inst.addOperand(MCOperand::CreateImm(0));
3778
3779  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3780  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3781  return true;
3782}
3783
3784/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3785/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3786/// when they refer to multiple MIOperands inside a single one.
3787bool ARMAsmParser::
3788cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3789                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3790  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3791
3792  // Create a writeback register dummy placeholder.
3793  Inst.addOperand(MCOperand::CreateImm(0));
3794
3795  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3796  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3797  return true;
3798}
3799
3800
3801/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3802/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3803/// when they refer to multiple MIOperands inside a single one.
3804bool ARMAsmParser::
3805cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3806                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3807  // Create a writeback register dummy placeholder.
3808  Inst.addOperand(MCOperand::CreateImm(0));
3809  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3810  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3811  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3812  return true;
3813}
3814
3815/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3816/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3817/// when they refer to multiple MIOperands inside a single one.
3818bool ARMAsmParser::
3819cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3820                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3821  // Create a writeback register dummy placeholder.
3822  Inst.addOperand(MCOperand::CreateImm(0));
3823  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3824  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3825  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3826  return true;
3827}
3828
3829/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3830/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3831/// when they refer to multiple MIOperands inside a single one.
3832bool ARMAsmParser::
3833cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3834                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3835  // Create a writeback register dummy placeholder.
3836  Inst.addOperand(MCOperand::CreateImm(0));
3837  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3838  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3839  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3840  return true;
3841}
3842
3843/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3844/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3845/// when they refer to multiple MIOperands inside a single one.
3846bool ARMAsmParser::
3847cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3848                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3849  // Rt
3850  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3851  // Create a writeback register dummy placeholder.
3852  Inst.addOperand(MCOperand::CreateImm(0));
3853  // addr
3854  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3855  // offset
3856  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3857  // pred
3858  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3859  return true;
3860}
3861
3862/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3863/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3864/// when they refer to multiple MIOperands inside a single one.
3865bool ARMAsmParser::
3866cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3867                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3868  // Rt
3869  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3870  // Create a writeback register dummy placeholder.
3871  Inst.addOperand(MCOperand::CreateImm(0));
3872  // addr
3873  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3874  // offset
3875  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3876  // pred
3877  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3878  return true;
3879}
3880
3881/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3882/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3883/// when they refer to multiple MIOperands inside a single one.
3884bool ARMAsmParser::
3885cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3886                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3887  // Create a writeback register dummy placeholder.
3888  Inst.addOperand(MCOperand::CreateImm(0));
3889  // Rt
3890  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3891  // addr
3892  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3893  // offset
3894  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3895  // pred
3896  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3897  return true;
3898}
3899
3900/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3901/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3902/// when they refer to multiple MIOperands inside a single one.
3903bool ARMAsmParser::
3904cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3905                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3906  // Create a writeback register dummy placeholder.
3907  Inst.addOperand(MCOperand::CreateImm(0));
3908  // Rt
3909  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3910  // addr
3911  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3912  // offset
3913  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3914  // pred
3915  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3916  return true;
3917}
3918
3919/// cvtLdrdPre - Convert parsed operands to MCInst.
3920/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3921/// when they refer to multiple MIOperands inside a single one.
3922bool ARMAsmParser::
3923cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3924           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3925  // Rt, Rt2
3926  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3927  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3928  // Create a writeback register dummy placeholder.
3929  Inst.addOperand(MCOperand::CreateImm(0));
3930  // addr
3931  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3932  // pred
3933  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3934  return true;
3935}
3936
3937/// cvtStrdPre - Convert parsed operands to MCInst.
3938/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3939/// when they refer to multiple MIOperands inside a single one.
3940bool ARMAsmParser::
3941cvtStrdPre(MCInst &Inst, unsigned Opcode,
3942           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3943  // Create a writeback register dummy placeholder.
3944  Inst.addOperand(MCOperand::CreateImm(0));
3945  // Rt, Rt2
3946  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3947  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3948  // addr
3949  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3950  // pred
3951  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3952  return true;
3953}
3954
3955/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3956/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3957/// when they refer to multiple MIOperands inside a single one.
3958bool ARMAsmParser::
3959cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3960                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3961  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3962  // Create a writeback register dummy placeholder.
3963  Inst.addOperand(MCOperand::CreateImm(0));
3964  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3965  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3966  return true;
3967}
3968
3969/// cvtThumbMultiply - Convert parsed operands to MCInst.
3970/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3971/// when they refer to multiple MIOperands inside a single one.
3972bool ARMAsmParser::
3973cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3974           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3975  // The second source operand must be the same register as the destination
3976  // operand.
3977  if (Operands.size() == 6 &&
3978      (((ARMOperand*)Operands[3])->getReg() !=
3979       ((ARMOperand*)Operands[5])->getReg()) &&
3980      (((ARMOperand*)Operands[3])->getReg() !=
3981       ((ARMOperand*)Operands[4])->getReg())) {
3982    Error(Operands[3]->getStartLoc(),
3983          "destination register must match source register");
3984    return false;
3985  }
3986  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3987  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3988  // If we have a three-operand form, make sure to set Rn to be the operand
3989  // that isn't the same as Rd.
3990  unsigned RegOp = 4;
3991  if (Operands.size() == 6 &&
3992      ((ARMOperand*)Operands[4])->getReg() ==
3993        ((ARMOperand*)Operands[3])->getReg())
3994    RegOp = 5;
3995  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3996  Inst.addOperand(Inst.getOperand(0));
3997  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3998
3999  return true;
4000}
4001
4002bool ARMAsmParser::
4003cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4004              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4005  // Vd
4006  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4007  // Create a writeback register dummy placeholder.
4008  Inst.addOperand(MCOperand::CreateImm(0));
4009  // Vn
4010  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4011  // pred
4012  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4013  return true;
4014}
4015
4016bool ARMAsmParser::
4017cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4018                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4019  // Vd
4020  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4021  // Create a writeback register dummy placeholder.
4022  Inst.addOperand(MCOperand::CreateImm(0));
4023  // Vn
4024  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4025  // Vm
4026  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4027  // pred
4028  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4029  return true;
4030}
4031
4032bool ARMAsmParser::
4033cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4034              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4035  // Create a writeback register dummy placeholder.
4036  Inst.addOperand(MCOperand::CreateImm(0));
4037  // Vn
4038  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4039  // Vt
4040  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4041  // pred
4042  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4043  return true;
4044}
4045
4046bool ARMAsmParser::
4047cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4048                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4049  // Create a writeback register dummy placeholder.
4050  Inst.addOperand(MCOperand::CreateImm(0));
4051  // Vn
4052  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4053  // Vm
4054  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4055  // Vt
4056  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4057  // pred
4058  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4059  return true;
4060}
4061
4062/// Parse an ARM memory expression; return false if successful, otherwise emit an
4063/// error and return true.  The first token must be a '[' when called.
4064bool ARMAsmParser::
4065parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4066  SMLoc S, E;
4067  assert(Parser.getTok().is(AsmToken::LBrac) &&
4068         "Token is not a Left Bracket");
4069  S = Parser.getTok().getLoc();
4070  Parser.Lex(); // Eat left bracket token.
4071
4072  const AsmToken &BaseRegTok = Parser.getTok();
4073  int BaseRegNum = tryParseRegister();
4074  if (BaseRegNum == -1)
4075    return Error(BaseRegTok.getLoc(), "register expected");
4076
4077  // The next token must either be a comma or a closing bracket.
4078  const AsmToken &Tok = Parser.getTok();
4079  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4080    return Error(Tok.getLoc(), "malformed memory operand");
4081
4082  if (Tok.is(AsmToken::RBrac)) {
4083    E = Tok.getLoc();
4084    Parser.Lex(); // Eat right bracket token.
4085
4086    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4087                                             0, 0, false, S, E));
4088
4089    // If there's a pre-indexing writeback marker, '!', just add it as a token
4090    // operand. It's rather odd, but syntactically valid.
4091    if (Parser.getTok().is(AsmToken::Exclaim)) {
4092      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4093      Parser.Lex(); // Eat the '!'.
4094    }
4095
4096    return false;
4097  }
4098
4099  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4100  Parser.Lex(); // Eat the comma.
4101
4102  // If we have a ':', it's an alignment specifier.
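  // (Editorial example: the ":128" in "[r0, :128]"; the constant is the
  // alignment in bits and is converted to bytes below.)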
4103  if (Parser.getTok().is(AsmToken::Colon)) {
4104    Parser.Lex(); // Eat the ':'.
4105    E = Parser.getTok().getLoc();
4106
4107    const MCExpr *Expr;
4108    if (getParser().ParseExpression(Expr))
4109     return true;
4110
4111    // The expression has to be a constant. Memory references with relocations
4112    // don't come through here, as they use the <label> forms of the relevant
4113    // instructions.
4114    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4115    if (!CE)
4116      return Error (E, "constant expression expected");
4117
4118    unsigned Align = 0;
4119    switch (CE->getValue()) {
4120    default:
4121      return Error(E,
4122                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4123    case 16:  Align = 2; break;
4124    case 32:  Align = 4; break;
4125    case 64:  Align = 8; break;
4126    case 128: Align = 16; break;
4127    case 256: Align = 32; break;
4128    }
4129
4130    // Now we should have the closing ']'
4131    E = Parser.getTok().getLoc();
4132    if (Parser.getTok().isNot(AsmToken::RBrac))
4133      return Error(E, "']' expected");
4134    Parser.Lex(); // Eat right bracket token.
4135
4136    // Don't worry about range checking the value here. That's handled by
4137    // the is*() predicates.
4138    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4139                                             ARM_AM::no_shift, 0, Align,
4140                                             false, S, E));
4141
4142    // If there's a pre-indexing writeback marker, '!', just add it as a token
4143    // operand.
4144    if (Parser.getTok().is(AsmToken::Exclaim)) {
4145      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4146      Parser.Lex(); // Eat the '!'.
4147    }
4148
4149    return false;
4150  }
4151
4152  // If we have a '#', it's an immediate offset, else assume it's a register
4153  // offset. Be friendly and also accept a plain integer (without a leading
4154  // hash) for gas compatibility.
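  // (Editorial example: "[r0, #16]" and "[r0, 16]" are both accepted here, as
  // is a '$' in place of the '#'.)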
4155  if (Parser.getTok().is(AsmToken::Hash) ||
4156      Parser.getTok().is(AsmToken::Dollar) ||
4157      Parser.getTok().is(AsmToken::Integer)) {
4158    if (Parser.getTok().isNot(AsmToken::Integer))
4159      Parser.Lex(); // Eat the '#'.
4160    E = Parser.getTok().getLoc();
4161
4162    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4163    const MCExpr *Offset;
4164    if (getParser().ParseExpression(Offset))
4165     return true;
4166
4167    // The expression has to be a constant. Memory references with relocations
4168    // don't come through here, as they use the <label> forms of the relevant
4169    // instructions.
4170    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4171    if (!CE)
4172      return Error (E, "constant expression expected");
4173
4174    // If the constant was #-0, represent it as INT32_MIN.
4175    int32_t Val = CE->getValue();
4176    if (isNegative && Val == 0)
4177      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4178
4179    // Now we should have the closing ']'
4180    E = Parser.getTok().getLoc();
4181    if (Parser.getTok().isNot(AsmToken::RBrac))
4182      return Error(E, "']' expected");
4183    Parser.Lex(); // Eat right bracket token.
4184
4185    // Don't worry about range checking the value here. That's handled by
4186    // the is*() predicates.
4187    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4188                                             ARM_AM::no_shift, 0, 0,
4189                                             false, S, E));
4190
4191    // If there's a pre-indexing writeback marker, '!', just add it as a token
4192    // operand.
4193    if (Parser.getTok().is(AsmToken::Exclaim)) {
4194      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4195      Parser.Lex(); // Eat the '!'.
4196    }
4197
4198    return false;
4199  }
4200
4201  // The register offset is optionally preceded by a '+' or '-'
4202  bool isNegative = false;
4203  if (Parser.getTok().is(AsmToken::Minus)) {
4204    isNegative = true;
4205    Parser.Lex(); // Eat the '-'.
4206  } else if (Parser.getTok().is(AsmToken::Plus)) {
4207    // Nothing to do.
4208    Parser.Lex(); // Eat the '+'.
4209  }
4210
4211  E = Parser.getTok().getLoc();
4212  int OffsetRegNum = tryParseRegister();
4213  if (OffsetRegNum == -1)
4214    return Error(E, "register expected");
4215
4216  // If there's a shift operator, handle it.
4217  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4218  unsigned ShiftImm = 0;
4219  if (Parser.getTok().is(AsmToken::Comma)) {
4220    Parser.Lex(); // Eat the ','.
4221    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4222      return true;
4223  }
4224
4225  // Now we should have the closing ']'
4226  E = Parser.getTok().getLoc();
4227  if (Parser.getTok().isNot(AsmToken::RBrac))
4228    return Error(E, "']' expected");
4229  Parser.Lex(); // Eat right bracket token.
4230
4231  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4232                                           ShiftType, ShiftImm, 0, isNegative,
4233                                           S, E));
4234
4235  // If there's a pre-indexing writeback marker, '!', just add it as a token
4236  // operand.
4237  if (Parser.getTok().is(AsmToken::Exclaim)) {
4238    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4239    Parser.Lex(); // Eat the '!'.
4240  }
4241
4242  return false;
4243}
4244
4245/// parseMemRegOffsetShift - one of these two:
4246///   ( lsl | lsr | asr | ror ) , # shift_amount
4247///   rrx
4248/// Returns true on error; returns false if the shift was parsed successfully.
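/// For example (editorial illustration): the ", lsl #2" or ", rrx" in a memory
/// operand such as "ldr r0, [r1, r2, lsl #2]".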
4249bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4250                                          unsigned &Amount) {
4251  SMLoc Loc = Parser.getTok().getLoc();
4252  const AsmToken &Tok = Parser.getTok();
4253  if (Tok.isNot(AsmToken::Identifier))
4254    return true;
4255  StringRef ShiftName = Tok.getString();
4256  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4257      ShiftName == "asl" || ShiftName == "ASL")
4258    St = ARM_AM::lsl;
4259  else if (ShiftName == "lsr" || ShiftName == "LSR")
4260    St = ARM_AM::lsr;
4261  else if (ShiftName == "asr" || ShiftName == "ASR")
4262    St = ARM_AM::asr;
4263  else if (ShiftName == "ror" || ShiftName == "ROR")
4264    St = ARM_AM::ror;
4265  else if (ShiftName == "rrx" || ShiftName == "RRX")
4266    St = ARM_AM::rrx;
4267  else
4268    return Error(Loc, "illegal shift operator");
4269  Parser.Lex(); // Eat shift type token.
4270
4271  // rrx stands alone.
4272  Amount = 0;
4273  if (St != ARM_AM::rrx) {
4274    Loc = Parser.getTok().getLoc();
4275    // A '#' and a shift amount.
4276    const AsmToken &HashTok = Parser.getTok();
4277    if (HashTok.isNot(AsmToken::Hash) &&
4278        HashTok.isNot(AsmToken::Dollar))
4279      return Error(HashTok.getLoc(), "'#' expected");
4280    Parser.Lex(); // Eat hash token.
4281
4282    const MCExpr *Expr;
4283    if (getParser().ParseExpression(Expr))
4284      return true;
4285    // Range check the immediate.
4286    // lsl, ror: 0 <= imm <= 31
4287    // lsr, asr: 0 <= imm <= 32
4288    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4289    if (!CE)
4290      return Error(Loc, "shift amount must be an immediate");
4291    int64_t Imm = CE->getValue();
4292    if (Imm < 0 ||
4293        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4294        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4295      return Error(Loc, "immediate shift value out of range");
4296    Amount = Imm;
4297  }
4298
4299  return false;
4300}
4301
4302/// parseFPImm - A floating point immediate expression operand.
4303ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4304parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4305  // Anything that can accept a floating point constant as an operand
4306  // needs to go through here, as the regular ParseExpression is
4307  // integer only.
4308  //
4309  // This routine still creates a generic Immediate operand, containing
4310  // a bitcast of the floating point value. The various operands
4311  // that accept floats can check whether the value is valid for them
4312  // via the standard is*() predicates.
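  //
  // For example, "vmov.f32 s0, #1.0" produces an immediate holding
  // 0x3f800000 (the IEEE-754 single-precision bit pattern of 1.0), and
  // "vmov.f32 s0, #-1.0" additionally sets the sign bit.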
4313
4314  SMLoc S = Parser.getTok().getLoc();
4315
4316  if (Parser.getTok().isNot(AsmToken::Hash) &&
4317      Parser.getTok().isNot(AsmToken::Dollar))
4318    return MatchOperand_NoMatch;
4319
4320  // Disambiguate the VMOV forms that can accept an FP immediate.
4321  // vmov.f32 <sreg>, #imm
4322  // vmov.f64 <dreg>, #imm
4323  // vmov.f32 <dreg>, #imm  @ vector f32x2
4324  // vmov.f32 <qreg>, #imm  @ vector f32x4
4325  //
4326  // There are also the NEON VMOV instructions which expect an
4327  // integer constant. Make sure we don't try to parse an FPImm
4328  // for these:
4329  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4330  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4331  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4332                           TyOp->getToken() != ".f64"))
4333    return MatchOperand_NoMatch;
4334
4335  Parser.Lex(); // Eat the '#'.
4336
4337  // Handle negation, as that still comes through as a separate token.
4338  bool isNegative = false;
4339  if (Parser.getTok().is(AsmToken::Minus)) {
4340    isNegative = true;
4341    Parser.Lex();
4342  }
4343  const AsmToken &Tok = Parser.getTok();
4344  SMLoc Loc = Tok.getLoc();
4345  if (Tok.is(AsmToken::Real)) {
4346    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4347    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4348    // If we had a '-' in front, toggle the sign bit.
4349    IntVal ^= (uint64_t)isNegative << 31;
4350    Parser.Lex(); // Eat the token.
4351    Operands.push_back(ARMOperand::CreateImm(
4352          MCConstantExpr::Create(IntVal, getContext()),
4353          S, Parser.getTok().getLoc()));
4354    return MatchOperand_Success;
4355  }
4356  // Also handle plain integers. Instructions which allow floating point
4357  // immediates also allow a raw encoded 8-bit value.
4358  if (Tok.is(AsmToken::Integer)) {
4359    int64_t Val = Tok.getIntVal();
4360    Parser.Lex(); // Eat the token.
4361    if (Val > 255 || Val < 0) {
4362      Error(Loc, "encoded floating point value out of range");
4363      return MatchOperand_ParseFail;
4364    }
4365    double RealVal = ARM_AM::getFPImmFloat(Val);
4366    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4367    Operands.push_back(ARMOperand::CreateImm(
4368        MCConstantExpr::Create(Val, getContext()), S,
4369        Parser.getTok().getLoc()));
4370    return MatchOperand_Success;
4371  }
4372
4373  Error(Loc, "invalid floating point immediate");
4374  return MatchOperand_ParseFail;
4375}
4376
4377/// Parse an ARM instruction operand.  For now this parses the operand regardless
4378/// of the mnemonic.
4379bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4380                                StringRef Mnemonic) {
4381  SMLoc S, E;
4382
4383  // Check if the current operand has a custom associated parser, if so, try to
4384  // custom parse the operand, or fall back to the general approach.
4385  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4386  if (ResTy == MatchOperand_Success)
4387    return false;
4388  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4389  // there was a match, but an error occurred, in which case, just return that
4390  // the operand parsing failed.
4391  if (ResTy == MatchOperand_ParseFail)
4392    return true;
4393
4394  switch (getLexer().getKind()) {
4395  default:
4396    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4397    return true;
4398  case AsmToken::Identifier: {
4399    if (!tryParseRegisterWithWriteBack(Operands))
4400      return false;
4401    int Res = tryParseShiftRegister(Operands);
4402    if (Res == 0) // success
4403      return false;
4404    else if (Res == -1) // irrecoverable error
4405      return true;
4406    // If this is VMRS, check for the apsr_nzcv operand.
4407    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4408      S = Parser.getTok().getLoc();
4409      Parser.Lex();
4410      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4411      return false;
4412    }
4413
4414    // Fall through for the Identifier case that is not a register or a
4415    // special name.
4416  }
4417  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4418  case AsmToken::Integer: // things like 1f and 2b as branch targets
4419  case AsmToken::String:  // quoted label names.
4420  case AsmToken::Dot: {   // . as a branch target
4421    // This was not a register so parse other operands that start with an
4422    // identifier (like labels) as expressions and create them as immediates.
4423    const MCExpr *IdVal;
4424    S = Parser.getTok().getLoc();
4425    if (getParser().ParseExpression(IdVal))
4426      return true;
4427    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4428    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4429    return false;
4430  }
4431  case AsmToken::LBrac:
4432    return parseMemory(Operands);
4433  case AsmToken::LCurly:
4434    return parseRegisterList(Operands);
4435  case AsmToken::Dollar:
4436  case AsmToken::Hash: {
4437    // #42 -> immediate.
4438    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4439    S = Parser.getTok().getLoc();
4440    Parser.Lex();
4441    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4442    const MCExpr *ImmVal;
4443    if (getParser().ParseExpression(ImmVal))
4444      return true;
4445    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4446    if (CE) {
4447      int32_t Val = CE->getValue();
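      // The expression "-0" evaluates to plain 0, so a literal "#-0" would
      // otherwise be indistinguishable from "#0". Represent it as INT32_MIN
      // so later code that cares about the sign of a zero offset can still
      // tell the two apart.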
4448      if (isNegative && Val == 0)
4449        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4450    }
4451    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4452    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4453    return false;
4454  }
4455  case AsmToken::Colon: {
4456    // ":lower16:" and ":upper16:" expression prefixes
4457    // FIXME: Check it's an expression prefix,
4458    // e.g. (FOO - :lower16:BAR) isn't legal.
4459    ARMMCExpr::VariantKind RefKind;
4460    if (parsePrefix(RefKind))
4461      return true;
4462
4463    const MCExpr *SubExprVal;
4464    if (getParser().ParseExpression(SubExprVal))
4465      return true;
4466
4467    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4468                                                   getContext());
4469    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4470    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4471    return false;
4472  }
4473  }
4474}
4475
4476// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4477//  :lower16: and :upper16:.
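//
// For example, "movw r0, :lower16:_foo" and "movt r0, :upper16:_foo" use
// these prefixes to materialize the low and high halves of the address of
// _foo.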
4478bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4479  RefKind = ARMMCExpr::VK_ARM_None;
4480
4481  // :lower16: and :upper16: modifiers
4482  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4483  Parser.Lex(); // Eat ':'
4484
4485  if (getLexer().isNot(AsmToken::Identifier)) {
4486    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4487    return true;
4488  }
4489
4490  StringRef IDVal = Parser.getTok().getIdentifier();
4491  if (IDVal == "lower16") {
4492    RefKind = ARMMCExpr::VK_ARM_LO16;
4493  } else if (IDVal == "upper16") {
4494    RefKind = ARMMCExpr::VK_ARM_HI16;
4495  } else {
4496    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4497    return true;
4498  }
4499  Parser.Lex();
4500
4501  if (getLexer().isNot(AsmToken::Colon)) {
4502    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4503    return true;
4504  }
4505  Parser.Lex(); // Eat the last ':'
4506  return false;
4507}
4508
4509/// \brief Given a mnemonic, split out possible predication code and carry
4510/// setting letters to form a canonical mnemonic and flags.
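///
/// For example, "addseq" is split into the canonical mnemonic "add" with
/// PredicationCode == ARMCC::EQ and CarrySetting == true, while "cpsie"
/// becomes "cps" with ProcessorIMod == ARM_PROC::IE.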
4511//
4512// FIXME: Would be nice to autogen this.
4513// FIXME: This is a bit of a maze of special cases.
4514StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4515                                      unsigned &PredicationCode,
4516                                      bool &CarrySetting,
4517                                      unsigned &ProcessorIMod,
4518                                      StringRef &ITMask) {
4519  PredicationCode = ARMCC::AL;
4520  CarrySetting = false;
4521  ProcessorIMod = 0;
4522
4523  // Ignore some mnemonics we know aren't predicated forms.
4524  //
4525  // FIXME: Would be nice to autogen this.
4526  if ((Mnemonic == "movs" && isThumb()) ||
4527      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4528      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4529      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4530      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4531      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4532      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4533      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4534      Mnemonic == "fmuls")
4535    return Mnemonic;
4536
4537  // First, split out any predication code. Ignore mnemonics we know aren't
4538  // predicated but do have a carry-setting form and so weren't caught above.
4539  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4540      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4541      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4542      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4543    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4544      .Case("eq", ARMCC::EQ)
4545      .Case("ne", ARMCC::NE)
4546      .Case("hs", ARMCC::HS)
4547      .Case("cs", ARMCC::HS)
4548      .Case("lo", ARMCC::LO)
4549      .Case("cc", ARMCC::LO)
4550      .Case("mi", ARMCC::MI)
4551      .Case("pl", ARMCC::PL)
4552      .Case("vs", ARMCC::VS)
4553      .Case("vc", ARMCC::VC)
4554      .Case("hi", ARMCC::HI)
4555      .Case("ls", ARMCC::LS)
4556      .Case("ge", ARMCC::GE)
4557      .Case("lt", ARMCC::LT)
4558      .Case("gt", ARMCC::GT)
4559      .Case("le", ARMCC::LE)
4560      .Case("al", ARMCC::AL)
4561      .Default(~0U);
4562    if (CC != ~0U) {
4563      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4564      PredicationCode = CC;
4565    }
4566  }
4567
4568  // Next, determine if we have a carry setting bit. We explicitly ignore all
4569  // the instructions we know end in 's'.
4570  if (Mnemonic.endswith("s") &&
4571      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4572        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4573        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4574        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4575        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4576        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4577        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4578        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
4579        (Mnemonic == "movs" && isThumb()))) {
4580    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4581    CarrySetting = true;
4582  }
4583
4584  // The "cps" instruction can have an interrupt mode operand which is glued into
4585  // the mnemonic. Check if this is the case, split it off and parse the imod operand.
4586  if (Mnemonic.startswith("cps")) {
4587    // Split out any imod code.
4588    unsigned IMod =
4589      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4590      .Case("ie", ARM_PROC::IE)
4591      .Case("id", ARM_PROC::ID)
4592      .Default(~0U);
4593    if (IMod != ~0U) {
4594      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4595      ProcessorIMod = IMod;
4596    }
4597  }
4598
4599  // The "it" instruction has the condition mask on the end of the mnemonic.
4600  if (Mnemonic.startswith("it")) {
4601    ITMask = Mnemonic.slice(2, Mnemonic.size());
4602    Mnemonic = Mnemonic.slice(0, 2);
4603  }
4604
4605  return Mnemonic;
4606}
4607
4608/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4609/// inclusion of carry set or predication code operands.
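///
/// For example, "add" can accept both an 's' suffix and a condition code,
/// while "cbz" accepts neither.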
4610//
4611// FIXME: It would be nice to autogen this.
4612void ARMAsmParser::
4613getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4614                      bool &CanAcceptPredicationCode) {
4615  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4616      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4617      Mnemonic == "add" || Mnemonic == "adc" ||
4618      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4619      Mnemonic == "orr" || Mnemonic == "mvn" ||
4620      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4621      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4622      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4623                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4624                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4625    CanAcceptCarrySet = true;
4626  } else
4627    CanAcceptCarrySet = false;
4628
4629  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4630      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4631      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4632      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4633      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4634      (Mnemonic == "clrex" && !isThumb()) ||
4635      (Mnemonic == "nop" && isThumbOne()) ||
4636      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4637        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4638        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4639      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4640       !isThumb()) ||
4641      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4642    CanAcceptPredicationCode = false;
4643  } else
4644    CanAcceptPredicationCode = true;
4645
4646  if (isThumb()) {
4647    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4648        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4649      CanAcceptPredicationCode = false;
4650  }
4651}
4652
4653bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4654                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4655  // FIXME: This is all horribly hacky. We really need a better way to deal
4656  // with optional operands like this in the matcher table.
4657
4658  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4659  // another does not. Specifically, the MOVW instruction does not. So we
4660  // special case it here and remove the defaulted (non-setting) cc_out
4661  // operand if that's the instruction we're trying to match.
4662  //
4663  // We do this as post-processing of the explicit operands rather than just
4664  // conditionally adding the cc_out in the first place because we need
4665  // to check the type of the parsed immediate operand.
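  //
  // For example, "mov r0, #0xabcd" in ARM mode parses with a defaulted
  // cc_out, but the immediate is not a valid modified-immediate, so only
  // the MOVW encoding can match and the cc_out operand is dropped here.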
4666  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4667      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4668      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4669      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4670    return true;
4671
4672  // Register-register 'add' for thumb does not have a cc_out operand
4673  // when there are only two register operands.
4674  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4675      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4676      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4677      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4678    return true;
4679  // Register-register 'add' for thumb does not have a cc_out operand
4680  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4681  // have to check the immediate range here since Thumb2 has a variant
4682  // that can handle a different range and has a cc_out operand.
4683  if (((isThumb() && Mnemonic == "add") ||
4684       (isThumbTwo() && Mnemonic == "sub")) &&
4685      Operands.size() == 6 &&
4686      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4687      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4688      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4689      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4690      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4691       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4692    return true;
4693  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4694  // imm0_4095 variant. That's the least-preferred variant when
4695  // selecting via the generic "add" mnemonic, so to know that we
4696  // should remove the cc_out operand, we have to explicitly check that
4697  // it's not one of the other variants. Ugh.
4698  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4699      Operands.size() == 6 &&
4700      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4701      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4702      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4703    // Nest conditions rather than one big 'if' statement for readability.
4704    //
4705    // If either register is a high reg, it's either one of the SP
4706    // variants (handled above) or a 32-bit encoding, so we just
4707    // check against T3. If the second register is the PC, this is an
4708    // alternate form of ADR, which uses encoding T4, so check for that too.
4709    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4710         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4711        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4712        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4713      return false;
4714    // If both registers are low, we're in an IT block, and the immediate is
4715    // in range, we should use encoding T1 instead, which has a cc_out.
4716    if (inITBlock() &&
4717        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4718        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4719        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4720      return false;
4721
4722    // Otherwise, we use encoding T4, which does not have a cc_out
4723    // operand.
4724    return true;
4725  }
4726
4727  // The thumb2 multiply instruction doesn't have a CCOut register, so
4728  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4729  // use the 16-bit encoding or not.
4730  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4731      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4732      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4733      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4734      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4735      // If the registers aren't low regs, the destination reg isn't the
4736      // same as one of the source regs, or the cc_out operand is zero
4737      // outside of an IT block, we have to use the 32-bit encoding, so
4738      // remove the cc_out operand.
4739      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4740       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4741       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4742       !inITBlock() ||
4743       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4744        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4745        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4746        static_cast<ARMOperand*>(Operands[4])->getReg())))
4747    return true;
4748
4749  // Also check the 'mul' syntax variant that doesn't specify an explicit
4750  // destination register.
4751  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4752      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4753      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4754      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4755      // If the registers aren't low regs or the cc_out operand is zero
4756      // outside of an IT block, we have to use the 32-bit encoding, so
4757      // remove the cc_out operand.
4758      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4759       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4760       !inITBlock()))
4761    return true;
4762
4765  // Register-register 'add/sub' for thumb does not have a cc_out operand
4766  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4767  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4768  // right, this will result in better diagnostics (which operand is off)
4769  // anyway.
4770  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4771      (Operands.size() == 5 || Operands.size() == 6) &&
4772      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4773      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4774      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4775    return true;
4776
4777  return false;
4778}
4779
4780static bool isDataTypeToken(StringRef Tok) {
4781  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4782    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4783    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4784    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4785    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4786    Tok == ".f" || Tok == ".d";
4787}
4788
4789// FIXME: This bit should probably be handled via an explicit match class
4790// in the .td files that matches the suffix instead of having it be
4791// a literal string token the way it is now.
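//
// For example, the ".f64" in "vldm.f64 r0!, {d0-d3}" carries no encoding
// information and is simply dropped when the operand tokens are built.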
4792static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4793  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4794}
4795
4796static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4797/// Parse an ARM instruction mnemonic followed by its operands.
4798bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4799                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4800  // Apply mnemonic aliases before doing anything else, as the destination
4801  // mnemonic may include suffixes and we want to handle them normally.
4802  // The generic tblgen'erated code does this later, at the start of
4803  // MatchInstructionImpl(), but that's too late for aliases that include
4804  // any sort of suffix.
4805  unsigned AvailableFeatures = getAvailableFeatures();
4806  applyMnemonicAliases(Name, AvailableFeatures);
4807
4808  // First check for the ARM-specific .req directive.
4809  if (Parser.getTok().is(AsmToken::Identifier) &&
4810      Parser.getTok().getIdentifier() == ".req") {
4811    parseDirectiveReq(Name, NameLoc);
4812    // We always return 'error' for this, as we're done with this
4813    // statement and don't need to match an instruction.
4814    return true;
4815  }
4816
4817  // Create the leading tokens for the mnemonic, split by '.' characters.
4818  size_t Start = 0, Next = Name.find('.');
4819  StringRef Mnemonic = Name.slice(Start, Next);
4820
4821  // Split out the predication code and carry setting flag from the mnemonic.
4822  unsigned PredicationCode;
4823  unsigned ProcessorIMod;
4824  bool CarrySetting;
4825  StringRef ITMask;
4826  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4827                           ProcessorIMod, ITMask);
4828
4829  // In Thumb1, only the branch (B) instruction can be predicated.
4830  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4831    Parser.EatToEndOfStatement();
4832    return Error(NameLoc, "conditional execution not supported in Thumb1");
4833  }
4834
4835  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4836
4837  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4838  // is the mask as it will be for the IT encoding if the conditional
4839  // encoding has a '1' as its bit 0 (i.e. 't' ==> '1'). In the case
4840  // where the conditional bit0 is zero, the instruction post-processing
4841  // will adjust the mask accordingly.
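  //
  // For example, "itte eq" has ITMask == "te": starting from Mask = 0b1000,
  // the loop below produces Mask == 0b1010 (second instruction 'then',
  // third instruction 'else', followed by the terminating '1' bit).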
4842  if (Mnemonic == "it") {
4843    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4844    if (ITMask.size() > 3) {
4845      Parser.EatToEndOfStatement();
4846      return Error(Loc, "too many conditions on IT instruction");
4847    }
4848    unsigned Mask = 8;
4849    for (unsigned i = ITMask.size(); i != 0; --i) {
4850      char pos = ITMask[i - 1];
4851      if (pos != 't' && pos != 'e') {
4852        Parser.EatToEndOfStatement();
4853        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4854      }
4855      Mask >>= 1;
4856      if (pos == 't')
4857        Mask |= 8;
4858    }
4859    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4860  }
4861
4862  // FIXME: This is all a pretty gross hack. We should automatically handle
4863  // optional operands like this via tblgen.
4864
4865  // Next, add the CCOut and ConditionCode operands, if needed.
4866  //
4867  // For mnemonics which can ever incorporate a carry setting bit or predication
4868  // code, our matching model involves us always generating CCOut and
4869  // ConditionCode operands to match the mnemonic "as written" and then we let
4870  // the matcher deal with finding the right instruction or generating an
4871  // appropriate error.
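  //
  // For example, "addseq r0, r1, r2" produces the operand list
  //   { "add", CCOut(CPSR), CondCode(EQ), r0, r1, r2 }
  // while plain "add r0, r1, r2" produces the same list with a defaulted
  // CCOut(0) and CondCode(AL).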
4872  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4873  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4874
4875  // If we had a carry-set on an instruction that can't do that, issue an
4876  // error.
4877  if (!CanAcceptCarrySet && CarrySetting) {
4878    Parser.EatToEndOfStatement();
4879    return Error(NameLoc, "instruction '" + Mnemonic +
4880                 "' can not set flags, but 's' suffix specified");
4881  }
4882  // If we had a predication code on an instruction that can't do that, issue an
4883  // error.
4884  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4885    Parser.EatToEndOfStatement();
4886    return Error(NameLoc, "instruction '" + Mnemonic +
4887                 "' is not predicable, but condition code specified");
4888  }
4889
4890  // Add the carry setting operand, if necessary.
4891  if (CanAcceptCarrySet) {
4892    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4893    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4894                                               Loc));
4895  }
4896
4897  // Add the predication code operand, if necessary.
4898  if (CanAcceptPredicationCode) {
4899    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4900                                      CarrySetting);
4901    Operands.push_back(ARMOperand::CreateCondCode(
4902                         ARMCC::CondCodes(PredicationCode), Loc));
4903  }
4904
4905  // Add the processor imod operand, if necessary.
4906  if (ProcessorIMod) {
4907    Operands.push_back(ARMOperand::CreateImm(
4908          MCConstantExpr::Create(ProcessorIMod, getContext()),
4909                                 NameLoc, NameLoc));
4910  }
4911
4912  // Add the remaining tokens in the mnemonic.
4913  while (Next != StringRef::npos) {
4914    Start = Next;
4915    Next = Name.find('.', Start + 1);
4916    StringRef ExtraToken = Name.slice(Start, Next);
4917
4918    // Some NEON instructions have an optional datatype suffix that is
4919    // completely ignored. Check for that.
4920    if (isDataTypeToken(ExtraToken) &&
4921        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4922      continue;
4923
4924    if (ExtraToken != ".n") {
4925      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4926      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4927    }
4928  }
4929
4930  // Read the remaining operands.
4931  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4932    // Read the first operand.
4933    if (parseOperand(Operands, Mnemonic)) {
4934      Parser.EatToEndOfStatement();
4935      return true;
4936    }
4937
4938    while (getLexer().is(AsmToken::Comma)) {
4939      Parser.Lex();  // Eat the comma.
4940
4941      // Parse and remember the operand.
4942      if (parseOperand(Operands, Mnemonic)) {
4943        Parser.EatToEndOfStatement();
4944        return true;
4945      }
4946    }
4947  }
4948
4949  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4950    SMLoc Loc = getLexer().getLoc();
4951    Parser.EatToEndOfStatement();
4952    return Error(Loc, "unexpected token in argument list");
4953  }
4954
4955  Parser.Lex(); // Consume the EndOfStatement
4956
4957  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4958  // do and don't have a cc_out optional-def operand. With some spot-checks
4959  // of the operand list, we can figure out which variant we're trying to
4960  // parse and adjust accordingly before actually matching. We shouldn't ever
4961  // try to remove a cc_out operand that was explicitly set on the
4962  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4963  // table driven matcher doesn't fit well with the ARM instruction set.
4964  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4965    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4966    Operands.erase(Operands.begin() + 1);
4967    delete Op;
4968  }
4969
4970  // ARM mode 'blx' needs special handling, as the register operand version
4971  // is predicable, but the label operand version is not. So, we can't rely
4972  // on the Mnemonic based checking to correctly figure out when to put
4973  // a k_CondCode operand in the list. If we're trying to match the label
4974  // version, remove the k_CondCode operand here.
4975  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4976      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4977    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4978    Operands.erase(Operands.begin() + 1);
4979    delete Op;
4980  }
4981
4982  // The vector-compare-to-zero instructions have a literal token "#0" at
4983  // the end that comes to here as an immediate operand. Convert it to a
4984  // token to play nicely with the matcher.
4985  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4986      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4987      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4988    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4989    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4990    if (CE && CE->getValue() == 0) {
4991      Operands.erase(Operands.begin() + 5);
4992      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4993      delete Op;
4994    }
4995  }
4996  // VCMP{E} does the same thing, but with a different operand count.
4997  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4998      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4999    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5000    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5001    if (CE && CE->getValue() == 0) {
5002      Operands.erase(Operands.begin() + 4);
5003      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5004      delete Op;
5005    }
5006  }
5007  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5008  // end. Convert it to a token here. Take care not to convert those
5009  // that should hit the Thumb2 encoding.
5010  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5011      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5012      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5013      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5014    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5015    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5016    if (CE && CE->getValue() == 0 &&
5017        (isThumbOne() ||
5018         // The cc_out operand matches the IT block.
5019         ((inITBlock() != CarrySetting) &&
5020         // Neither register operand is a high register.
5021         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5022          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5023      Operands.erase(Operands.begin() + 5);
5024      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5025      delete Op;
5026    }
5027  }
5028
5029  return false;
5030}
5031
5032// Validate context-sensitive operand constraints.
5033
5034// Return 'true' if the register list contains non-low GPR registers,
5035// 'false' otherwise. If Reg is in the register list, set
5036// 'containsReg' to true.
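//
// For example, for "pop {r0, r8}" (tPOP, HiReg == PC) this returns true and
// the caller diagnoses "registers must be in range r0-r7 or pc", whereas
// "pop {r0, pc}" is accepted.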
5037static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5038                                 unsigned HiReg, bool &containsReg) {
5039  containsReg = false;
5040  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5041    unsigned OpReg = Inst.getOperand(i).getReg();
5042    if (OpReg == Reg)
5043      containsReg = true;
5044    // Anything other than a low register isn't legal here.
5045    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5046      return true;
5047  }
5048  return false;
5049}
5050
5051// Check if the specified register is in the register list of the instruction,
5052// starting at the indicated operand number.
5053static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5054  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5055    unsigned OpReg = Inst.getOperand(i).getReg();
5056    if (OpReg == Reg)
5057      return true;
5058  }
5059  return false;
5060}
5061
5062// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5063// the ARMInsts array) instead. Getting that here requires awkward
5064// API changes, though. Better way?
5065namespace llvm {
5066extern const MCInstrDesc ARMInsts[];
5067}
5068static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5069  return ARMInsts[Opcode];
5070}
5071
5072// FIXME: We would really like to be able to tablegen'erate this.
5073bool ARMAsmParser::
5074validateInstruction(MCInst &Inst,
5075                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5076  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5077  SMLoc Loc = Operands[0]->getStartLoc();
5078  // Check the IT block state first.
5079  // NOTE: BKPT instruction has the interesting property of being
5080  // allowed in IT blocks, but not being predicable.  It just always
5081  // executes.
5082  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5083      Inst.getOpcode() != ARM::BKPT) {
5084    unsigned bit = 1;
5085    if (ITState.FirstCond)
5086      ITState.FirstCond = false;
5087    else
5088      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5089    // The instruction must be predicable.
5090    if (!MCID.isPredicable())
5091      return Error(Loc, "instructions in IT block must be predicable");
5092    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5093    unsigned ITCond = bit ? ITState.Cond :
5094      ARMCC::getOppositeCondition(ITState.Cond);
5095    if (Cond != ITCond) {
5096      // Find the condition code Operand to get its SMLoc information.
5097      SMLoc CondLoc;
5098      for (unsigned i = 1; i < Operands.size(); ++i)
5099        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5100          CondLoc = Operands[i]->getStartLoc();
5101      return Error(CondLoc, "incorrect condition in IT block; got '" +
5102                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5103                   "', but expected '" +
5104                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5105    }
5106  // Check for non-'al' condition codes outside of the IT block.
5107  } else if (isThumbTwo() && MCID.isPredicable() &&
5108             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5109             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5110             Inst.getOpcode() != ARM::t2B)
5111    return Error(Loc, "predicated instructions must be in IT block");
5112
5113  switch (Inst.getOpcode()) {
5114  case ARM::LDRD:
5115  case ARM::LDRD_PRE:
5116  case ARM::LDRD_POST:
5117  case ARM::LDREXD: {
5118    // Rt2 must be Rt + 1.
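    // For example, "ldrd r0, r1, [r2]" is accepted, but "ldrd r0, r2, [r3]"
    // is rejected with "destination operands must be sequential".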
5119    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5120    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5121    if (Rt2 != Rt + 1)
5122      return Error(Operands[3]->getStartLoc(),
5123                   "destination operands must be sequential");
5124    return false;
5125  }
5126  case ARM::STRD: {
5127    // Rt2 must be Rt + 1.
5128    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5129    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5130    if (Rt2 != Rt + 1)
5131      return Error(Operands[3]->getStartLoc(),
5132                   "source operands must be sequential");
5133    return false;
5134  }
5135  case ARM::STRD_PRE:
5136  case ARM::STRD_POST:
5137  case ARM::STREXD: {
5138    // Rt2 must be Rt + 1.
5139    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5140    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5141    if (Rt2 != Rt + 1)
5142      return Error(Operands[3]->getStartLoc(),
5143                   "source operands must be sequential");
5144    return false;
5145  }
5146  case ARM::SBFX:
5147  case ARM::UBFX: {
5148    // width must be in range [1, 32-lsb]
5149    unsigned lsb = Inst.getOperand(2).getImm();
5150    unsigned widthm1 = Inst.getOperand(3).getImm();
5151    if (widthm1 >= 32 - lsb)
5152      return Error(Operands[5]->getStartLoc(),
5153                   "bitfield width must be in range [1,32-lsb]");
5154    return false;
5155  }
5156  case ARM::tLDMIA: {
5157    // If we're parsing Thumb2, the .w variant is available and handles
5158    // most cases that are normally illegal for a Thumb1 LDM
5159    // instruction. We'll make the transformation in processInstruction()
5160    // if necessary.
5161    //
5162    // Thumb LDM instructions are writeback iff the base register is not
5163    // in the register list.
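    //
    // For example, in Thumb1 "ldmia r0!, {r1, r2}" requires the '!' because
    // r0 is not in the list, while "ldmia r0, {r0, r1}" must not have one.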
5164    unsigned Rn = Inst.getOperand(0).getReg();
5165    bool hasWritebackToken =
5166      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5167       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5168    bool listContainsBase;
5169    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5170      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5171                   "registers must be in range r0-r7");
5172    // If we should have writeback, then there should be a '!' token.
5173    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5174      return Error(Operands[2]->getStartLoc(),
5175                   "writeback operator '!' expected");
5176    // If we should not have writeback, there must not be a '!'. This is
5177    // true even for the 32-bit wide encodings.
5178    if (listContainsBase && hasWritebackToken)
5179      return Error(Operands[3]->getStartLoc(),
5180                   "writeback operator '!' not allowed when base register "
5181                   "in register list");
5182
5183    break;
5184  }
5185  case ARM::t2LDMIA_UPD: {
5186    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5187      return Error(Operands[4]->getStartLoc(),
5188                   "writeback operator '!' not allowed when base register "
5189                   "in register list");
5190    break;
5191  }
5192  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5193  // so only issue a diagnostic for thumb1. The instructions will be
5194  // switched to the t2 encodings in processInstruction() if necessary.
5195  case ARM::tPOP: {
5196    bool listContainsBase;
5197    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5198        !isThumbTwo())
5199      return Error(Operands[2]->getStartLoc(),
5200                   "registers must be in range r0-r7 or pc");
5201    break;
5202  }
5203  case ARM::tPUSH: {
5204    bool listContainsBase;
5205    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5206        !isThumbTwo())
5207      return Error(Operands[2]->getStartLoc(),
5208                   "registers must be in range r0-r7 or lr");
5209    break;
5210  }
5211  case ARM::tSTMIA_UPD: {
5212    bool listContainsBase;
5213    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5214      return Error(Operands[4]->getStartLoc(),
5215                   "registers must be in range r0-r7");
5216    break;
5217  }
5218  }
5219
5220  return false;
5221}
5222
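// Map a NEON VST assembly pseudo-opcode onto the real instruction opcode.
// Spacing is set to the register stride of the vector list: 1 for lists of
// consecutive D registers (e.g. d0, d1, d2) and 2 for the even/odd-spaced
// forms (e.g. d0, d2, d4); it is consumed when the list operands are
// expanded in processInstruction() below.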
5223static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5224  switch(Opc) {
5225  default: llvm_unreachable("unexpected opcode!");
5226  // VST1LN
5227  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5228  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5229  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5230  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5231  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5232  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5233  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5234  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5235  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5236
5237  // VST2LN
5238  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5239  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5240  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5241  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5242  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5243
5244  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5245  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5246  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5247  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5248  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5249
5250  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5251  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5252  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5253  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5254  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5255
5256  // VST3LN
5257  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5258  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5259  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5260  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5261  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5262  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5263  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5264  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5265  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5266  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5267  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5268  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5269  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5270  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5271  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5272
5273  // VST3
5274  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5275  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5276  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5277  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5278  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5279  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5280  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5281  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5282  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5283  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5284  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5285  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5286  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5287  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5288  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5289  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5290  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5291  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5292
5293  // VST4LN
5294  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5295  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5296  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5297  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5298  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5299  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5300  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5301  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5302  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5303  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5304  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5305  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5306  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5307  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5308  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5309
5310  // VST4
5311  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5312  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5313  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5314  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5315  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5316  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5317  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5318  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5319  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5320  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5321  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5322  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5323  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5324  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5325  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5326  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5327  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5328  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5329  }
5330}
5331
5332static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5333  switch(Opc) {
5334  default: llvm_unreachable("unexpected opcode!");
5335  // VLD1LN
5336  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5337  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5338  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5339  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5340  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5341  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5342  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5343  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5344  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5345
5346  // VLD2LN
5347  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5348  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5349  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5350  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5351  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5352  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5353  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5354  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5355  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5356  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5357  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5358  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5359  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5360  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5361  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5362
5363  // VLD3DUP
5364  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5365  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5366  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5367  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5368  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5369  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5370  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5371  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5372  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5373  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5374  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5375  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5376  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5377  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5378  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5379  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5380  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5381  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5382
5383  // VLD3LN
5384  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5385  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5386  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5387  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5388  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5389  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5390  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5391  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5392  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5393  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5394  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5395  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5396  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5397  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5398  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5399
5400  // VLD3
5401  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5402  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5403  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5404  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5405  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5406  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5407  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5408  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5409  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5410  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5411  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5412  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5413  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5414  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5415  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5416  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5417  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5418  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5419
5420  // VLD4LN
5421  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5422  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5423  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5424  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5425  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5426  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5427  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5428  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5429  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5430  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5431  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5432  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5433  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5434  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5435  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5436
5437  // VLD4DUP
5438  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5439  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5440  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5441  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5442  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5443  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5444  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5445  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5446  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5447  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5448  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5449  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5450  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5451  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5452  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5453  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5454  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5455  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5456
5457  // VLD4
5458  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5459  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5460  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5461  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5462  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5463  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5464  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5465  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5466  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5467  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5468  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5469  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5470  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5471  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5472  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5473  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5474  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5475  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5476  }
5477}
5478
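// Rewrite instructions that were matched to convenience pseudo-opcodes (the
// NEON "complex aliases" and the shift-style MOV aliases handled below) into
// the MCInst form of the real instruction they stand for. Returns true if
// Inst was modified and false if it was left untouched.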
5479bool ARMAsmParser::
5480processInstruction(MCInst &Inst,
5481                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5482  switch (Inst.getOpcode()) {
5483  // Aliases for alternate PC+imm syntax of LDR instructions.
5484  case ARM::t2LDRpcrel:
5485    Inst.setOpcode(ARM::t2LDRpci);
5486    return true;
5487  case ARM::t2LDRBpcrel:
5488    Inst.setOpcode(ARM::t2LDRBpci);
5489    return true;
5490  case ARM::t2LDRHpcrel:
5491    Inst.setOpcode(ARM::t2LDRHpci);
5492    return true;
5493  case ARM::t2LDRSBpcrel:
5494    Inst.setOpcode(ARM::t2LDRSBpci);
5495    return true;
5496  case ARM::t2LDRSHpcrel:
5497    Inst.setOpcode(ARM::t2LDRSHpci);
5498    return true;
5499  // Handle NEON VST complex aliases.
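  // Each of the *_Asm pseudos below carries its operands in assembly order:
  // Vd, lane, Rn, alignment[, Rm], predicate.  The expansion reorders them to
  // match the real VSTnLN instruction and spells out the implicit register
  // list as Vd, Vd + Spacing, ... (Spacing is 1 for lists of consecutive
  // D registers and 2 for the every-other-register, Q-sized forms).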
5500  case ARM::VST1LNdWB_register_Asm_8:
5501  case ARM::VST1LNdWB_register_Asm_16:
5502  case ARM::VST1LNdWB_register_Asm_32: {
5503    MCInst TmpInst;
5504    // Shuffle the operands around so the lane index operand is in the
5505    // right place.
5506    unsigned Spacing;
5507    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5508    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5509    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5510    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5511    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5512    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5513    TmpInst.addOperand(Inst.getOperand(1)); // lane
5514    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5515    TmpInst.addOperand(Inst.getOperand(6));
5516    Inst = TmpInst;
5517    return true;
5518  }
5519
5520  case ARM::VST2LNdWB_register_Asm_8:
5521  case ARM::VST2LNdWB_register_Asm_16:
5522  case ARM::VST2LNdWB_register_Asm_32:
5523  case ARM::VST2LNqWB_register_Asm_16:
5524  case ARM::VST2LNqWB_register_Asm_32: {
5525    MCInst TmpInst;
5526    // Shuffle the operands around so the lane index operand is in the
5527    // right place.
5528    unsigned Spacing;
5529    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5530    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5531    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5532    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5533    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5534    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5535    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5536                                            Spacing));
5537    TmpInst.addOperand(Inst.getOperand(1)); // lane
5538    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5539    TmpInst.addOperand(Inst.getOperand(6));
5540    Inst = TmpInst;
5541    return true;
5542  }
5543
5544  case ARM::VST3LNdWB_register_Asm_8:
5545  case ARM::VST3LNdWB_register_Asm_16:
5546  case ARM::VST3LNdWB_register_Asm_32:
5547  case ARM::VST3LNqWB_register_Asm_16:
5548  case ARM::VST3LNqWB_register_Asm_32: {
5549    MCInst TmpInst;
5550    // Shuffle the operands around so the lane index operand is in the
5551    // right place.
5552    unsigned Spacing;
5553    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5554    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5555    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5556    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5557    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5558    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5559    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5560                                            Spacing));
5561    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5562                                            Spacing * 2));
5563    TmpInst.addOperand(Inst.getOperand(1)); // lane
5564    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5565    TmpInst.addOperand(Inst.getOperand(6));
5566    Inst = TmpInst;
5567    return true;
5568  }
5569
5570  case ARM::VST4LNdWB_register_Asm_8:
5571  case ARM::VST4LNdWB_register_Asm_16:
5572  case ARM::VST4LNdWB_register_Asm_32:
5573  case ARM::VST4LNqWB_register_Asm_16:
5574  case ARM::VST4LNqWB_register_Asm_32: {
5575    MCInst TmpInst;
5576    // Shuffle the operands around so the lane index operand is in the
5577    // right place.
5578    unsigned Spacing;
5579    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5580    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5581    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5582    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5583    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5584    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5585    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5586                                            Spacing));
5587    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5588                                            Spacing * 2));
5589    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5590                                            Spacing * 3));
5591    TmpInst.addOperand(Inst.getOperand(1)); // lane
5592    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5593    TmpInst.addOperand(Inst.getOperand(6));
5594    Inst = TmpInst;
5595    return true;
5596  }
5597
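  // The _fixed writeback forms have no index register; register 0 is used as
  // a placeholder for Rm in the real instruction.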
5598  case ARM::VST1LNdWB_fixed_Asm_8:
5599  case ARM::VST1LNdWB_fixed_Asm_16:
5600  case ARM::VST1LNdWB_fixed_Asm_32: {
5601    MCInst TmpInst;
5602    // Shuffle the operands around so the lane index operand is in the
5603    // right place.
5604    unsigned Spacing;
5605    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5606    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5607    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5608    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5609    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5610    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5611    TmpInst.addOperand(Inst.getOperand(1)); // lane
5612    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5613    TmpInst.addOperand(Inst.getOperand(5));
5614    Inst = TmpInst;
5615    return true;
5616  }
5617
5618  case ARM::VST2LNdWB_fixed_Asm_8:
5619  case ARM::VST2LNdWB_fixed_Asm_16:
5620  case ARM::VST2LNdWB_fixed_Asm_32:
5621  case ARM::VST2LNqWB_fixed_Asm_16:
5622  case ARM::VST2LNqWB_fixed_Asm_32: {
5623    MCInst TmpInst;
5624    // Shuffle the operands around so the lane index operand is in the
5625    // right place.
5626    unsigned Spacing;
5627    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5628    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5629    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5630    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5631    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5632    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5633    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5634                                            Spacing));
5635    TmpInst.addOperand(Inst.getOperand(1)); // lane
5636    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5637    TmpInst.addOperand(Inst.getOperand(5));
5638    Inst = TmpInst;
5639    return true;
5640  }
5641
5642  case ARM::VST3LNdWB_fixed_Asm_8:
5643  case ARM::VST3LNdWB_fixed_Asm_16:
5644  case ARM::VST3LNdWB_fixed_Asm_32:
5645  case ARM::VST3LNqWB_fixed_Asm_16:
5646  case ARM::VST3LNqWB_fixed_Asm_32: {
5647    MCInst TmpInst;
5648    // Shuffle the operands around so the lane index operand is in the
5649    // right place.
5650    unsigned Spacing;
5651    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5652    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5653    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5654    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5655    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5656    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5657    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5658                                            Spacing));
5659    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5660                                            Spacing * 2));
5661    TmpInst.addOperand(Inst.getOperand(1)); // lane
5662    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5663    TmpInst.addOperand(Inst.getOperand(5));
5664    Inst = TmpInst;
5665    return true;
5666  }
5667
5668  case ARM::VST4LNdWB_fixed_Asm_8:
5669  case ARM::VST4LNdWB_fixed_Asm_16:
5670  case ARM::VST4LNdWB_fixed_Asm_32:
5671  case ARM::VST4LNqWB_fixed_Asm_16:
5672  case ARM::VST4LNqWB_fixed_Asm_32: {
5673    MCInst TmpInst;
5674    // Shuffle the operands around so the lane index operand is in the
5675    // right place.
5676    unsigned Spacing;
5677    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5678    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5679    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5680    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5681    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5682    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5683    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5684                                            Spacing));
5685    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5686                                            Spacing * 2));
5687    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5688                                            Spacing * 3));
5689    TmpInst.addOperand(Inst.getOperand(1)); // lane
5690    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5691    TmpInst.addOperand(Inst.getOperand(5));
5692    Inst = TmpInst;
5693    return true;
5694  }
5695
5696  case ARM::VST1LNdAsm_8:
5697  case ARM::VST1LNdAsm_16:
5698  case ARM::VST1LNdAsm_32: {
5699    MCInst TmpInst;
5700    // Shuffle the operands around so the lane index operand is in the
5701    // right place.
5702    unsigned Spacing;
5703    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5704    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5705    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5706    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5707    TmpInst.addOperand(Inst.getOperand(1)); // lane
5708    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5709    TmpInst.addOperand(Inst.getOperand(5));
5710    Inst = TmpInst;
5711    return true;
5712  }
5713
5714  case ARM::VST2LNdAsm_8:
5715  case ARM::VST2LNdAsm_16:
5716  case ARM::VST2LNdAsm_32:
5717  case ARM::VST2LNqAsm_16:
5718  case ARM::VST2LNqAsm_32: {
5719    MCInst TmpInst;
5720    // Shuffle the operands around so the lane index operand is in the
5721    // right place.
5722    unsigned Spacing;
5723    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5724    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5725    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5726    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5727    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5728                                            Spacing));
5729    TmpInst.addOperand(Inst.getOperand(1)); // lane
5730    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5731    TmpInst.addOperand(Inst.getOperand(5));
5732    Inst = TmpInst;
5733    return true;
5734  }
5735
5736  case ARM::VST3LNdAsm_8:
5737  case ARM::VST3LNdAsm_16:
5738  case ARM::VST3LNdAsm_32:
5739  case ARM::VST3LNqAsm_16:
5740  case ARM::VST3LNqAsm_32: {
5741    MCInst TmpInst;
5742    // Shuffle the operands around so the lane index operand is in the
5743    // right place.
5744    unsigned Spacing;
5745    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5746    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5747    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5748    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5749    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5750                                            Spacing));
5751    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5752                                            Spacing * 2));
5753    TmpInst.addOperand(Inst.getOperand(1)); // lane
5754    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5755    TmpInst.addOperand(Inst.getOperand(5));
5756    Inst = TmpInst;
5757    return true;
5758  }
5759
5760  case ARM::VST4LNdAsm_8:
5761  case ARM::VST4LNdAsm_16:
5762  case ARM::VST4LNdAsm_32:
5763  case ARM::VST4LNqAsm_16:
5764  case ARM::VST4LNqAsm_32: {
5765    MCInst TmpInst;
5766    // Shuffle the operands around so the lane index operand is in the
5767    // right place.
5768    unsigned Spacing;
5769    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5770    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5771    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5772    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5773    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5774                                            Spacing));
5775    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5776                                            Spacing * 2));
5777    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5778                                            Spacing * 3));
5779    TmpInst.addOperand(Inst.getOperand(1)); // lane
5780    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5781    TmpInst.addOperand(Inst.getOperand(5));
5782    Inst = TmpInst;
5783    return true;
5784  }
5785
5786  // Handle NEON VLD complex aliases.
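  // The loads mirror the store expansions above, except that the register
  // list is written first (as definitions) and repeated once more at the end
  // as the tied sources for the lanes the load leaves unchanged.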
5787  case ARM::VLD1LNdWB_register_Asm_8:
5788  case ARM::VLD1LNdWB_register_Asm_16:
5789  case ARM::VLD1LNdWB_register_Asm_32: {
5790    MCInst TmpInst;
5791    // Shuffle the operands around so the lane index operand is in the
5792    // right place.
5793    unsigned Spacing;
5794    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5795    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5796    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5797    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5798    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5799    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5800    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5801    TmpInst.addOperand(Inst.getOperand(1)); // lane
5802    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5803    TmpInst.addOperand(Inst.getOperand(6));
5804    Inst = TmpInst;
5805    return true;
5806  }
5807
5808  case ARM::VLD2LNdWB_register_Asm_8:
5809  case ARM::VLD2LNdWB_register_Asm_16:
5810  case ARM::VLD2LNdWB_register_Asm_32:
5811  case ARM::VLD2LNqWB_register_Asm_16:
5812  case ARM::VLD2LNqWB_register_Asm_32: {
5813    MCInst TmpInst;
5814    // Shuffle the operands around so the lane index operand is in the
5815    // right place.
5816    unsigned Spacing;
5817    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5818    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5819    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5820                                            Spacing));
5821    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5822    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5823    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5824    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5825    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5826    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5827                                            Spacing));
5828    TmpInst.addOperand(Inst.getOperand(1)); // lane
5829    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5830    TmpInst.addOperand(Inst.getOperand(6));
5831    Inst = TmpInst;
5832    return true;
5833  }
5834
5835  case ARM::VLD3LNdWB_register_Asm_8:
5836  case ARM::VLD3LNdWB_register_Asm_16:
5837  case ARM::VLD3LNdWB_register_Asm_32:
5838  case ARM::VLD3LNqWB_register_Asm_16:
5839  case ARM::VLD3LNqWB_register_Asm_32: {
5840    MCInst TmpInst;
5841    // Shuffle the operands around so the lane index operand is in the
5842    // right place.
5843    unsigned Spacing;
5844    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5845    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5846    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5847                                            Spacing));
5848    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5849                                            Spacing * 2));
5850    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5851    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5852    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5853    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5854    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5855    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5856                                            Spacing));
5857    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5858                                            Spacing * 2));
5859    TmpInst.addOperand(Inst.getOperand(1)); // lane
5860    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5861    TmpInst.addOperand(Inst.getOperand(6));
5862    Inst = TmpInst;
5863    return true;
5864  }
5865
5866  case ARM::VLD4LNdWB_register_Asm_8:
5867  case ARM::VLD4LNdWB_register_Asm_16:
5868  case ARM::VLD4LNdWB_register_Asm_32:
5869  case ARM::VLD4LNqWB_register_Asm_16:
5870  case ARM::VLD4LNqWB_register_Asm_32: {
5871    MCInst TmpInst;
5872    // Shuffle the operands around so the lane index operand is in the
5873    // right place.
5874    unsigned Spacing;
5875    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5876    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5877    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5878                                            Spacing));
5879    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5880                                            Spacing * 2));
5881    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5882                                            Spacing * 3));
5883    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5884    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5885    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5886    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5887    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5888    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5889                                            Spacing));
5890    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5891                                            Spacing * 2));
5892    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5893                                            Spacing * 3));
5894    TmpInst.addOperand(Inst.getOperand(1)); // lane
5895    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5896    TmpInst.addOperand(Inst.getOperand(6));
5897    Inst = TmpInst;
5898    return true;
5899  }
5900
5901  case ARM::VLD1LNdWB_fixed_Asm_8:
5902  case ARM::VLD1LNdWB_fixed_Asm_16:
5903  case ARM::VLD1LNdWB_fixed_Asm_32: {
5904    MCInst TmpInst;
5905    // Shuffle the operands around so the lane index operand is in the
5906    // right place.
5907    unsigned Spacing;
5908    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5909    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5910    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5911    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5912    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5913    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5914    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5915    TmpInst.addOperand(Inst.getOperand(1)); // lane
5916    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5917    TmpInst.addOperand(Inst.getOperand(5));
5918    Inst = TmpInst;
5919    return true;
5920  }
5921
5922  case ARM::VLD2LNdWB_fixed_Asm_8:
5923  case ARM::VLD2LNdWB_fixed_Asm_16:
5924  case ARM::VLD2LNdWB_fixed_Asm_32:
5925  case ARM::VLD2LNqWB_fixed_Asm_16:
5926  case ARM::VLD2LNqWB_fixed_Asm_32: {
5927    MCInst TmpInst;
5928    // Shuffle the operands around so the lane index operand is in the
5929    // right place.
5930    unsigned Spacing;
5931    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5932    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5933    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5934                                            Spacing));
5935    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5936    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5937    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5938    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5939    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5940    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5941                                            Spacing));
5942    TmpInst.addOperand(Inst.getOperand(1)); // lane
5943    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5944    TmpInst.addOperand(Inst.getOperand(5));
5945    Inst = TmpInst;
5946    return true;
5947  }
5948
5949  case ARM::VLD3LNdWB_fixed_Asm_8:
5950  case ARM::VLD3LNdWB_fixed_Asm_16:
5951  case ARM::VLD3LNdWB_fixed_Asm_32:
5952  case ARM::VLD3LNqWB_fixed_Asm_16:
5953  case ARM::VLD3LNqWB_fixed_Asm_32: {
5954    MCInst TmpInst;
5955    // Shuffle the operands around so the lane index operand is in the
5956    // right place.
5957    unsigned Spacing;
5958    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5959    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5960    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5961                                            Spacing));
5962    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5963                                            Spacing * 2));
5964    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5965    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5966    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5967    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5968    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5969    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5970                                            Spacing));
5971    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5972                                            Spacing * 2));
5973    TmpInst.addOperand(Inst.getOperand(1)); // lane
5974    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5975    TmpInst.addOperand(Inst.getOperand(5));
5976    Inst = TmpInst;
5977    return true;
5978  }
5979
5980  case ARM::VLD4LNdWB_fixed_Asm_8:
5981  case ARM::VLD4LNdWB_fixed_Asm_16:
5982  case ARM::VLD4LNdWB_fixed_Asm_32:
5983  case ARM::VLD4LNqWB_fixed_Asm_16:
5984  case ARM::VLD4LNqWB_fixed_Asm_32: {
5985    MCInst TmpInst;
5986    // Shuffle the operands around so the lane index operand is in the
5987    // right place.
5988    unsigned Spacing;
5989    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5990    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5991    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5992                                            Spacing));
5993    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5994                                            Spacing * 2));
5995    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5996                                            Spacing * 3));
5997    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5998    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5999    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6000    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6001    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6002    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6003                                            Spacing));
6004    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6005                                            Spacing * 2));
6006    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6007                                            Spacing * 3));
6008    TmpInst.addOperand(Inst.getOperand(1)); // lane
6009    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6010    TmpInst.addOperand(Inst.getOperand(5));
6011    Inst = TmpInst;
6012    return true;
6013  }
6014
6015  case ARM::VLD1LNdAsm_8:
6016  case ARM::VLD1LNdAsm_16:
6017  case ARM::VLD1LNdAsm_32: {
6018    MCInst TmpInst;
6019    // Shuffle the operands around so the lane index operand is in the
6020    // right place.
6021    unsigned Spacing;
6022    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6023    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6024    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6025    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6026    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6027    TmpInst.addOperand(Inst.getOperand(1)); // lane
6028    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6029    TmpInst.addOperand(Inst.getOperand(5));
6030    Inst = TmpInst;
6031    return true;
6032  }
6033
6034  case ARM::VLD2LNdAsm_8:
6035  case ARM::VLD2LNdAsm_16:
6036  case ARM::VLD2LNdAsm_32:
6037  case ARM::VLD2LNqAsm_16:
6038  case ARM::VLD2LNqAsm_32: {
6039    MCInst TmpInst;
6040    // Shuffle the operands around so the lane index operand is in the
6041    // right place.
6042    unsigned Spacing;
6043    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6044    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6045    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6046                                            Spacing));
6047    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6048    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6049    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6050    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6051                                            Spacing));
6052    TmpInst.addOperand(Inst.getOperand(1)); // lane
6053    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6054    TmpInst.addOperand(Inst.getOperand(5));
6055    Inst = TmpInst;
6056    return true;
6057  }
6058
6059  case ARM::VLD3LNdAsm_8:
6060  case ARM::VLD3LNdAsm_16:
6061  case ARM::VLD3LNdAsm_32:
6062  case ARM::VLD3LNqAsm_16:
6063  case ARM::VLD3LNqAsm_32: {
6064    MCInst TmpInst;
6065    // Shuffle the operands around so the lane index operand is in the
6066    // right place.
6067    unsigned Spacing;
6068    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6069    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6070    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6071                                            Spacing));
6072    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6073                                            Spacing * 2));
6074    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6075    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6076    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6077    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6078                                            Spacing));
6079    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6080                                            Spacing * 2));
6081    TmpInst.addOperand(Inst.getOperand(1)); // lane
6082    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6083    TmpInst.addOperand(Inst.getOperand(5));
6084    Inst = TmpInst;
6085    return true;
6086  }
6087
6088  case ARM::VLD4LNdAsm_8:
6089  case ARM::VLD4LNdAsm_16:
6090  case ARM::VLD4LNdAsm_32:
6091  case ARM::VLD4LNqAsm_16:
6092  case ARM::VLD4LNqAsm_32: {
6093    MCInst TmpInst;
6094    // Shuffle the operands around so the lane index operand is in the
6095    // right place.
6096    unsigned Spacing;
6097    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6098    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6099    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6100                                            Spacing));
6101    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6102                                            Spacing * 2));
6103    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6104                                            Spacing * 3));
6105    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6106    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6107    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6108    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6109                                            Spacing));
6110    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6111                                            Spacing * 2));
6112    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6113                                            Spacing * 3));
6114    TmpInst.addOperand(Inst.getOperand(1)); // lane
6115    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6116    TmpInst.addOperand(Inst.getOperand(5));
6117    Inst = TmpInst;
6118    return true;
6119  }
6120
6121  // VLD3DUP single 3-element structure to all lanes instructions.
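  // The all-lanes forms keep their operands in assembly order (Vd, Rn,
  // alignment[, Rm], predicate); only the register list needs expanding, and
  // no tied source is needed since every lane of each register is written.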
6122  case ARM::VLD3DUPdAsm_8:
6123  case ARM::VLD3DUPdAsm_16:
6124  case ARM::VLD3DUPdAsm_32:
6125  case ARM::VLD3DUPqAsm_8:
6126  case ARM::VLD3DUPqAsm_16:
6127  case ARM::VLD3DUPqAsm_32: {
6128    MCInst TmpInst;
6129    unsigned Spacing;
6130    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6131    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6132    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6133                                            Spacing));
6134    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6135                                            Spacing * 2));
6136    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6137    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6138    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6139    TmpInst.addOperand(Inst.getOperand(4));
6140    Inst = TmpInst;
6141    return true;
6142  }
6143
6144  case ARM::VLD3DUPdWB_fixed_Asm_8:
6145  case ARM::VLD3DUPdWB_fixed_Asm_16:
6146  case ARM::VLD3DUPdWB_fixed_Asm_32:
6147  case ARM::VLD3DUPqWB_fixed_Asm_8:
6148  case ARM::VLD3DUPqWB_fixed_Asm_16:
6149  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6150    MCInst TmpInst;
6151    unsigned Spacing;
6152    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6153    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6154    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6155                                            Spacing));
6156    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6157                                            Spacing * 2));
6158    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6159    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6160    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6161    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6162    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6163    TmpInst.addOperand(Inst.getOperand(4));
6164    Inst = TmpInst;
6165    return true;
6166  }
6167
6168  case ARM::VLD3DUPdWB_register_Asm_8:
6169  case ARM::VLD3DUPdWB_register_Asm_16:
6170  case ARM::VLD3DUPdWB_register_Asm_32:
6171  case ARM::VLD3DUPqWB_register_Asm_8:
6172  case ARM::VLD3DUPqWB_register_Asm_16:
6173  case ARM::VLD3DUPqWB_register_Asm_32: {
6174    MCInst TmpInst;
6175    unsigned Spacing;
6176    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6177    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6178    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6179                                            Spacing));
6180    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6181                                            Spacing * 2));
6182    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6183    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6184    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6185    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6186    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6187    TmpInst.addOperand(Inst.getOperand(5));
6188    Inst = TmpInst;
6189    return true;
6190  }
6191
6192  // VLD3 multiple 3-element structure instructions.
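  // The writeback forms add the base register twice: once for the updated
  // base that is written back and once as the tied base-address source.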
6193  case ARM::VLD3dAsm_8:
6194  case ARM::VLD3dAsm_16:
6195  case ARM::VLD3dAsm_32:
6196  case ARM::VLD3qAsm_8:
6197  case ARM::VLD3qAsm_16:
6198  case ARM::VLD3qAsm_32: {
6199    MCInst TmpInst;
6200    unsigned Spacing;
6201    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6202    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6203    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6204                                            Spacing));
6205    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6206                                            Spacing * 2));
6207    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6208    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6209    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6210    TmpInst.addOperand(Inst.getOperand(4));
6211    Inst = TmpInst;
6212    return true;
6213  }
6214
6215  case ARM::VLD3dWB_fixed_Asm_8:
6216  case ARM::VLD3dWB_fixed_Asm_16:
6217  case ARM::VLD3dWB_fixed_Asm_32:
6218  case ARM::VLD3qWB_fixed_Asm_8:
6219  case ARM::VLD3qWB_fixed_Asm_16:
6220  case ARM::VLD3qWB_fixed_Asm_32: {
6221    MCInst TmpInst;
6222    unsigned Spacing;
6223    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6224    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6225    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6226                                            Spacing));
6227    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6228                                            Spacing * 2));
6229    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6230    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6231    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6232    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6233    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6234    TmpInst.addOperand(Inst.getOperand(4));
6235    Inst = TmpInst;
6236    return true;
6237  }
6238
6239  case ARM::VLD3dWB_register_Asm_8:
6240  case ARM::VLD3dWB_register_Asm_16:
6241  case ARM::VLD3dWB_register_Asm_32:
6242  case ARM::VLD3qWB_register_Asm_8:
6243  case ARM::VLD3qWB_register_Asm_16:
6244  case ARM::VLD3qWB_register_Asm_32: {
6245    MCInst TmpInst;
6246    unsigned Spacing;
6247    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6248    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6249    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6250                                            Spacing));
6251    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6252                                            Spacing * 2));
6253    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6254    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6255    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6256    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6257    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6258    TmpInst.addOperand(Inst.getOperand(5));
6259    Inst = TmpInst;
6260    return true;
6261  }
6262
6263  // VLD4DUP single 4-element structure to all lanes instructions.
6264  case ARM::VLD4DUPdAsm_8:
6265  case ARM::VLD4DUPdAsm_16:
6266  case ARM::VLD4DUPdAsm_32:
6267  case ARM::VLD4DUPqAsm_8:
6268  case ARM::VLD4DUPqAsm_16:
6269  case ARM::VLD4DUPqAsm_32: {
6270    MCInst TmpInst;
6271    unsigned Spacing;
6272    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6273    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6274    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6275                                            Spacing));
6276    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6277                                            Spacing * 2));
6278    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6279                                            Spacing * 3));
6280    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6281    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6282    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6283    TmpInst.addOperand(Inst.getOperand(4));
6284    Inst = TmpInst;
6285    return true;
6286  }
6287
6288  case ARM::VLD4DUPdWB_fixed_Asm_8:
6289  case ARM::VLD4DUPdWB_fixed_Asm_16:
6290  case ARM::VLD4DUPdWB_fixed_Asm_32:
6291  case ARM::VLD4DUPqWB_fixed_Asm_8:
6292  case ARM::VLD4DUPqWB_fixed_Asm_16:
6293  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6294    MCInst TmpInst;
6295    unsigned Spacing;
6296    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6297    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6298    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6299                                            Spacing));
6300    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6301                                            Spacing * 2));
6302    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6303                                            Spacing * 3));
6304    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6305    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6306    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6307    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6308    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6309    TmpInst.addOperand(Inst.getOperand(4));
6310    Inst = TmpInst;
6311    return true;
6312  }
6313
6314  case ARM::VLD4DUPdWB_register_Asm_8:
6315  case ARM::VLD4DUPdWB_register_Asm_16:
6316  case ARM::VLD4DUPdWB_register_Asm_32:
6317  case ARM::VLD4DUPqWB_register_Asm_8:
6318  case ARM::VLD4DUPqWB_register_Asm_16:
6319  case ARM::VLD4DUPqWB_register_Asm_32: {
6320    MCInst TmpInst;
6321    unsigned Spacing;
6322    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6323    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6324    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6325                                            Spacing));
6326    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6327                                            Spacing * 2));
6328    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6329                                            Spacing * 3));
6330    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6331    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6332    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6333    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6334    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6335    TmpInst.addOperand(Inst.getOperand(5));
6336    Inst = TmpInst;
6337    return true;
6338  }
6339
6340  // VLD4 multiple 4-element structure instructions.
6341  case ARM::VLD4dAsm_8:
6342  case ARM::VLD4dAsm_16:
6343  case ARM::VLD4dAsm_32:
6344  case ARM::VLD4qAsm_8:
6345  case ARM::VLD4qAsm_16:
6346  case ARM::VLD4qAsm_32: {
6347    MCInst TmpInst;
6348    unsigned Spacing;
6349    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6350    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6351    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6352                                            Spacing));
6353    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6354                                            Spacing * 2));
6355    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6356                                            Spacing * 3));
6357    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6358    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6359    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6360    TmpInst.addOperand(Inst.getOperand(4));
6361    Inst = TmpInst;
6362    return true;
6363  }
6364
6365  case ARM::VLD4dWB_fixed_Asm_8:
6366  case ARM::VLD4dWB_fixed_Asm_16:
6367  case ARM::VLD4dWB_fixed_Asm_32:
6368  case ARM::VLD4qWB_fixed_Asm_8:
6369  case ARM::VLD4qWB_fixed_Asm_16:
6370  case ARM::VLD4qWB_fixed_Asm_32: {
6371    MCInst TmpInst;
6372    unsigned Spacing;
6373    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6374    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6375    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6376                                            Spacing));
6377    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6378                                            Spacing * 2));
6379    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6380                                            Spacing * 3));
6381    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6382    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6383    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6384    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6385    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6386    TmpInst.addOperand(Inst.getOperand(4));
6387    Inst = TmpInst;
6388    return true;
6389  }
6390
6391  case ARM::VLD4dWB_register_Asm_8:
6392  case ARM::VLD4dWB_register_Asm_16:
6393  case ARM::VLD4dWB_register_Asm_32:
6394  case ARM::VLD4qWB_register_Asm_8:
6395  case ARM::VLD4qWB_register_Asm_16:
6396  case ARM::VLD4qWB_register_Asm_32: {
6397    MCInst TmpInst;
6398    unsigned Spacing;
6399    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6400    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6401    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6402                                            Spacing));
6403    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6404                                            Spacing * 2));
6405    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6406                                            Spacing * 3));
6407    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6408    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6409    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6410    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6411    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6412    TmpInst.addOperand(Inst.getOperand(5));
6413    Inst = TmpInst;
6414    return true;
6415  }
6416
6417  // VST3 multiple 3-element structure instructions.
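  // For the stores the expanded register list follows the address operands
  // (and Rm, for the writeback forms) instead of leading the operand list as
  // it does for the loads.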
6418  case ARM::VST3dAsm_8:
6419  case ARM::VST3dAsm_16:
6420  case ARM::VST3dAsm_32:
6421  case ARM::VST3qAsm_8:
6422  case ARM::VST3qAsm_16:
6423  case ARM::VST3qAsm_32: {
6424    MCInst TmpInst;
6425    unsigned Spacing;
6426    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6427    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6428    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6429    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6430    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6431                                            Spacing));
6432    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6433                                            Spacing * 2));
6434    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6435    TmpInst.addOperand(Inst.getOperand(4));
6436    Inst = TmpInst;
6437    return true;
6438  }
6439
6440  case ARM::VST3dWB_fixed_Asm_8:
6441  case ARM::VST3dWB_fixed_Asm_16:
6442  case ARM::VST3dWB_fixed_Asm_32:
6443  case ARM::VST3qWB_fixed_Asm_8:
6444  case ARM::VST3qWB_fixed_Asm_16:
6445  case ARM::VST3qWB_fixed_Asm_32: {
6446    MCInst TmpInst;
6447    unsigned Spacing;
6448    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6449    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6450    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6451    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6452    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6453    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6454    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6455                                            Spacing));
6456    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6457                                            Spacing * 2));
6458    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6459    TmpInst.addOperand(Inst.getOperand(4));
6460    Inst = TmpInst;
6461    return true;
6462  }
6463
6464  case ARM::VST3dWB_register_Asm_8:
6465  case ARM::VST3dWB_register_Asm_16:
6466  case ARM::VST3dWB_register_Asm_32:
6467  case ARM::VST3qWB_register_Asm_8:
6468  case ARM::VST3qWB_register_Asm_16:
6469  case ARM::VST3qWB_register_Asm_32: {
6470    MCInst TmpInst;
6471    unsigned Spacing;
6472    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6473    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6474    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6475    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6476    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6477    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6478    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6479                                            Spacing));
6480    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6481                                            Spacing * 2));
6482    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6483    TmpInst.addOperand(Inst.getOperand(5));
6484    Inst = TmpInst;
6485    return true;
6486  }
6487
6488  // VST4 multiple 4-element structure instructions.
6489  case ARM::VST4dAsm_8:
6490  case ARM::VST4dAsm_16:
6491  case ARM::VST4dAsm_32:
6492  case ARM::VST4qAsm_8:
6493  case ARM::VST4qAsm_16:
6494  case ARM::VST4qAsm_32: {
6495    MCInst TmpInst;
6496    unsigned Spacing;
6497    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6498    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6499    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6500    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6501    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6502                                            Spacing));
6503    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6504                                            Spacing * 2));
6505    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6506                                            Spacing * 3));
6507    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6508    TmpInst.addOperand(Inst.getOperand(4));
6509    Inst = TmpInst;
6510    return true;
6511  }
6512
6513  case ARM::VST4dWB_fixed_Asm_8:
6514  case ARM::VST4dWB_fixed_Asm_16:
6515  case ARM::VST4dWB_fixed_Asm_32:
6516  case ARM::VST4qWB_fixed_Asm_8:
6517  case ARM::VST4qWB_fixed_Asm_16:
6518  case ARM::VST4qWB_fixed_Asm_32: {
6519    MCInst TmpInst;
6520    unsigned Spacing;
6521    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6522    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6523    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6524    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6525    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6526    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6527    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6528                                            Spacing));
6529    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6530                                            Spacing * 2));
6531    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6532                                            Spacing * 3));
6533    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6534    TmpInst.addOperand(Inst.getOperand(4));
6535    Inst = TmpInst;
6536    return true;
6537  }
6538
6539  case ARM::VST4dWB_register_Asm_8:
6540  case ARM::VST4dWB_register_Asm_16:
6541  case ARM::VST4dWB_register_Asm_32:
6542  case ARM::VST4qWB_register_Asm_8:
6543  case ARM::VST4qWB_register_Asm_16:
6544  case ARM::VST4qWB_register_Asm_32: {
6545    MCInst TmpInst;
6546    unsigned Spacing;
6547    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6548    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6549    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6550    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6551    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6552    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6553    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6554                                            Spacing));
6555    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6556                                            Spacing * 2));
6557    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6558                                            Spacing * 3));
6559    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6560    TmpInst.addOperand(Inst.getOperand(5));
6561    Inst = TmpInst;
6562    return true;
6563  }
6564
6565  // Handle the Thumb2 mode MOV complex aliases.
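  // Shift-by-register and shift-by-immediate written as MOV expand to the
  // dedicated shift instructions.  The narrow 16-bit encodings are only used
  // when the register operands are low registers and the requested
  // flag-setting behaviour matches what being inside or outside an IT block
  // implies for them; otherwise the wide t2 forms are selected.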
6566  case ARM::t2MOVsr:
6567  case ARM::t2MOVSsr: {
6568    // Which instruction to expand to depends on the CCOut operand and,
6569    // when the register operands are all low registers, on whether we're
6570    // inside an IT block.
6571    bool isNarrow = false;
6572    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6573        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6574        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6575        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6576        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6577      isNarrow = true;
6578    MCInst TmpInst;
6579    unsigned newOpc;
6580    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6581    default: llvm_unreachable("unexpected opcode!");
6582    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6583    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6584    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6585    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6586    }
6587    TmpInst.setOpcode(newOpc);
6588    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6589    if (isNarrow)
6590      TmpInst.addOperand(MCOperand::CreateReg(
6591          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6592    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6593    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6594    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6595    TmpInst.addOperand(Inst.getOperand(5));
6596    if (!isNarrow)
6597      TmpInst.addOperand(MCOperand::CreateReg(
6598          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6599    Inst = TmpInst;
6600    return true;
6601  }
6602  case ARM::t2MOVsi:
6603  case ARM::t2MOVSsi: {
6604    // Which instruction to expand to depends on the CCOut operand and,
6605    // when the register operands are all low registers, on whether we're
6606    // inside an IT block.
6607    bool isNarrow = false;
6608    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6609        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6610        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6611      isNarrow = true;
6612    MCInst TmpInst;
6613    unsigned newOpc;
6614    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6615    default: llvm_unreachable("unexpected opcode!");
6616    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6617    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6618    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6619    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6620    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6621    }
6622    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6623    if (Amount == 32) Amount = 0; // A shift of 32 is represented as 0.
6624    TmpInst.setOpcode(newOpc);
6625    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6626    if (isNarrow)
6627      TmpInst.addOperand(MCOperand::CreateReg(
6628          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6629    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6630    if (newOpc != ARM::t2RRX)
6631      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6632    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6633    TmpInst.addOperand(Inst.getOperand(4));
6634    if (!isNarrow)
6635      TmpInst.addOperand(MCOperand::CreateReg(
6636          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6637    Inst = TmpInst;
6638    return true;
6639  }
6640  // Handle the ARM mode MOV complex aliases.
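  // ASR/LSR/LSL/ROR/RRX written as stand-alone mnemonics are really MOVs with
  // a shifted-register or shifted-immediate operand, so rebuild them as MOVsr
  // or MOVsi with the shift type and amount packed into a shifter-operand
  // immediate.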
6641  case ARM::ASRr:
6642  case ARM::LSRr:
6643  case ARM::LSLr:
6644  case ARM::RORr: {
6645    ARM_AM::ShiftOpc ShiftTy;
6646    switch(Inst.getOpcode()) {
6647    default: llvm_unreachable("unexpected opcode!");
6648    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6649    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6650    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6651    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6652    }
6653    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6654    MCInst TmpInst;
6655    TmpInst.setOpcode(ARM::MOVsr);
6656    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6657    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6658    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6659    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6660    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6661    TmpInst.addOperand(Inst.getOperand(4));
6662    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6663    Inst = TmpInst;
6664    return true;
6665  }
6666  case ARM::ASRi:
6667  case ARM::LSRi:
6668  case ARM::LSLi:
6669  case ARM::RORi: {
6670    ARM_AM::ShiftOpc ShiftTy;
6671    switch(Inst.getOpcode()) {
6672    default: llvm_unreachable("unexpected opcode!");
6673    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6674    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6675    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6676    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6677    }
6678    // A shift by zero is a plain MOVr, not a MOVsi.
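        // e.g., "lsl r0, r1, #0" is really just "mov r0, r1".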
6679    unsigned Amt = Inst.getOperand(2).getImm();
6680    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6681    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6682    MCInst TmpInst;
6683    TmpInst.setOpcode(Opc);
6684    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6685    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6686    if (Opc == ARM::MOVsi)
6687      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6688    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6689    TmpInst.addOperand(Inst.getOperand(4));
6690    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6691    Inst = TmpInst;
6692    return true;
6693  }
6694  case ARM::RRXi: {
6695    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6696    MCInst TmpInst;
6697    TmpInst.setOpcode(ARM::MOVsi);
6698    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6699    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6700    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6701    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6702    TmpInst.addOperand(Inst.getOperand(3));
6703    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6704    Inst = TmpInst;
6705    return true;
6706  }
6707  case ARM::t2LDMIA_UPD: {
6708    // If this is a load of a single register, then we should use
6709    // a post-indexed LDR instruction instead, per the ARM ARM.
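        // e.g., "ldmia r0!, {r1}" becomes "ldr r1, [r0], #4".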
6710    if (Inst.getNumOperands() != 5)
6711      return false;
6712    MCInst TmpInst;
6713    TmpInst.setOpcode(ARM::t2LDR_POST);
6714    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6715    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6716    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6717    TmpInst.addOperand(MCOperand::CreateImm(4));
6718    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6719    TmpInst.addOperand(Inst.getOperand(3));
6720    Inst = TmpInst;
6721    return true;
6722  }
6723  case ARM::t2STMDB_UPD: {
6724    // If this is a store of a single register, then we should use
6725    // a pre-indexed STR instruction instead, per the ARM ARM.
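        // e.g., "stmdb r0!, {r1}" becomes "str r1, [r0, #-4]!".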
6726    if (Inst.getNumOperands() != 5)
6727      return false;
6728    MCInst TmpInst;
6729    TmpInst.setOpcode(ARM::t2STR_PRE);
6730    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6731    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6732    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6733    TmpInst.addOperand(MCOperand::CreateImm(-4));
6734    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6735    TmpInst.addOperand(Inst.getOperand(3));
6736    Inst = TmpInst;
6737    return true;
6738  }
6739  case ARM::LDMIA_UPD:
6740    // If this is a load of a single register via a 'pop', then we should use
6741    // a post-indexed LDR instruction instead, per the ARM ARM.
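        // e.g., "pop {r0}" becomes "ldr r0, [sp], #4".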
6742    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6743        Inst.getNumOperands() == 5) {
6744      MCInst TmpInst;
6745      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6746      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6747      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6748      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6749      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6750      TmpInst.addOperand(MCOperand::CreateImm(4));
6751      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6752      TmpInst.addOperand(Inst.getOperand(3));
6753      Inst = TmpInst;
6754      return true;
6755    }
6756    break;
6757  case ARM::STMDB_UPD:
6758    // If this is a store of a single register via a 'push', then we should use
6759    // a pre-indexed STR instruction instead, per the ARM ARM.
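        // e.g., "push {r0}" becomes "str r0, [sp, #-4]!".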
6760    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6761        Inst.getNumOperands() == 5) {
6762      MCInst TmpInst;
6763      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6764      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6765      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6766      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6767      TmpInst.addOperand(MCOperand::CreateImm(-4));
6768      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6769      TmpInst.addOperand(Inst.getOperand(3));
6770      Inst = TmpInst;
6771    }
6772    break;
6773  case ARM::t2ADDri12:
6774    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6775    // mnemonic was used (not "addw"), encoding T3 is preferred.
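        // e.g., a plain "add r0, r1, #42" prefers the T3 encoding, while
        // "addw r0, r1, #42" keeps the 12-bit T4 encoding.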
6776    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6777        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6778      break;
6779    Inst.setOpcode(ARM::t2ADDri);
6780    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6781    break;
6782  case ARM::t2SUBri12:
6783    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6784    // mnemonic was used (not "subw"), encoding T3 is preferred.
6785    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6786        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6787      break;
6788    Inst.setOpcode(ARM::t2SUBri);
6789    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6790    break;
6791  case ARM::tADDi8:
6792    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6793    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6794    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6795    // to encoding T1 if <Rd> is omitted."
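        // e.g., "adds r1, r1, #3" (Rd written explicitly) selects tADDi3,
        // while "adds r1, #3" keeps tADDi8.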
6796    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6797      Inst.setOpcode(ARM::tADDi3);
6798      return true;
6799    }
6800    break;
6801  case ARM::tSUBi8:
6802    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6803    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6804    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6805    // to encoding T1 if <Rd> is omitted."
6806    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6807      Inst.setOpcode(ARM::tSUBi3);
6808      return true;
6809    }
6810    break;
6811  case ARM::t2ADDrr: {
6812    // If the destination and first source operand are the same, and
6813    // there's no setting of the flags, use encoding T2 instead of T3.
6814    // Note that this is only for ADD, not SUB. This mirrors the system
6815    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
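        // e.g., "add r4, r4, r8" can drop to the 16-bit tADDhirr encoding.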
6816    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6817        Inst.getOperand(5).getReg() != 0 ||
6818        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6819         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6820      break;
6821    MCInst TmpInst;
6822    TmpInst.setOpcode(ARM::tADDhirr);
6823    TmpInst.addOperand(Inst.getOperand(0));
6824    TmpInst.addOperand(Inst.getOperand(0));
6825    TmpInst.addOperand(Inst.getOperand(2));
6826    TmpInst.addOperand(Inst.getOperand(3));
6827    TmpInst.addOperand(Inst.getOperand(4));
6828    Inst = TmpInst;
6829    return true;
6830  }
6831  case ARM::tB:
6832    // A Thumb conditional branch outside of an IT block is a tBcc.
6833    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6834      Inst.setOpcode(ARM::tBcc);
6835      return true;
6836    }
6837    break;
6838  case ARM::t2B:
6839    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6840    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6841      Inst.setOpcode(ARM::t2Bcc);
6842      return true;
6843    }
6844    break;
6845  case ARM::t2Bcc:
6846    // If the conditional is AL or we're in an IT block, we really want t2B.
6847    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6848      Inst.setOpcode(ARM::t2B);
6849      return true;
6850    }
6851    break;
6852  case ARM::tBcc:
6853    // If the conditional is AL, we really want tB.
6854    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6855      Inst.setOpcode(ARM::tB);
6856      return true;
6857    }
6858    break;
6859  case ARM::tLDMIA: {
6860    // If the register list contains any high registers, or if the writeback
6861    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6862    // instead if we're in Thumb2. Otherwise, this should have generated
6863    // an error in validateInstruction().
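        // e.g., "ldmia r1!, {r2, r8}" needs t2LDMIA_UPD (r8 is a high
        // register), and "ldmia r1, {r2, r3}" (no writeback, base not in
        // the list) needs t2LDMIA.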
6864    unsigned Rn = Inst.getOperand(0).getReg();
6865    bool hasWritebackToken =
6866      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6867       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6868    bool listContainsBase;
6869    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6870        (!listContainsBase && !hasWritebackToken) ||
6871        (listContainsBase && hasWritebackToken)) {
6872      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6873      assert (isThumbTwo());
6874      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6875      // If we're switching to the updating version, we need to insert
6876      // the writeback tied operand.
6877      if (hasWritebackToken)
6878        Inst.insert(Inst.begin(),
6879                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6880      return true;
6881    }
6882    break;
6883  }
6884  case ARM::tSTMIA_UPD: {
6885    // If the register list contains any high registers, we need to use
6886    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6887    // should have generated an error in validateInstruction().
6888    unsigned Rn = Inst.getOperand(0).getReg();
6889    bool listContainsBase;
6890    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6891      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6892      assert (isThumbTwo());
6893      Inst.setOpcode(ARM::t2STMIA_UPD);
6894      return true;
6895    }
6896    break;
6897  }
6898  case ARM::tPOP: {
6899    bool listContainsBase;
6900    // If the register list contains any high registers, we need to use
6901    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6902    // should have generated an error in validateInstruction().
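        // e.g., "pop {r4, r8}" must become "ldmia sp!, {r4, r8}"
        // (t2LDMIA_UPD).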
6903    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6904      return false;
6905    assert (isThumbTwo());
6906    Inst.setOpcode(ARM::t2LDMIA_UPD);
6907    // Add the base register and writeback operands.
6908    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6909    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6910    return true;
6911  }
6912  case ARM::tPUSH: {
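        // As with tPOP above: a register list containing high registers
        // (other than LR) needs the 32-bit t2STMDB_UPD encoding,
        // e.g. "push {r4, r8}".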
6913    bool listContainsBase;
6914    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6915      return false;
6916    assert (isThumbTwo());
6917    Inst.setOpcode(ARM::t2STMDB_UPD);
6918    // Add the base register and writeback operands.
6919    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6920    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6921    return true;
6922  }
6923  case ARM::t2MOVi: {
6924    // If we can use the 16-bit encoding and the user didn't explicitly
6925    // request the 32-bit variant, transform it here.
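        // e.g., "movs r0, #1" outside an IT block (or a predicated
        // "mov r0, #1" inside one) can use tMOVi8.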
6926    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6927        Inst.getOperand(1).getImm() <= 255 &&
6928        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6929         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6930        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6931        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6932         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6933      // The operands aren't in the same order for tMOVi8...
6934      MCInst TmpInst;
6935      TmpInst.setOpcode(ARM::tMOVi8);
6936      TmpInst.addOperand(Inst.getOperand(0));
6937      TmpInst.addOperand(Inst.getOperand(4));
6938      TmpInst.addOperand(Inst.getOperand(1));
6939      TmpInst.addOperand(Inst.getOperand(2));
6940      TmpInst.addOperand(Inst.getOperand(3));
6941      Inst = TmpInst;
6942      return true;
6943    }
6944    break;
6945  }
6946  case ARM::t2MOVr: {
6947    // If we can use the 16-bit encoding and the user didn't explicitly
6948    // request the 32-bit variant, transform it here.
6949    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6950        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6951        Inst.getOperand(2).getImm() == ARMCC::AL &&
6952        Inst.getOperand(4).getReg() == ARM::CPSR &&
6953        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6954         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6955      // The operands aren't the same for tMOV[S]r... (no cc_out)
6956      MCInst TmpInst;
6957      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6958      TmpInst.addOperand(Inst.getOperand(0));
6959      TmpInst.addOperand(Inst.getOperand(1));
6960      TmpInst.addOperand(Inst.getOperand(2));
6961      TmpInst.addOperand(Inst.getOperand(3));
6962      Inst = TmpInst;
6963      return true;
6964    }
6965    break;
6966  }
6967  case ARM::t2SXTH:
6968  case ARM::t2SXTB:
6969  case ARM::t2UXTH:
6970  case ARM::t2UXTB: {
6971    // If we can use the 16-bit encoding and the user didn't explicitly
6972    // request the 32-bit variant, transform it here.
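        // e.g., "sxtb r0, r1" with no rotation can use the 16-bit tSXTB.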
6973    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6974        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6975        Inst.getOperand(2).getImm() == 0 &&
6976        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6977         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6978      unsigned NewOpc;
6979      switch (Inst.getOpcode()) {
6980      default: llvm_unreachable("Illegal opcode!");
6981      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6982      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6983      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6984      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6985      }
6986      // The operands aren't the same for thumb1 (no rotate operand).
6987      MCInst TmpInst;
6988      TmpInst.setOpcode(NewOpc);
6989      TmpInst.addOperand(Inst.getOperand(0));
6990      TmpInst.addOperand(Inst.getOperand(1));
6991      TmpInst.addOperand(Inst.getOperand(3));
6992      TmpInst.addOperand(Inst.getOperand(4));
6993      Inst = TmpInst;
6994      return true;
6995    }
6996    break;
6997  }
6998  case ARM::MOVsi: {
6999    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7000    if (SOpc == ARM_AM::rrx) return false;
7001    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7002      // Shifting by zero is accepted as a vanilla 'MOVr'
7003      MCInst TmpInst;
7004      TmpInst.setOpcode(ARM::MOVr);
7005      TmpInst.addOperand(Inst.getOperand(0));
7006      TmpInst.addOperand(Inst.getOperand(1));
7007      TmpInst.addOperand(Inst.getOperand(3));
7008      TmpInst.addOperand(Inst.getOperand(4));
7009      TmpInst.addOperand(Inst.getOperand(5));
7010      Inst = TmpInst;
7011      return true;
7012    }
7013    return false;
7014  }
7015  case ARM::ANDrsi:
7016  case ARM::ORRrsi:
7017  case ARM::EORrsi:
7018  case ARM::BICrsi:
7019  case ARM::SUBrsi:
7020  case ARM::ADDrsi: {
7021    unsigned newOpc;
7022    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7023    if (SOpc == ARM_AM::rrx) return false;
7024    switch (Inst.getOpcode()) {
7025    default: llvm_unreachable("unexpected opcode!");
7026    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7027    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7028    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7029    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7030    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7031    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7032    }
7033    // If the shift is by zero, use the non-shifted instruction definition.
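        // e.g., "and r0, r1, r2, lsl #0" becomes plain "and r0, r1, r2".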
7034    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7035      MCInst TmpInst;
7036      TmpInst.setOpcode(newOpc);
7037      TmpInst.addOperand(Inst.getOperand(0));
7038      TmpInst.addOperand(Inst.getOperand(1));
7039      TmpInst.addOperand(Inst.getOperand(2));
7040      TmpInst.addOperand(Inst.getOperand(4));
7041      TmpInst.addOperand(Inst.getOperand(5));
7042      TmpInst.addOperand(Inst.getOperand(6));
7043      Inst = TmpInst;
7044      return true;
7045    }
7046    return false;
7047  }
7048  case ARM::ITasm:
7049  case ARM::t2IT: {
7050    // For all but the first condition, the mask bits are defined so that a
7051    // bit equal to the low bit of the condition code means 't'. The parser
7052    // always uses 1 to mean 't', so XOR-toggle the bits if the low bit of
7053    // the condition code is zero. The encoding also expects the low bit of
7054    // the condition to be encoded as bit 4 of the mask operand, so mask
7055    // that in if needed.
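        // e.g., for "ite eq" the parser produces mask 0b0100 (using the
        // 1-means-'t' convention above); EQ has a low bit of 0, so bit 3 is
        // toggled here, giving the architectural mask 0b1100.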
7056    MCOperand &MO = Inst.getOperand(1);
7057    unsigned Mask = MO.getImm();
7058    unsigned OrigMask = Mask;
7059    unsigned TZ = CountTrailingZeros_32(Mask);
7060    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7061      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7062      for (unsigned i = 3; i != TZ; --i)
7063        Mask ^= 1 << i;
7064    } else
7065      Mask |= 0x10;
7066    MO.setImm(Mask);
7067
7068    // Set up the IT block state according to the IT instruction we just
7069    // matched.
7070    assert(!inITBlock() && "nested IT blocks?!");
7071    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7072    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7073    ITState.CurPosition = 0;
7074    ITState.FirstCond = true;
7075    break;
7076  }
7077  }
7078  return false;
7079}
7080
7081unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7082  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7083  // suffix depending on whether they're in an IT block or not.
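      // e.g., outside an IT block the 16-bit "add r1, r2, r3" must be
      // written "adds", while inside an IT block only the non-flag-setting
      // form is allowed.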
7084  unsigned Opc = Inst.getOpcode();
7085  const MCInstrDesc &MCID = getInstDesc(Opc);
7086  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7087    assert(MCID.hasOptionalDef() &&
7088           "optionally flag setting instruction missing optional def operand");
7089    assert(MCID.NumOperands == Inst.getNumOperands() &&
7090           "operand count mismatch!");
7091    // Find the optional-def operand (cc_out).
7092    unsigned OpNo;
7093    for (OpNo = 0;
7094         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7095         ++OpNo)
7096      ;
7097    // If we're parsing Thumb1, reject it completely.
7098    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7099      return Match_MnemonicFail;
7100    // If we're parsing Thumb2, which form is legal depends on whether we're
7101    // in an IT block.
7102    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7103        !inITBlock())
7104      return Match_RequiresITBlock;
7105    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7106        inITBlock())
7107      return Match_RequiresNotITBlock;
7108  }
7109  // Some high-register supporting Thumb1 encodings only allow both registers
7110  // to be from r0-r7 when in Thumb2.
7111  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7112           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7113           isARMLowRegister(Inst.getOperand(2).getReg()))
7114    return Match_RequiresThumb2;
7115  // Others only require ARMv6 or later.
7116  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7117           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7118           isARMLowRegister(Inst.getOperand(1).getReg()))
7119    return Match_RequiresV6;
7120  return Match_Success;
7121}
7122
7123bool ARMAsmParser::
7124MatchAndEmitInstruction(SMLoc IDLoc,
7125                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7126                        MCStreamer &Out) {
7127  MCInst Inst;
7128  unsigned ErrorInfo;
7129  unsigned MatchResult;
7130  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7131  switch (MatchResult) {
7132  default: break;
7133  case Match_Success:
7134    // Context sensitive operand constraints aren't handled by the matcher,
7135    // so check them here.
7136    if (validateInstruction(Inst, Operands)) {
7137      // Still progress the IT block, otherwise one wrong condition causes
7138      // nasty cascading errors.
7139      forwardITPosition();
7140      return true;
7141    }
7142
7143    // Some instructions need post-processing to, for example, tweak which
7144    // encoding is selected. Loop on it while changes happen so the
7145    // individual transformations can chain off each other. E.g.,
7146    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7147    while (processInstruction(Inst, Operands))
7148      ;
7149
7150    // Only move forward at the very end so that everything in validate
7151    // and process gets a consistent answer about whether we're in an IT
7152    // block.
7153    forwardITPosition();
7154
7155    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7156    // doesn't actually encode.
7157    if (Inst.getOpcode() == ARM::ITasm)
7158      return false;
7159
7160    Inst.setLoc(IDLoc);
7161    Out.EmitInstruction(Inst);
7162    return false;
7163  case Match_MissingFeature:
7164    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7165    return true;
7166  case Match_InvalidOperand: {
7167    SMLoc ErrorLoc = IDLoc;
7168    if (ErrorInfo != ~0U) {
7169      if (ErrorInfo >= Operands.size())
7170        return Error(IDLoc, "too few operands for instruction");
7171
7172      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7173      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7174    }
7175
7176    return Error(ErrorLoc, "invalid operand for instruction");
7177  }
7178  case Match_MnemonicFail:
7179    return Error(IDLoc, "invalid instruction");
7180  case Match_ConversionFail:
7181    // The converter function will have already emitted a diagnostic.
7182    return true;
7183  case Match_RequiresNotITBlock:
7184    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7185  case Match_RequiresITBlock:
7186    return Error(IDLoc, "instruction only valid inside IT block");
7187  case Match_RequiresV6:
7188    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7189  case Match_RequiresThumb2:
7190    return Error(IDLoc, "instruction variant requires Thumb2");
7191  }
7192
7193  llvm_unreachable("Implement any new match types added!");
7194}
7195
7196/// parseDirective parses the ARM-specific directives
7197bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7198  StringRef IDVal = DirectiveID.getIdentifier();
7199  if (IDVal == ".word")
7200    return parseDirectiveWord(4, DirectiveID.getLoc());
7201  else if (IDVal == ".thumb")
7202    return parseDirectiveThumb(DirectiveID.getLoc());
7203  else if (IDVal == ".arm")
7204    return parseDirectiveARM(DirectiveID.getLoc());
7205  else if (IDVal == ".thumb_func")
7206    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7207  else if (IDVal == ".code")
7208    return parseDirectiveCode(DirectiveID.getLoc());
7209  else if (IDVal == ".syntax")
7210    return parseDirectiveSyntax(DirectiveID.getLoc());
7211  else if (IDVal == ".unreq")
7212    return parseDirectiveUnreq(DirectiveID.getLoc());
7213  else if (IDVal == ".arch")
7214    return parseDirectiveArch(DirectiveID.getLoc());
7215  else if (IDVal == ".eabi_attribute")
7216    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7217  return true;
7218}
7219
7220/// parseDirectiveWord
7221///  ::= .word [ expression (, expression)* ]
7222bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7223  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7224    for (;;) {
7225      const MCExpr *Value;
7226      if (getParser().ParseExpression(Value))
7227        return true;
7228
7229      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7230
7231      if (getLexer().is(AsmToken::EndOfStatement))
7232        break;
7233
7234      // FIXME: Improve diagnostic.
7235      if (getLexer().isNot(AsmToken::Comma))
7236        return Error(L, "unexpected token in directive");
7237      Parser.Lex();
7238    }
7239  }
7240
7241  Parser.Lex();
7242  return false;
7243}
7244
7245/// parseDirectiveThumb
7246///  ::= .thumb
7247bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7248  if (getLexer().isNot(AsmToken::EndOfStatement))
7249    return Error(L, "unexpected token in directive");
7250  Parser.Lex();
7251
7252  if (!isThumb())
7253    SwitchMode();
7254  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7255  return false;
7256}
7257
7258/// parseDirectiveARM
7259///  ::= .arm
7260bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7261  if (getLexer().isNot(AsmToken::EndOfStatement))
7262    return Error(L, "unexpected token in directive");
7263  Parser.Lex();
7264
7265  if (isThumb())
7266    SwitchMode();
7267  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7268  return false;
7269}
7270
7271/// parseDirectiveThumbFunc
7272///  ::= .thumb_func symbol_name
7273bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7274  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7275  bool isMachO = MAI.hasSubsectionsViaSymbols();
7276  StringRef Name;
7277  bool needFuncName = true;
7278
7279  // Darwin asm optionally has a function name after the .thumb_func
7280  // directive; ELF doesn't.
7281  if (isMachO) {
7282    const AsmToken &Tok = Parser.getTok();
7283    if (Tok.isNot(AsmToken::EndOfStatement)) {
7284      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7285        return Error(L, "unexpected token in .thumb_func directive");
7286      Name = Tok.getIdentifier();
7287      Parser.Lex(); // Consume the identifier token.
7288      needFuncName = false;
7289    }
7290  }
7291
7292  if (getLexer().isNot(AsmToken::EndOfStatement))
7293    return Error(L, "unexpected token in directive");
7294
7295  // Eat the end of statement and any blank lines that follow.
7296  while (getLexer().is(AsmToken::EndOfStatement))
7297    Parser.Lex();
7298
7299  // FIXME: assuming the function name will be on the line following
7300  // .thumb_func. We really should be checking the next symbol definition
7301  // even if there's stuff in between.
7302  if (needFuncName) {
7303    Name = Parser.getTok().getIdentifier();
7304  }
7305
7306  // Mark symbol as a thumb symbol.
7307  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7308  getParser().getStreamer().EmitThumbFunc(Func);
7309  return false;
7310}
7311
7312/// parseDirectiveSyntax
7313///  ::= .syntax unified | divided
7314bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7315  const AsmToken &Tok = Parser.getTok();
7316  if (Tok.isNot(AsmToken::Identifier))
7317    return Error(L, "unexpected token in .syntax directive");
7318  StringRef Mode = Tok.getString();
7319  if (Mode == "unified" || Mode == "UNIFIED")
7320    Parser.Lex();
7321  else if (Mode == "divided" || Mode == "DIVIDED")
7322    return Error(L, "'.syntax divided' arm asssembly not supported");
7323  else
7324    return Error(L, "unrecognized syntax mode in .syntax directive");
7325
7326  if (getLexer().isNot(AsmToken::EndOfStatement))
7327    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7328  Parser.Lex();
7329
7330  // TODO tell the MC streamer the mode
7331  // getParser().getStreamer().Emit???();
7332  return false;
7333}
7334
7335/// parseDirectiveCode
7336///  ::= .code 16 | 32
7337bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7338  const AsmToken &Tok = Parser.getTok();
7339  if (Tok.isNot(AsmToken::Integer))
7340    return Error(L, "unexpected token in .code directive");
7341  int64_t Val = Parser.getTok().getIntVal();
7342  if (Val == 16)
7343    Parser.Lex();
7344  else if (Val == 32)
7345    Parser.Lex();
7346  else
7347    return Error(L, "invalid operand to .code directive");
7348
7349  if (getLexer().isNot(AsmToken::EndOfStatement))
7350    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7351  Parser.Lex();
7352
7353  if (Val == 16) {
7354    if (!isThumb())
7355      SwitchMode();
7356    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7357  } else {
7358    if (isThumb())
7359      SwitchMode();
7360    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7361  }
7362
7363  return false;
7364}
7365
7366/// parseDirectiveReq
7367///  ::= name .req registername
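    /// e.g., "fp .req r11" lets "fp" be used wherever a register is expected.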
7368bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7369  Parser.Lex(); // Eat the '.req' token.
7370  unsigned Reg;
7371  SMLoc SRegLoc, ERegLoc;
7372  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7373    Parser.EatToEndOfStatement();
7374    return Error(SRegLoc, "register name expected");
7375  }
7376
7377  // Shouldn't be anything else.
7378  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7379    Parser.EatToEndOfStatement();
7380    return Error(Parser.getTok().getLoc(),
7381                 "unexpected input in .req directive.");
7382  }
7383
7384  Parser.Lex(); // Consume the EndOfStatement
7385
7386  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7387    return Error(SRegLoc, "redefinition of '" + Name +
7388                          "' does not match original.");
7389
7390  return false;
7391}
7392
7393/// parseDirectiveUnreq
7394///  ::= .unreq registername
7395bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7396  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7397    Parser.EatToEndOfStatement();
7398    return Error(L, "unexpected input in .unreq directive.");
7399  }
7400  RegisterReqs.erase(Parser.getTok().getIdentifier());
7401  Parser.Lex(); // Eat the identifier.
7402  return false;
7403}
7404
7405/// parseDirectiveArch
7406///  ::= .arch token
7407bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
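      // Not currently implemented.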
7408  return true;
7409}
7410
7411/// parseDirectiveEabiAttr
7412///  ::= .eabi_attribute int, int
7413bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
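      // Not currently implemented.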
7414  return true;
7415}
7416
7417extern "C" void LLVMInitializeARMAsmLexer();
7418
7419/// Force static initialization.
7420extern "C" void LLVMInitializeARMAsmParser() {
7421  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7422  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7423  LLVMInitializeARMAsmLexer();
7424}
7425
7426#define GET_REGISTER_MATCHER
7427#define GET_MATCHER_IMPLEMENTATION
7428#include "ARMGenAsmMatcher.inc"
7429