ARMAsmParser.cpp revision ee5e24cb3e987c74d4dce146b4f78e83fb2b56a8
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "llvm/MC/MCTargetAsmParser.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMBaseInfo.h"
13#include "MCTargetDesc/ARMMCExpr.h"
14#include "llvm/ADT/BitVector.h"
15#include "llvm/ADT/OwningPtr.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/StringSwitch.h"
19#include "llvm/ADT/Twine.h"
20#include "llvm/MC/MCAsmInfo.h"
21#include "llvm/MC/MCAssembler.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCELFStreamer.h"
24#include "llvm/MC/MCExpr.h"
25#include "llvm/MC/MCInst.h"
26#include "llvm/MC/MCInstrDesc.h"
27#include "llvm/MC/MCParser/MCAsmLexer.h"
28#include "llvm/MC/MCParser/MCAsmParser.h"
29#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
30#include "llvm/MC/MCRegisterInfo.h"
31#include "llvm/MC/MCStreamer.h"
32#include "llvm/MC/MCSubtargetInfo.h"
33#include "llvm/Support/ELF.h"
34#include "llvm/Support/MathExtras.h"
35#include "llvm/Support/SourceMgr.h"
36#include "llvm/Support/TargetRegistry.h"
37#include "llvm/Support/raw_ostream.h"
38
39using namespace llvm;
40
41namespace {
42
43class ARMOperand;
44
45enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
46
47class ARMAsmParser : public MCTargetAsmParser {
48  MCSubtargetInfo &STI;
49  MCAsmParser &Parser;
50  const MCRegisterInfo *MRI;
51
52  // Unwind directives state
53  SMLoc FnStartLoc;
54  SMLoc CantUnwindLoc;
55  SMLoc PersonalityLoc;
56  SMLoc HandlerDataLoc;
57  int FPReg;
58  void resetUnwindDirectiveParserState() {
59    FnStartLoc = SMLoc();
60    CantUnwindLoc = SMLoc();
61    PersonalityLoc = SMLoc();
62    HandlerDataLoc = SMLoc();
63    FPReg = -1;
64  }
65
66  // Map of register aliases created via the .req directive.
67  StringMap<unsigned> RegisterReqs;
68
69  struct {
70    ARMCC::CondCodes Cond;    // Condition for IT block.
71    unsigned Mask:4;          // Condition mask for instructions.
72                              // Starting at first 1 (from lsb).
73                              //   '1'  condition as indicated in IT.
74                              //   '0'  inverse of condition (else).
75                              // Count of instructions in IT block is
76                              // 4 - trailingzeroes(mask)
77
78    bool FirstCond;           // Explicit flag for when we're parsing the
79                              // First instruction in the IT block. It's
80                              // implied in the mask, so needs special
81                              // handling.
82
83    unsigned CurPosition;     // Current position in parsing of IT
84                              // block. In range [0,3]. Initialized
85                              // according to count of instructions in block.
86                              // ~0U if no active IT block.
87  } ITState;
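  // Illustrative reading of the Mask encoding described above (example values,
  // not from the original source): Mask = 0b1000 has 3 trailing zeros, so the
  // IT block holds 4 - 3 = 1 instruction; Mask = 0b0001 has 0 trailing zeros,
  // so it holds 4 - 0 = 4 instructions.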
88  bool inITBlock() { return ITState.CurPosition != ~0U; }
89  void forwardITPosition() {
90    if (!inITBlock()) return;
91    // Move to the next instruction in the IT block, if there is one. If not,
92    // mark the block as done.
93    unsigned TZ = countTrailingZeros(ITState.Mask);
94    if (++ITState.CurPosition == 5 - TZ)
95      ITState.CurPosition = ~0U; // Done with the IT block after this.
96  }
97
98
99  MCAsmParser &getParser() const { return Parser; }
100  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
101
102  bool Warning(SMLoc L, const Twine &Msg,
103               ArrayRef<SMRange> Ranges = None) {
104    return Parser.Warning(L, Msg, Ranges);
105  }
106  bool Error(SMLoc L, const Twine &Msg,
107             ArrayRef<SMRange> Ranges = None) {
108    return Parser.Error(L, Msg, Ranges);
109  }
110
111  int tryParseRegister();
112  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
113  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
114  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
115  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
116  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
117  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
118  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
119                              unsigned &ShiftAmount);
120  bool parseDirectiveWord(unsigned Size, SMLoc L);
121  bool parseDirectiveThumb(SMLoc L);
122  bool parseDirectiveARM(SMLoc L);
123  bool parseDirectiveThumbFunc(SMLoc L);
124  bool parseDirectiveCode(SMLoc L);
125  bool parseDirectiveSyntax(SMLoc L);
126  bool parseDirectiveReq(StringRef Name, SMLoc L);
127  bool parseDirectiveUnreq(SMLoc L);
128  bool parseDirectiveArch(SMLoc L);
129  bool parseDirectiveEabiAttr(SMLoc L);
130  bool parseDirectiveFnStart(SMLoc L);
131  bool parseDirectiveFnEnd(SMLoc L);
132  bool parseDirectiveCantUnwind(SMLoc L);
133  bool parseDirectivePersonality(SMLoc L);
134  bool parseDirectiveHandlerData(SMLoc L);
135  bool parseDirectiveSetFP(SMLoc L);
136  bool parseDirectivePad(SMLoc L);
137  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
138
139  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
140                          bool &CarrySetting, unsigned &ProcessorIMod,
141                          StringRef &ITMask);
142  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
143                             bool &CanAcceptPredicationCode);
144
145  bool isThumb() const {
146    // FIXME: Can tablegen auto-generate this?
147    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
148  }
149  bool isThumbOne() const {
150    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
151  }
152  bool isThumbTwo() const {
153    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
154  }
155  bool hasV6Ops() const {
156    return STI.getFeatureBits() & ARM::HasV6Ops;
157  }
158  bool hasV7Ops() const {
159    return STI.getFeatureBits() & ARM::HasV7Ops;
160  }
161  void SwitchMode() {
162    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
163    setAvailableFeatures(FB);
164  }
165  bool isMClass() const {
166    return STI.getFeatureBits() & ARM::FeatureMClass;
167  }
168
169  /// @name Auto-generated Match Functions
170  /// {
171
172#define GET_ASSEMBLER_HEADER
173#include "ARMGenAsmMatcher.inc"
174
175  /// }
176
177  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
178  OperandMatchResultTy parseCoprocNumOperand(
179    SmallVectorImpl<MCParsedAsmOperand*>&);
180  OperandMatchResultTy parseCoprocRegOperand(
181    SmallVectorImpl<MCParsedAsmOperand*>&);
182  OperandMatchResultTy parseCoprocOptionOperand(
183    SmallVectorImpl<MCParsedAsmOperand*>&);
184  OperandMatchResultTy parseMemBarrierOptOperand(
185    SmallVectorImpl<MCParsedAsmOperand*>&);
186  OperandMatchResultTy parseProcIFlagsOperand(
187    SmallVectorImpl<MCParsedAsmOperand*>&);
188  OperandMatchResultTy parseMSRMaskOperand(
189    SmallVectorImpl<MCParsedAsmOperand*>&);
190  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
191                                   StringRef Op, int Low, int High);
192  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
193    return parsePKHImm(O, "lsl", 0, 31);
194  }
195  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
196    return parsePKHImm(O, "asr", 1, 32);
197  }
198  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
199  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
200  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
201  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
202  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
203  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
204  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
205  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
206  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
207                                       SMLoc &EndLoc);
208
209  // Asm Match Converter Methods
210  void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
211  void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
212  void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
213                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
214  void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
215                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
216  void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
217                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
218  void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
219                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
220  void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
221                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
222  void cvtStWriteBackRegAddrMode2(MCInst &Inst,
223                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
224  void cvtStWriteBackRegAddrMode3(MCInst &Inst,
225                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
226  void cvtLdExtTWriteBackImm(MCInst &Inst,
227                             const SmallVectorImpl<MCParsedAsmOperand*> &);
228  void cvtLdExtTWriteBackReg(MCInst &Inst,
229                             const SmallVectorImpl<MCParsedAsmOperand*> &);
230  void cvtStExtTWriteBackImm(MCInst &Inst,
231                             const SmallVectorImpl<MCParsedAsmOperand*> &);
232  void cvtStExtTWriteBackReg(MCInst &Inst,
233                             const SmallVectorImpl<MCParsedAsmOperand*> &);
234  void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
235  void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
236  void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
237                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
238  void cvtThumbMultiply(MCInst &Inst,
239                        const SmallVectorImpl<MCParsedAsmOperand*> &);
240  void cvtVLDwbFixed(MCInst &Inst,
241                     const SmallVectorImpl<MCParsedAsmOperand*> &);
242  void cvtVLDwbRegister(MCInst &Inst,
243                        const SmallVectorImpl<MCParsedAsmOperand*> &);
244  void cvtVSTwbFixed(MCInst &Inst,
245                     const SmallVectorImpl<MCParsedAsmOperand*> &);
246  void cvtVSTwbRegister(MCInst &Inst,
247                        const SmallVectorImpl<MCParsedAsmOperand*> &);
248  bool validateInstruction(MCInst &Inst,
249                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
250  bool processInstruction(MCInst &Inst,
251                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
252  bool shouldOmitCCOutOperand(StringRef Mnemonic,
253                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
254
255public:
256  enum ARMMatchResultTy {
257    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
258    Match_RequiresNotITBlock,
259    Match_RequiresV6,
260    Match_RequiresThumb2,
261#define GET_OPERAND_DIAGNOSTIC_TYPES
262#include "ARMGenAsmMatcher.inc"
263
264  };
265
266  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
267    : MCTargetAsmParser(), STI(_STI), Parser(_Parser), FPReg(-1) {
268    MCAsmParserExtension::Initialize(_Parser);
269
270    // Cache the MCRegisterInfo.
271    MRI = &getContext().getRegisterInfo();
272
273    // Initialize the set of available features.
274    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
275
276    // Not in an ITBlock to start with.
277    ITState.CurPosition = ~0U;
278
279    // Set ELF header flags.
280    // FIXME: This should eventually end up somewhere else where more
281    // intelligent flag decisions can be made. For now we are just maintaining
282    // the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
283    if (MCELFStreamer *MES = dyn_cast<MCELFStreamer>(&Parser.getStreamer()))
284      MES->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5);
285  }
286
287  // Implementation of the MCTargetAsmParser interface:
288  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
289  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
290                        SMLoc NameLoc,
291                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
292  bool ParseDirective(AsmToken DirectiveID);
293
294  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
295  unsigned checkTargetMatchPredicate(MCInst &Inst);
296
297  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
298                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
299                               MCStreamer &Out, unsigned &ErrorInfo,
300                               bool MatchingInlineAsm);
301};
302} // end anonymous namespace
303
304namespace {
305
306/// ARMOperand - Instances of this class represent a parsed ARM machine
307/// operand.
308class ARMOperand : public MCParsedAsmOperand {
309  enum KindTy {
310    k_CondCode,
311    k_CCOut,
312    k_ITCondMask,
313    k_CoprocNum,
314    k_CoprocReg,
315    k_CoprocOption,
316    k_Immediate,
317    k_MemBarrierOpt,
318    k_Memory,
319    k_PostIndexRegister,
320    k_MSRMask,
321    k_ProcIFlags,
322    k_VectorIndex,
323    k_Register,
324    k_RegisterList,
325    k_DPRRegisterList,
326    k_SPRRegisterList,
327    k_VectorList,
328    k_VectorListAllLanes,
329    k_VectorListIndexed,
330    k_ShiftedRegister,
331    k_ShiftedImmediate,
332    k_ShifterImmediate,
333    k_RotateImmediate,
334    k_BitfieldDescriptor,
335    k_Token
336  } Kind;
337
338  SMLoc StartLoc, EndLoc;
339  SmallVector<unsigned, 8> Registers;
340
341  struct CCOp {
342    ARMCC::CondCodes Val;
343  };
344
345  struct CopOp {
346    unsigned Val;
347  };
348
349  struct CoprocOptionOp {
350    unsigned Val;
351  };
352
353  struct ITMaskOp {
354    unsigned Mask:4;
355  };
356
357  struct MBOptOp {
358    ARM_MB::MemBOpt Val;
359  };
360
361  struct IFlagsOp {
362    ARM_PROC::IFlags Val;
363  };
364
365  struct MMaskOp {
366    unsigned Val;
367  };
368
369  struct TokOp {
370    const char *Data;
371    unsigned Length;
372  };
373
374  struct RegOp {
375    unsigned RegNum;
376  };
377
378  // A vector register list is a sequential list of 1 to 4 registers.
379  struct VectorListOp {
380    unsigned RegNum;
381    unsigned Count;
382    unsigned LaneIndex;
383    bool isDoubleSpaced;
384  };
385
386  struct VectorIndexOp {
387    unsigned Val;
388  };
389
390  struct ImmOp {
391    const MCExpr *Val;
392  };
393
394  /// Combined record for all forms of ARM address expressions.
395  struct MemoryOp {
396    unsigned BaseRegNum;
397    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
398    // was specified.
399    const MCConstantExpr *OffsetImm;  // Offset immediate value
400    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
401    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
402    unsigned ShiftImm;        // shift for OffsetReg.
403    unsigned Alignment;       // 0 = no alignment specified
404    // n = alignment in bytes (2, 4, 8, 16, or 32)
405    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
406  };
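  // Illustrative mapping (an assumed example, not from the original source):
  // an operand written as "[r1, -r2, lsl #2]" would be stored with
  // BaseRegNum = r1, OffsetRegNum = r2, isNegative = 1, ShiftType = lsl,
  // ShiftImm = 2, OffsetImm = 0 and Alignment = 0.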
407
408  struct PostIdxRegOp {
409    unsigned RegNum;
410    bool isAdd;
411    ARM_AM::ShiftOpc ShiftTy;
412    unsigned ShiftImm;
413  };
414
415  struct ShifterImmOp {
416    bool isASR;
417    unsigned Imm;
418  };
419
420  struct RegShiftedRegOp {
421    ARM_AM::ShiftOpc ShiftTy;
422    unsigned SrcReg;
423    unsigned ShiftReg;
424    unsigned ShiftImm;
425  };
426
427  struct RegShiftedImmOp {
428    ARM_AM::ShiftOpc ShiftTy;
429    unsigned SrcReg;
430    unsigned ShiftImm;
431  };
432
433  struct RotImmOp {
434    unsigned Imm;
435  };
436
437  struct BitfieldOp {
438    unsigned LSB;
439    unsigned Width;
440  };
441
442  union {
443    struct CCOp CC;
444    struct CopOp Cop;
445    struct CoprocOptionOp CoprocOption;
446    struct MBOptOp MBOpt;
447    struct ITMaskOp ITMask;
448    struct IFlagsOp IFlags;
449    struct MMaskOp MMask;
450    struct TokOp Tok;
451    struct RegOp Reg;
452    struct VectorListOp VectorList;
453    struct VectorIndexOp VectorIndex;
454    struct ImmOp Imm;
455    struct MemoryOp Memory;
456    struct PostIdxRegOp PostIdxReg;
457    struct ShifterImmOp ShifterImm;
458    struct RegShiftedRegOp RegShiftedReg;
459    struct RegShiftedImmOp RegShiftedImm;
460    struct RotImmOp RotImm;
461    struct BitfieldOp Bitfield;
462  };
463
464  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
465public:
466  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
467    Kind = o.Kind;
468    StartLoc = o.StartLoc;
469    EndLoc = o.EndLoc;
470    switch (Kind) {
471    case k_CondCode:
472      CC = o.CC;
473      break;
474    case k_ITCondMask:
475      ITMask = o.ITMask;
476      break;
477    case k_Token:
478      Tok = o.Tok;
479      break;
480    case k_CCOut:
481    case k_Register:
482      Reg = o.Reg;
483      break;
484    case k_RegisterList:
485    case k_DPRRegisterList:
486    case k_SPRRegisterList:
487      Registers = o.Registers;
488      break;
489    case k_VectorList:
490    case k_VectorListAllLanes:
491    case k_VectorListIndexed:
492      VectorList = o.VectorList;
493      break;
494    case k_CoprocNum:
495    case k_CoprocReg:
496      Cop = o.Cop;
497      break;
498    case k_CoprocOption:
499      CoprocOption = o.CoprocOption;
500      break;
501    case k_Immediate:
502      Imm = o.Imm;
503      break;
504    case k_MemBarrierOpt:
505      MBOpt = o.MBOpt;
506      break;
507    case k_Memory:
508      Memory = o.Memory;
509      break;
510    case k_PostIndexRegister:
511      PostIdxReg = o.PostIdxReg;
512      break;
513    case k_MSRMask:
514      MMask = o.MMask;
515      break;
516    case k_ProcIFlags:
517      IFlags = o.IFlags;
518      break;
519    case k_ShifterImmediate:
520      ShifterImm = o.ShifterImm;
521      break;
522    case k_ShiftedRegister:
523      RegShiftedReg = o.RegShiftedReg;
524      break;
525    case k_ShiftedImmediate:
526      RegShiftedImm = o.RegShiftedImm;
527      break;
528    case k_RotateImmediate:
529      RotImm = o.RotImm;
530      break;
531    case k_BitfieldDescriptor:
532      Bitfield = o.Bitfield;
533      break;
534    case k_VectorIndex:
535      VectorIndex = o.VectorIndex;
536      break;
537    }
538  }
539
540  /// getStartLoc - Get the location of the first token of this operand.
541  SMLoc getStartLoc() const { return StartLoc; }
542  /// getEndLoc - Get the location of the last token of this operand.
543  SMLoc getEndLoc() const { return EndLoc; }
544  /// getLocRange - Get the range between the first and last token of this
545  /// operand.
546  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
547
548  ARMCC::CondCodes getCondCode() const {
549    assert(Kind == k_CondCode && "Invalid access!");
550    return CC.Val;
551  }
552
553  unsigned getCoproc() const {
554    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
555    return Cop.Val;
556  }
557
558  StringRef getToken() const {
559    assert(Kind == k_Token && "Invalid access!");
560    return StringRef(Tok.Data, Tok.Length);
561  }
562
563  unsigned getReg() const {
564    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
565    return Reg.RegNum;
566  }
567
568  const SmallVectorImpl<unsigned> &getRegList() const {
569    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
570            Kind == k_SPRRegisterList) && "Invalid access!");
571    return Registers;
572  }
573
574  const MCExpr *getImm() const {
575    assert(isImm() && "Invalid access!");
576    return Imm.Val;
577  }
578
579  unsigned getVectorIndex() const {
580    assert(Kind == k_VectorIndex && "Invalid access!");
581    return VectorIndex.Val;
582  }
583
584  ARM_MB::MemBOpt getMemBarrierOpt() const {
585    assert(Kind == k_MemBarrierOpt && "Invalid access!");
586    return MBOpt.Val;
587  }
588
589  ARM_PROC::IFlags getProcIFlags() const {
590    assert(Kind == k_ProcIFlags && "Invalid access!");
591    return IFlags.Val;
592  }
593
594  unsigned getMSRMask() const {
595    assert(Kind == k_MSRMask && "Invalid access!");
596    return MMask.Val;
597  }
598
599  bool isCoprocNum() const { return Kind == k_CoprocNum; }
600  bool isCoprocReg() const { return Kind == k_CoprocReg; }
601  bool isCoprocOption() const { return Kind == k_CoprocOption; }
602  bool isCondCode() const { return Kind == k_CondCode; }
603  bool isCCOut() const { return Kind == k_CCOut; }
604  bool isITMask() const { return Kind == k_ITCondMask; }
605  bool isITCondCode() const { return Kind == k_CondCode; }
606  bool isImm() const { return Kind == k_Immediate; }
607  bool isFPImm() const {
608    if (!isImm()) return false;
609    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
610    if (!CE) return false;
611    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
612    return Val != -1;
613  }
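  // For instance, the bit pattern 0x3f800000 (1.0f) is encodable as an 8-bit
  // VFP immediate, so getFP32Imm() is expected to return something other than
  // -1 for it (illustrative example; this code only checks against -1).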
614  bool isFBits16() const {
615    if (!isImm()) return false;
616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
617    if (!CE) return false;
618    int64_t Value = CE->getValue();
619    return Value >= 0 && Value <= 16;
620  }
621  bool isFBits32() const {
622    if (!isImm()) return false;
623    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
624    if (!CE) return false;
625    int64_t Value = CE->getValue();
626    return Value >= 1 && Value <= 32;
627  }
628  bool isImm8s4() const {
629    if (!isImm()) return false;
630    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
631    if (!CE) return false;
632    int64_t Value = CE->getValue();
633    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
634  }
635  bool isImm0_4() const {
636    if (!isImm()) return false;
637    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
638    if (!CE) return false;
639    int64_t Value = CE->getValue();
640    return Value >= 0 && Value < 5;
641  }
642  bool isImm0_1020s4() const {
643    if (!isImm()) return false;
644    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
645    if (!CE) return false;
646    int64_t Value = CE->getValue();
647    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
648  }
649  bool isImm0_508s4() const {
650    if (!isImm()) return false;
651    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
652    if (!CE) return false;
653    int64_t Value = CE->getValue();
654    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
655  }
656  bool isImm0_508s4Neg() const {
657    if (!isImm()) return false;
658    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
659    if (!CE) return false;
660    int64_t Value = -CE->getValue();
661    // Explicitly exclude zero; we want that to use the normal 0_508 version.
662    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
663  }
664  bool isImm0_255() const {
665    if (!isImm()) return false;
666    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
667    if (!CE) return false;
668    int64_t Value = CE->getValue();
669    return Value >= 0 && Value < 256;
670  }
671  bool isImm0_4095() const {
672    if (!isImm()) return false;
673    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
674    if (!CE) return false;
675    int64_t Value = CE->getValue();
676    return Value >= 0 && Value < 4096;
677  }
678  bool isImm0_4095Neg() const {
679    if (!isImm()) return false;
680    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
681    if (!CE) return false;
682    int64_t Value = -CE->getValue();
683    return Value > 0 && Value < 4096;
684  }
685  bool isImm0_1() const {
686    if (!isImm()) return false;
687    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
688    if (!CE) return false;
689    int64_t Value = CE->getValue();
690    return Value >= 0 && Value < 2;
691  }
692  bool isImm0_3() const {
693    if (!isImm()) return false;
694    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
695    if (!CE) return false;
696    int64_t Value = CE->getValue();
697    return Value >= 0 && Value < 4;
698  }
699  bool isImm0_7() const {
700    if (!isImm()) return false;
701    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
702    if (!CE) return false;
703    int64_t Value = CE->getValue();
704    return Value >= 0 && Value < 8;
705  }
706  bool isImm0_15() const {
707    if (!isImm()) return false;
708    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
709    if (!CE) return false;
710    int64_t Value = CE->getValue();
711    return Value >= 0 && Value < 16;
712  }
713  bool isImm0_31() const {
714    if (!isImm()) return false;
715    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
716    if (!CE) return false;
717    int64_t Value = CE->getValue();
718    return Value >= 0 && Value < 32;
719  }
720  bool isImm0_63() const {
721    if (!isImm()) return false;
722    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
723    if (!CE) return false;
724    int64_t Value = CE->getValue();
725    return Value >= 0 && Value < 64;
726  }
727  bool isImm8() const {
728    if (!isImm()) return false;
729    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
730    if (!CE) return false;
731    int64_t Value = CE->getValue();
732    return Value == 8;
733  }
734  bool isImm16() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value == 16;
740  }
741  bool isImm32() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value == 32;
747  }
748  bool isShrImm8() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value > 0 && Value <= 8;
754  }
755  bool isShrImm16() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value > 0 && Value <= 16;
761  }
762  bool isShrImm32() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return Value > 0 && Value <= 32;
768  }
769  bool isShrImm64() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return Value > 0 && Value <= 64;
775  }
776  bool isImm1_7() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return Value > 0 && Value < 8;
782  }
783  bool isImm1_15() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return Value > 0 && Value < 16;
789  }
790  bool isImm1_31() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return Value > 0 && Value < 32;
796  }
797  bool isImm1_16() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return Value > 0 && Value < 17;
803  }
804  bool isImm1_32() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value > 0 && Value < 33;
810  }
811  bool isImm0_32() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = CE->getValue();
816    return Value >= 0 && Value < 33;
817  }
818  bool isImm0_65535() const {
819    if (!isImm()) return false;
820    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
821    if (!CE) return false;
822    int64_t Value = CE->getValue();
823    return Value >= 0 && Value < 65536;
824  }
825  bool isImm0_65535Expr() const {
826    if (!isImm()) return false;
827    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828    // If it's not a constant expression, it'll generate a fixup and be
829    // handled later.
830    if (!CE) return true;
831    int64_t Value = CE->getValue();
832    return Value >= 0 && Value < 65536;
833  }
834  bool isImm24bit() const {
835    if (!isImm()) return false;
836    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
837    if (!CE) return false;
838    int64_t Value = CE->getValue();
839    return Value >= 0 && Value <= 0xffffff;
840  }
841  bool isImmThumbSR() const {
842    if (!isImm()) return false;
843    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
844    if (!CE) return false;
845    int64_t Value = CE->getValue();
846    return Value > 0 && Value < 33;
847  }
848  bool isPKHLSLImm() const {
849    if (!isImm()) return false;
850    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
851    if (!CE) return false;
852    int64_t Value = CE->getValue();
853    return Value >= 0 && Value < 32;
854  }
855  bool isPKHASRImm() const {
856    if (!isImm()) return false;
857    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
858    if (!CE) return false;
859    int64_t Value = CE->getValue();
860    return Value > 0 && Value <= 32;
861  }
862  bool isAdrLabel() const {
863    // If we have an immediate that's not a constant, treat it as a label
864    // reference needing a fixup. If it is a constant that can't fit into
865    // the shift immediate encoding, we reject it.
866    if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
867    else return (isARMSOImm() || isARMSOImmNeg());
868  }
869  bool isARMSOImm() const {
870    if (!isImm()) return false;
871    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
872    if (!CE) return false;
873    int64_t Value = CE->getValue();
874    return ARM_AM::getSOImmVal(Value) != -1;
875  }
876  bool isARMSOImmNot() const {
877    if (!isImm()) return false;
878    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
879    if (!CE) return false;
880    int64_t Value = CE->getValue();
881    return ARM_AM::getSOImmVal(~Value) != -1;
882  }
883  bool isARMSOImmNeg() const {
884    if (!isImm()) return false;
885    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
886    if (!CE) return false;
887    int64_t Value = CE->getValue();
888    // Only use this when not representable as a plain so_imm.
889    return ARM_AM::getSOImmVal(Value) == -1 &&
890      ARM_AM::getSOImmVal(-Value) != -1;
891  }
892  bool isT2SOImm() const {
893    if (!isImm()) return false;
894    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
895    if (!CE) return false;
896    int64_t Value = CE->getValue();
897    return ARM_AM::getT2SOImmVal(Value) != -1;
898  }
899  bool isT2SOImmNot() const {
900    if (!isImm()) return false;
901    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
902    if (!CE) return false;
903    int64_t Value = CE->getValue();
904    return ARM_AM::getT2SOImmVal(~Value) != -1;
905  }
906  bool isT2SOImmNeg() const {
907    if (!isImm()) return false;
908    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
909    if (!CE) return false;
910    int64_t Value = CE->getValue();
911    // Only use this when not representable as a plain so_imm.
912    return ARM_AM::getT2SOImmVal(Value) == -1 &&
913      ARM_AM::getT2SOImmVal(-Value) != -1;
914  }
915  bool isSetEndImm() const {
916    if (!isImm()) return false;
917    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
918    if (!CE) return false;
919    int64_t Value = CE->getValue();
920    return Value == 1 || Value == 0;
921  }
922  bool isReg() const { return Kind == k_Register; }
923  bool isRegList() const { return Kind == k_RegisterList; }
924  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
925  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
926  bool isToken() const { return Kind == k_Token; }
927  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
928  bool isMem() const { return Kind == k_Memory; }
929  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
930  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
931  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
932  bool isRotImm() const { return Kind == k_RotateImmediate; }
933  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
934  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
935  bool isPostIdxReg() const {
936    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
937  }
938  bool isMemNoOffset(bool alignOK = false) const {
939    if (!isMem())
940      return false;
941    // No offset of any kind.
942    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
943     (alignOK || Memory.Alignment == 0);
944  }
945  bool isMemPCRelImm12() const {
946    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
947      return false;
948    // Base register must be PC.
949    if (Memory.BaseRegNum != ARM::PC)
950      return false;
951    // Immediate offset in range [-4095, 4095].
952    if (!Memory.OffsetImm) return true;
953    int64_t Val = Memory.OffsetImm->getValue();
954    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
955  }
956  bool isAlignedMemory() const {
957    return isMemNoOffset(true);
958  }
959  bool isAddrMode2() const {
960    if (!isMem() || Memory.Alignment != 0) return false;
961    // Check for register offset.
962    if (Memory.OffsetRegNum) return true;
963    // Immediate offset in range [-4095, 4095].
964    if (!Memory.OffsetImm) return true;
965    int64_t Val = Memory.OffsetImm->getValue();
966    return Val > -4096 && Val < 4096;
967  }
968  bool isAM2OffsetImm() const {
969    if (!isImm()) return false;
970    // Immediate offset in range [-4095, 4095].
971    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
972    if (!CE) return false;
973    int64_t Val = CE->getValue();
974    return Val > -4096 && Val < 4096;
975  }
976  bool isAddrMode3() const {
977    // If we have an immediate that's not a constant, treat it as a label
978    // reference needing a fixup. If it is a constant, it's something else
979    // and we reject it.
980    if (isImm() && !isa<MCConstantExpr>(getImm()))
981      return true;
982    if (!isMem() || Memory.Alignment != 0) return false;
983    // No shifts are legal for AM3.
984    if (Memory.ShiftType != ARM_AM::no_shift) return false;
985    // Check for register offset.
986    if (Memory.OffsetRegNum) return true;
987    // Immediate offset in range [-255, 255].
988    if (!Memory.OffsetImm) return true;
989    int64_t Val = Memory.OffsetImm->getValue();
990    // The #-0 offset is encoded as INT32_MIN, and we have to check
991    // for this too.
992    return (Val > -256 && Val < 256) || Val == INT32_MIN;
993  }
994  bool isAM3Offset() const {
995    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
996      return false;
997    if (Kind == k_PostIndexRegister)
998      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
999    // Immediate offset in range [-255, 255].
1000    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1001    if (!CE) return false;
1002    int64_t Val = CE->getValue();
1003    // Special case, #-0 is INT32_MIN.
1004    return (Val > -256 && Val < 256) || Val == INT32_MIN;
1005  }
1006  bool isAddrMode5() const {
1007    // If we have an immediate that's not a constant, treat it as a label
1008    // reference needing a fixup. If it is a constant, it's something else
1009    // and we reject it.
1010    if (isImm() && !isa<MCConstantExpr>(getImm()))
1011      return true;
1012    if (!isMem() || Memory.Alignment != 0) return false;
1013    // Check for register offset.
1014    if (Memory.OffsetRegNum) return false;
1015    // Immediate offset in range [-1020, 1020] and a multiple of 4.
1016    if (!Memory.OffsetImm) return true;
1017    int64_t Val = Memory.OffsetImm->getValue();
1018    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1019      Val == INT32_MIN;
1020  }
1021  bool isMemTBB() const {
1022    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1023        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1024      return false;
1025    return true;
1026  }
1027  bool isMemTBH() const {
1028    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1029        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1030        Memory.Alignment != 0 )
1031      return false;
1032    return true;
1033  }
1034  bool isMemRegOffset() const {
1035    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1036      return false;
1037    return true;
1038  }
1039  bool isT2MemRegOffset() const {
1040    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1041        Memory.Alignment != 0)
1042      return false;
1043    // Only lsl #{0, 1, 2, 3} allowed.
1044    if (Memory.ShiftType == ARM_AM::no_shift)
1045      return true;
1046    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1047      return false;
1048    return true;
1049  }
1050  bool isMemThumbRR() const {
1051    // Thumb reg+reg addressing is simple. Just two registers, a base and
1052    // an offset. No shifts, negations or any other complicating factors.
1053    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1054        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1055      return false;
1056    return isARMLowRegister(Memory.BaseRegNum) &&
1057      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1058  }
1059  bool isMemThumbRIs4() const {
1060    if (!isMem() || Memory.OffsetRegNum != 0 ||
1061        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1062      return false;
1063    // Immediate offset, multiple of 4 in range [0, 124].
1064    if (!Memory.OffsetImm) return true;
1065    int64_t Val = Memory.OffsetImm->getValue();
1066    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1067  }
1068  bool isMemThumbRIs2() const {
1069    if (!isMem() || Memory.OffsetRegNum != 0 ||
1070        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1071      return false;
1072    // Immediate offset, multiple of 2 in range [0, 62].
1073    if (!Memory.OffsetImm) return true;
1074    int64_t Val = Memory.OffsetImm->getValue();
1075    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1076  }
1077  bool isMemThumbRIs1() const {
1078    if (!isMem() || Memory.OffsetRegNum != 0 ||
1079        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1080      return false;
1081    // Immediate offset in range [0, 31].
1082    if (!Memory.OffsetImm) return true;
1083    int64_t Val = Memory.OffsetImm->getValue();
1084    return Val >= 0 && Val <= 31;
1085  }
1086  bool isMemThumbSPI() const {
1087    if (!isMem() || Memory.OffsetRegNum != 0 ||
1088        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1089      return false;
1090    // Immediate offset, multiple of 4 in range [0, 1020].
1091    if (!Memory.OffsetImm) return true;
1092    int64_t Val = Memory.OffsetImm->getValue();
1093    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1094  }
1095  bool isMemImm8s4Offset() const {
1096    // If we have an immediate that's not a constant, treat it as a label
1097    // reference needing a fixup. If it is a constant, it's something else
1098    // and we reject it.
1099    if (isImm() && !isa<MCConstantExpr>(getImm()))
1100      return true;
1101    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1102      return false;
1103    // Immediate offset a multiple of 4 in range [-1020, 1020].
1104    if (!Memory.OffsetImm) return true;
1105    int64_t Val = Memory.OffsetImm->getValue();
1106    // Special case, #-0 is INT32_MIN.
1107    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1108  }
1109  bool isMemImm0_1020s4Offset() const {
1110    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1111      return false;
1112    // Immediate offset a multiple of 4 in range [0, 1020].
1113    if (!Memory.OffsetImm) return true;
1114    int64_t Val = Memory.OffsetImm->getValue();
1115    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1116  }
1117  bool isMemImm8Offset() const {
1118    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1119      return false;
1120    // Base reg of PC isn't allowed for these encodings.
1121    if (Memory.BaseRegNum == ARM::PC) return false;
1122    // Immediate offset in range [-255, 255].
1123    if (!Memory.OffsetImm) return true;
1124    int64_t Val = Memory.OffsetImm->getValue();
1125    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1126  }
1127  bool isMemPosImm8Offset() const {
1128    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1129      return false;
1130    // Immediate offset in range [0, 255].
1131    if (!Memory.OffsetImm) return true;
1132    int64_t Val = Memory.OffsetImm->getValue();
1133    return Val >= 0 && Val < 256;
1134  }
1135  bool isMemNegImm8Offset() const {
1136    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1137      return false;
1138    // Base reg of PC isn't allowed for these encodings.
1139    if (Memory.BaseRegNum == ARM::PC) return false;
1140    // Immediate offset in range [-255, -1].
1141    if (!Memory.OffsetImm) return false;
1142    int64_t Val = Memory.OffsetImm->getValue();
1143    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1144  }
1145  bool isMemUImm12Offset() const {
1146    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1147      return false;
1148    // Immediate offset in range [0, 4095].
1149    if (!Memory.OffsetImm) return true;
1150    int64_t Val = Memory.OffsetImm->getValue();
1151    return (Val >= 0 && Val < 4096);
1152  }
1153  bool isMemImm12Offset() const {
1154    // If we have an immediate that's not a constant, treat it as a label
1155    // reference needing a fixup. If it is a constant, it's something else
1156    // and we reject it.
1157    if (isImm() && !isa<MCConstantExpr>(getImm()))
1158      return true;
1159
1160    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1161      return false;
1162    // Immediate offset in range [-4095, 4095].
1163    if (!Memory.OffsetImm) return true;
1164    int64_t Val = Memory.OffsetImm->getValue();
1165    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1166  }
1167  bool isPostIdxImm8() const {
1168    if (!isImm()) return false;
1169    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1170    if (!CE) return false;
1171    int64_t Val = CE->getValue();
1172    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1173  }
1174  bool isPostIdxImm8s4() const {
1175    if (!isImm()) return false;
1176    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1177    if (!CE) return false;
1178    int64_t Val = CE->getValue();
1179    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1180      (Val == INT32_MIN);
1181  }
1182
1183  bool isMSRMask() const { return Kind == k_MSRMask; }
1184  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1185
1186  // NEON operands.
1187  bool isSingleSpacedVectorList() const {
1188    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1189  }
1190  bool isDoubleSpacedVectorList() const {
1191    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1192  }
1193  bool isVecListOneD() const {
1194    if (!isSingleSpacedVectorList()) return false;
1195    return VectorList.Count == 1;
1196  }
1197
1198  bool isVecListDPair() const {
1199    if (!isSingleSpacedVectorList()) return false;
1200    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1201              .contains(VectorList.RegNum));
1202  }
1203
1204  bool isVecListThreeD() const {
1205    if (!isSingleSpacedVectorList()) return false;
1206    return VectorList.Count == 3;
1207  }
1208
1209  bool isVecListFourD() const {
1210    if (!isSingleSpacedVectorList()) return false;
1211    return VectorList.Count == 4;
1212  }
1213
1214  bool isVecListDPairSpaced() const {
1215    if (isSingleSpacedVectorList()) return false;
1216    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1217              .contains(VectorList.RegNum));
1218  }
1219
1220  bool isVecListThreeQ() const {
1221    if (!isDoubleSpacedVectorList()) return false;
1222    return VectorList.Count == 3;
1223  }
1224
1225  bool isVecListFourQ() const {
1226    if (!isDoubleSpacedVectorList()) return false;
1227    return VectorList.Count == 4;
1228  }
1229
1230  bool isSingleSpacedVectorAllLanes() const {
1231    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1232  }
1233  bool isDoubleSpacedVectorAllLanes() const {
1234    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1235  }
1236  bool isVecListOneDAllLanes() const {
1237    if (!isSingleSpacedVectorAllLanes()) return false;
1238    return VectorList.Count == 1;
1239  }
1240
1241  bool isVecListDPairAllLanes() const {
1242    if (!isSingleSpacedVectorAllLanes()) return false;
1243    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1244              .contains(VectorList.RegNum));
1245  }
1246
1247  bool isVecListDPairSpacedAllLanes() const {
1248    if (!isDoubleSpacedVectorAllLanes()) return false;
1249    return VectorList.Count == 2;
1250  }
1251
1252  bool isVecListThreeDAllLanes() const {
1253    if (!isSingleSpacedVectorAllLanes()) return false;
1254    return VectorList.Count == 3;
1255  }
1256
1257  bool isVecListThreeQAllLanes() const {
1258    if (!isDoubleSpacedVectorAllLanes()) return false;
1259    return VectorList.Count == 3;
1260  }
1261
1262  bool isVecListFourDAllLanes() const {
1263    if (!isSingleSpacedVectorAllLanes()) return false;
1264    return VectorList.Count == 4;
1265  }
1266
1267  bool isVecListFourQAllLanes() const {
1268    if (!isDoubleSpacedVectorAllLanes()) return false;
1269    return VectorList.Count == 4;
1270  }
1271
1272  bool isSingleSpacedVectorIndexed() const {
1273    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1274  }
1275  bool isDoubleSpacedVectorIndexed() const {
1276    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1277  }
1278  bool isVecListOneDByteIndexed() const {
1279    if (!isSingleSpacedVectorIndexed()) return false;
1280    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1281  }
1282
1283  bool isVecListOneDHWordIndexed() const {
1284    if (!isSingleSpacedVectorIndexed()) return false;
1285    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1286  }
1287
1288  bool isVecListOneDWordIndexed() const {
1289    if (!isSingleSpacedVectorIndexed()) return false;
1290    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1291  }
1292
1293  bool isVecListTwoDByteIndexed() const {
1294    if (!isSingleSpacedVectorIndexed()) return false;
1295    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1296  }
1297
1298  bool isVecListTwoDHWordIndexed() const {
1299    if (!isSingleSpacedVectorIndexed()) return false;
1300    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1301  }
1302
1303  bool isVecListTwoQWordIndexed() const {
1304    if (!isDoubleSpacedVectorIndexed()) return false;
1305    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1306  }
1307
1308  bool isVecListTwoQHWordIndexed() const {
1309    if (!isDoubleSpacedVectorIndexed()) return false;
1310    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1311  }
1312
1313  bool isVecListTwoDWordIndexed() const {
1314    if (!isSingleSpacedVectorIndexed()) return false;
1315    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1316  }
1317
1318  bool isVecListThreeDByteIndexed() const {
1319    if (!isSingleSpacedVectorIndexed()) return false;
1320    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1321  }
1322
1323  bool isVecListThreeDHWordIndexed() const {
1324    if (!isSingleSpacedVectorIndexed()) return false;
1325    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1326  }
1327
1328  bool isVecListThreeQWordIndexed() const {
1329    if (!isDoubleSpacedVectorIndexed()) return false;
1330    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1331  }
1332
1333  bool isVecListThreeQHWordIndexed() const {
1334    if (!isDoubleSpacedVectorIndexed()) return false;
1335    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1336  }
1337
1338  bool isVecListThreeDWordIndexed() const {
1339    if (!isSingleSpacedVectorIndexed()) return false;
1340    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1341  }
1342
1343  bool isVecListFourDByteIndexed() const {
1344    if (!isSingleSpacedVectorIndexed()) return false;
1345    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1346  }
1347
1348  bool isVecListFourDHWordIndexed() const {
1349    if (!isSingleSpacedVectorIndexed()) return false;
1350    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1351  }
1352
1353  bool isVecListFourQWordIndexed() const {
1354    if (!isDoubleSpacedVectorIndexed()) return false;
1355    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1356  }
1357
1358  bool isVecListFourQHWordIndexed() const {
1359    if (!isDoubleSpacedVectorIndexed()) return false;
1360    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1361  }
1362
1363  bool isVecListFourDWordIndexed() const {
1364    if (!isSingleSpacedVectorIndexed()) return false;
1365    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1366  }
1367
1368  bool isVectorIndex8() const {
1369    if (Kind != k_VectorIndex) return false;
1370    return VectorIndex.Val < 8;
1371  }
1372  bool isVectorIndex16() const {
1373    if (Kind != k_VectorIndex) return false;
1374    return VectorIndex.Val < 4;
1375  }
1376  bool isVectorIndex32() const {
1377    if (Kind != k_VectorIndex) return false;
1378    return VectorIndex.Val < 2;
1379  }
1380
1381  bool isNEONi8splat() const {
1382    if (!isImm()) return false;
1383    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1384    // Must be a constant.
1385    if (!CE) return false;
1386    int64_t Value = CE->getValue();
1387    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1388    // value.
1389    return Value >= 0 && Value < 256;
1390  }
1391
1392  bool isNEONi16splat() const {
1393    if (!isImm()) return false;
1394    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1395    // Must be a constant.
1396    if (!CE) return false;
1397    int64_t Value = CE->getValue();
1398    // i16 value in the range [0,255] or [0x0100, 0xff00]
1399    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1400  }
1401
1402  bool isNEONi32splat() const {
1403    if (!isImm()) return false;
1404    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1405    // Must be a constant.
1406    if (!CE) return false;
1407    int64_t Value = CE->getValue();
1408    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1409    return (Value >= 0 && Value < 256) ||
1410      (Value >= 0x0100 && Value <= 0xff00) ||
1411      (Value >= 0x010000 && Value <= 0xff0000) ||
1412      (Value >= 0x01000000 && Value <= 0xff000000);
1413  }
1414
1415  bool isNEONi32vmov() const {
1416    if (!isImm()) return false;
1417    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1418    // Must be a constant.
1419    if (!CE) return false;
1420    int64_t Value = CE->getValue();
1421    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1422    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1423    return (Value >= 0 && Value < 256) ||
1424      (Value >= 0x0100 && Value <= 0xff00) ||
1425      (Value >= 0x010000 && Value <= 0xff0000) ||
1426      (Value >= 0x01000000 && Value <= 0xff000000) ||
1427      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1428      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1429  }
1430  bool isNEONi32vmovNeg() const {
1431    if (!isImm()) return false;
1432    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1433    // Must be a constant.
1434    if (!CE) return false;
1435    int64_t Value = ~CE->getValue();
1436    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1437    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1438    return (Value >= 0 && Value < 256) ||
1439      (Value >= 0x0100 && Value <= 0xff00) ||
1440      (Value >= 0x010000 && Value <= 0xff0000) ||
1441      (Value >= 0x01000000 && Value <= 0xff000000) ||
1442      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1443      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1444  }
1445
1446  bool isNEONi64splat() const {
1447    if (!isImm()) return false;
1448    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1449    // Must be a constant.
1450    if (!CE) return false;
1451    uint64_t Value = CE->getValue();
1452    // i64 value with each byte being either 0 or 0xff.
1453    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1454      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1455    return true;
1456  }
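  // E.g. 0x00ff00ff00ff00ff satisfies this check, while 0x00ff00ff00ff00fe
  // does not (its low byte is neither 0 nor 0xff).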
1457
1458  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1459    // Add as immediates when possible.  Null MCExpr = 0.
1460    if (Expr == 0)
1461      Inst.addOperand(MCOperand::CreateImm(0));
1462    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1463      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1464    else
1465      Inst.addOperand(MCOperand::CreateExpr(Expr));
1466  }
1467
1468  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1469    assert(N == 2 && "Invalid number of operands!");
1470    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1471    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1472    Inst.addOperand(MCOperand::CreateReg(RegNum));
1473  }
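  // For example, a predicated mnemonic such as "addeq" yields Imm(ARMCC::EQ)
  // followed by Reg(ARM::CPSR), while an unpredicated "add" (AL) yields
  // Imm(ARMCC::AL) followed by Reg(0), per the code above.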
1474
1475  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1476    assert(N == 1 && "Invalid number of operands!");
1477    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1478  }
1479
1480  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1481    assert(N == 1 && "Invalid number of operands!");
1482    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1483  }
1484
1485  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1486    assert(N == 1 && "Invalid number of operands!");
1487    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1488  }
1489
1490  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1491    assert(N == 1 && "Invalid number of operands!");
1492    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1493  }
1494
1495  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1496    assert(N == 1 && "Invalid number of operands!");
1497    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1498  }
1499
1500  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1501    assert(N == 1 && "Invalid number of operands!");
1502    Inst.addOperand(MCOperand::CreateReg(getReg()));
1503  }
1504
1505  void addRegOperands(MCInst &Inst, unsigned N) const {
1506    assert(N == 1 && "Invalid number of operands!");
1507    Inst.addOperand(MCOperand::CreateReg(getReg()));
1508  }
1509
1510  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1511    assert(N == 3 && "Invalid number of operands!");
1512    assert(isRegShiftedReg() &&
1513           "addRegShiftedRegOperands() on non RegShiftedReg!");
1514    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1515    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1516    Inst.addOperand(MCOperand::CreateImm(
1517      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1518  }
1519
1520  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1521    assert(N == 2 && "Invalid number of operands!");
1522    assert(isRegShiftedImm() &&
1523           "addRegShiftedImmOperands() on non RegShiftedImm!");
1524    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1525    // Shift of #32 is encoded as 0 where permitted
1526    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1527    Inst.addOperand(MCOperand::CreateImm(
1528      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1529  }
1530
1531  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1532    assert(N == 1 && "Invalid number of operands!");
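    // Bit 5 selects ASR over LSL; the shift amount sits in the bits below it.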
1533    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1534                                         ShifterImm.Imm));
1535  }
1536
1537  void addRegListOperands(MCInst &Inst, unsigned N) const {
1538    assert(N == 1 && "Invalid number of operands!");
1539    const SmallVectorImpl<unsigned> &RegList = getRegList();
1540    for (SmallVectorImpl<unsigned>::const_iterator
1541           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1542      Inst.addOperand(MCOperand::CreateReg(*I));
1543  }
1544
1545  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1546    addRegListOperands(Inst, N);
1547  }
1548
1549  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1550    addRegListOperands(Inst, N);
1551  }
1552
1553  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1554    assert(N == 1 && "Invalid number of operands!");
1555    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1556    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1557  }
1558
1559  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1560    assert(N == 1 && "Invalid number of operands!");
1561    // Munge the lsb/width into a bitfield mask.
1562    unsigned lsb = Bitfield.LSB;
1563    unsigned width = Bitfield.Width;
1564    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1565    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1566                      (32 - (lsb + width)));
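    // Worked example: lsb=8, width=4 gives ~0x00000f00, i.e. Mask=0xfffff0ff.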
1567    Inst.addOperand(MCOperand::CreateImm(Mask));
1568  }
1569
1570  void addImmOperands(MCInst &Inst, unsigned N) const {
1571    assert(N == 1 && "Invalid number of operands!");
1572    addExpr(Inst, getImm());
1573  }
1574
1575  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1576    assert(N == 1 && "Invalid number of operands!");
1577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1578    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1579  }
1580
1581  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1582    assert(N == 1 && "Invalid number of operands!");
1583    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1584    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1585  }
1586
1587  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1588    assert(N == 1 && "Invalid number of operands!");
1589    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1590    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1591    Inst.addOperand(MCOperand::CreateImm(Val));
1592  }
1593
1594  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1595    assert(N == 1 && "Invalid number of operands!");
1596    // FIXME: We really want to scale the value here, but the LDRD/STRD
1597    // instructions don't encode operands that way yet.
1598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1599    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1600  }
1601
1602  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1603    assert(N == 1 && "Invalid number of operands!");
1604    // The immediate is scaled by four in the encoding and is stored
1605    // in the MCInst as such. Lop off the low two bits here.
1606    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1607    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1608  }
1609
1610  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1611    assert(N == 1 && "Invalid number of operands!");
1612    // The immediate is scaled by four in the encoding and is stored
1613    // in the MCInst as such. Lop off the low two bits here.
1614    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1615    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1616  }
1617
1618  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1619    assert(N == 1 && "Invalid number of operands!");
1620    // The immediate is scaled by four in the encoding and is stored
1621    // in the MCInst as such. Lop off the low two bits here.
1622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1623    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1624  }
1625
1626  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1627    assert(N == 1 && "Invalid number of operands!");
1628    // The constant encodes as the immediate-1, and we store in the instruction
1629    // the bits as encoded, so subtract off one here.
1630    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1631    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1632  }
1633
1634  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1635    assert(N == 1 && "Invalid number of operands!");
1636    // The constant encodes as the immediate-1, and we store in the instruction
1637    // the bits as encoded, so subtract off one here.
1638    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1639    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1640  }
1641
1642  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1643    assert(N == 1 && "Invalid number of operands!");
1644    // The constant encodes as the immediate, except for 32, which encodes as
1645    // zero.
1646    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1647    unsigned Imm = CE->getValue();
1648    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1649  }
1650
1651  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1652    assert(N == 1 && "Invalid number of operands!");
1653    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1654    // the instruction as well.
1655    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1656    int Val = CE->getValue();
1657    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1658  }
1659
1660  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1661    assert(N == 1 && "Invalid number of operands!");
1662    // The operand is actually a t2_so_imm, but we have its bitwise
1663    // negation in the assembly source, so twiddle it here.
1664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1665    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1666  }
1667
1668  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1669    assert(N == 1 && "Invalid number of operands!");
1670    // The operand is actually a t2_so_imm, but we have its
1671    // negation in the assembly source, so twiddle it here.
1672    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1673    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1674  }
1675
1676  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1677    assert(N == 1 && "Invalid number of operands!");
1678    // The operand is actually an imm0_4095, but we have its
1679    // negation in the assembly source, so twiddle it here.
1680    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1681    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1682  }
1683
1684  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1685    assert(N == 1 && "Invalid number of operands!");
1686    // The operand is actually a so_imm, but we have its bitwise
1687    // negation in the assembly source, so twiddle it here.
1688    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1689    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1690  }
1691
1692  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1693    assert(N == 1 && "Invalid number of operands!");
1694    // The operand is actually a so_imm, but we have its
1695    // negation in the assembly source, so twiddle it here.
1696    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1697    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1698  }
1699
1700  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1701    assert(N == 1 && "Invalid number of operands!");
1702    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1703  }
1704
1705  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1706    assert(N == 1 && "Invalid number of operands!");
1707    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1708  }
1709
1710  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1711    assert(N == 1 && "Invalid number of operands!");
1712    int32_t Imm = Memory.OffsetImm->getValue();
1713    // FIXME: Handle #-0
1714    if (Imm == INT32_MIN) Imm = 0;
1715    Inst.addOperand(MCOperand::CreateImm(Imm));
1716  }
1717
1718  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1719    assert(N == 1 && "Invalid number of operands!");
1720    assert(isImm() && "Not an immediate!");
1721
1722    // If we have an immediate that's not a constant, treat it as a label
1723    // reference needing a fixup.
1724    if (!isa<MCConstantExpr>(getImm())) {
1725      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1726      return;
1727    }
1728
1729    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1730    int Val = CE->getValue();
1731    Inst.addOperand(MCOperand::CreateImm(Val));
1732  }
1733
1734  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1735    assert(N == 2 && "Invalid number of operands!");
1736    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1737    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1738  }
1739
1740  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1741    assert(N == 3 && "Invalid number of operands!");
1742    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1743    if (!Memory.OffsetRegNum) {
1744      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1745      // Special case for #-0
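      // (#-0 arrives as INT32_MIN; AddSub above already captured the sign.)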
1746      if (Val == INT32_MIN) Val = 0;
1747      if (Val < 0) Val = -Val;
1748      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1749    } else {
1750      // For register offset, we encode the shift type and negation flag
1751      // here.
1752      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1753                              Memory.ShiftImm, Memory.ShiftType);
1754    }
1755    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1756    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1757    Inst.addOperand(MCOperand::CreateImm(Val));
1758  }
1759
1760  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1761    assert(N == 2 && "Invalid number of operands!");
1762    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1763    assert(CE && "non-constant AM2OffsetImm operand!");
1764    int32_t Val = CE->getValue();
1765    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1766    // Special case for #-0
1767    if (Val == INT32_MIN) Val = 0;
1768    if (Val < 0) Val = -Val;
1769    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1770    Inst.addOperand(MCOperand::CreateReg(0));
1771    Inst.addOperand(MCOperand::CreateImm(Val));
1772  }
1773
1774  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1775    assert(N == 3 && "Invalid number of operands!");
1776    // If we have an immediate that's not a constant, treat it as a label
1777    // reference needing a fixup. If it is a constant, it's something else
1778    // and we reject it.
1779    if (isImm()) {
1780      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1781      Inst.addOperand(MCOperand::CreateReg(0));
1782      Inst.addOperand(MCOperand::CreateImm(0));
1783      return;
1784    }
1785
1786    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1787    if (!Memory.OffsetRegNum) {
1788      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1789      // Special case for #-0
1790      if (Val == INT32_MIN) Val = 0;
1791      if (Val < 0) Val = -Val;
1792      Val = ARM_AM::getAM3Opc(AddSub, Val);
1793    } else {
1794      // For register offset, we encode the shift type and negation flag
1795      // here.
1796      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1797    }
1798    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1799    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1800    Inst.addOperand(MCOperand::CreateImm(Val));
1801  }
1802
1803  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1804    assert(N == 2 && "Invalid number of operands!");
1805    if (Kind == k_PostIndexRegister) {
1806      int32_t Val =
1807        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1808      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1809      Inst.addOperand(MCOperand::CreateImm(Val));
1810      return;
1811    }
1812
1813    // Constant offset.
1814    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1815    int32_t Val = CE->getValue();
1816    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1817    // Special case for #-0
1818    if (Val == INT32_MIN) Val = 0;
1819    if (Val < 0) Val = -Val;
1820    Val = ARM_AM::getAM3Opc(AddSub, Val);
1821    Inst.addOperand(MCOperand::CreateReg(0));
1822    Inst.addOperand(MCOperand::CreateImm(Val));
1823  }
1824
1825  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1826    assert(N == 2 && "Invalid number of operands!");
1827    // If we have an immediate that's not a constant, treat it as a label
1828    // reference needing a fixup. If it is a constant, it's something else
1829    // and we reject it.
1830    if (isImm()) {
1831      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1832      Inst.addOperand(MCOperand::CreateImm(0));
1833      return;
1834    }
1835
1836    // The lower two bits are always zero and as such are not encoded.
1837    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1838    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1839    // Special case for #-0
1840    if (Val == INT32_MIN) Val = 0;
1841    if (Val < 0) Val = -Val;
1842    Val = ARM_AM::getAM5Opc(AddSub, Val);
1843    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1844    Inst.addOperand(MCOperand::CreateImm(Val));
1845  }
1846
1847  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1848    assert(N == 2 && "Invalid number of operands!");
1849    // If we have an immediate that's not a constant, treat it as a label
1850    // reference needing a fixup. If it is a constant, it's something else
1851    // and we reject it.
1852    if (isImm()) {
1853      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1854      Inst.addOperand(MCOperand::CreateImm(0));
1855      return;
1856    }
1857
1858    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1859    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1860    Inst.addOperand(MCOperand::CreateImm(Val));
1861  }
1862
1863  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1864    assert(N == 2 && "Invalid number of operands!");
1865    // The lower two bits are always zero and as such are not encoded.
1866    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1867    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1868    Inst.addOperand(MCOperand::CreateImm(Val));
1869  }
1870
1871  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1872    assert(N == 2 && "Invalid number of operands!");
1873    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1874    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1875    Inst.addOperand(MCOperand::CreateImm(Val));
1876  }
1877
1878  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1879    addMemImm8OffsetOperands(Inst, N);
1880  }
1881
1882  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1883    addMemImm8OffsetOperands(Inst, N);
1884  }
1885
1886  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1887    assert(N == 2 && "Invalid number of operands!");
1888    // If this is an immediate, it's a label reference.
1889    if (isImm()) {
1890      addExpr(Inst, getImm());
1891      Inst.addOperand(MCOperand::CreateImm(0));
1892      return;
1893    }
1894
1895    // Otherwise, it's a normal memory reg+offset.
1896    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1897    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1898    Inst.addOperand(MCOperand::CreateImm(Val));
1899  }
1900
1901  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1902    assert(N == 2 && "Invalid number of operands!");
1903    // If this is an immediate, it's a label reference.
1904    if (isImm()) {
1905      addExpr(Inst, getImm());
1906      Inst.addOperand(MCOperand::CreateImm(0));
1907      return;
1908    }
1909
1910    // Otherwise, it's a normal memory reg+offset.
1911    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1912    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1913    Inst.addOperand(MCOperand::CreateImm(Val));
1914  }
1915
1916  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1917    assert(N == 2 && "Invalid number of operands!");
1918    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1919    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1920  }
1921
1922  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1923    assert(N == 2 && "Invalid number of operands!");
1924    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1925    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1926  }
1927
1928  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1929    assert(N == 3 && "Invalid number of operands!");
1930    unsigned Val =
1931      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1932                        Memory.ShiftImm, Memory.ShiftType);
1933    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1934    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1935    Inst.addOperand(MCOperand::CreateImm(Val));
1936  }
1937
1938  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1939    assert(N == 3 && "Invalid number of operands!");
1940    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1941    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1942    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1943  }
1944
1945  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1946    assert(N == 2 && "Invalid number of operands!");
1947    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1948    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1949  }
1950
1951  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1952    assert(N == 2 && "Invalid number of operands!");
1953    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1954    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1955    Inst.addOperand(MCOperand::CreateImm(Val));
1956  }
1957
1958  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1959    assert(N == 2 && "Invalid number of operands!");
1960    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1961    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1962    Inst.addOperand(MCOperand::CreateImm(Val));
1963  }
1964
1965  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1966    assert(N == 2 && "Invalid number of operands!");
1967    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1968    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1969    Inst.addOperand(MCOperand::CreateImm(Val));
1970  }
1971
1972  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1973    assert(N == 2 && "Invalid number of operands!");
1974    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1975    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1976    Inst.addOperand(MCOperand::CreateImm(Val));
1977  }
1978
1979  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1980    assert(N == 1 && "Invalid number of operands!");
1981    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1982    assert(CE && "non-constant post-idx-imm8 operand!");
1983    int Imm = CE->getValue();
1984    bool isAdd = Imm >= 0;
1985    if (Imm == INT32_MIN) Imm = 0;
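    // Magnitude goes in bits [7:0]; the add/subtract flag goes in bit 8.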
1986    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1987    Inst.addOperand(MCOperand::CreateImm(Imm));
1988  }
1989
1990  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1991    assert(N == 1 && "Invalid number of operands!");
1992    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1993    assert(CE && "non-constant post-idx-imm8s4 operand!");
1994    int Imm = CE->getValue();
1995    bool isAdd = Imm >= 0;
1996    if (Imm == INT32_MIN) Imm = 0;
1997    // Immediate is scaled by 4.
1998    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1999    Inst.addOperand(MCOperand::CreateImm(Imm));
2000  }
2001
2002  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2003    assert(N == 2 && "Invalid number of operands!");
2004    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2005    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
2006  }
2007
2008  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2009    assert(N == 2 && "Invalid number of operands!");
2010    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
2011    // The sign, shift type, and shift amount are encoded in a single operand
2012    // using the AM2 encoding helpers.
2013    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2014    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2015                                     PostIdxReg.ShiftTy);
2016    Inst.addOperand(MCOperand::CreateImm(Imm));
2017  }
2018
2019  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2020    assert(N == 1 && "Invalid number of operands!");
2021    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
2022  }
2023
2024  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2025    assert(N == 1 && "Invalid number of operands!");
2026    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2027  }
2028
2029  void addVecListOperands(MCInst &Inst, unsigned N) const {
2030    assert(N == 1 && "Invalid number of operands!");
2031    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2032  }
2033
2034  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2035    assert(N == 2 && "Invalid number of operands!");
2036    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2037    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
2038  }
2039
2040  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2041    assert(N == 1 && "Invalid number of operands!");
2042    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2043  }
2044
2045  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2046    assert(N == 1 && "Invalid number of operands!");
2047    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2048  }
2049
2050  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2051    assert(N == 1 && "Invalid number of operands!");
2052    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2053  }
2054
2055  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2056    assert(N == 1 && "Invalid number of operands!");
2057    // The immediate encodes the type of constant as well as the value.
2058    // Mask in that this is an i8 splat.
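    // The value sits in bits [7:0]; the 0xe00 in bits [12:8] is the op/cmode
    // pattern that marks an i8 splat.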
2059    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2060    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2061  }
2062
2063  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2064    assert(N == 1 && "Invalid number of operands!");
2065    // The immediate encodes the type of constant as well as the value.
2066    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2067    unsigned Value = CE->getValue();
2068    if (Value >= 256)
2069      Value = (Value >> 8) | 0xa00;
2070    else
2071      Value |= 0x800;
2072    Inst.addOperand(MCOperand::CreateImm(Value));
2073  }
2074
2075  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2076    assert(N == 1 && "Invalid number of operands!");
2077    // The immediate encodes the type of constant as well as the value.
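    // 0x200, 0x400 and 0x600 are the op/cmode patterns for a byte in bits
    // [15:8], [23:16] and [31:24]; values below 256 keep the default pattern
    // (byte in bits [7:0]).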
2078    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2079    unsigned Value = CE->getValue();
2080    if (Value >= 256 && Value <= 0xff00)
2081      Value = (Value >> 8) | 0x200;
2082    else if (Value > 0xffff && Value <= 0xff0000)
2083      Value = (Value >> 16) | 0x400;
2084    else if (Value > 0xffffff)
2085      Value = (Value >> 24) | 0x600;
2086    Inst.addOperand(MCOperand::CreateImm(Value));
2087  }
2088
2089  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2090    assert(N == 1 && "Invalid number of operands!");
2091    // The immediate encodes the type of constant as well as the value.
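    // As above, but 0xc00 and 0xd00 select the 'byte followed by ones' forms
    // (0x0000XXff and 0x00XXffff) when the low byte(s) are all ones.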
2092    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2093    unsigned Value = CE->getValue();
2094    if (Value >= 256 && Value <= 0xffff)
2095      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2096    else if (Value > 0xffff && Value <= 0xffffff)
2097      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2098    else if (Value > 0xffffff)
2099      Value = (Value >> 24) | 0x600;
2100    Inst.addOperand(MCOperand::CreateImm(Value));
2101  }
2102
2103  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2104    assert(N == 1 && "Invalid number of operands!");
2105    // The immediate encodes the type of constant as well as the value.
2106    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2107    unsigned Value = ~CE->getValue();
2108    if (Value >= 256 && Value <= 0xffff)
2109      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2110    else if (Value > 0xffff && Value <= 0xffffff)
2111      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2112    else if (Value > 0xffffff)
2113      Value = (Value >> 24) | 0x600;
2114    Inst.addOperand(MCOperand::CreateImm(Value));
2115  }
2116
2117  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2118    assert(N == 1 && "Invalid number of operands!");
2119    // The immediate encodes the type of constant as well as the value.
2120    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2121    uint64_t Value = CE->getValue();
2122    unsigned Imm = 0;
2123    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2124      Imm |= (Value & 1) << i;
2125    }
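    // Each 0xff byte of the value sets one bit of the 8-bit mask; 0x1e00 is
    // the op/cmode pattern for the i64 all-zero-or-all-ones bytes form.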
2126    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2127  }
2128
2129  virtual void print(raw_ostream &OS) const;
2130
2131  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2132    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2133    Op->ITMask.Mask = Mask;
2134    Op->StartLoc = S;
2135    Op->EndLoc = S;
2136    return Op;
2137  }
2138
2139  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2140    ARMOperand *Op = new ARMOperand(k_CondCode);
2141    Op->CC.Val = CC;
2142    Op->StartLoc = S;
2143    Op->EndLoc = S;
2144    return Op;
2145  }
2146
2147  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2148    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2149    Op->Cop.Val = CopVal;
2150    Op->StartLoc = S;
2151    Op->EndLoc = S;
2152    return Op;
2153  }
2154
2155  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2156    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2157    Op->Cop.Val = CopVal;
2158    Op->StartLoc = S;
2159    Op->EndLoc = S;
2160    return Op;
2161  }
2162
2163  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2164    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2165    Op->Cop.Val = Val;
2166    Op->StartLoc = S;
2167    Op->EndLoc = E;
2168    return Op;
2169  }
2170
2171  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2172    ARMOperand *Op = new ARMOperand(k_CCOut);
2173    Op->Reg.RegNum = RegNum;
2174    Op->StartLoc = S;
2175    Op->EndLoc = S;
2176    return Op;
2177  }
2178
2179  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2180    ARMOperand *Op = new ARMOperand(k_Token);
2181    Op->Tok.Data = Str.data();
2182    Op->Tok.Length = Str.size();
2183    Op->StartLoc = S;
2184    Op->EndLoc = S;
2185    return Op;
2186  }
2187
2188  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2189    ARMOperand *Op = new ARMOperand(k_Register);
2190    Op->Reg.RegNum = RegNum;
2191    Op->StartLoc = S;
2192    Op->EndLoc = E;
2193    return Op;
2194  }
2195
2196  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2197                                           unsigned SrcReg,
2198                                           unsigned ShiftReg,
2199                                           unsigned ShiftImm,
2200                                           SMLoc S, SMLoc E) {
2201    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2202    Op->RegShiftedReg.ShiftTy = ShTy;
2203    Op->RegShiftedReg.SrcReg = SrcReg;
2204    Op->RegShiftedReg.ShiftReg = ShiftReg;
2205    Op->RegShiftedReg.ShiftImm = ShiftImm;
2206    Op->StartLoc = S;
2207    Op->EndLoc = E;
2208    return Op;
2209  }
2210
2211  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2212                                            unsigned SrcReg,
2213                                            unsigned ShiftImm,
2214                                            SMLoc S, SMLoc E) {
2215    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2216    Op->RegShiftedImm.ShiftTy = ShTy;
2217    Op->RegShiftedImm.SrcReg = SrcReg;
2218    Op->RegShiftedImm.ShiftImm = ShiftImm;
2219    Op->StartLoc = S;
2220    Op->EndLoc = E;
2221    return Op;
2222  }
2223
2224  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2225                                   SMLoc S, SMLoc E) {
2226    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2227    Op->ShifterImm.isASR = isASR;
2228    Op->ShifterImm.Imm = Imm;
2229    Op->StartLoc = S;
2230    Op->EndLoc = E;
2231    return Op;
2232  }
2233
2234  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2235    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2236    Op->RotImm.Imm = Imm;
2237    Op->StartLoc = S;
2238    Op->EndLoc = E;
2239    return Op;
2240  }
2241
2242  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2243                                    SMLoc S, SMLoc E) {
2244    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2245    Op->Bitfield.LSB = LSB;
2246    Op->Bitfield.Width = Width;
2247    Op->StartLoc = S;
2248    Op->EndLoc = E;
2249    return Op;
2250  }
2251
2252  static ARMOperand *
2253  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2254                SMLoc StartLoc, SMLoc EndLoc) {
2255    KindTy Kind = k_RegisterList;
2256
2257    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2258      Kind = k_DPRRegisterList;
2259    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2260             contains(Regs.front().first))
2261      Kind = k_SPRRegisterList;
2262
2263    ARMOperand *Op = new ARMOperand(Kind);
2264    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2265           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2266      Op->Registers.push_back(I->first);
2267    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2268    Op->StartLoc = StartLoc;
2269    Op->EndLoc = EndLoc;
2270    return Op;
2271  }
2272
2273  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2274                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2275    ARMOperand *Op = new ARMOperand(k_VectorList);
2276    Op->VectorList.RegNum = RegNum;
2277    Op->VectorList.Count = Count;
2278    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2279    Op->StartLoc = S;
2280    Op->EndLoc = E;
2281    return Op;
2282  }
2283
2284  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2285                                              bool isDoubleSpaced,
2286                                              SMLoc S, SMLoc E) {
2287    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2288    Op->VectorList.RegNum = RegNum;
2289    Op->VectorList.Count = Count;
2290    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2291    Op->StartLoc = S;
2292    Op->EndLoc = E;
2293    return Op;
2294  }
2295
2296  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2297                                             unsigned Index,
2298                                             bool isDoubleSpaced,
2299                                             SMLoc S, SMLoc E) {
2300    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2301    Op->VectorList.RegNum = RegNum;
2302    Op->VectorList.Count = Count;
2303    Op->VectorList.LaneIndex = Index;
2304    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2305    Op->StartLoc = S;
2306    Op->EndLoc = E;
2307    return Op;
2308  }
2309
2310  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2311                                       MCContext &Ctx) {
2312    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2313    Op->VectorIndex.Val = Idx;
2314    Op->StartLoc = S;
2315    Op->EndLoc = E;
2316    return Op;
2317  }
2318
2319  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2320    ARMOperand *Op = new ARMOperand(k_Immediate);
2321    Op->Imm.Val = Val;
2322    Op->StartLoc = S;
2323    Op->EndLoc = E;
2324    return Op;
2325  }
2326
2327  static ARMOperand *CreateMem(unsigned BaseRegNum,
2328                               const MCConstantExpr *OffsetImm,
2329                               unsigned OffsetRegNum,
2330                               ARM_AM::ShiftOpc ShiftType,
2331                               unsigned ShiftImm,
2332                               unsigned Alignment,
2333                               bool isNegative,
2334                               SMLoc S, SMLoc E) {
2335    ARMOperand *Op = new ARMOperand(k_Memory);
2336    Op->Memory.BaseRegNum = BaseRegNum;
2337    Op->Memory.OffsetImm = OffsetImm;
2338    Op->Memory.OffsetRegNum = OffsetRegNum;
2339    Op->Memory.ShiftType = ShiftType;
2340    Op->Memory.ShiftImm = ShiftImm;
2341    Op->Memory.Alignment = Alignment;
2342    Op->Memory.isNegative = isNegative;
2343    Op->StartLoc = S;
2344    Op->EndLoc = E;
2345    return Op;
2346  }
2347
2348  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2349                                      ARM_AM::ShiftOpc ShiftTy,
2350                                      unsigned ShiftImm,
2351                                      SMLoc S, SMLoc E) {
2352    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2353    Op->PostIdxReg.RegNum = RegNum;
2354    Op->PostIdxReg.isAdd = isAdd;
2355    Op->PostIdxReg.ShiftTy = ShiftTy;
2356    Op->PostIdxReg.ShiftImm = ShiftImm;
2357    Op->StartLoc = S;
2358    Op->EndLoc = E;
2359    return Op;
2360  }
2361
2362  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2363    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2364    Op->MBOpt.Val = Opt;
2365    Op->StartLoc = S;
2366    Op->EndLoc = S;
2367    return Op;
2368  }
2369
2370  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2371    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2372    Op->IFlags.Val = IFlags;
2373    Op->StartLoc = S;
2374    Op->EndLoc = S;
2375    return Op;
2376  }
2377
2378  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2379    ARMOperand *Op = new ARMOperand(k_MSRMask);
2380    Op->MMask.Val = MMask;
2381    Op->StartLoc = S;
2382    Op->EndLoc = S;
2383    return Op;
2384  }
2385};
2386
2387} // end anonymous namespace.
2388
2389void ARMOperand::print(raw_ostream &OS) const {
2390  switch (Kind) {
2391  case k_CondCode:
2392    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2393    break;
2394  case k_CCOut:
2395    OS << "<ccout " << getReg() << ">";
2396    break;
2397  case k_ITCondMask: {
2398    static const char *const MaskStr[] = {
2399      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2400      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2401    };
2402    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2403    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2404    break;
2405  }
2406  case k_CoprocNum:
2407    OS << "<coprocessor number: " << getCoproc() << ">";
2408    break;
2409  case k_CoprocReg:
2410    OS << "<coprocessor register: " << getCoproc() << ">";
2411    break;
2412  case k_CoprocOption:
2413    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2414    break;
2415  case k_MSRMask:
2416    OS << "<mask: " << getMSRMask() << ">";
2417    break;
2418  case k_Immediate:
2419    getImm()->print(OS);
2420    break;
2421  case k_MemBarrierOpt:
2422    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2423    break;
2424  case k_Memory:
2425    OS << "<memory "
2426       << " base:" << Memory.BaseRegNum;
2427    OS << ">";
2428    break;
2429  case k_PostIndexRegister:
2430    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2431       << PostIdxReg.RegNum;
2432    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2433      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2434         << PostIdxReg.ShiftImm;
2435    OS << ">";
2436    break;
2437  case k_ProcIFlags: {
2438    OS << "<ARM_PROC::";
2439    unsigned IFlags = getProcIFlags();
2440    for (int i=2; i >= 0; --i)
2441      if (IFlags & (1 << i))
2442        OS << ARM_PROC::IFlagsToString(1 << i);
2443    OS << ">";
2444    break;
2445  }
2446  case k_Register:
2447    OS << "<register " << getReg() << ">";
2448    break;
2449  case k_ShifterImmediate:
2450    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2451       << " #" << ShifterImm.Imm << ">";
2452    break;
2453  case k_ShiftedRegister:
2454    OS << "<so_reg_reg "
2455       << RegShiftedReg.SrcReg << " "
2456       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2457       << " " << RegShiftedReg.ShiftReg << ">";
2458    break;
2459  case k_ShiftedImmediate:
2460    OS << "<so_reg_imm "
2461       << RegShiftedImm.SrcReg << " "
2462       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2463       << " #" << RegShiftedImm.ShiftImm << ">";
2464    break;
2465  case k_RotateImmediate:
2466    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2467    break;
2468  case k_BitfieldDescriptor:
2469    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2470       << ", width: " << Bitfield.Width << ">";
2471    break;
2472  case k_RegisterList:
2473  case k_DPRRegisterList:
2474  case k_SPRRegisterList: {
2475    OS << "<register_list ";
2476
2477    const SmallVectorImpl<unsigned> &RegList = getRegList();
2478    for (SmallVectorImpl<unsigned>::const_iterator
2479           I = RegList.begin(), E = RegList.end(); I != E; ) {
2480      OS << *I;
2481      if (++I < E) OS << ", ";
2482    }
2483
2484    OS << ">";
2485    break;
2486  }
2487  case k_VectorList:
2488    OS << "<vector_list " << VectorList.Count << " * "
2489       << VectorList.RegNum << ">";
2490    break;
2491  case k_VectorListAllLanes:
2492    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2493       << VectorList.RegNum << ">";
2494    break;
2495  case k_VectorListIndexed:
2496    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2497       << VectorList.Count << " * " << VectorList.RegNum << ">";
2498    break;
2499  case k_Token:
2500    OS << "'" << getToken() << "'";
2501    break;
2502  case k_VectorIndex:
2503    OS << "<vectorindex " << getVectorIndex() << ">";
2504    break;
2505  }
2506}
2507
2508/// @name Auto-generated Match Functions
2509/// {
2510
2511static unsigned MatchRegisterName(StringRef Name);
2512
2513/// }
2514
2515bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2516                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2517  StartLoc = Parser.getTok().getLoc();
2518  EndLoc = Parser.getTok().getEndLoc();
2519  RegNo = tryParseRegister();
2520
2521  return (RegNo == (unsigned)-1);
2522}
2523
2524/// Try to parse a register name.  The token must be an Identifier when called,
2525/// and if it is a register name the token is eaten and the register number is
2526/// returned.  Otherwise return -1.
2527///
2528int ARMAsmParser::tryParseRegister() {
2529  const AsmToken &Tok = Parser.getTok();
2530  if (Tok.isNot(AsmToken::Identifier)) return -1;
2531
2532  std::string lowerCase = Tok.getString().lower();
2533  unsigned RegNum = MatchRegisterName(lowerCase);
2534  if (!RegNum) {
2535    RegNum = StringSwitch<unsigned>(lowerCase)
2536      .Case("r13", ARM::SP)
2537      .Case("r14", ARM::LR)
2538      .Case("r15", ARM::PC)
2539      .Case("ip", ARM::R12)
2540      // Additional register name aliases for 'gas' compatibility.
2541      .Case("a1", ARM::R0)
2542      .Case("a2", ARM::R1)
2543      .Case("a3", ARM::R2)
2544      .Case("a4", ARM::R3)
2545      .Case("v1", ARM::R4)
2546      .Case("v2", ARM::R5)
2547      .Case("v3", ARM::R6)
2548      .Case("v4", ARM::R7)
2549      .Case("v5", ARM::R8)
2550      .Case("v6", ARM::R9)
2551      .Case("v7", ARM::R10)
2552      .Case("v8", ARM::R11)
2553      .Case("sb", ARM::R9)
2554      .Case("sl", ARM::R10)
2555      .Case("fp", ARM::R11)
2556      .Default(0);
2557  }
2558  if (!RegNum) {
2559    // Check for aliases registered via .req. Canonicalize to lower case.
2560    // That's more consistent since register names are case insensitive, and
2561    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2562    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2563    // If no match, return failure.
2564    if (Entry == RegisterReqs.end())
2565      return -1;
2566    Parser.Lex(); // Eat identifier token.
2567    return Entry->getValue();
2568  }
2569
2570  Parser.Lex(); // Eat identifier token.
2571
2572  return RegNum;
2573}
2574
2575// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2576// If a recoverable error occurs, return 1. If an irrecoverable error
2577// occurs, return -1. An irrecoverable error is one where tokens have been
2578// consumed in the process of trying to parse the shifter (i.e., when it is
2579// indeed a shifter operand, but malformed).
2580int ARMAsmParser::tryParseShiftRegister(
2581                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2582  SMLoc S = Parser.getTok().getLoc();
2583  const AsmToken &Tok = Parser.getTok();
2584  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2585
2586  std::string lowerCase = Tok.getString().lower();
2587  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2588      .Case("asl", ARM_AM::lsl)
2589      .Case("lsl", ARM_AM::lsl)
2590      .Case("lsr", ARM_AM::lsr)
2591      .Case("asr", ARM_AM::asr)
2592      .Case("ror", ARM_AM::ror)
2593      .Case("rrx", ARM_AM::rrx)
2594      .Default(ARM_AM::no_shift);
2595
2596  if (ShiftTy == ARM_AM::no_shift)
2597    return 1;
2598
2599  Parser.Lex(); // Eat the operator.
2600
2601  // The source register for the shift has already been added to the
2602  // operand list, so we need to pop it off and combine it into the shifted
2603  // register operand instead.
2604  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2605  if (!PrevOp->isReg())
2606    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2607  int SrcReg = PrevOp->getReg();
2608
2609  SMLoc EndLoc;
2610  int64_t Imm = 0;
2611  int ShiftReg = 0;
2612  if (ShiftTy == ARM_AM::rrx) {
2613    // RRX doesn't have an explicit shift amount. The encoder expects
2614    // the shift register to be the same as the source register. Seems odd,
2615    // but OK.
2616    ShiftReg = SrcReg;
2617  } else {
2618    // Figure out if this is shifted by a constant or a register (for non-RRX).
2619    if (Parser.getTok().is(AsmToken::Hash) ||
2620        Parser.getTok().is(AsmToken::Dollar)) {
2621      Parser.Lex(); // Eat hash.
2622      SMLoc ImmLoc = Parser.getTok().getLoc();
2623      const MCExpr *ShiftExpr = 0;
2624      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
2625        Error(ImmLoc, "invalid immediate shift value");
2626        return -1;
2627      }
2628      // The expression must be evaluatable as an immediate.
2629      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2630      if (!CE) {
2631        Error(ImmLoc, "invalid immediate shift value");
2632        return -1;
2633      }
2634      // Range check the immediate.
2635      // lsl, ror: 0 <= imm <= 31
2636      // lsr, asr: 0 <= imm <= 32
2637      Imm = CE->getValue();
2638      if (Imm < 0 ||
2639          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2640          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2641        Error(ImmLoc, "immediate shift value out of range");
2642        return -1;
2643      }
2644      // Shift by zero is a nop. Always send it through as lsl.
2645      // ('as' compatibility)
2646      if (Imm == 0)
2647        ShiftTy = ARM_AM::lsl;
2648    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2649      SMLoc L = Parser.getTok().getLoc();
2650      EndLoc = Parser.getTok().getEndLoc();
2651      ShiftReg = tryParseRegister();
2652      if (ShiftReg == -1) {
2653        Error (L, "expected immediate or register in shift operand");
2654        return -1;
2655      }
2656    } else {
2657      Error (Parser.getTok().getLoc(),
2658                    "expected immediate or register in shift operand");
2659      return -1;
2660    }
2661  }
2662
2663  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2664    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2665                                                         ShiftReg, Imm,
2666                                                         S, EndLoc));
2667  else
2668    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2669                                                          S, EndLoc));
2670
2671  return 0;
2672}
2673
2674
2675/// Try to parse a register name.  The token must be an Identifier when called.
2676/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2677/// if there is a "writeback". Returns 'true' if it's not a register.
2678///
2679/// TODO this is likely to change to allow different register types and or to
2680/// parse for a specific register type.
2681bool ARMAsmParser::
2682tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2683  const AsmToken &RegTok = Parser.getTok();
2684  int RegNo = tryParseRegister();
2685  if (RegNo == -1)
2686    return true;
2687
2688  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
2689                                           RegTok.getEndLoc()));
2690
2691  const AsmToken &ExclaimTok = Parser.getTok();
2692  if (ExclaimTok.is(AsmToken::Exclaim)) {
2693    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2694                                               ExclaimTok.getLoc()));
2695    Parser.Lex(); // Eat exclaim token
2696    return false;
2697  }
2698
2699  // Also check for an index operand. This is only legal for vector registers,
2700  // but that'll get caught OK in operand matching, so we don't need to
2701  // explicitly filter everything else out here.
2702  if (Parser.getTok().is(AsmToken::LBrac)) {
2703    SMLoc SIdx = Parser.getTok().getLoc();
2704    Parser.Lex(); // Eat left bracket token.
2705
2706    const MCExpr *ImmVal;
2707    if (getParser().parseExpression(ImmVal))
2708      return true;
2709    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2710    if (!MCE)
2711      return TokError("immediate value expected for vector index");
2712
2713    if (Parser.getTok().isNot(AsmToken::RBrac))
2714      return Error(Parser.getTok().getLoc(), "']' expected");
2715
2716    SMLoc E = Parser.getTok().getEndLoc();
2717    Parser.Lex(); // Eat right bracket token.
2718
2719    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2720                                                     SIdx, E,
2721                                                     getContext()));
2722  }
2723
2724  return false;
2725}
2726
2727/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2728/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2729/// "c5", ...
2730static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2731  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2732  // but efficient.
2733  switch (Name.size()) {
2734  default: return -1;
2735  case 2:
2736    if (Name[0] != CoprocOp)
2737      return -1;
2738    switch (Name[1]) {
2739    default:  return -1;
2740    case '0': return 0;
2741    case '1': return 1;
2742    case '2': return 2;
2743    case '3': return 3;
2744    case '4': return 4;
2745    case '5': return 5;
2746    case '6': return 6;
2747    case '7': return 7;
2748    case '8': return 8;
2749    case '9': return 9;
2750    }
2751  case 3:
2752    if (Name[0] != CoprocOp || Name[1] != '1')
2753      return -1;
2754    switch (Name[2]) {
2755    default:  return -1;
2756    case '0': return 10;
2757    case '1': return 11;
2758    case '2': return 12;
2759    case '3': return 13;
2760    case '4': return 14;
2761    case '5': return 15;
2762    }
2763  }
2764}
2765
2766/// parseITCondCode - Try to parse a condition code for an IT instruction.
2767ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2768parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2769  SMLoc S = Parser.getTok().getLoc();
2770  const AsmToken &Tok = Parser.getTok();
2771  if (!Tok.is(AsmToken::Identifier))
2772    return MatchOperand_NoMatch;
2773  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2774    .Case("eq", ARMCC::EQ)
2775    .Case("ne", ARMCC::NE)
2776    .Case("hs", ARMCC::HS)
2777    .Case("cs", ARMCC::HS)
2778    .Case("lo", ARMCC::LO)
2779    .Case("cc", ARMCC::LO)
2780    .Case("mi", ARMCC::MI)
2781    .Case("pl", ARMCC::PL)
2782    .Case("vs", ARMCC::VS)
2783    .Case("vc", ARMCC::VC)
2784    .Case("hi", ARMCC::HI)
2785    .Case("ls", ARMCC::LS)
2786    .Case("ge", ARMCC::GE)
2787    .Case("lt", ARMCC::LT)
2788    .Case("gt", ARMCC::GT)
2789    .Case("le", ARMCC::LE)
2790    .Case("al", ARMCC::AL)
2791    .Default(~0U);
2792  if (CC == ~0U)
2793    return MatchOperand_NoMatch;
2794  Parser.Lex(); // Eat the token.
2795
2796  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2797
2798  return MatchOperand_Success;
2799}
2800
2801/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2802/// token must be an Identifier when called, and if it is a coprocessor
2803/// number, the token is eaten and the operand is added to the operand list.
2804ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2805parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2806  SMLoc S = Parser.getTok().getLoc();
2807  const AsmToken &Tok = Parser.getTok();
2808  if (Tok.isNot(AsmToken::Identifier))
2809    return MatchOperand_NoMatch;
2810
2811  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2812  if (Num == -1)
2813    return MatchOperand_NoMatch;
2814
2815  Parser.Lex(); // Eat identifier token.
2816  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2817  return MatchOperand_Success;
2818}
2819
2820/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2821/// token must be an Identifier when called, and if it is a coprocessor
2822/// register, the token is eaten and the operand is added to the operand list.
2823ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2824parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2825  SMLoc S = Parser.getTok().getLoc();
2826  const AsmToken &Tok = Parser.getTok();
2827  if (Tok.isNot(AsmToken::Identifier))
2828    return MatchOperand_NoMatch;
2829
2830  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2831  if (Reg == -1)
2832    return MatchOperand_NoMatch;
2833
2834  Parser.Lex(); // Eat identifier token.
2835  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2836  return MatchOperand_Success;
2837}
2838
2839/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2840/// coproc_option : '{' imm0_255 '}'
2841ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2842parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2843  SMLoc S = Parser.getTok().getLoc();
2844
2845  // If this isn't a '{', this isn't a coprocessor immediate operand.
2846  if (Parser.getTok().isNot(AsmToken::LCurly))
2847    return MatchOperand_NoMatch;
2848  Parser.Lex(); // Eat the '{'
2849
2850  const MCExpr *Expr;
2851  SMLoc Loc = Parser.getTok().getLoc();
2852  if (getParser().parseExpression(Expr)) {
2853    Error(Loc, "illegal expression");
2854    return MatchOperand_ParseFail;
2855  }
2856  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2857  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2858    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2859    return MatchOperand_ParseFail;
2860  }
2861  int Val = CE->getValue();
2862
2863  // Check for and consume the closing '}'
2864  if (Parser.getTok().isNot(AsmToken::RCurly))
2865    return MatchOperand_ParseFail;
2866  SMLoc E = Parser.getTok().getEndLoc();
2867  Parser.Lex(); // Eat the '}'
2868
2869  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2870  return MatchOperand_Success;
2871}
2872
2873// For register list parsing, we need to map from raw GPR register numbering
2874// to the enumeration values. The enumeration values aren't sorted by
2875// register number due to our using "sp", "lr" and "pc" as canonical names.
2876static unsigned getNextRegister(unsigned Reg) {
2877  // If this is a GPR, we need to do it manually, otherwise we can rely
2878  // on the sort ordering of the enumeration since the other reg-classes
2879  // are sane.
2880  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2881    return Reg + 1;
2882  switch(Reg) {
2883  default: llvm_unreachable("Invalid GPR number!");
2884  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2885  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2886  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2887  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2888  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2889  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2890  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2891  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2892  }
2893}
2894
2895// Return the low-subreg of a given Q register.
2896static unsigned getDRegFromQReg(unsigned QReg) {
2897  switch (QReg) {
2898  default: llvm_unreachable("expected a Q register!");
2899  case ARM::Q0:  return ARM::D0;
2900  case ARM::Q1:  return ARM::D2;
2901  case ARM::Q2:  return ARM::D4;
2902  case ARM::Q3:  return ARM::D6;
2903  case ARM::Q4:  return ARM::D8;
2904  case ARM::Q5:  return ARM::D10;
2905  case ARM::Q6:  return ARM::D12;
2906  case ARM::Q7:  return ARM::D14;
2907  case ARM::Q8:  return ARM::D16;
2908  case ARM::Q9:  return ARM::D18;
2909  case ARM::Q10: return ARM::D20;
2910  case ARM::Q11: return ARM::D22;
2911  case ARM::Q12: return ARM::D24;
2912  case ARM::Q13: return ARM::D26;
2913  case ARM::Q14: return ARM::D28;
2914  case ARM::Q15: return ARM::D30;
2915  }
2916}
2917
2918/// Parse a register list.
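/// For illustration (assumed standard ARM syntax): "{r1, r2, r4-r6}" or
/// "{d0-d3}"; Q registers such as "{q0, q1}" are accepted and expanded to
/// their D sub-registers.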
2919bool ARMAsmParser::
2920parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2921  assert(Parser.getTok().is(AsmToken::LCurly) &&
2922         "Token is not a Left Curly Brace");
2923  SMLoc S = Parser.getTok().getLoc();
2924  Parser.Lex(); // Eat '{' token.
2925  SMLoc RegLoc = Parser.getTok().getLoc();
2926
2927  // Check the first register in the list to see what register class
2928  // this is a list of.
2929  int Reg = tryParseRegister();
2930  if (Reg == -1)
2931    return Error(RegLoc, "register expected");
2932
2933  // The reglist instructions have at most 16 registers, so reserve
2934  // space for that many.
2935  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2936
2937  // Allow Q regs and just interpret them as the two D sub-registers.
2938  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2939    Reg = getDRegFromQReg(Reg);
2940    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2941    ++Reg;
2942  }
2943  const MCRegisterClass *RC;
2944  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2945    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2946  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2947    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2948  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2949    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2950  else
2951    return Error(RegLoc, "invalid register in register list");
2952
2953  // Store the register.
2954  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2955
2956  // This starts immediately after the first register token in the list,
2957  // so we can see either a comma or a minus (range separator) as a legal
2958  // next token.
2959  while (Parser.getTok().is(AsmToken::Comma) ||
2960         Parser.getTok().is(AsmToken::Minus)) {
2961    if (Parser.getTok().is(AsmToken::Minus)) {
2962      Parser.Lex(); // Eat the minus.
2963      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
2964      int EndReg = tryParseRegister();
2965      if (EndReg == -1)
2966        return Error(AfterMinusLoc, "register expected");
2967      // Allow Q regs and just interpret them as the two D sub-registers.
2968      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2969        EndReg = getDRegFromQReg(EndReg) + 1;
2970      // If the register is the same as the start reg, there's nothing
2971      // more to do.
2972      if (Reg == EndReg)
2973        continue;
2974      // The register must be in the same register class as the first.
2975      if (!RC->contains(EndReg))
2976        return Error(AfterMinusLoc, "invalid register in register list");
2977      // Ranges must go from low to high.
2978      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
2979        return Error(AfterMinusLoc, "bad range in register list");
2980
2981      // Add all the registers in the range to the register list.
2982      while (Reg != EndReg) {
2983        Reg = getNextRegister(Reg);
2984        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2985      }
2986      continue;
2987    }
2988    Parser.Lex(); // Eat the comma.
2989    RegLoc = Parser.getTok().getLoc();
2990    int OldReg = Reg;
2991    const AsmToken RegTok = Parser.getTok();
2992    Reg = tryParseRegister();
2993    if (Reg == -1)
2994      return Error(RegLoc, "register expected");
2995    // Allow Q regs and just interpret them as the two D sub-registers.
2996    bool isQReg = false;
2997    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2998      Reg = getDRegFromQReg(Reg);
2999      isQReg = true;
3000    }
3001    // The register must be in the same register class as the first.
3002    if (!RC->contains(Reg))
3003      return Error(RegLoc, "invalid register in register list");
3004    // List must be monotonically increasing.
3005    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3006      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3007        Warning(RegLoc, "register list not in ascending order");
3008      else
3009        return Error(RegLoc, "register list not in ascending order");
3010    }
3011    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3012      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3013              ") in register list");
3014      continue;
3015    }
3016    // VFP register lists must also be contiguous.
3017    // It's OK to use the enumeration values directly here, as the
3018    // VFP register classes have the enum sorted properly.
3019    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3020        Reg != OldReg + 1)
3021      return Error(RegLoc, "non-contiguous register range");
3022    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
3023    if (isQReg)
3024      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
3025  }
3026
3027  if (Parser.getTok().isNot(AsmToken::RCurly))
3028    return Error(Parser.getTok().getLoc(), "'}' expected");
3029  SMLoc E = Parser.getTok().getEndLoc();
3030  Parser.Lex(); // Eat '}' token.
3031
3032  // Push the register list operand.
3033  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3034
3035  // The ARM system instruction variants for LDM/STM have a '^' token here.
3036  if (Parser.getTok().is(AsmToken::Caret)) {
3037    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3038    Parser.Lex(); // Eat '^' token.
3039  }
3040
3041  return false;
3042}
3043
3044// Helper function to parse the lane index for vector lists.
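// For illustration (assumed syntax): the "[2]" in "d3[2]" is an indexed lane,
// and "d3[]" is the all-lanes form.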
3045ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3046parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3047  Index = 0; // Always return a defined index value.
3048  if (Parser.getTok().is(AsmToken::LBrac)) {
3049    Parser.Lex(); // Eat the '['.
3050    if (Parser.getTok().is(AsmToken::RBrac)) {
3051      // "Dn[]" is the 'all lanes' syntax.
3052      LaneKind = AllLanes;
3053      EndLoc = Parser.getTok().getEndLoc();
3054      Parser.Lex(); // Eat the ']'.
3055      return MatchOperand_Success;
3056    }
3057
3058    // There's an optional '#' token here. Normally there wouldn't be, but
3059    // inline assembly puts one in, and it's friendly to accept that.
3060    if (Parser.getTok().is(AsmToken::Hash))
3061      Parser.Lex(); // Eat the '#'
3062
3063    const MCExpr *LaneIndex;
3064    SMLoc Loc = Parser.getTok().getLoc();
3065    if (getParser().parseExpression(LaneIndex)) {
3066      Error(Loc, "illegal expression");
3067      return MatchOperand_ParseFail;
3068    }
3069    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3070    if (!CE) {
3071      Error(Loc, "lane index must be empty or an integer");
3072      return MatchOperand_ParseFail;
3073    }
3074    if (Parser.getTok().isNot(AsmToken::RBrac)) {
3075      Error(Parser.getTok().getLoc(), "']' expected");
3076      return MatchOperand_ParseFail;
3077    }
3078    EndLoc = Parser.getTok().getEndLoc();
3079    Parser.Lex(); // Eat the ']'.
3080    int64_t Val = CE->getValue();
3081
3082    // FIXME: Make this range check context sensitive for .8, .16, .32.
3083    if (Val < 0 || Val > 7) {
3084      Error(Parser.getTok().getLoc(), "lane index out of range");
3085      return MatchOperand_ParseFail;
3086    }
3087    Index = Val;
3088    LaneKind = IndexedLane;
3089    return MatchOperand_Success;
3090  }
3091  LaneKind = NoLanes;
3092  return MatchOperand_Success;
3093}
3094
3095// parse a vector register list
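// For illustration (assumed standard NEON syntax): "{d0, d1, d2}", "{d0-d2}",
// "{q0}", or lane forms such as "{d0[], d1[]}" and "{d0[1], d1[1]}".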
3096ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3097parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3098  VectorLaneTy LaneKind;
3099  unsigned LaneIndex;
3100  SMLoc S = Parser.getTok().getLoc();
3101  // As an extension (to match gas), support a plain D register or Q register
3102  // (without enclosing curly braces) as a single or double entry list,
3103  // respectively.
3104  if (Parser.getTok().is(AsmToken::Identifier)) {
3105    SMLoc E = Parser.getTok().getEndLoc();
3106    int Reg = tryParseRegister();
3107    if (Reg == -1)
3108      return MatchOperand_NoMatch;
3109    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3110      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3111      if (Res != MatchOperand_Success)
3112        return Res;
3113      switch (LaneKind) {
3114      case NoLanes:
3115        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3116        break;
3117      case AllLanes:
3118        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3119                                                                S, E));
3120        break;
3121      case IndexedLane:
3122        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3123                                                               LaneIndex,
3124                                                               false, S, E));
3125        break;
3126      }
3127      return MatchOperand_Success;
3128    }
3129    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3130      Reg = getDRegFromQReg(Reg);
3131      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3132      if (Res != MatchOperand_Success)
3133        return Res;
3134      switch (LaneKind) {
3135      case NoLanes:
3136        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3137                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3138        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3139        break;
3140      case AllLanes:
3141        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3142                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3143        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3144                                                                S, E));
3145        break;
3146      case IndexedLane:
3147        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3148                                                               LaneIndex,
3149                                                               false, S, E));
3150        break;
3151      }
3152      return MatchOperand_Success;
3153    }
3154    Error(S, "vector register expected");
3155    return MatchOperand_ParseFail;
3156  }
3157
3158  if (Parser.getTok().isNot(AsmToken::LCurly))
3159    return MatchOperand_NoMatch;
3160
3161  Parser.Lex(); // Eat '{' token.
3162  SMLoc RegLoc = Parser.getTok().getLoc();
3163
3164  int Reg = tryParseRegister();
3165  if (Reg == -1) {
3166    Error(RegLoc, "register expected");
3167    return MatchOperand_ParseFail;
3168  }
3169  unsigned Count = 1;
3170  int Spacing = 0;
3171  unsigned FirstReg = Reg;
3172  // The list is of D registers, but we also allow Q regs and just interpret
3173  // them as the two D sub-registers.
3174  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3175    FirstReg = Reg = getDRegFromQReg(Reg);
3176    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3177                 // it's ambiguous with four-register single spaced.
3178    ++Reg;
3179    ++Count;
3180  }
3181
3182  SMLoc E;
3183  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3184    return MatchOperand_ParseFail;
3185
3186  while (Parser.getTok().is(AsmToken::Comma) ||
3187         Parser.getTok().is(AsmToken::Minus)) {
3188    if (Parser.getTok().is(AsmToken::Minus)) {
3189      if (!Spacing)
3190        Spacing = 1; // Register range implies a single spaced list.
3191      else if (Spacing == 2) {
3192        Error(Parser.getTok().getLoc(),
3193              "sequential registers in double spaced list");
3194        return MatchOperand_ParseFail;
3195      }
3196      Parser.Lex(); // Eat the minus.
3197      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3198      int EndReg = tryParseRegister();
3199      if (EndReg == -1) {
3200        Error(AfterMinusLoc, "register expected");
3201        return MatchOperand_ParseFail;
3202      }
3203      // Allow Q regs and just interpret them as the two D sub-registers.
3204      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3205        EndReg = getDRegFromQReg(EndReg) + 1;
3206      // If the register is the same as the start reg, there's nothing
3207      // more to do.
3208      if (Reg == EndReg)
3209        continue;
3210      // The register must be in the same register class as the first.
3211      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3212        Error(AfterMinusLoc, "invalid register in register list");
3213        return MatchOperand_ParseFail;
3214      }
3215      // Ranges must go from low to high.
3216      if (Reg > EndReg) {
3217        Error(AfterMinusLoc, "bad range in register list");
3218        return MatchOperand_ParseFail;
3219      }
3220      // Parse the lane specifier if present.
3221      VectorLaneTy NextLaneKind;
3222      unsigned NextLaneIndex;
3223      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3224          MatchOperand_Success)
3225        return MatchOperand_ParseFail;
3226      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3227        Error(AfterMinusLoc, "mismatched lane index in register list");
3228        return MatchOperand_ParseFail;
3229      }
3230
3231      // Add all the registers in the range to the register list.
3232      Count += EndReg - Reg;
3233      Reg = EndReg;
3234      continue;
3235    }
3236    Parser.Lex(); // Eat the comma.
3237    RegLoc = Parser.getTok().getLoc();
3238    int OldReg = Reg;
3239    Reg = tryParseRegister();
3240    if (Reg == -1) {
3241      Error(RegLoc, "register expected");
3242      return MatchOperand_ParseFail;
3243    }
3244    // Vector register lists must be contiguous.
3245    // It's OK to use the enumeration values directly here, as the
3246    // VFP register classes have the enum sorted properly.
3247    //
3248    // The list is of D registers, but we also allow Q regs and just interpret
3249    // them as the two D sub-registers.
3250    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3251      if (!Spacing)
3252        Spacing = 1; // Register range implies a single spaced list.
3253      else if (Spacing == 2) {
3254        Error(RegLoc,
3255              "invalid register in double-spaced list (must be 'D' register)");
3256        return MatchOperand_ParseFail;
3257      }
3258      Reg = getDRegFromQReg(Reg);
3259      if (Reg != OldReg + 1) {
3260        Error(RegLoc, "non-contiguous register range");
3261        return MatchOperand_ParseFail;
3262      }
3263      ++Reg;
3264      Count += 2;
3265      // Parse the lane specifier if present.
3266      VectorLaneTy NextLaneKind;
3267      unsigned NextLaneIndex;
3268      SMLoc LaneLoc = Parser.getTok().getLoc();
3269      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3270          MatchOperand_Success)
3271        return MatchOperand_ParseFail;
3272      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3273        Error(LaneLoc, "mismatched lane index in register list");
3274        return MatchOperand_ParseFail;
3275      }
3276      continue;
3277    }
3278    // Normal D register.
3279    // Figure out the register spacing (single or double) of the list if
3280    // we don't know it already.
3281    if (!Spacing)
3282      Spacing = 1 + (Reg == OldReg + 2);
3283
3284    // Just check that it's contiguous and keep going.
3285    if (Reg != OldReg + Spacing) {
3286      Error(RegLoc, "non-contiguous register range");
3287      return MatchOperand_ParseFail;
3288    }
3289    ++Count;
3290    // Parse the lane specifier if present.
3291    VectorLaneTy NextLaneKind;
3292    unsigned NextLaneIndex;
3293    SMLoc EndLoc = Parser.getTok().getLoc();
3294    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3295      return MatchOperand_ParseFail;
3296    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3297      Error(EndLoc, "mismatched lane index in register list");
3298      return MatchOperand_ParseFail;
3299    }
3300  }
3301
3302  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3303    Error(Parser.getTok().getLoc(), "'}' expected");
3304    return MatchOperand_ParseFail;
3305  }
3306  E = Parser.getTok().getEndLoc();
3307  Parser.Lex(); // Eat '}' token.
3308
3309  switch (LaneKind) {
3310  case NoLanes:
3311    // Two-register operands are converted to the
3312    // composite register classes.
3313    if (Count == 2) {
3314      const MCRegisterClass *RC = (Spacing == 1) ?
3315        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3316        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3317      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3318    }
3319
3320    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3321                                                    (Spacing == 2), S, E));
3322    break;
3323  case AllLanes:
3324    // Two-register operands are converted to the
3325    // composite register classes.
3326    if (Count == 2) {
3327      const MCRegisterClass *RC = (Spacing == 1) ?
3328        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3329        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3330      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3331    }
3332    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3333                                                            (Spacing == 2),
3334                                                            S, E));
3335    break;
3336  case IndexedLane:
3337    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3338                                                           LaneIndex,
3339                                                           (Spacing == 2),
3340                                                           S, E));
3341    break;
3342  }
3343  return MatchOperand_Success;
3344}
3345
3346/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
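/// For illustration (assumed syntax): the "ish" in "dmb ish"; a plain
/// immediate in [0,15], e.g. "dmb #15", is also accepted.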
3347ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3348parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3349  SMLoc S = Parser.getTok().getLoc();
3350  const AsmToken &Tok = Parser.getTok();
3351  unsigned Opt;
3352
3353  if (Tok.is(AsmToken::Identifier)) {
3354    StringRef OptStr = Tok.getString();
3355
3356    Opt = StringSwitch<unsigned>(OptStr.lower())
3357      .Case("sy",    ARM_MB::SY)
3358      .Case("st",    ARM_MB::ST)
3359      .Case("sh",    ARM_MB::ISH)
3360      .Case("ish",   ARM_MB::ISH)
3361      .Case("shst",  ARM_MB::ISHST)
3362      .Case("ishst", ARM_MB::ISHST)
3363      .Case("nsh",   ARM_MB::NSH)
3364      .Case("un",    ARM_MB::NSH)
3365      .Case("nshst", ARM_MB::NSHST)
3366      .Case("unst",  ARM_MB::NSHST)
3367      .Case("osh",   ARM_MB::OSH)
3368      .Case("oshst", ARM_MB::OSHST)
3369      .Default(~0U);
3370
3371    if (Opt == ~0U)
3372      return MatchOperand_NoMatch;
3373
3374    Parser.Lex(); // Eat identifier token.
3375  } else if (Tok.is(AsmToken::Hash) ||
3376             Tok.is(AsmToken::Dollar) ||
3377             Tok.is(AsmToken::Integer)) {
3378    if (Parser.getTok().isNot(AsmToken::Integer))
3379      Parser.Lex(); // Eat the '#'.
3380    SMLoc Loc = Parser.getTok().getLoc();
3381
3382    const MCExpr *MemBarrierID;
3383    if (getParser().parseExpression(MemBarrierID)) {
3384      Error(Loc, "illegal expression");
3385      return MatchOperand_ParseFail;
3386    }
3387
3388    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3389    if (!CE) {
3390      Error(Loc, "constant expression expected");
3391      return MatchOperand_ParseFail;
3392    }
3393
3394    int Val = CE->getValue();
3395    if (Val & ~0xf) {
3396      Error(Loc, "immediate value out of range");
3397      return MatchOperand_ParseFail;
3398    }
3399
3400    Opt = ARM_MB::RESERVED_0 + Val;
3401  } else
3402    return MatchOperand_ParseFail;
3403
3404  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3405  return MatchOperand_Success;
3406}
3407
3408/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
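/// For illustration (assumed syntax): the "if" in "cpsie if" or the "aif" in
/// "cpsid aif".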
3409ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3410parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3411  SMLoc S = Parser.getTok().getLoc();
3412  const AsmToken &Tok = Parser.getTok();
3413  if (!Tok.is(AsmToken::Identifier))
3414    return MatchOperand_NoMatch;
3415  StringRef IFlagsStr = Tok.getString();
3416
3417  // An iflags string of "none" is interpreted to mean that none of the AIF
3418  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3419  unsigned IFlags = 0;
3420  if (IFlagsStr != "none") {
3421    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3422      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3423        .Case("a", ARM_PROC::A)
3424        .Case("i", ARM_PROC::I)
3425        .Case("f", ARM_PROC::F)
3426        .Default(~0U);
3427
3428      // If a specific iflag is already set, the letter appears more than
3429      // once, which is not acceptable.
3430      if (Flag == ~0U || (IFlags & Flag))
3431        return MatchOperand_NoMatch;
3432
3433      IFlags |= Flag;
3434    }
3435  }
3436
3437  Parser.Lex(); // Eat identifier token.
3438  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3439  return MatchOperand_Success;
3440}
3441
3442/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
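/// For illustration (assumed syntax): the "apsr_nzcvq" in "msr apsr_nzcvq, r0"
/// (M-class) or the "cpsr_fc" in "msr cpsr_fc, r1".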
3443ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3444parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3445  SMLoc S = Parser.getTok().getLoc();
3446  const AsmToken &Tok = Parser.getTok();
3447  if (!Tok.is(AsmToken::Identifier))
3448    return MatchOperand_NoMatch;
3449  StringRef Mask = Tok.getString();
3450
3451  if (isMClass()) {
3452    // See ARMv6-M 10.1.1
3453    std::string Name = Mask.lower();
3454    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3455      // Note: in the documentation:
3456      //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3457      //  for MSR APSR_nzcvq.
3458      // but we do make it an alias here.  This is so that we get the "mask
3459      // encoding" bits correct on MSR APSR writes.
3460      //
3461      // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3462      // should really only be allowed when writing a special register.  Note
3463      // they get dropped in the MRS instruction reading a special register as
3464      // the SYSm field is only 8 bits.
3465      //
3466      // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3467      // includes the DSP extension but that is not checked.
3468      .Case("apsr", 0x800)
3469      .Case("apsr_nzcvq", 0x800)
3470      .Case("apsr_g", 0x400)
3471      .Case("apsr_nzcvqg", 0xc00)
3472      .Case("iapsr", 0x801)
3473      .Case("iapsr_nzcvq", 0x801)
3474      .Case("iapsr_g", 0x401)
3475      .Case("iapsr_nzcvqg", 0xc01)
3476      .Case("eapsr", 0x802)
3477      .Case("eapsr_nzcvq", 0x802)
3478      .Case("eapsr_g", 0x402)
3479      .Case("eapsr_nzcvqg", 0xc02)
3480      .Case("xpsr", 0x803)
3481      .Case("xpsr_nzcvq", 0x803)
3482      .Case("xpsr_g", 0x403)
3483      .Case("xpsr_nzcvqg", 0xc03)
3484      .Case("ipsr", 0x805)
3485      .Case("epsr", 0x806)
3486      .Case("iepsr", 0x807)
3487      .Case("msp", 0x808)
3488      .Case("psp", 0x809)
3489      .Case("primask", 0x810)
3490      .Case("basepri", 0x811)
3491      .Case("basepri_max", 0x812)
3492      .Case("faultmask", 0x813)
3493      .Case("control", 0x814)
3494      .Default(~0U);
3495
3496    if (FlagsVal == ~0U)
3497      return MatchOperand_NoMatch;
3498
3499    if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3500      // basepri, basepri_max and faultmask are only valid for ARMv7-M.
3501      return MatchOperand_NoMatch;
3502
3503    Parser.Lex(); // Eat identifier token.
3504    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3505    return MatchOperand_Success;
3506  }
3507
3508  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3509  size_t Start = 0, Next = Mask.find('_');
3510  StringRef Flags = "";
3511  std::string SpecReg = Mask.slice(Start, Next).lower();
3512  if (Next != StringRef::npos)
3513    Flags = Mask.slice(Next+1, Mask.size());
3514
3515  // FlagsVal contains the complete mask:
3516  // 3-0: Mask
3517  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3518  unsigned FlagsVal = 0;
3519
3520  if (SpecReg == "apsr") {
3521    FlagsVal = StringSwitch<unsigned>(Flags)
3522    .Case("nzcvq",  0x8) // same as CPSR_f
3523    .Case("g",      0x4) // same as CPSR_s
3524    .Case("nzcvqg", 0xc) // same as CPSR_fs
3525    .Default(~0U);
3526
3527    if (FlagsVal == ~0U) {
3528      if (!Flags.empty())
3529        return MatchOperand_NoMatch;
3530      else
3531        FlagsVal = 8; // No flag
3532    }
3533  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3534    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3535    if (Flags == "all" || Flags == "")
3536      Flags = "fc";
3537    for (int i = 0, e = Flags.size(); i != e; ++i) {
3538      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3539      .Case("c", 1)
3540      .Case("x", 2)
3541      .Case("s", 4)
3542      .Case("f", 8)
3543      .Default(~0U);
3544
3545      // If a specific flag is already set, the letter appears more than
3546      // once, which is not acceptable.
3547      if (FlagsVal == ~0U || (FlagsVal & Flag))
3548        return MatchOperand_NoMatch;
3549      FlagsVal |= Flag;
3550    }
3551  } else // No match for special register.
3552    return MatchOperand_NoMatch;
3553
3554  // Special register without flags is NOT equivalent to "fc" flags.
3555  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3556  // two lines would enable gas compatibility at the expense of breaking
3557  // round-tripping.
3558  //
3559  // if (!FlagsVal)
3560  //  FlagsVal = 0x9;
3561
3562  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3563  if (SpecReg == "spsr")
3564    FlagsVal |= 16;
3565
3566  Parser.Lex(); // Eat identifier token.
3567  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3568  return MatchOperand_Success;
3569}
3570
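/// parsePKHImm - Parse the 'Op #imm' shift operand for the PKH instructions,
/// e.g. (assumed syntax) the "lsl #8" in "pkhbt r0, r1, r2, lsl #8" or the
/// "asr #16" in "pkhtb r0, r1, r2, asr #16".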
3571ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3572parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3573            int Low, int High) {
3574  const AsmToken &Tok = Parser.getTok();
3575  if (Tok.isNot(AsmToken::Identifier)) {
3576    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3577    return MatchOperand_ParseFail;
3578  }
3579  StringRef ShiftName = Tok.getString();
3580  std::string LowerOp = Op.lower();
3581  std::string UpperOp = Op.upper();
3582  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3583    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3584    return MatchOperand_ParseFail;
3585  }
3586  Parser.Lex(); // Eat shift type token.
3587
3588  // There must be a '#' and a shift amount.
3589  if (Parser.getTok().isNot(AsmToken::Hash) &&
3590      Parser.getTok().isNot(AsmToken::Dollar)) {
3591    Error(Parser.getTok().getLoc(), "'#' expected");
3592    return MatchOperand_ParseFail;
3593  }
3594  Parser.Lex(); // Eat hash token.
3595
3596  const MCExpr *ShiftAmount;
3597  SMLoc Loc = Parser.getTok().getLoc();
3598  SMLoc EndLoc;
3599  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3600    Error(Loc, "illegal expression");
3601    return MatchOperand_ParseFail;
3602  }
3603  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3604  if (!CE) {
3605    Error(Loc, "constant expression expected");
3606    return MatchOperand_ParseFail;
3607  }
3608  int Val = CE->getValue();
3609  if (Val < Low || Val > High) {
3610    Error(Loc, "immediate value out of range");
3611    return MatchOperand_ParseFail;
3612  }
3613
3614  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
3615
3616  return MatchOperand_Success;
3617}
3618
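/// parseSetEndImm - Parse the endianness specifier for SETEND, e.g. (assumed
/// syntax) the "be" in "setend be" or the "le" in "setend le".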
3619ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3620parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3621  const AsmToken &Tok = Parser.getTok();
3622  SMLoc S = Tok.getLoc();
3623  if (Tok.isNot(AsmToken::Identifier)) {
3624    Error(S, "'be' or 'le' operand expected");
3625    return MatchOperand_ParseFail;
3626  }
3627  int Val = StringSwitch<int>(Tok.getString().lower())
3628    .Case("be", 1)
3629    .Case("le", 0)
3630    .Default(-1);
3631  Parser.Lex(); // Eat the token.
3632
3633  if (Val == -1) {
3634    Error(S, "'be' or 'le' operand expected");
3635    return MatchOperand_ParseFail;
3636  }
3637  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3638                                                                  getContext()),
3639                                           S, Tok.getEndLoc()));
3640  return MatchOperand_Success;
3641}
3642
3643/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3644/// instructions. Legal values are:
3645///     lsl #n  'n' in [0,31]
3646///     asr #n  'n' in [1,32]
3647///             n == 32 encoded as n == 0.
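/// For illustration (assumed syntax): the "lsl #4" in
/// "ssat r0, #8, r1, lsl #4".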
3648ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3649parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3650  const AsmToken &Tok = Parser.getTok();
3651  SMLoc S = Tok.getLoc();
3652  if (Tok.isNot(AsmToken::Identifier)) {
3653    Error(S, "shift operator 'asr' or 'lsl' expected");
3654    return MatchOperand_ParseFail;
3655  }
3656  StringRef ShiftName = Tok.getString();
3657  bool isASR;
3658  if (ShiftName == "lsl" || ShiftName == "LSL")
3659    isASR = false;
3660  else if (ShiftName == "asr" || ShiftName == "ASR")
3661    isASR = true;
3662  else {
3663    Error(S, "shift operator 'asr' or 'lsl' expected");
3664    return MatchOperand_ParseFail;
3665  }
3666  Parser.Lex(); // Eat the operator.
3667
3668  // A '#' and a shift amount.
3669  if (Parser.getTok().isNot(AsmToken::Hash) &&
3670      Parser.getTok().isNot(AsmToken::Dollar)) {
3671    Error(Parser.getTok().getLoc(), "'#' expected");
3672    return MatchOperand_ParseFail;
3673  }
3674  Parser.Lex(); // Eat hash token.
3675  SMLoc ExLoc = Parser.getTok().getLoc();
3676
3677  const MCExpr *ShiftAmount;
3678  SMLoc EndLoc;
3679  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3680    Error(ExLoc, "malformed shift expression");
3681    return MatchOperand_ParseFail;
3682  }
3683  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3684  if (!CE) {
3685    Error(ExLoc, "shift amount must be an immediate");
3686    return MatchOperand_ParseFail;
3687  }
3688
3689  int64_t Val = CE->getValue();
3690  if (isASR) {
3691    // Shift amount must be in [1,32]
3692    if (Val < 1 || Val > 32) {
3693      Error(ExLoc, "'asr' shift amount must be in range [1,32]");
3694      return MatchOperand_ParseFail;
3695    }
3696    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3697    if (isThumb() && Val == 32) {
3698      Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
3699      return MatchOperand_ParseFail;
3700    }
3701    if (Val == 32) Val = 0;
3702  } else {
3703    // Shift amount must be in [0,31]
3704    if (Val < 0 || Val > 31) {
3705      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
3706      return MatchOperand_ParseFail;
3707    }
3708  }
3709
3710  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
3711
3712  return MatchOperand_Success;
3713}
3714
3715/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3716/// of instructions. Legal values are:
3717///     ror #n  'n' in {0, 8, 16, 24}
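/// For illustration (assumed syntax): the "ror #8" in "sxtb r0, r1, ror #8".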
3718ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3719parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3720  const AsmToken &Tok = Parser.getTok();
3721  SMLoc S = Tok.getLoc();
3722  if (Tok.isNot(AsmToken::Identifier))
3723    return MatchOperand_NoMatch;
3724  StringRef ShiftName = Tok.getString();
3725  if (ShiftName != "ror" && ShiftName != "ROR")
3726    return MatchOperand_NoMatch;
3727  Parser.Lex(); // Eat the operator.
3728
3729  // A '#' and a rotate amount.
3730  if (Parser.getTok().isNot(AsmToken::Hash) &&
3731      Parser.getTok().isNot(AsmToken::Dollar)) {
3732    Error(Parser.getTok().getLoc(), "'#' expected");
3733    return MatchOperand_ParseFail;
3734  }
3735  Parser.Lex(); // Eat hash token.
3736  SMLoc ExLoc = Parser.getTok().getLoc();
3737
3738  const MCExpr *ShiftAmount;
3739  SMLoc EndLoc;
3740  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3741    Error(ExLoc, "malformed rotate expression");
3742    return MatchOperand_ParseFail;
3743  }
3744  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3745  if (!CE) {
3746    Error(ExLoc, "rotate amount must be an immediate");
3747    return MatchOperand_ParseFail;
3748  }
3749
3750  int64_t Val = CE->getValue();
3751  // Rotate amount must be in {0, 8, 16, 24}. Zero is an undocumented
3752  // extension; normally it is represented in asm by omitting the rotate
3753  // operand entirely.
3754  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3755    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
3756    return MatchOperand_ParseFail;
3757  }
3758
3759  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
3760
3761  return MatchOperand_Success;
3762}
3763
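/// parseBitfield - Parse the lsb/width immediate pair for bitfield
/// instructions such as BFI, e.g. (assumed syntax) the "#8, #4" in
/// "bfi r0, r1, #8, #4".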
3764ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3765parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3766  SMLoc S = Parser.getTok().getLoc();
3767  // The bitfield descriptor is really two operands, the LSB and the width.
3768  if (Parser.getTok().isNot(AsmToken::Hash) &&
3769      Parser.getTok().isNot(AsmToken::Dollar)) {
3770    Error(Parser.getTok().getLoc(), "'#' expected");
3771    return MatchOperand_ParseFail;
3772  }
3773  Parser.Lex(); // Eat hash token.
3774
3775  const MCExpr *LSBExpr;
3776  SMLoc E = Parser.getTok().getLoc();
3777  if (getParser().parseExpression(LSBExpr)) {
3778    Error(E, "malformed immediate expression");
3779    return MatchOperand_ParseFail;
3780  }
3781  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3782  if (!CE) {
3783    Error(E, "'lsb' operand must be an immediate");
3784    return MatchOperand_ParseFail;
3785  }
3786
3787  int64_t LSB = CE->getValue();
3788  // The LSB must be in the range [0,31]
3789  if (LSB < 0 || LSB > 31) {
3790    Error(E, "'lsb' operand must be in the range [0,31]");
3791    return MatchOperand_ParseFail;
3792  }
3793  E = Parser.getTok().getLoc();
3794
3795  // Expect another immediate operand.
3796  if (Parser.getTok().isNot(AsmToken::Comma)) {
3797    Error(Parser.getTok().getLoc(), "too few operands");
3798    return MatchOperand_ParseFail;
3799  }
3800  Parser.Lex(); // Eat the comma.
3801  if (Parser.getTok().isNot(AsmToken::Hash) &&
3802      Parser.getTok().isNot(AsmToken::Dollar)) {
3803    Error(Parser.getTok().getLoc(), "'#' expected");
3804    return MatchOperand_ParseFail;
3805  }
3806  Parser.Lex(); // Eat hash token.
3807
3808  const MCExpr *WidthExpr;
3809  SMLoc EndLoc;
3810  if (getParser().parseExpression(WidthExpr, EndLoc)) {
3811    Error(E, "malformed immediate expression");
3812    return MatchOperand_ParseFail;
3813  }
3814  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3815  if (!CE) {
3816    Error(E, "'width' operand must be an immediate");
3817    return MatchOperand_ParseFail;
3818  }
3819
3820  int64_t Width = CE->getValue();
3821  // The width must be in the range [1,32-lsb]
3822  if (Width < 1 || Width > 32 - LSB) {
3823    Error(E, "'width' operand must be in the range [1,32-lsb]");
3824    return MatchOperand_ParseFail;
3825  }
3826
3827  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
3828
3829  return MatchOperand_Success;
3830}
3831
3832ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3833parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3834  // Check for a post-index addressing register operand. Specifically:
3835  // postidx_reg := '+' register {, shift}
3836  //              | '-' register {, shift}
3837  //              | register {, shift}
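  // For illustration (assumed syntax): the "r2, lsl #2" in
  // "ldr r0, [r1], r2, lsl #2".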
3838
3839  // This method must return MatchOperand_NoMatch without consuming any tokens
3840  // in the case where there is no match, as other alternatives take other
3841  // parse methods.
3842  AsmToken Tok = Parser.getTok();
3843  SMLoc S = Tok.getLoc();
3844  bool haveEaten = false;
3845  bool isAdd = true;
3846  if (Tok.is(AsmToken::Plus)) {
3847    Parser.Lex(); // Eat the '+' token.
3848    haveEaten = true;
3849  } else if (Tok.is(AsmToken::Minus)) {
3850    Parser.Lex(); // Eat the '-' token.
3851    isAdd = false;
3852    haveEaten = true;
3853  }
3854
3855  SMLoc E = Parser.getTok().getEndLoc();
3856  int Reg = tryParseRegister();
3857  if (Reg == -1) {
3858    if (!haveEaten)
3859      return MatchOperand_NoMatch;
3860    Error(Parser.getTok().getLoc(), "register expected");
3861    return MatchOperand_ParseFail;
3862  }
3863
3864  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3865  unsigned ShiftImm = 0;
3866  if (Parser.getTok().is(AsmToken::Comma)) {
3867    Parser.Lex(); // Eat the ','.
3868    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3869      return MatchOperand_ParseFail;
3870
3871    // FIXME: Only approximates end...may include intervening whitespace.
3872    E = Parser.getTok().getLoc();
3873  }
3874
3875  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3876                                                  ShiftImm, S, E));
3877
3878  return MatchOperand_Success;
3879}
3880
3881ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3882parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3883  // Check for a post-index addressing register operand. Specifically:
3884  // am3offset := '+' register
3885  //              | '-' register
3886  //              | register
3887  //              | # imm
3888  //              | # + imm
3889  //              | # - imm
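  // For illustration (assumed syntax): the "#8" in "ldrd r0, r1, [r2], #8"
  // or the "r3" in "ldrd r0, r1, [r2], r3".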
3890
3891  // This method must return MatchOperand_NoMatch without consuming any tokens
3892  // in the case where there is no match, as other alternatives take other
3893  // parse methods.
3894  AsmToken Tok = Parser.getTok();
3895  SMLoc S = Tok.getLoc();
3896
3897  // Do immediates first, as we always parse those if we have a '#'.
3898  if (Parser.getTok().is(AsmToken::Hash) ||
3899      Parser.getTok().is(AsmToken::Dollar)) {
3900    Parser.Lex(); // Eat the '#'.
3901    // Explicitly look for a '-', as we need to encode negative zero
3902    // differently.
3903    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3904    const MCExpr *Offset;
3905    SMLoc E;
3906    if (getParser().parseExpression(Offset, E))
3907      return MatchOperand_ParseFail;
3908    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3909    if (!CE) {
3910      Error(S, "constant expression expected");
3911      return MatchOperand_ParseFail;
3912    }
3913    // Negative zero is encoded as the flag value INT32_MIN.
3914    int32_t Val = CE->getValue();
3915    if (isNegative && Val == 0)
3916      Val = INT32_MIN;
3917
3918    Operands.push_back(
3919      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3920
3921    return MatchOperand_Success;
3922  }
3923
3924
3925  bool haveEaten = false;
3926  bool isAdd = true;
3927  if (Tok.is(AsmToken::Plus)) {
3928    Parser.Lex(); // Eat the '+' token.
3929    haveEaten = true;
3930  } else if (Tok.is(AsmToken::Minus)) {
3931    Parser.Lex(); // Eat the '-' token.
3932    isAdd = false;
3933    haveEaten = true;
3934  }
3935
3936  Tok = Parser.getTok();
3937  int Reg = tryParseRegister();
3938  if (Reg == -1) {
3939    if (!haveEaten)
3940      return MatchOperand_NoMatch;
3941    Error(Tok.getLoc(), "register expected");
3942    return MatchOperand_ParseFail;
3943  }
3944
3945  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3946                                                  0, S, Tok.getEndLoc()));
3947
3948  return MatchOperand_Success;
3949}
3950
3951/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3952/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3953/// when they refer to multiple MIOperands inside a single one.
3954void ARMAsmParser::
3955cvtT2LdrdPre(MCInst &Inst,
3956             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3957  // Rt, Rt2
3958  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3959  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3960  // Create a writeback register dummy placeholder.
3961  Inst.addOperand(MCOperand::CreateReg(0));
3962  // addr
3963  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3964  // pred
3965  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3966}
3967
3968/// cvtT2StrdPre - Convert parsed operands to MCInst.
3969/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3970/// when they refer to multiple MIOperands inside a single one.
3971void ARMAsmParser::
3972cvtT2StrdPre(MCInst &Inst,
3973             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3974  // Create a writeback register dummy placeholder.
3975  Inst.addOperand(MCOperand::CreateReg(0));
3976  // Rt, Rt2
3977  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3978  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3979  // addr
3980  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3981  // pred
3982  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3983}
3984
3985/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3986/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3987/// when they refer to multiple MIOperands inside a single one.
3988void ARMAsmParser::
3989cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
3990                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3991  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3992
3993  // Create a writeback register dummy placeholder.
3994  Inst.addOperand(MCOperand::CreateImm(0));
3995
3996  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3997  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3998}
3999
4000/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
4001/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4002/// when they refer to multiple MIOperands inside a single one.
4003void ARMAsmParser::
4004cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
4005                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4006  // Create a writeback register dummy placeholder.
4007  Inst.addOperand(MCOperand::CreateImm(0));
4008  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4009  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
4010  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4011}
4012
4013/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
4014/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4015/// when they refer to multiple MIOperands inside a single one.
4016void ARMAsmParser::
4017cvtLdWriteBackRegAddrMode2(MCInst &Inst,
4018                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4019  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4020
4021  // Create a writeback register dummy placeholder.
4022  Inst.addOperand(MCOperand::CreateImm(0));
4023
4024  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
4025  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4026}
4027
4028/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
4029/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4030/// when they refer to multiple MIOperands inside a single one.
4031void ARMAsmParser::
4032cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
4033                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4034  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4035
4036  // Create a writeback register dummy placeholder.
4037  Inst.addOperand(MCOperand::CreateImm(0));
4038
4039  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
4040  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4041}
4042
4043
4044/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
4045/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4046/// when they refer to multiple MIOperands inside a single one.
4047void ARMAsmParser::
4048cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
4049                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4050  // Create a writeback register dummy placeholder.
4051  Inst.addOperand(MCOperand::CreateImm(0));
4052  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4053  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
4054  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4055}
4056
4057/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
4058/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4059/// when they refer to multiple MIOperands inside a single one.
4060void ARMAsmParser::
4061cvtStWriteBackRegAddrMode2(MCInst &Inst,
4062                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4063  // Create a writeback register dummy placeholder.
4064  Inst.addOperand(MCOperand::CreateImm(0));
4065  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4066  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
4067  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4068}
4069
4070/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4071/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4072/// when they refer to multiple MIOperands inside a single one.
4073void ARMAsmParser::
4074cvtStWriteBackRegAddrMode3(MCInst &Inst,
4075                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4076  // Create a writeback register dummy placeholder.
4077  Inst.addOperand(MCOperand::CreateImm(0));
4078  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4079  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4080  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4081}
4082
4083/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
4084/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4085/// when they refer to multiple MIOperands inside a single one.
4086void ARMAsmParser::
4087cvtLdExtTWriteBackImm(MCInst &Inst,
4088                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4089  // Rt
4090  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4091  // Create a writeback register dummy placeholder.
4092  Inst.addOperand(MCOperand::CreateImm(0));
4093  // addr
4094  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4095  // offset
4096  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4097  // pred
4098  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4099}
4100
4101/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
4102/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4103/// when they refer to multiple MIOperands inside a single one.
4104void ARMAsmParser::
4105cvtLdExtTWriteBackReg(MCInst &Inst,
4106                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4107  // Rt
4108  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4109  // Create a writeback register dummy placeholder.
4110  Inst.addOperand(MCOperand::CreateImm(0));
4111  // addr
4112  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4113  // offset
4114  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4115  // pred
4116  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4117}
4118
4119/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
4120/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4121/// when they refer to multiple MIOperands inside a single one.
4122void ARMAsmParser::
4123cvtStExtTWriteBackImm(MCInst &Inst,
4124                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4125  // Create a writeback register dummy placeholder.
4126  Inst.addOperand(MCOperand::CreateImm(0));
4127  // Rt
4128  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4129  // addr
4130  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4131  // offset
4132  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4133  // pred
4134  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4135}
4136
4137/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
4138/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4139/// when they refer to multiple MIOperands inside a single one.
4140void ARMAsmParser::
4141cvtStExtTWriteBackReg(MCInst &Inst,
4142                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4143  // Create a writeback register dummy placeholder.
4144  Inst.addOperand(MCOperand::CreateImm(0));
4145  // Rt
4146  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4147  // addr
4148  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4149  // offset
4150  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4151  // pred
4152  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4153}
4154
4155/// cvtLdrdPre - Convert parsed operands to MCInst.
4156/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4157/// when they refer to multiple MIOperands inside a single one.
4158void ARMAsmParser::
4159cvtLdrdPre(MCInst &Inst,
4160           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4161  // Rt, Rt2
4162  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4163  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4164  // Create a writeback register dummy placeholder.
4165  Inst.addOperand(MCOperand::CreateImm(0));
4166  // addr
4167  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4168  // pred
4169  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4170}
4171
4172/// cvtStrdPre - Convert parsed operands to MCInst.
4173/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4174/// when they refer to multiple MIOperands inside a single one.
4175void ARMAsmParser::
4176cvtStrdPre(MCInst &Inst,
4177           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4178  // Create a writeback register dummy placeholder.
4179  Inst.addOperand(MCOperand::CreateImm(0));
4180  // Rt, Rt2
4181  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4182  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4183  // addr
4184  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4185  // pred
4186  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4187}
4188
4189/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4190/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4191/// when they refer to multiple MIOperands inside a single one.
4192void ARMAsmParser::
4193cvtLdWriteBackRegAddrMode3(MCInst &Inst,
4194                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4195  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4196  // Create a writeback register dummy placeholder.
4197  Inst.addOperand(MCOperand::CreateImm(0));
4198  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4199  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4200}
4201
4202/// cvtThumbMultiply - Convert parsed operands to MCInst.
4203/// Needed here because the Asm Gen Matcher can't handle properly tied operands
4204/// when they refer to multiple MIOperands inside a single one.
4205void ARMAsmParser::
4206cvtThumbMultiply(MCInst &Inst,
4207           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4208  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4209  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4210  // If we have a three-operand form, make sure to set Rn to be the operand
4211  // that isn't the same as Rd.
4212  unsigned RegOp = 4;
4213  if (Operands.size() == 6 &&
4214      ((ARMOperand*)Operands[4])->getReg() ==
4215        ((ARMOperand*)Operands[3])->getReg())
4216    RegOp = 5;
4217  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4218  Inst.addOperand(Inst.getOperand(0));
4219  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4220}
4221
4222void ARMAsmParser::
4223cvtVLDwbFixed(MCInst &Inst,
4224              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4225  // Vd
4226  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4227  // Create a writeback register dummy placeholder.
4228  Inst.addOperand(MCOperand::CreateImm(0));
4229  // Vn
4230  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4231  // pred
4232  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4233}
4234
4235void ARMAsmParser::
4236cvtVLDwbRegister(MCInst &Inst,
4237                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4238  // Vd
4239  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4240  // Create a writeback register dummy placeholder.
4241  Inst.addOperand(MCOperand::CreateImm(0));
4242  // Vn
4243  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4244  // Vm
4245  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4246  // pred
4247  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4248}
4249
4250void ARMAsmParser::
4251cvtVSTwbFixed(MCInst &Inst,
4252              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4253  // Create a writeback register dummy placeholder.
4254  Inst.addOperand(MCOperand::CreateImm(0));
4255  // Vn
4256  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4257  // Vt
4258  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4259  // pred
4260  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4261}
4262
4263void ARMAsmParser::
4264cvtVSTwbRegister(MCInst &Inst,
4265                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4266  // Create a writeback register dummy placeholder.
4267  Inst.addOperand(MCOperand::CreateImm(0));
4268  // Vn
4269  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4270  // Vm
4271  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4272  // Vt
4273  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4274  // pred
4275  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4276}
4277
4278/// Parse an ARM memory expression. Return false if successful, otherwise
4279/// return true or emit an error.  The first token must be a '[' when called.
4280bool ARMAsmParser::
4281parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4282  SMLoc S, E;
4283  assert(Parser.getTok().is(AsmToken::LBrac) &&
4284         "Token is not a Left Bracket");
4285  S = Parser.getTok().getLoc();
4286  Parser.Lex(); // Eat left bracket token.
4287
4288  const AsmToken &BaseRegTok = Parser.getTok();
4289  int BaseRegNum = tryParseRegister();
4290  if (BaseRegNum == -1)
4291    return Error(BaseRegTok.getLoc(), "register expected");
4292
4293  // The next token must either be a comma, a colon or a closing bracket.
4294  const AsmToken &Tok = Parser.getTok();
4295  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4296      !Tok.is(AsmToken::RBrac))
4297    return Error(Tok.getLoc(), "malformed memory operand");
4298
4299  if (Tok.is(AsmToken::RBrac)) {
4300    E = Tok.getEndLoc();
4301    Parser.Lex(); // Eat right bracket token.
4302
4303    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4304                                             0, 0, false, S, E));
4305
4306    // If there's a pre-indexing writeback marker, '!', just add it as a token
4307    // operand. It's rather odd, but syntactically valid.
4308    if (Parser.getTok().is(AsmToken::Exclaim)) {
4309      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4310      Parser.Lex(); // Eat the '!'.
4311    }
4312
4313    return false;
4314  }
4315
4316  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4317         "Lost colon or comma in memory operand?!");
4318  if (Tok.is(AsmToken::Comma)) {
4319    Parser.Lex(); // Eat the comma.
4320  }
4321
4322  // If we have a ':', it's an alignment specifier.
4323  if (Parser.getTok().is(AsmToken::Colon)) {
4324    Parser.Lex(); // Eat the ':'.
4325    E = Parser.getTok().getLoc();
4326
4327    const MCExpr *Expr;
4328    if (getParser().parseExpression(Expr))
4329     return true;
4330
4331    // The expression has to be a constant. Memory references with relocations
4332    // don't come through here, as they use the <label> forms of the relevant
4333    // instructions.
4334    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4335    if (!CE)
4336      return Error (E, "constant expression expected");
4337
4338    unsigned Align = 0;
4339    switch (CE->getValue()) {
4340    default:
4341      return Error(E,
4342                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4343    case 16:  Align = 2; break;
4344    case 32:  Align = 4; break;
4345    case 64:  Align = 8; break;
4346    case 128: Align = 16; break;
4347    case 256: Align = 32; break;
4348    }
4349
4350    // Now we should have the closing ']'
4351    if (Parser.getTok().isNot(AsmToken::RBrac))
4352      return Error(Parser.getTok().getLoc(), "']' expected");
4353    E = Parser.getTok().getEndLoc();
4354    Parser.Lex(); // Eat right bracket token.
4355
4356    // Don't worry about range checking the value here. That's handled by
4357    // the is*() predicates.
4358    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4359                                             ARM_AM::no_shift, 0, Align,
4360                                             false, S, E));
4361
4362    // If there's a pre-indexing writeback marker, '!', just add it as a token
4363    // operand.
4364    if (Parser.getTok().is(AsmToken::Exclaim)) {
4365      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4366      Parser.Lex(); // Eat the '!'.
4367    }
4368
4369    return false;
4370  }
4371
4372  // If we have a '#', it's an immediate offset, else assume it's a register
4373  // offset. Be friendly and also accept a plain integer (without a leading
4374  // hash) for gas compatibility.
4375  if (Parser.getTok().is(AsmToken::Hash) ||
4376      Parser.getTok().is(AsmToken::Dollar) ||
4377      Parser.getTok().is(AsmToken::Integer)) {
4378    if (Parser.getTok().isNot(AsmToken::Integer))
4379      Parser.Lex(); // Eat the '#'.
4380    E = Parser.getTok().getLoc();
4381
4382    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4383    const MCExpr *Offset;
4384    if (getParser().parseExpression(Offset))
4385     return true;
4386
4387    // The expression has to be a constant. Memory references with relocations
4388    // don't come through here, as they use the <label> forms of the relevant
4389    // instructions.
4390    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4391    if (!CE)
4392      return Error (E, "constant expression expected");
4393
4394    // If the constant was #-0, represent it as INT32_MIN.
4395    int32_t Val = CE->getValue();
4396    if (isNegative && Val == 0)
4397      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4398
4399    // Now we should have the closing ']'
4400    if (Parser.getTok().isNot(AsmToken::RBrac))
4401      return Error(Parser.getTok().getLoc(), "']' expected");
4402    E = Parser.getTok().getEndLoc();
4403    Parser.Lex(); // Eat right bracket token.
4404
4405    // Don't worry about range checking the value here. That's handled by
4406    // the is*() predicates.
4407    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4408                                             ARM_AM::no_shift, 0, 0,
4409                                             false, S, E));
4410
4411    // If there's a pre-indexing writeback marker, '!', just add it as a token
4412    // operand.
4413    if (Parser.getTok().is(AsmToken::Exclaim)) {
4414      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4415      Parser.Lex(); // Eat the '!'.
4416    }
4417
4418    return false;
4419  }
4420
4421  // The register offset is optionally preceded by a '+' or '-'
4422  bool isNegative = false;
4423  if (Parser.getTok().is(AsmToken::Minus)) {
4424    isNegative = true;
4425    Parser.Lex(); // Eat the '-'.
4426  } else if (Parser.getTok().is(AsmToken::Plus)) {
4427    // Nothing to do.
4428    Parser.Lex(); // Eat the '+'.
4429  }
4430
4431  E = Parser.getTok().getLoc();
4432  int OffsetRegNum = tryParseRegister();
4433  if (OffsetRegNum == -1)
4434    return Error(E, "register expected");
4435
4436  // If there's a shift operator, handle it.
4437  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4438  unsigned ShiftImm = 0;
4439  if (Parser.getTok().is(AsmToken::Comma)) {
4440    Parser.Lex(); // Eat the ','.
4441    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4442      return true;
4443  }
4444
4445  // Now we should have the closing ']'
4446  if (Parser.getTok().isNot(AsmToken::RBrac))
4447    return Error(Parser.getTok().getLoc(), "']' expected");
4448  E = Parser.getTok().getEndLoc();
4449  Parser.Lex(); // Eat right bracket token.
4450
4451  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4452                                           ShiftType, ShiftImm, 0, isNegative,
4453                                           S, E));
4454
4455  // If there's a pre-indexing writeback marker, '!', just add it as a token
4456  // operand.
4457  if (Parser.getTok().is(AsmToken::Exclaim)) {
4458    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4459    Parser.Lex(); // Eat the '!'.
4460  }
4461
4462  return false;
4463}
4464
4465/// parseMemRegOffsetShift - one of these two:
4466///   ( lsl | lsr | asr | ror ) , # shift_amount
4467///   rrx
4468/// Returns false if a shift was successfully parsed, true otherwise.
4469bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4470                                          unsigned &Amount) {
4471  SMLoc Loc = Parser.getTok().getLoc();
4472  const AsmToken &Tok = Parser.getTok();
4473  if (Tok.isNot(AsmToken::Identifier))
4474    return true;
4475  StringRef ShiftName = Tok.getString();
4476  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4477      ShiftName == "asl" || ShiftName == "ASL")
4478    St = ARM_AM::lsl;
4479  else if (ShiftName == "lsr" || ShiftName == "LSR")
4480    St = ARM_AM::lsr;
4481  else if (ShiftName == "asr" || ShiftName == "ASR")
4482    St = ARM_AM::asr;
4483  else if (ShiftName == "ror" || ShiftName == "ROR")
4484    St = ARM_AM::ror;
4485  else if (ShiftName == "rrx" || ShiftName == "RRX")
4486    St = ARM_AM::rrx;
4487  else
4488    return Error(Loc, "illegal shift operator");
4489  Parser.Lex(); // Eat shift type token.
4490
4491  // rrx stands alone.
4492  Amount = 0;
4493  if (St != ARM_AM::rrx) {
4494    Loc = Parser.getTok().getLoc();
4495    // A '#' and a shift amount.
4496    const AsmToken &HashTok = Parser.getTok();
4497    if (HashTok.isNot(AsmToken::Hash) &&
4498        HashTok.isNot(AsmToken::Dollar))
4499      return Error(HashTok.getLoc(), "'#' expected");
4500    Parser.Lex(); // Eat hash token.
4501
4502    const MCExpr *Expr;
4503    if (getParser().parseExpression(Expr))
4504      return true;
4505    // Range check the immediate.
4506    // lsl, ror: 0 <= imm <= 31
4507    // lsr, asr: 0 <= imm <= 32
4508    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4509    if (!CE)
4510      return Error(Loc, "shift amount must be an immediate");
4511    int64_t Imm = CE->getValue();
4512    if (Imm < 0 ||
4513        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4514        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4515      return Error(Loc, "immediate shift value out of range");
4516    // If <ShiftTy> #0, canonicalize it to lsl #0 (i.e. no shift).
4517    if (Imm == 0)
4518      St = ARM_AM::lsl;
4519    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4520    if (Imm == 32)
4521      Imm = 0;
4522    Amount = Imm;
4523  }
4524
4525  return false;
4526}
4527
4528/// parseFPImm - A floating point immediate expression operand.
4529ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4530parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4531  // Anything that can accept a floating point constant as an operand
4532  // needs to go through here, as the regular parseExpression is
4533  // integer only.
4534  //
4535  // This routine still creates a generic Immediate operand, containing
4536  // a bitcast of the 64-bit floating point value. The various operands
4537  // that accept floats can check whether the value is valid for them
4538  // via the standard is*() predicates.
4539
4540  SMLoc S = Parser.getTok().getLoc();
4541
4542  if (Parser.getTok().isNot(AsmToken::Hash) &&
4543      Parser.getTok().isNot(AsmToken::Dollar))
4544    return MatchOperand_NoMatch;
4545
4546  // Disambiguate the VMOV forms that can accept an FP immediate.
4547  // vmov.f32 <sreg>, #imm
4548  // vmov.f64 <dreg>, #imm
4549  // vmov.f32 <dreg>, #imm  @ vector f32x2
4550  // vmov.f32 <qreg>, #imm  @ vector f32x4
4551  //
4552  // There are also the NEON VMOV instructions which expect an
4553  // integer constant. Make sure we don't try to parse an FPImm
4554  // for these:
4555  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4556  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4557  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4558                           TyOp->getToken() != ".f64"))
4559    return MatchOperand_NoMatch;
4560
4561  Parser.Lex(); // Eat the '#'.
4562
4563  // Handle negation, as that still comes through as a separate token.
4564  bool isNegative = false;
4565  if (Parser.getTok().is(AsmToken::Minus)) {
4566    isNegative = true;
4567    Parser.Lex();
4568  }
4569  const AsmToken &Tok = Parser.getTok();
4570  SMLoc Loc = Tok.getLoc();
4571  if (Tok.is(AsmToken::Real)) {
4572    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4573    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4574    // If we had a '-' in front, toggle the sign bit.
4575    IntVal ^= (uint64_t)isNegative << 31;
4576    Parser.Lex(); // Eat the token.
4577    Operands.push_back(ARMOperand::CreateImm(
4578          MCConstantExpr::Create(IntVal, getContext()),
4579          S, Parser.getTok().getLoc()));
4580    return MatchOperand_Success;
4581  }
4582  // Also handle plain integers. Instructions which allow floating point
4583  // immediates also allow a raw encoded 8-bit value.
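  // Illustrative example: "vmov.f32 s0, #112" supplies the raw encoding 0x70,
  // which the VFP modified-immediate scheme decodes as 1.0.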
4584  if (Tok.is(AsmToken::Integer)) {
4585    int64_t Val = Tok.getIntVal();
4586    Parser.Lex(); // Eat the token.
4587    if (Val > 255 || Val < 0) {
4588      Error(Loc, "encoded floating point value out of range");
4589      return MatchOperand_ParseFail;
4590    }
4591    double RealVal = ARM_AM::getFPImmFloat(Val);
4592    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4593    Operands.push_back(ARMOperand::CreateImm(
4594        MCConstantExpr::Create(Val, getContext()), S,
4595        Parser.getTok().getLoc()));
4596    return MatchOperand_Success;
4597  }
4598
4599  Error(Loc, "invalid floating point immediate");
4600  return MatchOperand_ParseFail;
4601}
4602
4603/// Parse an ARM instruction operand.  For now this parses the operand regardless
4604/// of the mnemonic.
4605bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4606                                StringRef Mnemonic) {
4607  SMLoc S, E;
4608
4609  // Check if the current operand has a custom associated parser; if so, try to
4610  // custom parse the operand, or fall back to the general approach.
4611  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4612  if (ResTy == MatchOperand_Success)
4613    return false;
4614  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4615  // there was a match, but an error occurred, in which case, just return that
4616  // the operand parsing failed.
4617  if (ResTy == MatchOperand_ParseFail)
4618    return true;
4619
4620  switch (getLexer().getKind()) {
4621  default:
4622    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4623    return true;
4624  case AsmToken::Identifier: {
4625    // If we've seen a branch mnemonic, the next operand must be a label.  This
4626    // is true even if the label is a register name.  So "b r1" means branch to
4627    // label "r1".
4628    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
4629    if (!ExpectLabel) {
4630      if (!tryParseRegisterWithWriteBack(Operands))
4631        return false;
4632      int Res = tryParseShiftRegister(Operands);
4633      if (Res == 0) // success
4634        return false;
4635      else if (Res == -1) // irrecoverable error
4636        return true;
4637      // If this is VMRS, check for the apsr_nzcv operand.
4638      if (Mnemonic == "vmrs" &&
4639          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4640        S = Parser.getTok().getLoc();
4641        Parser.Lex();
4642        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4643        return false;
4644      }
4645    }
4646
4647    // Fall through for the Identifier case that is not a register or a
4648    // special name.
4649  }
4650  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4651  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4652  case AsmToken::String:  // quoted label names.
4653  case AsmToken::Dot: {   // . as a branch target
4654    // This was not a register so parse other operands that start with an
4655    // identifier (like labels) as expressions and create them as immediates.
4656    const MCExpr *IdVal;
4657    S = Parser.getTok().getLoc();
4658    if (getParser().parseExpression(IdVal))
4659      return true;
4660    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4661    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4662    return false;
4663  }
4664  case AsmToken::LBrac:
4665    return parseMemory(Operands);
4666  case AsmToken::LCurly:
4667    return parseRegisterList(Operands);
4668  case AsmToken::Dollar:
4669  case AsmToken::Hash: {
4670    // #42 -> immediate.
4671    S = Parser.getTok().getLoc();
4672    Parser.Lex();
4673
4674    if (Parser.getTok().isNot(AsmToken::Colon)) {
4675      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4676      const MCExpr *ImmVal;
4677      if (getParser().parseExpression(ImmVal))
4678        return true;
4679      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4680      if (CE) {
4681        int32_t Val = CE->getValue();
4682        if (isNegative && Val == 0)
4683          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4684      }
4685      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4686      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4687
4688      // There can be a trailing '!' on operands that we want as a separate
4689      // '!' Token operand. Handle that here. For example, the compatibility
4690      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
4691      if (Parser.getTok().is(AsmToken::Exclaim)) {
4692        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
4693                                                   Parser.getTok().getLoc()));
4694        Parser.Lex(); // Eat exclaim token
4695      }
4696      return false;
4697    }
4698    // w/ a ':' after the '#', it's just like a plain ':'.
4699    // FALLTHROUGH
4700  }
4701  case AsmToken::Colon: {
4702    // ":lower16:" and ":upper16:" expression prefixes
4703    // FIXME: Check it's an expression prefix,
4704    // e.g. (FOO - :lower16:BAR) isn't legal.
4705    ARMMCExpr::VariantKind RefKind;
4706    if (parsePrefix(RefKind))
4707      return true;
4708
4709    const MCExpr *SubExprVal;
4710    if (getParser().parseExpression(SubExprVal))
4711      return true;
4712
4713    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4714                                              getContext());
4715    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4716    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4717    return false;
4718  }
4719  }
4720}
4721
4722// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4723//  :lower16: and :upper16:.
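// E.g. (illustrative): "movw r0, :lower16:sym" and "movt r0, :upper16:sym"
// load the low and high halves of the address of 'sym'.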
4724bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4725  RefKind = ARMMCExpr::VK_ARM_None;
4726
4727  // :lower16: and :upper16: modifiers
4728  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4729  Parser.Lex(); // Eat ':'
4730
4731  if (getLexer().isNot(AsmToken::Identifier)) {
4732    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4733    return true;
4734  }
4735
4736  StringRef IDVal = Parser.getTok().getIdentifier();
4737  if (IDVal == "lower16") {
4738    RefKind = ARMMCExpr::VK_ARM_LO16;
4739  } else if (IDVal == "upper16") {
4740    RefKind = ARMMCExpr::VK_ARM_HI16;
4741  } else {
4742    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4743    return true;
4744  }
4745  Parser.Lex();
4746
4747  if (getLexer().isNot(AsmToken::Colon)) {
4748    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4749    return true;
4750  }
4751  Parser.Lex(); // Eat the last ':'
4752  return false;
4753}
4754
4755/// \brief Given a mnemonic, split out possible predication code and carry
4756/// setting letters to form a canonical mnemonic and flags.
4757//
4758// FIXME: Would be nice to autogen this.
4759// FIXME: This is a bit of a maze of special cases.
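// Illustrative splits under the rules below: "addseq" becomes "add" with the
// EQ predication code and carry setting; "cpsie" becomes "cps" with the IE
// imod; "ittet" becomes "it" with the ITMask "tet".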
4760StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4761                                      unsigned &PredicationCode,
4762                                      bool &CarrySetting,
4763                                      unsigned &ProcessorIMod,
4764                                      StringRef &ITMask) {
4765  PredicationCode = ARMCC::AL;
4766  CarrySetting = false;
4767  ProcessorIMod = 0;
4768
4769  // Ignore some mnemonics we know aren't predicated forms.
4770  //
4771  // FIXME: Would be nice to autogen this.
4772  if ((Mnemonic == "movs" && isThumb()) ||
4773      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4774      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4775      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4776      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4777      Mnemonic == "vaclt" || Mnemonic == "vacle"  ||
4778      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4779      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4780      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4781      Mnemonic == "fmuls")
4782    return Mnemonic;
4783
4784  // First, split out any predication code. Ignore mnemonics we know aren't
4785  // predicated but do have a carry-set and so weren't caught above.
4786  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4787      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4788      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4789      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4790    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4791      .Case("eq", ARMCC::EQ)
4792      .Case("ne", ARMCC::NE)
4793      .Case("hs", ARMCC::HS)
4794      .Case("cs", ARMCC::HS)
4795      .Case("lo", ARMCC::LO)
4796      .Case("cc", ARMCC::LO)
4797      .Case("mi", ARMCC::MI)
4798      .Case("pl", ARMCC::PL)
4799      .Case("vs", ARMCC::VS)
4800      .Case("vc", ARMCC::VC)
4801      .Case("hi", ARMCC::HI)
4802      .Case("ls", ARMCC::LS)
4803      .Case("ge", ARMCC::GE)
4804      .Case("lt", ARMCC::LT)
4805      .Case("gt", ARMCC::GT)
4806      .Case("le", ARMCC::LE)
4807      .Case("al", ARMCC::AL)
4808      .Default(~0U);
4809    if (CC != ~0U) {
4810      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4811      PredicationCode = CC;
4812    }
4813  }
4814
4815  // Next, determine if we have a carry setting bit. We explicitly ignore all
4816  // the instructions we know end in 's'.
4817  if (Mnemonic.endswith("s") &&
4818      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4819        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4820        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4821        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4822        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4823        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4824        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4825        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4826        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4827        (Mnemonic == "movs" && isThumb()))) {
4828    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4829    CarrySetting = true;
4830  }
4831
4832  // The "cps" instruction can have an interrupt mode operand which is glued into
4833  // the mnemonic. Check if this is the case, split it out, and parse the imod operand.
4834  if (Mnemonic.startswith("cps")) {
4835    // Split out any imod code.
4836    unsigned IMod =
4837      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4838      .Case("ie", ARM_PROC::IE)
4839      .Case("id", ARM_PROC::ID)
4840      .Default(~0U);
4841    if (IMod != ~0U) {
4842      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4843      ProcessorIMod = IMod;
4844    }
4845  }
4846
4847  // The "it" instruction has the condition mask on the end of the mnemonic.
4848  if (Mnemonic.startswith("it")) {
4849    ITMask = Mnemonic.slice(2, Mnemonic.size());
4850    Mnemonic = Mnemonic.slice(0, 2);
4851  }
4852
4853  return Mnemonic;
4854}
4855
4856/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4857/// inclusion of carry set or predication code operands.
4858//
4859// FIXME: It would be nice to autogen this.
4860void ARMAsmParser::
4861getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4862                      bool &CanAcceptPredicationCode) {
4863  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4864      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4865      Mnemonic == "add" || Mnemonic == "adc" ||
4866      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4867      Mnemonic == "orr" || Mnemonic == "mvn" ||
4868      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4869      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4870      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4871      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4872                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4873                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4874    CanAcceptCarrySet = true;
4875  } else
4876    CanAcceptCarrySet = false;
4877
4878  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4879      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4880      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4881      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4882      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4883      (Mnemonic == "clrex" && !isThumb()) ||
4884      (Mnemonic == "nop" && isThumbOne()) ||
4885      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4886        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4887        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4888      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4889       !isThumb()) ||
4890      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4891    CanAcceptPredicationCode = false;
4892  } else
4893    CanAcceptPredicationCode = true;
4894
4895  if (isThumb()) {
4896    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4897        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4898      CanAcceptPredicationCode = false;
4899  }
4900}
4901
4902bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4903                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4904  // FIXME: This is all horribly hacky. We really need a better way to deal
4905  // with optional operands like this in the matcher table.
4906
4907  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4908  // another does not. Specifically, the MOVW instruction does not. So we
4909  // special case it here and remove the defaulted (non-setting) cc_out
4910  // operand if that's the instruction we're trying to match.
4911  //
4912  // We do this as post-processing of the explicit operands rather than just
4913  // conditionally adding the cc_out in the first place because we need
4914  // to check the type of the parsed immediate operand.
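  // E.g. (illustrative, ARM mode): "mov r0, #0xbeef" has no so_imm encoding
  // but fits imm0_65535, so it can only be MOVW and the cc_out is removed.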
4915  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4916      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4917      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4918      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4919    return true;
4920
4921  // Register-register 'add' for thumb does not have a cc_out operand
4922  // when there are only two register operands.
4923  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4924      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4925      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4926      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4927    return true;
4928  // Register-register 'add' for thumb does not have a cc_out operand
4929  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4930  // have to check the immediate range here since Thumb2 has a variant
4931  // that can handle a different range and has a cc_out operand.
4932  if (((isThumb() && Mnemonic == "add") ||
4933       (isThumbTwo() && Mnemonic == "sub")) &&
4934      Operands.size() == 6 &&
4935      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4936      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4937      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4938      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4939      ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4940       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4941    return true;
4942  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4943  // imm0_4095 variant. That's the least-preferred variant when
4944  // selecting via the generic "add" mnemonic, so to know that we
4945  // should remove the cc_out operand, we have to explicitly check that
4946  // it's not one of the other variants. Ugh.
4947  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4948      Operands.size() == 6 &&
4949      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4950      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4951      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4952    // Nest conditions rather than one big 'if' statement for readability.
4953    //
4954    // If either register is a high reg, it's either one of the SP
4955    // variants (handled above) or a 32-bit encoding, so we just
4956    // check against T3. If the second register is the PC, this is an
4957    // alternate form of ADR, which uses encoding T4, so check for that too.
4958    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4959         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4960        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4961        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4962      return false;
4963    // If both registers are low, we're in an IT block, and the immediate is
4964    // in range, we should use encoding T1 instead, which has a cc_out.
4965    if (inITBlock() &&
4966        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4967        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4968        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4969      return false;
4970
4971    // Otherwise, we use encoding T4, which does not have a cc_out
4972    // operand.
4973    return true;
4974  }
4975
4976  // The thumb2 multiply instruction doesn't have a CCOut register, so
4977  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4978  // use the 16-bit encoding or not.
4979  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4980      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4981      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4982      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4983      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4984      // If the registers aren't low regs, the destination reg isn't the
4985      // same as one of the source regs, or the cc_out operand is zero
4986      // outside of an IT block, we have to use the 32-bit encoding, so
4987      // remove the cc_out operand.
4988      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4989       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4990       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4991       !inITBlock() ||
4992       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4993        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4994        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4995        static_cast<ARMOperand*>(Operands[4])->getReg())))
4996    return true;
4997
4998  // Also check the 'mul' syntax variant that doesn't specify an explicit
4999  // destination register.
5000  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5001      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
5002      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5003      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5004      // If the registers aren't low regs  or the cc_out operand is zero
5005      // outside of an IT block, we have to use the 32-bit encoding, so
5006      // remove the cc_out operand.
5007      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
5008       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
5009       !inITBlock()))
5010    return true;
5011
5012
5013
5014  // Register-register 'add/sub' for thumb does not have a cc_out operand
5015  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5016  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5017  // right, this will result in better diagnostics (which operand is off)
5018  // anyway.
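  // E.g. (illustrative): both "add sp, sp, #16" and the two-operand form
  // "add sp, #16" fall under this rule and drop the cc_out operand.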
5019  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5020      (Operands.size() == 5 || Operands.size() == 6) &&
5021      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5022      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
5023      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
5024      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
5025       (Operands.size() == 6 &&
5026        static_cast<ARMOperand*>(Operands[5])->isImm())))
5027    return true;
5028
5029  return false;
5030}
5031
5032static bool isDataTypeToken(StringRef Tok) {
5033  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5034    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5035    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5036    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5037    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5038    Tok == ".f" || Tok == ".d";
5039}
5040
5041// FIXME: This bit should probably be handled via an explicit match class
5042// in the .td files that matches the suffix instead of having it be
5043// a literal string token the way it is now.
5044static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5045  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5046}
5047static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
5048                                 unsigned VariantID);
5049/// Parse an ARM instruction mnemonic followed by its operands.
5050bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5051                                    SMLoc NameLoc,
5052                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5053  // Apply mnemonic aliases before doing anything else, as the destination
5054  // mnemonic may include suffixes and we want to handle them normally.
5055  // The generic tblgen'erated code does this later, at the start of
5056  // MatchInstructionImpl(), but that's too late for aliases that include
5057  // any sort of suffix.
5058  unsigned AvailableFeatures = getAvailableFeatures();
5059  unsigned AssemblerDialect = getParser().getAssemblerDialect();
5060  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5061
5062  // First check for the ARM-specific .req directive.
5063  if (Parser.getTok().is(AsmToken::Identifier) &&
5064      Parser.getTok().getIdentifier() == ".req") {
5065    parseDirectiveReq(Name, NameLoc);
5066    // We always return 'error' for this, as we're done with this
5067    // statement and don't need to match an instruction.
5068    return true;
5069  }
5070
5071  // Create the leading tokens for the mnemonic, split by '.' characters.
5072  size_t Start = 0, Next = Name.find('.');
5073  StringRef Mnemonic = Name.slice(Start, Next);
5074
5075  // Split out the predication code and carry setting flag from the mnemonic.
5076  unsigned PredicationCode;
5077  unsigned ProcessorIMod;
5078  bool CarrySetting;
5079  StringRef ITMask;
5080  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5081                           ProcessorIMod, ITMask);
5082
5083  // In Thumb1, only the branch (B) instruction can be predicated.
5084  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5085    Parser.eatToEndOfStatement();
5086    return Error(NameLoc, "conditional execution not supported in Thumb1");
5087  }
5088
5089  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5090
5091  // Handle the IT instruction ITMask. Convert it to a bitmask. This
5092  // is the mask as it will be for the IT encoding if the conditional
5093  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5094  // where the conditional bit0 is zero, the instruction post-processing
5095  // will adjust the mask accordingly.
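  // E.g. (illustrative): "itt" produces a mask of 0b1100 and "ite" produces
  // 0b0100 at this point, while a bare "it" leaves the mask as 0b1000.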
5096  if (Mnemonic == "it") {
5097    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5098    if (ITMask.size() > 3) {
5099      Parser.eatToEndOfStatement();
5100      return Error(Loc, "too many conditions on IT instruction");
5101    }
5102    unsigned Mask = 8;
5103    for (unsigned i = ITMask.size(); i != 0; --i) {
5104      char pos = ITMask[i - 1];
5105      if (pos != 't' && pos != 'e') {
5106        Parser.eatToEndOfStatement();
5107        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5108      }
5109      Mask >>= 1;
5110      if (ITMask[i - 1] == 't')
5111        Mask |= 8;
5112    }
5113    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5114  }
5115
5116  // FIXME: This is all a pretty gross hack. We should automatically handle
5117  // optional operands like this via tblgen.
5118
5119  // Next, add the CCOut and ConditionCode operands, if needed.
5120  //
5121  // For mnemonics which can ever incorporate a carry setting bit or predication
5122  // code, our matching model involves us always generating CCOut and
5123  // ConditionCode operands to match the mnemonic "as written" and then we let
5124  // the matcher deal with finding the right instruction or generating an
5125  // appropriate error.
5126  bool CanAcceptCarrySet, CanAcceptPredicationCode;
5127  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
5128
5129  // If we had a carry-set on an instruction that can't do that, issue an
5130  // error.
5131  if (!CanAcceptCarrySet && CarrySetting) {
5132    Parser.eatToEndOfStatement();
5133    return Error(NameLoc, "instruction '" + Mnemonic +
5134                 "' can not set flags, but 's' suffix specified");
5135  }
5136  // If we had a predication code on an instruction that can't do that, issue an
5137  // error.
5138  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5139    Parser.eatToEndOfStatement();
5140    return Error(NameLoc, "instruction '" + Mnemonic +
5141                 "' is not predicable, but condition code specified");
5142  }
5143
5144  // Add the carry setting operand, if necessary.
5145  if (CanAcceptCarrySet) {
5146    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5147    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5148                                               Loc));
5149  }
5150
5151  // Add the predication code operand, if necessary.
5152  if (CanAcceptPredicationCode) {
5153    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5154                                      CarrySetting);
5155    Operands.push_back(ARMOperand::CreateCondCode(
5156                         ARMCC::CondCodes(PredicationCode), Loc));
5157  }
5158
5159  // Add the processor imod operand, if necessary.
5160  if (ProcessorIMod) {
5161    Operands.push_back(ARMOperand::CreateImm(
5162          MCConstantExpr::Create(ProcessorIMod, getContext()),
5163                                 NameLoc, NameLoc));
5164  }
5165
5166  // Add the remaining tokens in the mnemonic.
5167  while (Next != StringRef::npos) {
5168    Start = Next;
5169    Next = Name.find('.', Start + 1);
5170    StringRef ExtraToken = Name.slice(Start, Next);
5171
5172    // Some NEON instructions have an optional datatype suffix that is
5173    // completely ignored. Check for that.
5174    if (isDataTypeToken(ExtraToken) &&
5175        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5176      continue;
5177
5178    if (ExtraToken != ".n") {
5179      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5180      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5181    }
5182  }
5183
5184  // Read the remaining operands.
5185  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5186    // Read the first operand.
5187    if (parseOperand(Operands, Mnemonic)) {
5188      Parser.eatToEndOfStatement();
5189      return true;
5190    }
5191
5192    while (getLexer().is(AsmToken::Comma)) {
5193      Parser.Lex();  // Eat the comma.
5194
5195      // Parse and remember the operand.
5196      if (parseOperand(Operands, Mnemonic)) {
5197        Parser.eatToEndOfStatement();
5198        return true;
5199      }
5200    }
5201  }
5202
5203  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5204    SMLoc Loc = getLexer().getLoc();
5205    Parser.eatToEndOfStatement();
5206    return Error(Loc, "unexpected token in argument list");
5207  }
5208
5209  Parser.Lex(); // Consume the EndOfStatement
5210
5211  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5212  // do and don't have a cc_out optional-def operand. With some spot-checks
5213  // of the operand list, we can figure out which variant we're trying to
5214  // parse and adjust accordingly before actually matching. We shouldn't ever
5215  // try to remove a cc_out operand that was explicitly set on the
5216  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5217  // table driven matcher doesn't fit well with the ARM instruction set.
5218  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5219    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5220    Operands.erase(Operands.begin() + 1);
5221    delete Op;
5222  }
5223
5224  // ARM mode 'blx' needs special handling, as the register operand version
5225  // is predicable, but the label operand version is not. So, we can't rely
5226  // on the Mnemonic based checking to correctly figure out when to put
5227  // a k_CondCode operand in the list. If we're trying to match the label
5228  // version, remove the k_CondCode operand here.
5229  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5230      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5231    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5232    Operands.erase(Operands.begin() + 1);
5233    delete Op;
5234  }
5235
5236  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5237  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
5238  // a single GPRPair reg operand is used in the .td file to replace the two
5239  // GPRs. However, when parsing from asm, the two GRPs cannot be automatically
5240  // expressed as a GPRPair, so we have to manually merge them.
5241  // FIXME: We would really like to be able to tablegen'erate this.
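  // E.g. (illustrative): for "ldrexd r0, r1, [r2]" the parsed r0 and r1 are
  // replaced below by the single GPRPair super-register covering them.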
5242  if (!isThumb() && Operands.size() > 4 &&
5243      (Mnemonic == "ldrexd" || Mnemonic == "strexd")) {
5244    bool isLoad = (Mnemonic == "ldrexd");
5245    unsigned Idx = isLoad ? 2 : 3;
5246    ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
5247    ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
5248
5249    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5250    // Adjust only if Op1 and Op2 are GPRs.
5251    if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
5252        MRC.contains(Op2->getReg())) {
5253      unsigned Reg1 = Op1->getReg();
5254      unsigned Reg2 = Op2->getReg();
5255      unsigned Rt = MRI->getEncodingValue(Reg1);
5256      unsigned Rt2 = MRI->getEncodingValue(Reg2);
5257
5258      // Rt2 must be Rt + 1 and Rt must be even.
5259      if (Rt + 1 != Rt2 || (Rt & 1)) {
5260        Error(Op2->getStartLoc(), isLoad ?
5261            "destination operands must be sequential" :
5262            "source operands must be sequential");
5263        return true;
5264      }
5265      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5266          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5267      Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
5268      Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
5269            NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
5270      delete Op1;
5271      delete Op2;
5272    }
5273  }
5274
5275  return false;
5276}
5277
5278// Validate context-sensitive operand constraints.
5279
5280// Return 'true' if the register list contains non-low GPR registers (HiReg,
5281// if nonzero, is also permitted), 'false' otherwise. If Reg is in the
5282// register list, set 'containsReg' to true.
5283static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5284                                 unsigned HiReg, bool &containsReg) {
5285  containsReg = false;
5286  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5287    unsigned OpReg = Inst.getOperand(i).getReg();
5288    if (OpReg == Reg)
5289      containsReg = true;
5290    // Anything other than a low register isn't legal here.
5291    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5292      return true;
5293  }
5294  return false;
5295}
5296
5297// Check if the specified register is in the register list of the inst,
5298// starting at the indicated operand number.
5299static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5300  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5301    unsigned OpReg = Inst.getOperand(i).getReg();
5302    if (OpReg == Reg)
5303      return true;
5304  }
5305  return false;
5306}
5307
5308// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5309// the ARMInsts array) instead. Getting that here requires awkward
5310// API changes, though. Better way?
5311namespace llvm {
5312extern const MCInstrDesc ARMInsts[];
5313}
5314static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5315  return ARMInsts[Opcode];
5316}
5317
5318// FIXME: We would really like to be able to tablegen'erate this.
5319bool ARMAsmParser::
5320validateInstruction(MCInst &Inst,
5321                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5322  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5323  SMLoc Loc = Operands[0]->getStartLoc();
5324  // Check the IT block state first.
5325  // NOTE: BKPT instruction has the interesting property of being
5326  // allowed in IT blocks, but not being predicable.  It just always
5327  // executes.
5328  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5329      Inst.getOpcode() != ARM::BKPT) {
5330    unsigned bit = 1;
5331    if (ITState.FirstCond)
5332      ITState.FirstCond = false;
5333    else
5334      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5335    // The instruction must be predicable.
5336    if (!MCID.isPredicable())
5337      return Error(Loc, "instructions in IT block must be predicable");
5338    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5339    unsigned ITCond = bit ? ITState.Cond :
5340      ARMCC::getOppositeCondition(ITState.Cond);
5341    if (Cond != ITCond) {
5342      // Find the condition code Operand to get its SMLoc information.
5343      SMLoc CondLoc;
5344      for (unsigned i = 1; i < Operands.size(); ++i)
5345        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5346          CondLoc = Operands[i]->getStartLoc();
5347      return Error(CondLoc, "incorrect condition in IT block; got '" +
5348                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5349                   "', but expected '" +
5350                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5351    }
5352  // Check for non-'al' condition codes outside of the IT block.
5353  } else if (isThumbTwo() && MCID.isPredicable() &&
5354             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5355             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5356             Inst.getOpcode() != ARM::t2B)
5357    return Error(Loc, "predicated instructions must be in IT block");
5358
5359  switch (Inst.getOpcode()) {
5360  case ARM::LDRD:
5361  case ARM::LDRD_PRE:
5362  case ARM::LDRD_POST: {
5363    // Rt2 must be Rt + 1.
5364    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5365    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5366    if (Rt2 != Rt + 1)
5367      return Error(Operands[3]->getStartLoc(),
5368                   "destination operands must be sequential");
5369    return false;
5370  }
5371  case ARM::STRD: {
5372    // Rt2 must be Rt + 1.
5373    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5374    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5375    if (Rt2 != Rt + 1)
5376      return Error(Operands[3]->getStartLoc(),
5377                   "source operands must be sequential");
5378    return false;
5379  }
5380  case ARM::STRD_PRE:
5381  case ARM::STRD_POST: {
5382    // Rt2 must be Rt + 1.
5383    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5384    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5385    if (Rt2 != Rt + 1)
5386      return Error(Operands[3]->getStartLoc(),
5387                   "source operands must be sequential");
5388    return false;
5389  }
5390  case ARM::SBFX:
5391  case ARM::UBFX: {
5392    // width must be in range [1, 32-lsb]
5393    unsigned lsb = Inst.getOperand(2).getImm();
5394    unsigned widthm1 = Inst.getOperand(3).getImm();
5395    if (widthm1 >= 32 - lsb)
5396      return Error(Operands[5]->getStartLoc(),
5397                   "bitfield width must be in range [1,32-lsb]");
5398    return false;
5399  }
5400  case ARM::tLDMIA: {
5401    // If we're parsing Thumb2, the .w variant is available and handles
5402    // most cases that are normally illegal for a Thumb1 LDM
5403    // instruction. We'll make the transformation in processInstruction()
5404    // if necessary.
5405    //
5406    // Thumb LDM instructions are writeback iff the base register is not
5407    // in the register list.
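    // E.g. (illustrative): "ldm r3!, {r0, r1}" requires the '!' since r3 is
    // not in the list, while "ldm r3, {r2, r3}" must not have one.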
5408    unsigned Rn = Inst.getOperand(0).getReg();
5409    bool hasWritebackToken =
5410      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5411       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5412    bool listContainsBase;
5413    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5414      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5415                   "registers must be in range r0-r7");
5416    // If we should have writeback, then there should be a '!' token.
5417    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5418      return Error(Operands[2]->getStartLoc(),
5419                   "writeback operator '!' expected");
5420    // If we should not have writeback, there must not be a '!'. This is
5421    // true even for the 32-bit wide encodings.
5422    if (listContainsBase && hasWritebackToken)
5423      return Error(Operands[3]->getStartLoc(),
5424                   "writeback operator '!' not allowed when base register "
5425                   "in register list");
5426
5427    break;
5428  }
5429  case ARM::t2LDMIA_UPD: {
5430    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5431      return Error(Operands[4]->getStartLoc(),
5432                   "writeback operator '!' not allowed when base register "
5433                   "in register list");
5434    break;
5435  }
5436  case ARM::tMUL: {
5437    // The second source operand must be the same register as the destination
5438    // operand.
5439    //
5440    // In this case, we must directly check the parsed operands because the
5441    // cvtThumbMultiply() function is written in such a way that it guarantees
5442    // this first statement is always true for the new Inst.  Essentially, the
5443    // destination is unconditionally copied into the second source operand
5444    // without checking to see if it matches what we actually parsed.
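    // E.g. (illustrative): "muls r0, r1, r2" is rejected here, while
    // "muls r0, r1, r0" is accepted because the destination matches a source.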
5445    if (Operands.size() == 6 &&
5446        (((ARMOperand*)Operands[3])->getReg() !=
5447         ((ARMOperand*)Operands[5])->getReg()) &&
5448        (((ARMOperand*)Operands[3])->getReg() !=
5449         ((ARMOperand*)Operands[4])->getReg())) {
5450      return Error(Operands[3]->getStartLoc(),
5451                   "destination register must match source register");
5452    }
5453    break;
5454  }
5455  // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5456  // so only issue a diagnostic for Thumb1. The instructions will be
5457  // switched to the t2 encodings in processInstruction() if necessary.
5458  case ARM::tPOP: {
5459    bool listContainsBase;
5460    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5461        !isThumbTwo())
5462      return Error(Operands[2]->getStartLoc(),
5463                   "registers must be in range r0-r7 or pc");
5464    break;
5465  }
5466  case ARM::tPUSH: {
5467    bool listContainsBase;
5468    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5469        !isThumbTwo())
5470      return Error(Operands[2]->getStartLoc(),
5471                   "registers must be in range r0-r7 or lr");
5472    break;
5473  }
5474  case ARM::tSTMIA_UPD: {
5475    bool listContainsBase;
5476    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5477      return Error(Operands[4]->getStartLoc(),
5478                   "registers must be in range r0-r7");
5479    break;
5480  }
5481  case ARM::tADDrSP: {
5482    // If the non-SP source operand and the destination operand are not the
5483    // same, we need thumb2 (for the wide encoding), or we have an error.
5484    if (!isThumbTwo() &&
5485        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5486      return Error(Operands[4]->getStartLoc(),
5487                   "source register must be the same as destination");
5488    }
5489    break;
5490  }
5491  }
5492
5493  return false;
5494}
5495
5496static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5497  switch(Opc) {
5498  default: llvm_unreachable("unexpected opcode!");
5499  // VST1LN
5500  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5501  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5502  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5503  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5504  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5505  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5506  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5507  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5508  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5509
5510  // VST2LN
5511  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5512  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5513  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5514  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5515  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5516
5517  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5518  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5519  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5520  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5521  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5522
5523  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5524  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5525  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5526  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5527  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5528
5529  // VST3LN
5530  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5531  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5532  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5533  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5534  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5535  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5536  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5537  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5538  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5539  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5540  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5541  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5542  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5543  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5544  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5545
5546  // VST3
5547  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5548  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5549  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5550  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5551  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5552  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5553  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5554  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5555  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5556  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5557  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5558  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5559  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5560  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5561  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5562  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5563  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5564  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5565
5566  // VST4LN
5567  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5568  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5569  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5570  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5571  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5572  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5573  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5574  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5575  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5576  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5577  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5578  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5579  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5580  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5581  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5582
5583  // VST4
5584  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5585  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5586  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5587  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5588  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5589  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5590  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5591  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5592  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5593  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5594  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5595  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5596  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5597  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5598  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5599  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5600  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5601  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5602  }
5603}
5604
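// Same mapping for the "..._Asm" VLD pseudo-opcodes; see getRealVSTOpcode
// above for the meaning of Spacing.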
5605static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5606  switch(Opc) {
5607  default: llvm_unreachable("unexpected opcode!");
5608  // VLD1LN
5609  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5610  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5611  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5612  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5613  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5614  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5615  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5616  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5617  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5618
5619  // VLD2LN
5620  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5621  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5622  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5623  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5624  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5625  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5626  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5627  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5628  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5629  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5630  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5631  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5632  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5633  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5634  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5635
5636  // VLD3DUP
5637  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5638  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5639  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5640  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5641  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5642  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5643  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5644  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5645  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5646  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5647  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5648  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5649  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5650  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5651  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5652  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5653  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5654  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5655
5656  // VLD3LN
5657  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5658  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5659  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5660  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5661  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5662  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5663  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5664  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5665  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5666  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5667  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5668  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5669  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5670  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5671  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5672
5673  // VLD3
5674  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5675  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5676  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5677  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5678  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5679  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5680  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5681  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5682  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5683  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5684  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5685  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5686  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5687  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5688  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5689  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5690  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5691  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5692
5693  // VLD4LN
5694  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5695  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5696  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5697  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5698  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5699  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5700  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5701  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5702  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5703  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5704  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5705  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5706  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5707  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5708  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5709
5710  // VLD4DUP
5711  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5712  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5713  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5714  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5715  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5716  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5717  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5718  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5719  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5720  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5721  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5722  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5723  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5724  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5725  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5726  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5727  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5728  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5729
5730  // VLD4
5731  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5732  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5733  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5734  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5735  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5736  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5737  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5738  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5739  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5740  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5741  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5742  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5743  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5744  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5745  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5746  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5747  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5748  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5749  }
5750}
5751
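// Post-process a successfully matched instruction, expanding the parser-only
// pseudo-opcodes and applying aliases that are easier to handle here than in
// the matcher. Each case that rewrites Inst returns true; instructions that
// need no change return false.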
5752bool ARMAsmParser::
5753processInstruction(MCInst &Inst,
5754                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5755  switch (Inst.getOpcode()) {
5756  // Alias for alternate form of 'ADR Rd, #imm' instruction.
5757  case ARM::ADDri: {
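    // Only 'add Rd, pc, #imm' without the 's' suffix can be treated as ADR:
    // operand 1 is the base register and operand 5 is the optional cc_out
    // register.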
5758    if (Inst.getOperand(1).getReg() != ARM::PC ||
5759        Inst.getOperand(5).getReg() != 0)
5760      return false;
5761    MCInst TmpInst;
5762    TmpInst.setOpcode(ARM::ADR);
5763    TmpInst.addOperand(Inst.getOperand(0));
5764    TmpInst.addOperand(Inst.getOperand(2));
5765    TmpInst.addOperand(Inst.getOperand(3));
5766    TmpInst.addOperand(Inst.getOperand(4));
5767    Inst = TmpInst;
5768    return true;
5769  }
5770  // Aliases for alternate PC+imm syntax of LDR instructions.
5771  case ARM::t2LDRpcrel:
5772    // Select the narrow version if the immediate will fit.
5773    if (Inst.getOperand(1).getImm() > 0 &&
5774        Inst.getOperand(1).getImm() <= 0xff)
5775      Inst.setOpcode(ARM::tLDRpci);
5776    else
5777      Inst.setOpcode(ARM::t2LDRpci);
5778    return true;
5779  case ARM::t2LDRBpcrel:
5780    Inst.setOpcode(ARM::t2LDRBpci);
5781    return true;
5782  case ARM::t2LDRHpcrel:
5783    Inst.setOpcode(ARM::t2LDRHpci);
5784    return true;
5785  case ARM::t2LDRSBpcrel:
5786    Inst.setOpcode(ARM::t2LDRSBpci);
5787    return true;
5788  case ARM::t2LDRSHpcrel:
5789    Inst.setOpcode(ARM::t2LDRSHpci);
5790    return true;
5791  // Handle NEON VST complex aliases.
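  // The "_Asm" forms carry their operands in source order (Vd, lane, Rn,
  // alignment[, Rm], predicate); the real instructions want the addressing
  // operands first and the register list spelled out explicitly, so each case
  // below rebuilds the MCInst in that order.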
5792  case ARM::VST1LNdWB_register_Asm_8:
5793  case ARM::VST1LNdWB_register_Asm_16:
5794  case ARM::VST1LNdWB_register_Asm_32: {
5795    MCInst TmpInst;
5796    // Shuffle the operands around so the lane index operand is in the
5797    // right place.
5798    unsigned Spacing;
5799    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5800    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5801    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5802    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5803    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5804    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5805    TmpInst.addOperand(Inst.getOperand(1)); // lane
5806    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5807    TmpInst.addOperand(Inst.getOperand(6));
5808    Inst = TmpInst;
5809    return true;
5810  }
5811
5812  case ARM::VST2LNdWB_register_Asm_8:
5813  case ARM::VST2LNdWB_register_Asm_16:
5814  case ARM::VST2LNdWB_register_Asm_32:
5815  case ARM::VST2LNqWB_register_Asm_16:
5816  case ARM::VST2LNqWB_register_Asm_32: {
5817    MCInst TmpInst;
5818    // Shuffle the operands around so the lane index operand is in the
5819    // right place.
5820    unsigned Spacing;
5821    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5822    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5823    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5824    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5825    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5826    TmpInst.addOperand(Inst.getOperand(0)); // Vd
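    // The remaining registers in the list are synthesized from Vd; this
    // relies on the D registers being numbered consecutively in the generated
    // register enum.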
5827    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5828                                            Spacing));
5829    TmpInst.addOperand(Inst.getOperand(1)); // lane
5830    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5831    TmpInst.addOperand(Inst.getOperand(6));
5832    Inst = TmpInst;
5833    return true;
5834  }
5835
5836  case ARM::VST3LNdWB_register_Asm_8:
5837  case ARM::VST3LNdWB_register_Asm_16:
5838  case ARM::VST3LNdWB_register_Asm_32:
5839  case ARM::VST3LNqWB_register_Asm_16:
5840  case ARM::VST3LNqWB_register_Asm_32: {
5841    MCInst TmpInst;
5842    // Shuffle the operands around so the lane index operand is in the
5843    // right place.
5844    unsigned Spacing;
5845    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5846    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5847    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5848    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5849    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5850    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5851    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5852                                            Spacing));
5853    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5854                                            Spacing * 2));
5855    TmpInst.addOperand(Inst.getOperand(1)); // lane
5856    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5857    TmpInst.addOperand(Inst.getOperand(6));
5858    Inst = TmpInst;
5859    return true;
5860  }
5861
5862  case ARM::VST4LNdWB_register_Asm_8:
5863  case ARM::VST4LNdWB_register_Asm_16:
5864  case ARM::VST4LNdWB_register_Asm_32:
5865  case ARM::VST4LNqWB_register_Asm_16:
5866  case ARM::VST4LNqWB_register_Asm_32: {
5867    MCInst TmpInst;
5868    // Shuffle the operands around so the lane index operand is in the
5869    // right place.
5870    unsigned Spacing;
5871    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5872    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5873    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5874    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5875    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5876    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5877    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5878                                            Spacing));
5879    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5880                                            Spacing * 2));
5881    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5882                                            Spacing * 3));
5883    TmpInst.addOperand(Inst.getOperand(1)); // lane
5884    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5885    TmpInst.addOperand(Inst.getOperand(6));
5886    Inst = TmpInst;
5887    return true;
5888  }
5889
5890  case ARM::VST1LNdWB_fixed_Asm_8:
5891  case ARM::VST1LNdWB_fixed_Asm_16:
5892  case ARM::VST1LNdWB_fixed_Asm_32: {
5893    MCInst TmpInst;
5894    // Shuffle the operands around so the lane index operand is in the
5895    // right place.
5896    unsigned Spacing;
5897    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5898    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5899    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5900    TmpInst.addOperand(Inst.getOperand(3)); // alignment
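    // A zero Rm means "no index register", i.e. the fixed
    // post-increment-by-access-size form of the writeback instruction.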
5901    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5902    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5903    TmpInst.addOperand(Inst.getOperand(1)); // lane
5904    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5905    TmpInst.addOperand(Inst.getOperand(5));
5906    Inst = TmpInst;
5907    return true;
5908  }
5909
5910  case ARM::VST2LNdWB_fixed_Asm_8:
5911  case ARM::VST2LNdWB_fixed_Asm_16:
5912  case ARM::VST2LNdWB_fixed_Asm_32:
5913  case ARM::VST2LNqWB_fixed_Asm_16:
5914  case ARM::VST2LNqWB_fixed_Asm_32: {
5915    MCInst TmpInst;
5916    // Shuffle the operands around so the lane index operand is in the
5917    // right place.
5918    unsigned Spacing;
5919    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5920    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5921    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5922    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5923    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5924    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5925    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5926                                            Spacing));
5927    TmpInst.addOperand(Inst.getOperand(1)); // lane
5928    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5929    TmpInst.addOperand(Inst.getOperand(5));
5930    Inst = TmpInst;
5931    return true;
5932  }
5933
5934  case ARM::VST3LNdWB_fixed_Asm_8:
5935  case ARM::VST3LNdWB_fixed_Asm_16:
5936  case ARM::VST3LNdWB_fixed_Asm_32:
5937  case ARM::VST3LNqWB_fixed_Asm_16:
5938  case ARM::VST3LNqWB_fixed_Asm_32: {
5939    MCInst TmpInst;
5940    // Shuffle the operands around so the lane index operand is in the
5941    // right place.
5942    unsigned Spacing;
5943    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5944    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5945    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5946    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5947    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5948    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5949    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5950                                            Spacing));
5951    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5952                                            Spacing * 2));
5953    TmpInst.addOperand(Inst.getOperand(1)); // lane
5954    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5955    TmpInst.addOperand(Inst.getOperand(5));
5956    Inst = TmpInst;
5957    return true;
5958  }
5959
5960  case ARM::VST4LNdWB_fixed_Asm_8:
5961  case ARM::VST4LNdWB_fixed_Asm_16:
5962  case ARM::VST4LNdWB_fixed_Asm_32:
5963  case ARM::VST4LNqWB_fixed_Asm_16:
5964  case ARM::VST4LNqWB_fixed_Asm_32: {
5965    MCInst TmpInst;
5966    // Shuffle the operands around so the lane index operand is in the
5967    // right place.
5968    unsigned Spacing;
5969    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5970    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5971    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5972    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5973    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5974    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5975    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5976                                            Spacing));
5977    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5978                                            Spacing * 2));
5979    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5980                                            Spacing * 3));
5981    TmpInst.addOperand(Inst.getOperand(1)); // lane
5982    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5983    TmpInst.addOperand(Inst.getOperand(5));
5984    Inst = TmpInst;
5985    return true;
5986  }
5987
5988  case ARM::VST1LNdAsm_8:
5989  case ARM::VST1LNdAsm_16:
5990  case ARM::VST1LNdAsm_32: {
5991    MCInst TmpInst;
5992    // Shuffle the operands around so the lane index operand is in the
5993    // right place.
5994    unsigned Spacing;
5995    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5996    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5997    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5998    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5999    TmpInst.addOperand(Inst.getOperand(1)); // lane
6000    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6001    TmpInst.addOperand(Inst.getOperand(5));
6002    Inst = TmpInst;
6003    return true;
6004  }
6005
6006  case ARM::VST2LNdAsm_8:
6007  case ARM::VST2LNdAsm_16:
6008  case ARM::VST2LNdAsm_32:
6009  case ARM::VST2LNqAsm_16:
6010  case ARM::VST2LNqAsm_32: {
6011    MCInst TmpInst;
6012    // Shuffle the operands around so the lane index operand is in the
6013    // right place.
6014    unsigned Spacing;
6015    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6016    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6017    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6018    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6019    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6020                                            Spacing));
6021    TmpInst.addOperand(Inst.getOperand(1)); // lane
6022    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6023    TmpInst.addOperand(Inst.getOperand(5));
6024    Inst = TmpInst;
6025    return true;
6026  }
6027
6028  case ARM::VST3LNdAsm_8:
6029  case ARM::VST3LNdAsm_16:
6030  case ARM::VST3LNdAsm_32:
6031  case ARM::VST3LNqAsm_16:
6032  case ARM::VST3LNqAsm_32: {
6033    MCInst TmpInst;
6034    // Shuffle the operands around so the lane index operand is in the
6035    // right place.
6036    unsigned Spacing;
6037    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6038    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6039    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6040    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6041    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6042                                            Spacing));
6043    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6044                                            Spacing * 2));
6045    TmpInst.addOperand(Inst.getOperand(1)); // lane
6046    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6047    TmpInst.addOperand(Inst.getOperand(5));
6048    Inst = TmpInst;
6049    return true;
6050  }
6051
6052  case ARM::VST4LNdAsm_8:
6053  case ARM::VST4LNdAsm_16:
6054  case ARM::VST4LNdAsm_32:
6055  case ARM::VST4LNqAsm_16:
6056  case ARM::VST4LNqAsm_32: {
6057    MCInst TmpInst;
6058    // Shuffle the operands around so the lane index operand is in the
6059    // right place.
6060    unsigned Spacing;
6061    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6062    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6063    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6064    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6065    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6066                                            Spacing));
6067    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6068                                            Spacing * 2));
6069    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6070                                            Spacing * 3));
6071    TmpInst.addOperand(Inst.getOperand(1)); // lane
6072    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6073    TmpInst.addOperand(Inst.getOperand(5));
6074    Inst = TmpInst;
6075    return true;
6076  }
6077
6078  // Handle NEON VLD complex aliases.
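  // A lane load only updates one lane of each destination register, so the
  // real instructions also take the destinations as tied source operands;
  // those are the "Tied operand src" operands added below.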
6079  case ARM::VLD1LNdWB_register_Asm_8:
6080  case ARM::VLD1LNdWB_register_Asm_16:
6081  case ARM::VLD1LNdWB_register_Asm_32: {
6082    MCInst TmpInst;
6083    // Shuffle the operands around so the lane index operand is in the
6084    // right place.
6085    unsigned Spacing;
6086    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6087    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6088    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6089    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6090    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6091    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6092    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6093    TmpInst.addOperand(Inst.getOperand(1)); // lane
6094    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6095    TmpInst.addOperand(Inst.getOperand(6));
6096    Inst = TmpInst;
6097    return true;
6098  }
6099
6100  case ARM::VLD2LNdWB_register_Asm_8:
6101  case ARM::VLD2LNdWB_register_Asm_16:
6102  case ARM::VLD2LNdWB_register_Asm_32:
6103  case ARM::VLD2LNqWB_register_Asm_16:
6104  case ARM::VLD2LNqWB_register_Asm_32: {
6105    MCInst TmpInst;
6106    // Shuffle the operands around so the lane index operand is in the
6107    // right place.
6108    unsigned Spacing;
6109    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6110    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6111    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6112                                            Spacing));
6113    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6114    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6115    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6116    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6117    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6118    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6119                                            Spacing));
6120    TmpInst.addOperand(Inst.getOperand(1)); // lane
6121    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6122    TmpInst.addOperand(Inst.getOperand(6));
6123    Inst = TmpInst;
6124    return true;
6125  }
6126
6127  case ARM::VLD3LNdWB_register_Asm_8:
6128  case ARM::VLD3LNdWB_register_Asm_16:
6129  case ARM::VLD3LNdWB_register_Asm_32:
6130  case ARM::VLD3LNqWB_register_Asm_16:
6131  case ARM::VLD3LNqWB_register_Asm_32: {
6132    MCInst TmpInst;
6133    // Shuffle the operands around so the lane index operand is in the
6134    // right place.
6135    unsigned Spacing;
6136    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6137    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6138    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6139                                            Spacing));
6140    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6141                                            Spacing * 2));
6142    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6143    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6144    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6145    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6146    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6147    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6148                                            Spacing));
6149    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6150                                            Spacing * 2));
6151    TmpInst.addOperand(Inst.getOperand(1)); // lane
6152    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6153    TmpInst.addOperand(Inst.getOperand(6));
6154    Inst = TmpInst;
6155    return true;
6156  }
6157
6158  case ARM::VLD4LNdWB_register_Asm_8:
6159  case ARM::VLD4LNdWB_register_Asm_16:
6160  case ARM::VLD4LNdWB_register_Asm_32:
6161  case ARM::VLD4LNqWB_register_Asm_16:
6162  case ARM::VLD4LNqWB_register_Asm_32: {
6163    MCInst TmpInst;
6164    // Shuffle the operands around so the lane index operand is in the
6165    // right place.
6166    unsigned Spacing;
6167    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6168    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6169    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6170                                            Spacing));
6171    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6172                                            Spacing * 2));
6173    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6174                                            Spacing * 3));
6175    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6176    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6177    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6178    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6179    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6180    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6181                                            Spacing));
6182    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6183                                            Spacing * 2));
6184    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6185                                            Spacing * 3));
6186    TmpInst.addOperand(Inst.getOperand(1)); // lane
6187    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6188    TmpInst.addOperand(Inst.getOperand(6));
6189    Inst = TmpInst;
6190    return true;
6191  }
6192
6193  case ARM::VLD1LNdWB_fixed_Asm_8:
6194  case ARM::VLD1LNdWB_fixed_Asm_16:
6195  case ARM::VLD1LNdWB_fixed_Asm_32: {
6196    MCInst TmpInst;
6197    // Shuffle the operands around so the lane index operand is in the
6198    // right place.
6199    unsigned Spacing;
6200    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6201    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6202    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6203    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6204    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6205    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6206    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6207    TmpInst.addOperand(Inst.getOperand(1)); // lane
6208    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6209    TmpInst.addOperand(Inst.getOperand(5));
6210    Inst = TmpInst;
6211    return true;
6212  }
6213
6214  case ARM::VLD2LNdWB_fixed_Asm_8:
6215  case ARM::VLD2LNdWB_fixed_Asm_16:
6216  case ARM::VLD2LNdWB_fixed_Asm_32:
6217  case ARM::VLD2LNqWB_fixed_Asm_16:
6218  case ARM::VLD2LNqWB_fixed_Asm_32: {
6219    MCInst TmpInst;
6220    // Shuffle the operands around so the lane index operand is in the
6221    // right place.
6222    unsigned Spacing;
6223    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6224    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6225    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6226                                            Spacing));
6227    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6228    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6229    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6230    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6231    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6232    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6233                                            Spacing));
6234    TmpInst.addOperand(Inst.getOperand(1)); // lane
6235    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6236    TmpInst.addOperand(Inst.getOperand(5));
6237    Inst = TmpInst;
6238    return true;
6239  }
6240
6241  case ARM::VLD3LNdWB_fixed_Asm_8:
6242  case ARM::VLD3LNdWB_fixed_Asm_16:
6243  case ARM::VLD3LNdWB_fixed_Asm_32:
6244  case ARM::VLD3LNqWB_fixed_Asm_16:
6245  case ARM::VLD3LNqWB_fixed_Asm_32: {
6246    MCInst TmpInst;
6247    // Shuffle the operands around so the lane index operand is in the
6248    // right place.
6249    unsigned Spacing;
6250    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6251    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6252    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6253                                            Spacing));
6254    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6255                                            Spacing * 2));
6256    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6257    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6258    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6259    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6260    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6261    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6262                                            Spacing));
6263    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6264                                            Spacing * 2));
6265    TmpInst.addOperand(Inst.getOperand(1)); // lane
6266    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6267    TmpInst.addOperand(Inst.getOperand(5));
6268    Inst = TmpInst;
6269    return true;
6270  }
6271
6272  case ARM::VLD4LNdWB_fixed_Asm_8:
6273  case ARM::VLD4LNdWB_fixed_Asm_16:
6274  case ARM::VLD4LNdWB_fixed_Asm_32:
6275  case ARM::VLD4LNqWB_fixed_Asm_16:
6276  case ARM::VLD4LNqWB_fixed_Asm_32: {
6277    MCInst TmpInst;
6278    // Shuffle the operands around so the lane index operand is in the
6279    // right place.
6280    unsigned Spacing;
6281    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6282    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6283    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6284                                            Spacing));
6285    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6286                                            Spacing * 2));
6287    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6288                                            Spacing * 3));
6289    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6290    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6291    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6292    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6293    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6294    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6295                                            Spacing));
6296    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6297                                            Spacing * 2));
6298    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6299                                            Spacing * 3));
6300    TmpInst.addOperand(Inst.getOperand(1)); // lane
6301    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6302    TmpInst.addOperand(Inst.getOperand(5));
6303    Inst = TmpInst;
6304    return true;
6305  }
6306
6307  case ARM::VLD1LNdAsm_8:
6308  case ARM::VLD1LNdAsm_16:
6309  case ARM::VLD1LNdAsm_32: {
6310    MCInst TmpInst;
6311    // Shuffle the operands around so the lane index operand is in the
6312    // right place.
6313    unsigned Spacing;
6314    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6315    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6316    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6317    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6318    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6319    TmpInst.addOperand(Inst.getOperand(1)); // lane
6320    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6321    TmpInst.addOperand(Inst.getOperand(5));
6322    Inst = TmpInst;
6323    return true;
6324  }
6325
6326  case ARM::VLD2LNdAsm_8:
6327  case ARM::VLD2LNdAsm_16:
6328  case ARM::VLD2LNdAsm_32:
6329  case ARM::VLD2LNqAsm_16:
6330  case ARM::VLD2LNqAsm_32: {
6331    MCInst TmpInst;
6332    // Shuffle the operands around so the lane index operand is in the
6333    // right place.
6334    unsigned Spacing;
6335    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6336    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6337    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6338                                            Spacing));
6339    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6340    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6341    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6342    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6343                                            Spacing));
6344    TmpInst.addOperand(Inst.getOperand(1)); // lane
6345    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6346    TmpInst.addOperand(Inst.getOperand(5));
6347    Inst = TmpInst;
6348    return true;
6349  }
6350
6351  case ARM::VLD3LNdAsm_8:
6352  case ARM::VLD3LNdAsm_16:
6353  case ARM::VLD3LNdAsm_32:
6354  case ARM::VLD3LNqAsm_16:
6355  case ARM::VLD3LNqAsm_32: {
6356    MCInst TmpInst;
6357    // Shuffle the operands around so the lane index operand is in the
6358    // right place.
6359    unsigned Spacing;
6360    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6361    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6362    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6363                                            Spacing));
6364    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6365                                            Spacing * 2));
6366    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6367    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6368    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6369    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6370                                            Spacing));
6371    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6372                                            Spacing * 2));
6373    TmpInst.addOperand(Inst.getOperand(1)); // lane
6374    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6375    TmpInst.addOperand(Inst.getOperand(5));
6376    Inst = TmpInst;
6377    return true;
6378  }
6379
6380  case ARM::VLD4LNdAsm_8:
6381  case ARM::VLD4LNdAsm_16:
6382  case ARM::VLD4LNdAsm_32:
6383  case ARM::VLD4LNqAsm_16:
6384  case ARM::VLD4LNqAsm_32: {
6385    MCInst TmpInst;
6386    // Shuffle the operands around so the lane index operand is in the
6387    // right place.
6388    unsigned Spacing;
6389    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6390    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6391    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6392                                            Spacing));
6393    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6394                                            Spacing * 2));
6395    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6396                                            Spacing * 3));
6397    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6398    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6399    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6400    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6401                                            Spacing));
6402    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6403                                            Spacing * 2));
6404    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6405                                            Spacing * 3));
6406    TmpInst.addOperand(Inst.getOperand(1)); // lane
6407    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6408    TmpInst.addOperand(Inst.getOperand(5));
6409    Inst = TmpInst;
6410    return true;
6411  }
6412
6413  // VLD3DUP single 3-element structure to all lanes instructions.
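  // The all-lanes (DUP) and whole-structure forms have no lane operand, so
  // only the register list needs expanding here.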
6414  case ARM::VLD3DUPdAsm_8:
6415  case ARM::VLD3DUPdAsm_16:
6416  case ARM::VLD3DUPdAsm_32:
6417  case ARM::VLD3DUPqAsm_8:
6418  case ARM::VLD3DUPqAsm_16:
6419  case ARM::VLD3DUPqAsm_32: {
6420    MCInst TmpInst;
6421    unsigned Spacing;
6422    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6423    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6424    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6425                                            Spacing));
6426    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6427                                            Spacing * 2));
6428    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6429    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6430    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6431    TmpInst.addOperand(Inst.getOperand(4));
6432    Inst = TmpInst;
6433    return true;
6434  }
6435
6436  case ARM::VLD3DUPdWB_fixed_Asm_8:
6437  case ARM::VLD3DUPdWB_fixed_Asm_16:
6438  case ARM::VLD3DUPdWB_fixed_Asm_32:
6439  case ARM::VLD3DUPqWB_fixed_Asm_8:
6440  case ARM::VLD3DUPqWB_fixed_Asm_16:
6441  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6442    MCInst TmpInst;
6443    unsigned Spacing;
6444    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6445    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6446    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6447                                            Spacing));
6448    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6449                                            Spacing * 2));
6450    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6451    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6452    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6453    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6454    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6455    TmpInst.addOperand(Inst.getOperand(4));
6456    Inst = TmpInst;
6457    return true;
6458  }
6459
6460  case ARM::VLD3DUPdWB_register_Asm_8:
6461  case ARM::VLD3DUPdWB_register_Asm_16:
6462  case ARM::VLD3DUPdWB_register_Asm_32:
6463  case ARM::VLD3DUPqWB_register_Asm_8:
6464  case ARM::VLD3DUPqWB_register_Asm_16:
6465  case ARM::VLD3DUPqWB_register_Asm_32: {
6466    MCInst TmpInst;
6467    unsigned Spacing;
6468    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6469    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6470    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6471                                            Spacing));
6472    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6473                                            Spacing * 2));
6474    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6475    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6476    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6477    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6478    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6479    TmpInst.addOperand(Inst.getOperand(5));
6480    Inst = TmpInst;
6481    return true;
6482  }
6483
6484  // VLD3 multiple 3-element structure instructions.
6485  case ARM::VLD3dAsm_8:
6486  case ARM::VLD3dAsm_16:
6487  case ARM::VLD3dAsm_32:
6488  case ARM::VLD3qAsm_8:
6489  case ARM::VLD3qAsm_16:
6490  case ARM::VLD3qAsm_32: {
6491    MCInst TmpInst;
6492    unsigned Spacing;
6493    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6494    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6495    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6496                                            Spacing));
6497    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6498                                            Spacing * 2));
6499    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6500    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6501    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6502    TmpInst.addOperand(Inst.getOperand(4));
6503    Inst = TmpInst;
6504    return true;
6505  }
6506
6507  case ARM::VLD3dWB_fixed_Asm_8:
6508  case ARM::VLD3dWB_fixed_Asm_16:
6509  case ARM::VLD3dWB_fixed_Asm_32:
6510  case ARM::VLD3qWB_fixed_Asm_8:
6511  case ARM::VLD3qWB_fixed_Asm_16:
6512  case ARM::VLD3qWB_fixed_Asm_32: {
6513    MCInst TmpInst;
6514    unsigned Spacing;
6515    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6516    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6517    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6518                                            Spacing));
6519    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6520                                            Spacing * 2));
6521    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6522    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6523    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6524    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6525    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6526    TmpInst.addOperand(Inst.getOperand(4));
6527    Inst = TmpInst;
6528    return true;
6529  }
6530
6531  case ARM::VLD3dWB_register_Asm_8:
6532  case ARM::VLD3dWB_register_Asm_16:
6533  case ARM::VLD3dWB_register_Asm_32:
6534  case ARM::VLD3qWB_register_Asm_8:
6535  case ARM::VLD3qWB_register_Asm_16:
6536  case ARM::VLD3qWB_register_Asm_32: {
6537    MCInst TmpInst;
6538    unsigned Spacing;
6539    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6540    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6541    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6542                                            Spacing));
6543    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6544                                            Spacing * 2));
6545    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6546    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6547    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6548    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6549    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6550    TmpInst.addOperand(Inst.getOperand(5));
6551    Inst = TmpInst;
6552    return true;
6553  }
6554
6555  // VLD4DUP single 4-element structure to all lanes instructions.
6556  case ARM::VLD4DUPdAsm_8:
6557  case ARM::VLD4DUPdAsm_16:
6558  case ARM::VLD4DUPdAsm_32:
6559  case ARM::VLD4DUPqAsm_8:
6560  case ARM::VLD4DUPqAsm_16:
6561  case ARM::VLD4DUPqAsm_32: {
6562    MCInst TmpInst;
6563    unsigned Spacing;
6564    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6565    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6566    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6567                                            Spacing));
6568    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6569                                            Spacing * 2));
6570    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6571                                            Spacing * 3));
6572    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6573    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6574    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6575    TmpInst.addOperand(Inst.getOperand(4));
6576    Inst = TmpInst;
6577    return true;
6578  }
6579
6580  case ARM::VLD4DUPdWB_fixed_Asm_8:
6581  case ARM::VLD4DUPdWB_fixed_Asm_16:
6582  case ARM::VLD4DUPdWB_fixed_Asm_32:
6583  case ARM::VLD4DUPqWB_fixed_Asm_8:
6584  case ARM::VLD4DUPqWB_fixed_Asm_16:
6585  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6586    MCInst TmpInst;
6587    unsigned Spacing;
6588    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6589    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6590    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6591                                            Spacing));
6592    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6593                                            Spacing * 2));
6594    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6595                                            Spacing * 3));
6596    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6597    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6598    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6599    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6600    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6601    TmpInst.addOperand(Inst.getOperand(4));
6602    Inst = TmpInst;
6603    return true;
6604  }
6605
6606  case ARM::VLD4DUPdWB_register_Asm_8:
6607  case ARM::VLD4DUPdWB_register_Asm_16:
6608  case ARM::VLD4DUPdWB_register_Asm_32:
6609  case ARM::VLD4DUPqWB_register_Asm_8:
6610  case ARM::VLD4DUPqWB_register_Asm_16:
6611  case ARM::VLD4DUPqWB_register_Asm_32: {
6612    MCInst TmpInst;
6613    unsigned Spacing;
6614    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6615    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6616    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6617                                            Spacing));
6618    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6619                                            Spacing * 2));
6620    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6621                                            Spacing * 3));
6622    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6623    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6624    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6625    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6626    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6627    TmpInst.addOperand(Inst.getOperand(5));
6628    Inst = TmpInst;
6629    return true;
6630  }
6631
6632  // VLD4 multiple 4-element structure instructions.
6633  case ARM::VLD4dAsm_8:
6634  case ARM::VLD4dAsm_16:
6635  case ARM::VLD4dAsm_32:
6636  case ARM::VLD4qAsm_8:
6637  case ARM::VLD4qAsm_16:
6638  case ARM::VLD4qAsm_32: {
6639    MCInst TmpInst;
6640    unsigned Spacing;
6641    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6642    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6643    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6644                                            Spacing));
6645    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6646                                            Spacing * 2));
6647    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6648                                            Spacing * 3));
6649    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6650    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6651    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6652    TmpInst.addOperand(Inst.getOperand(4));
6653    Inst = TmpInst;
6654    return true;
6655  }
6656
6657  case ARM::VLD4dWB_fixed_Asm_8:
6658  case ARM::VLD4dWB_fixed_Asm_16:
6659  case ARM::VLD4dWB_fixed_Asm_32:
6660  case ARM::VLD4qWB_fixed_Asm_8:
6661  case ARM::VLD4qWB_fixed_Asm_16:
6662  case ARM::VLD4qWB_fixed_Asm_32: {
6663    MCInst TmpInst;
6664    unsigned Spacing;
6665    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6666    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6667    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6668                                            Spacing));
6669    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6670                                            Spacing * 2));
6671    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6672                                            Spacing * 3));
6673    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6674    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6675    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6676    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6677    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6678    TmpInst.addOperand(Inst.getOperand(4));
6679    Inst = TmpInst;
6680    return true;
6681  }
6682
6683  case ARM::VLD4dWB_register_Asm_8:
6684  case ARM::VLD4dWB_register_Asm_16:
6685  case ARM::VLD4dWB_register_Asm_32:
6686  case ARM::VLD4qWB_register_Asm_8:
6687  case ARM::VLD4qWB_register_Asm_16:
6688  case ARM::VLD4qWB_register_Asm_32: {
6689    MCInst TmpInst;
6690    unsigned Spacing;
6691    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6692    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6693    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6694                                            Spacing));
6695    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6696                                            Spacing * 2));
6697    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6698                                            Spacing * 3));
6699    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6700    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6701    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6702    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6703    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6704    TmpInst.addOperand(Inst.getOperand(5));
6705    Inst = TmpInst;
6706    return true;
6707  }
6708
6709  // VST3 multiple 3-element structure instructions.
6710  case ARM::VST3dAsm_8:
6711  case ARM::VST3dAsm_16:
6712  case ARM::VST3dAsm_32:
6713  case ARM::VST3qAsm_8:
6714  case ARM::VST3qAsm_16:
6715  case ARM::VST3qAsm_32: {
6716    MCInst TmpInst;
6717    unsigned Spacing;
6718    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6719    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6720    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6721    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6722    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6723                                            Spacing));
6724    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6725                                            Spacing * 2));
6726    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6727    TmpInst.addOperand(Inst.getOperand(4));
6728    Inst = TmpInst;
6729    return true;
6730  }
6731
6732  case ARM::VST3dWB_fixed_Asm_8:
6733  case ARM::VST3dWB_fixed_Asm_16:
6734  case ARM::VST3dWB_fixed_Asm_32:
6735  case ARM::VST3qWB_fixed_Asm_8:
6736  case ARM::VST3qWB_fixed_Asm_16:
6737  case ARM::VST3qWB_fixed_Asm_32: {
6738    MCInst TmpInst;
6739    unsigned Spacing;
6740    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6741    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6742    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6743    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6744    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6745    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6746    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6747                                            Spacing));
6748    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6749                                            Spacing * 2));
6750    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6751    TmpInst.addOperand(Inst.getOperand(4));
6752    Inst = TmpInst;
6753    return true;
6754  }
6755
6756  case ARM::VST3dWB_register_Asm_8:
6757  case ARM::VST3dWB_register_Asm_16:
6758  case ARM::VST3dWB_register_Asm_32:
6759  case ARM::VST3qWB_register_Asm_8:
6760  case ARM::VST3qWB_register_Asm_16:
6761  case ARM::VST3qWB_register_Asm_32: {
6762    MCInst TmpInst;
6763    unsigned Spacing;
6764    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6765    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6766    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6767    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6768    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6769    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6770    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6771                                            Spacing));
6772    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6773                                            Spacing * 2));
6774    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6775    TmpInst.addOperand(Inst.getOperand(5));
6776    Inst = TmpInst;
6777    return true;
6778  }
6779
6780  // VST4 multiple 4-element structure instructions.
6781  case ARM::VST4dAsm_8:
6782  case ARM::VST4dAsm_16:
6783  case ARM::VST4dAsm_32:
6784  case ARM::VST4qAsm_8:
6785  case ARM::VST4qAsm_16:
6786  case ARM::VST4qAsm_32: {
6787    MCInst TmpInst;
6788    unsigned Spacing;
6789    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6790    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6791    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6792    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6793    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6794                                            Spacing));
6795    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6796                                            Spacing * 2));
6797    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6798                                            Spacing * 3));
6799    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6800    TmpInst.addOperand(Inst.getOperand(4));
6801    Inst = TmpInst;
6802    return true;
6803  }
6804
6805  case ARM::VST4dWB_fixed_Asm_8:
6806  case ARM::VST4dWB_fixed_Asm_16:
6807  case ARM::VST4dWB_fixed_Asm_32:
6808  case ARM::VST4qWB_fixed_Asm_8:
6809  case ARM::VST4qWB_fixed_Asm_16:
6810  case ARM::VST4qWB_fixed_Asm_32: {
6811    MCInst TmpInst;
6812    unsigned Spacing;
6813    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6814    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6815    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6816    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6817    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6818    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6819    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6820                                            Spacing));
6821    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6822                                            Spacing * 2));
6823    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6824                                            Spacing * 3));
6825    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6826    TmpInst.addOperand(Inst.getOperand(4));
6827    Inst = TmpInst;
6828    return true;
6829  }
6830
6831  case ARM::VST4dWB_register_Asm_8:
6832  case ARM::VST4dWB_register_Asm_16:
6833  case ARM::VST4dWB_register_Asm_32:
6834  case ARM::VST4qWB_register_Asm_8:
6835  case ARM::VST4qWB_register_Asm_16:
6836  case ARM::VST4qWB_register_Asm_32: {
6837    MCInst TmpInst;
6838    unsigned Spacing;
6839    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6840    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6841    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6842    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6843    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6844    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6845    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6846                                            Spacing));
6847    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6848                                            Spacing * 2));
6849    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6850                                            Spacing * 3));
6851    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6852    TmpInst.addOperand(Inst.getOperand(5));
6853    Inst = TmpInst;
6854    return true;
6855  }
6856
6857  // Handle encoding choice for the shift-immediate instructions.
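  // The 16-bit form is chosen when the destination and first source are the
  // same low register, the flag-setting behaviour matches the IT context
  // (flags set outside an IT block, not set inside one), and the wide ".w"
  // qualifier was not written explicitly.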
6858  case ARM::t2LSLri:
6859  case ARM::t2LSRri:
6860  case ARM::t2ASRri: {
6861    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6862        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6863        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6864        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6865         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6866      unsigned NewOpc;
6867      switch (Inst.getOpcode()) {
6868      default: llvm_unreachable("unexpected opcode");
6869      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6870      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6871      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6872      }
6873      // The Thumb1 operands aren't in the same order. Awesome, eh?
6874      MCInst TmpInst;
6875      TmpInst.setOpcode(NewOpc);
6876      TmpInst.addOperand(Inst.getOperand(0));
6877      TmpInst.addOperand(Inst.getOperand(5));
6878      TmpInst.addOperand(Inst.getOperand(1));
6879      TmpInst.addOperand(Inst.getOperand(2));
6880      TmpInst.addOperand(Inst.getOperand(3));
6881      TmpInst.addOperand(Inst.getOperand(4));
6882      Inst = TmpInst;
6883      return true;
6884    }
6885    return false;
6886  }
6887
6888  // Handle the Thumb2 mode MOV complex aliases.
6889  case ARM::t2MOVsr:
6890  case ARM::t2MOVSsr: {
6891    // Whether we can use the narrow (16-bit) encoding depends on the CCOut
6892    // operand and on whether we're in an IT block, and only applies when the
6893    // register operands are low registers.
6894    bool isNarrow = false;
6895    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6896        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6897        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6898        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6899        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6900      isNarrow = true;
6901    MCInst TmpInst;
6902    unsigned newOpc;
6903    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6904    default: llvm_unreachable("unexpected opcode!");
6905    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6906    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6907    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6908    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6909    }
6910    TmpInst.setOpcode(newOpc);
6911    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6912    if (isNarrow)
6913      TmpInst.addOperand(MCOperand::CreateReg(
6914          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6915    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6916    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6917    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6918    TmpInst.addOperand(Inst.getOperand(5));
6919    if (!isNarrow)
6920      TmpInst.addOperand(MCOperand::CreateReg(
6921          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6922    Inst = TmpInst;
6923    return true;
6924  }
6925  case ARM::t2MOVsi:
6926  case ARM::t2MOVSsi: {
6927    // Whether we can use the narrow (16-bit) encoding depends on the CCOut
6928    // operand and on whether we're in an IT block, and only applies when the
6929    // register operands are low registers.
6930    bool isNarrow = false;
6931    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6932        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6933        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6934      isNarrow = true;
6935    MCInst TmpInst;
6936    unsigned newOpc;
6937    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6938    default: llvm_unreachable("unexpected opcode!");
6939    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6940    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6941    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6942    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6943    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6944    }
6945    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6946    if (Amount == 32) Amount = 0;
6947    TmpInst.setOpcode(newOpc);
6948    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6949    if (isNarrow)
6950      TmpInst.addOperand(MCOperand::CreateReg(
6951          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6952    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6953    if (newOpc != ARM::t2RRX)
6954      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6955    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6956    TmpInst.addOperand(Inst.getOperand(4));
6957    if (!isNarrow)
6958      TmpInst.addOperand(MCOperand::CreateReg(
6959          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6960    Inst = TmpInst;
6961    return true;
6962  }
6963  // Handle the ARM mode MOV complex aliases.
6964  case ARM::ASRr:
6965  case ARM::LSRr:
6966  case ARM::LSLr:
6967  case ARM::RORr: {
6968    ARM_AM::ShiftOpc ShiftTy;
6969    switch(Inst.getOpcode()) {
6970    default: llvm_unreachable("unexpected opcode!");
6971    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6972    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6973    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6974    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6975    }
6976    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6977    MCInst TmpInst;
6978    TmpInst.setOpcode(ARM::MOVsr);
6979    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6980    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6981    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6982    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6983    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6984    TmpInst.addOperand(Inst.getOperand(4));
6985    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6986    Inst = TmpInst;
6987    return true;
6988  }
6989  case ARM::ASRi:
6990  case ARM::LSRi:
6991  case ARM::LSLi:
6992  case ARM::RORi: {
6993    ARM_AM::ShiftOpc ShiftTy;
6994    switch(Inst.getOpcode()) {
6995    default: llvm_unreachable("unexpected opcode!");
6996    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6997    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6998    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6999    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
7000    }
7001    // A shift by zero is a plain MOVr, not a MOVsi.
7002    unsigned Amt = Inst.getOperand(2).getImm();
7003    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
7004    // A shift by 32 should be encoded as 0 when permitted
7005    if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
7006      Amt = 0;
7007    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
7008    MCInst TmpInst;
7009    TmpInst.setOpcode(Opc);
7010    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7011    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7012    if (Opc == ARM::MOVsi)
7013      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7014    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7015    TmpInst.addOperand(Inst.getOperand(4));
7016    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
7017    Inst = TmpInst;
7018    return true;
7019  }
7020  case ARM::RRXi: {
7021    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
7022    MCInst TmpInst;
7023    TmpInst.setOpcode(ARM::MOVsi);
7024    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7025    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7026    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7027    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7028    TmpInst.addOperand(Inst.getOperand(3));
7029    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
7030    Inst = TmpInst;
7031    return true;
7032  }
7033  case ARM::t2LDMIA_UPD: {
7034    // If this is a load of a single register, then we should use
7035    // a post-indexed LDR instruction instead, per the ARM ARM.
7036    if (Inst.getNumOperands() != 5)
7037      return false;
7038    MCInst TmpInst;
7039    TmpInst.setOpcode(ARM::t2LDR_POST);
7040    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7041    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7042    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7043    TmpInst.addOperand(MCOperand::CreateImm(4));
7044    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7045    TmpInst.addOperand(Inst.getOperand(3));
7046    Inst = TmpInst;
7047    return true;
7048  }
7049  case ARM::t2STMDB_UPD: {
7050    // If this is a store of a single register, then we should use
7051    // a pre-indexed STR instruction instead, per the ARM ARM.
7052    if (Inst.getNumOperands() != 5)
7053      return false;
7054    MCInst TmpInst;
7055    TmpInst.setOpcode(ARM::t2STR_PRE);
7056    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7057    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7058    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7059    TmpInst.addOperand(MCOperand::CreateImm(-4));
7060    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7061    TmpInst.addOperand(Inst.getOperand(3));
7062    Inst = TmpInst;
7063    return true;
7064  }
7065  case ARM::LDMIA_UPD:
7066    // If this is a load of a single register via a 'pop', then we should use
7067    // a post-indexed LDR instruction instead, per the ARM ARM.
7068    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
7069        Inst.getNumOperands() == 5) {
7070      MCInst TmpInst;
7071      TmpInst.setOpcode(ARM::LDR_POST_IMM);
7072      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7073      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7074      TmpInst.addOperand(Inst.getOperand(1)); // Rn
7075      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
7076      TmpInst.addOperand(MCOperand::CreateImm(4));
7077      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7078      TmpInst.addOperand(Inst.getOperand(3));
7079      Inst = TmpInst;
7080      return true;
7081    }
7082    break;
7083  case ARM::STMDB_UPD:
7084    // If this is a store of a single register via a 'push', then we should use
7085    // a pre-indexed STR instruction instead, per the ARM ARM.
7086    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
7087        Inst.getNumOperands() == 5) {
7088      MCInst TmpInst;
7089      TmpInst.setOpcode(ARM::STR_PRE_IMM);
7090      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7091      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7092      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7093      TmpInst.addOperand(MCOperand::CreateImm(-4));
7094      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7095      TmpInst.addOperand(Inst.getOperand(3));
7096      Inst = TmpInst;
7097    }
7098    break;
7099  case ARM::t2ADDri12:
7100    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7101    // mnemonic was used (not "addw"), encoding T3 is preferred.
7102    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
7103        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7104      break;
7105    Inst.setOpcode(ARM::t2ADDri);
7106    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7107    break;
7108  case ARM::t2SUBri12:
7109    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7110    // mnemonic was used (not "subw"), encoding T3 is preferred.
7111    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
7112        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7113      break;
7114    Inst.setOpcode(ARM::t2SUBri);
7115    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7116    break;
7117  case ARM::tADDi8:
7118    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7119    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7120    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7121    // to encoding T1 if <Rd> is omitted."
7122    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7123      Inst.setOpcode(ARM::tADDi3);
7124      return true;
7125    }
7126    break;
7127  case ARM::tSUBi8:
7128    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
7129    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7130    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7131    // to encoding T1 if <Rd> is omitted."
7132    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7133      Inst.setOpcode(ARM::tSUBi3);
7134      return true;
7135    }
7136    break;
7137  case ARM::t2ADDri:
7138  case ARM::t2SUBri: {
7139    // If the destination and first source operand are the same, and
7140    // the flags are compatible with the current IT status, use encoding T2
7141    // instead of T3. For compatibility with the system 'as'. Make sure the
7142    // wide encoding wasn't explicit.
7143    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7144        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7145        (unsigned)Inst.getOperand(2).getImm() > 255 ||
7146        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7147        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7148        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7149         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7150      break;
7151    MCInst TmpInst;
7152    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7153                      ARM::tADDi8 : ARM::tSUBi8);
7154    TmpInst.addOperand(Inst.getOperand(0));
7155    TmpInst.addOperand(Inst.getOperand(5));
7156    TmpInst.addOperand(Inst.getOperand(0));
7157    TmpInst.addOperand(Inst.getOperand(2));
7158    TmpInst.addOperand(Inst.getOperand(3));
7159    TmpInst.addOperand(Inst.getOperand(4));
7160    Inst = TmpInst;
7161    return true;
7162  }
7163  case ARM::t2ADDrr: {
7164    // If the destination and first source operand are the same, and
7165    // there's no setting of the flags, use encoding T2 instead of T3.
7166    // Note that this is only for ADD, not SUB. This mirrors the system
7167    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
7168    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7169        Inst.getOperand(5).getReg() != 0 ||
7170        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7171         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7172      break;
7173    MCInst TmpInst;
7174    TmpInst.setOpcode(ARM::tADDhirr);
7175    TmpInst.addOperand(Inst.getOperand(0));
7176    TmpInst.addOperand(Inst.getOperand(0));
7177    TmpInst.addOperand(Inst.getOperand(2));
7178    TmpInst.addOperand(Inst.getOperand(3));
7179    TmpInst.addOperand(Inst.getOperand(4));
7180    Inst = TmpInst;
7181    return true;
7182  }
7183  case ARM::tADDrSP: {
7184    // If the non-SP source operand and the destination operand are not the
7185    // same, we need to use the 32-bit encoding if it's available.
7186    if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7187      Inst.setOpcode(ARM::t2ADDrr);
7188      Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7189      return true;
7190    }
7191    break;
7192  }
7193  case ARM::tB:
7194    // A Thumb conditional branch outside of an IT block is a tBcc.
7195    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7196      Inst.setOpcode(ARM::tBcc);
7197      return true;
7198    }
7199    break;
7200  case ARM::t2B:
7201    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7202    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7203      Inst.setOpcode(ARM::t2Bcc);
7204      return true;
7205    }
7206    break;
7207  case ARM::t2Bcc:
7208    // If the conditional is AL or we're in an IT block, we really want t2B.
7209    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7210      Inst.setOpcode(ARM::t2B);
7211      return true;
7212    }
7213    break;
7214  case ARM::tBcc:
7215    // If the conditional is AL, we really want tB.
7216    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7217      Inst.setOpcode(ARM::tB);
7218      return true;
7219    }
7220    break;
7221  case ARM::tLDMIA: {
7222    // If the register list contains any high registers, or if the writeback
7223    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7224    // instead if we're in Thumb2. Otherwise, this should have generated
7225    // an error in validateInstruction().
7226    unsigned Rn = Inst.getOperand(0).getReg();
7227    bool hasWritebackToken =
7228      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7229       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7230    bool listContainsBase;
7231    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7232        (!listContainsBase && !hasWritebackToken) ||
7233        (listContainsBase && hasWritebackToken)) {
7234      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7235      assert (isThumbTwo());
7236      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7237      // If we're switching to the updating version, we need to insert
7238      // the writeback tied operand.
7239      if (hasWritebackToken)
7240        Inst.insert(Inst.begin(),
7241                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7242      return true;
7243    }
7244    break;
7245  }
7246  case ARM::tSTMIA_UPD: {
7247    // If the register list contains any high registers, we need to use
7248    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7249    // should have generated an error in validateInstruction().
7250    unsigned Rn = Inst.getOperand(0).getReg();
7251    bool listContainsBase;
7252    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7253      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7254      assert (isThumbTwo());
7255      Inst.setOpcode(ARM::t2STMIA_UPD);
7256      return true;
7257    }
7258    break;
7259  }
7260  case ARM::tPOP: {
7261    bool listContainsBase;
7262    // If the register list contains any high registers, we need to use
7263    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7264    // should have generated an error in validateInstruction().
7265    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7266      return false;
7267    assert (isThumbTwo());
7268    Inst.setOpcode(ARM::t2LDMIA_UPD);
7269    // Add the base register and writeback operands.
7270    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7271    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7272    return true;
7273  }
7274  case ARM::tPUSH: {
7275    bool listContainsBase;
7276    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7277      return false;
7278    assert (isThumbTwo());
7279    Inst.setOpcode(ARM::t2STMDB_UPD);
7280    // Add the base register and writeback operands.
7281    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7282    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7283    return true;
7284  }
7285  case ARM::t2MOVi: {
7286    // If we can use the 16-bit encoding and the user didn't explicitly
7287    // request the 32-bit variant, transform it here.
7288    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7289        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7290        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7291         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7292        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7293        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7294         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7295      // The operands aren't in the same order for tMOVi8...
7296      MCInst TmpInst;
7297      TmpInst.setOpcode(ARM::tMOVi8);
7298      TmpInst.addOperand(Inst.getOperand(0));
7299      TmpInst.addOperand(Inst.getOperand(4));
7300      TmpInst.addOperand(Inst.getOperand(1));
7301      TmpInst.addOperand(Inst.getOperand(2));
7302      TmpInst.addOperand(Inst.getOperand(3));
7303      Inst = TmpInst;
7304      return true;
7305    }
7306    break;
7307  }
7308  case ARM::t2MOVr: {
7309    // If we can use the 16-bit encoding and the user didn't explicitly
7310    // request the 32-bit variant, transform it here.
7311    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7312        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7313        Inst.getOperand(2).getImm() == ARMCC::AL &&
7314        Inst.getOperand(4).getReg() == ARM::CPSR &&
7315        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7316         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7317      // The operands aren't the same for tMOV[S]r... (no cc_out)
7318      MCInst TmpInst;
7319      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7320      TmpInst.addOperand(Inst.getOperand(0));
7321      TmpInst.addOperand(Inst.getOperand(1));
7322      TmpInst.addOperand(Inst.getOperand(2));
7323      TmpInst.addOperand(Inst.getOperand(3));
7324      Inst = TmpInst;
7325      return true;
7326    }
7327    break;
7328  }
7329  case ARM::t2SXTH:
7330  case ARM::t2SXTB:
7331  case ARM::t2UXTH:
7332  case ARM::t2UXTB: {
7333    // If we can use the 16-bit encoding and the user didn't explicitly
7334    // request the 32-bit variant, transform it here.
7335    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7336        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7337        Inst.getOperand(2).getImm() == 0 &&
7338        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7339         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7340      unsigned NewOpc;
7341      switch (Inst.getOpcode()) {
7342      default: llvm_unreachable("Illegal opcode!");
7343      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7344      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7345      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7346      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7347      }
7348      // The operands aren't the same for thumb1 (no rotate operand).
7349      MCInst TmpInst;
7350      TmpInst.setOpcode(NewOpc);
7351      TmpInst.addOperand(Inst.getOperand(0));
7352      TmpInst.addOperand(Inst.getOperand(1));
7353      TmpInst.addOperand(Inst.getOperand(3));
7354      TmpInst.addOperand(Inst.getOperand(4));
7355      Inst = TmpInst;
7356      return true;
7357    }
7358    break;
7359  }
7360  case ARM::MOVsi: {
7361    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7362    // rrx shifts and asr/lsr of #32 is encoded as 0
7363    if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7364      return false;
7365    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7366      // Shifting by zero is accepted as a vanilla 'MOVr'
7367      MCInst TmpInst;
7368      TmpInst.setOpcode(ARM::MOVr);
7369      TmpInst.addOperand(Inst.getOperand(0));
7370      TmpInst.addOperand(Inst.getOperand(1));
7371      TmpInst.addOperand(Inst.getOperand(3));
7372      TmpInst.addOperand(Inst.getOperand(4));
7373      TmpInst.addOperand(Inst.getOperand(5));
7374      Inst = TmpInst;
7375      return true;
7376    }
7377    return false;
7378  }
7379  case ARM::ANDrsi:
7380  case ARM::ORRrsi:
7381  case ARM::EORrsi:
7382  case ARM::BICrsi:
7383  case ARM::SUBrsi:
7384  case ARM::ADDrsi: {
7385    unsigned newOpc;
7386    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7387    if (SOpc == ARM_AM::rrx) return false;
7388    switch (Inst.getOpcode()) {
7389    default: llvm_unreachable("unexpected opcode!");
7390    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7391    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7392    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7393    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7394    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7395    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7396    }
7397    // If the shift is by zero, use the non-shifted instruction definition.
7398    // The exception is for right shifts, where 0 == 32
7399    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7400        !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7401      MCInst TmpInst;
7402      TmpInst.setOpcode(newOpc);
7403      TmpInst.addOperand(Inst.getOperand(0));
7404      TmpInst.addOperand(Inst.getOperand(1));
7405      TmpInst.addOperand(Inst.getOperand(2));
7406      TmpInst.addOperand(Inst.getOperand(4));
7407      TmpInst.addOperand(Inst.getOperand(5));
7408      TmpInst.addOperand(Inst.getOperand(6));
7409      Inst = TmpInst;
7410      return true;
7411    }
7412    return false;
7413  }
7414  case ARM::ITasm:
7415  case ARM::t2IT: {
7416    // In the IT encoding, the mask bits for all but the first condition are
7417    // expressed relative to the low bit of the condition code: a bit equal to
7418    // that low bit means 't'. We always parse with 1 meaning 't', so XOR-
7419    // toggle the condition bits when the low bit of the condition code is zero.
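    // For example, "ite eq": EQ is 0b0000 (low bit 0), the parsed mask is
    // 0b0100 (bit 3 is 0 for the 'e', bit 2 is the terminating 1), and the
    // XOR with (0xE << 2) & 0xF == 0b1000 yields the encoded mask 0b1100.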
7420    MCOperand &MO = Inst.getOperand(1);
7421    unsigned Mask = MO.getImm();
7422    unsigned OrigMask = Mask;
7423    unsigned TZ = countTrailingZeros(Mask);
7424    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7425      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7426      Mask ^= (0xE << TZ) & 0xF;
7427    }
7428    MO.setImm(Mask);
7429
7430    // Set up the IT block state according to the IT instruction we just
7431    // matched.
7432    assert(!inITBlock() && "nested IT blocks?!");
7433    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7434    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7435    ITState.CurPosition = 0;
7436    ITState.FirstCond = true;
7437    break;
7438  }
7439  case ARM::t2LSLrr:
7440  case ARM::t2LSRrr:
7441  case ARM::t2ASRrr:
7442  case ARM::t2SBCrr:
7443  case ARM::t2RORrr:
7444  case ARM::t2BICrr:
7445  {
7446    // Assemblers should use the narrow encodings of these instructions when permissible.
7447    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7448         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7449        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7450        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7451         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7452        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7453         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7454      unsigned NewOpc;
7455      switch (Inst.getOpcode()) {
7456        default: llvm_unreachable("unexpected opcode");
7457        case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7458        case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7459        case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7460        case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7461        case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7462        case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7463      }
7464      MCInst TmpInst;
7465      TmpInst.setOpcode(NewOpc);
7466      TmpInst.addOperand(Inst.getOperand(0));
7467      TmpInst.addOperand(Inst.getOperand(5));
7468      TmpInst.addOperand(Inst.getOperand(1));
7469      TmpInst.addOperand(Inst.getOperand(2));
7470      TmpInst.addOperand(Inst.getOperand(3));
7471      TmpInst.addOperand(Inst.getOperand(4));
7472      Inst = TmpInst;
7473      return true;
7474    }
7475    return false;
7476  }
7477  case ARM::t2ANDrr:
7478  case ARM::t2EORrr:
7479  case ARM::t2ADCrr:
7480  case ARM::t2ORRrr:
7481  {
7482    // Assemblers should use the narrow encodings of these instructions when permissible.
7483    // These instructions are special in that they are commutable, so shorter encodings
7484    // are available more often.
7485    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7486         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7487        (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7488         Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7489        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7490         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7491        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7492         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7493      unsigned NewOpc;
7494      switch (Inst.getOpcode()) {
7495        default: llvm_unreachable("unexpected opcode");
7496        case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7497        case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7498        case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7499        case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7500      }
7501      MCInst TmpInst;
7502      TmpInst.setOpcode(NewOpc);
7503      TmpInst.addOperand(Inst.getOperand(0));
7504      TmpInst.addOperand(Inst.getOperand(5));
7505      if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7506        TmpInst.addOperand(Inst.getOperand(1));
7507        TmpInst.addOperand(Inst.getOperand(2));
7508      } else {
7509        TmpInst.addOperand(Inst.getOperand(2));
7510        TmpInst.addOperand(Inst.getOperand(1));
7511      }
7512      TmpInst.addOperand(Inst.getOperand(3));
7513      TmpInst.addOperand(Inst.getOperand(4));
7514      Inst = TmpInst;
7515      return true;
7516    }
7517    return false;
7518  }
7519  }
7520  return false;
7521}
7522
7523unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7524  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7525  // suffix depending on whether they're in an IT block or not.
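  // E.g., the 16-bit add-register encoding sets flags outside an IT block and
  // must be written "adds", while inside an IT block it does not set flags and
  // must be written "add<c>".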
7526  unsigned Opc = Inst.getOpcode();
7527  const MCInstrDesc &MCID = getInstDesc(Opc);
7528  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7529    assert(MCID.hasOptionalDef() &&
7530           "optionally flag setting instruction missing optional def operand");
7531    assert(MCID.NumOperands == Inst.getNumOperands() &&
7532           "operand count mismatch!");
7533    // Find the optional-def operand (cc_out).
7534    unsigned OpNo;
7535    for (OpNo = 0;
7536         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7537         ++OpNo)
7538      ;
7539    // If we're parsing Thumb1, reject it completely.
7540    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7541      return Match_MnemonicFail;
7542    // If we're parsing Thumb2, which form is legal depends on whether we're
7543    // in an IT block.
7544    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7545        !inITBlock())
7546      return Match_RequiresITBlock;
7547    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7548        inITBlock())
7549      return Match_RequiresNotITBlock;
7550  }
7551  // Some high-register supporting Thumb1 encodings only allow both registers
7552  // to be from r0-r7 when in Thumb2.
7553  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7554           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7555           isARMLowRegister(Inst.getOperand(2).getReg()))
7556    return Match_RequiresThumb2;
7557  // Others only require ARMv6 or later.
7558  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7559           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7560           isARMLowRegister(Inst.getOperand(1).getReg()))
7561    return Match_RequiresV6;
7562  return Match_Success;
7563}
7564
7565static const char *getSubtargetFeatureName(unsigned Val);
7566bool ARMAsmParser::
7567MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
7568                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7569                        MCStreamer &Out, unsigned &ErrorInfo,
7570                        bool MatchingInlineAsm) {
7571  MCInst Inst;
7572  unsigned MatchResult;
7573
7574  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
7575                                     MatchingInlineAsm);
7576  switch (MatchResult) {
7577  default: break;
7578  case Match_Success:
7579    // Context sensitive operand constraints aren't handled by the matcher,
7580    // so check them here.
7581    if (validateInstruction(Inst, Operands)) {
7582      // Still progress the IT block, otherwise one wrong condition causes
7583      // nasty cascading errors.
7584      forwardITPosition();
7585      return true;
7586    }
7587
7588    // Some instructions need post-processing to, for example, tweak which
7589    // encoding is selected. Loop on it while changes happen so the
7590    // individual transformations can chain off each other. E.g.,
7591    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7592    while (processInstruction(Inst, Operands))
7593      ;
7594
7595    // Only move forward at the very end so that everything in validate
7596    // and process gets a consistent answer about whether we're in an IT
7597    // block.
7598    forwardITPosition();
7599
7600    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7601    // doesn't actually encode.
7602    if (Inst.getOpcode() == ARM::ITasm)
7603      return false;
7604
7605    Inst.setLoc(IDLoc);
7606    Out.EmitInstruction(Inst);
7607    return false;
7608  case Match_MissingFeature: {
7609    assert(ErrorInfo && "Unknown missing feature!");
7610    // Special case the error message for the very common case where only
7611    // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7612    std::string Msg = "instruction requires:";
7613    unsigned Mask = 1;
7614    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7615      if (ErrorInfo & Mask) {
7616        Msg += " ";
7617        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7618      }
7619      Mask <<= 1;
7620    }
7621    return Error(IDLoc, Msg);
7622  }
7623  case Match_InvalidOperand: {
7624    SMLoc ErrorLoc = IDLoc;
7625    if (ErrorInfo != ~0U) {
7626      if (ErrorInfo >= Operands.size())
7627        return Error(IDLoc, "too few operands for instruction");
7628
7629      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7630      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7631    }
7632
7633    return Error(ErrorLoc, "invalid operand for instruction");
7634  }
7635  case Match_MnemonicFail:
7636    return Error(IDLoc, "invalid instruction",
7637                 ((ARMOperand*)Operands[0])->getLocRange());
7638  case Match_RequiresNotITBlock:
7639    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7640  case Match_RequiresITBlock:
7641    return Error(IDLoc, "instruction only valid inside IT block");
7642  case Match_RequiresV6:
7643    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7644  case Match_RequiresThumb2:
7645    return Error(IDLoc, "instruction variant requires Thumb2");
7646  case Match_ImmRange0_4: {
7647    SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7648    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7649    return Error(ErrorLoc, "immediate operand must be in the range [0,4]");
7650  }
7651  case Match_ImmRange0_15: {
7652    SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7653    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7654    return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
7655  }
7656  }
7657
7658  llvm_unreachable("Implement any new match types added!");
7659}
7660
7661/// ParseDirective parses the ARM specific directives
7662bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7663  StringRef IDVal = DirectiveID.getIdentifier();
7664  if (IDVal == ".word")
7665    return parseDirectiveWord(4, DirectiveID.getLoc());
7666  else if (IDVal == ".thumb")
7667    return parseDirectiveThumb(DirectiveID.getLoc());
7668  else if (IDVal == ".arm")
7669    return parseDirectiveARM(DirectiveID.getLoc());
7670  else if (IDVal == ".thumb_func")
7671    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7672  else if (IDVal == ".code")
7673    return parseDirectiveCode(DirectiveID.getLoc());
7674  else if (IDVal == ".syntax")
7675    return parseDirectiveSyntax(DirectiveID.getLoc());
7676  else if (IDVal == ".unreq")
7677    return parseDirectiveUnreq(DirectiveID.getLoc());
7678  else if (IDVal == ".arch")
7679    return parseDirectiveArch(DirectiveID.getLoc());
7680  else if (IDVal == ".eabi_attribute")
7681    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7682  else if (IDVal == ".fnstart")
7683    return parseDirectiveFnStart(DirectiveID.getLoc());
7684  else if (IDVal == ".fnend")
7685    return parseDirectiveFnEnd(DirectiveID.getLoc());
7686  else if (IDVal == ".cantunwind")
7687    return parseDirectiveCantUnwind(DirectiveID.getLoc());
7688  else if (IDVal == ".personality")
7689    return parseDirectivePersonality(DirectiveID.getLoc());
7690  else if (IDVal == ".handlerdata")
7691    return parseDirectiveHandlerData(DirectiveID.getLoc());
7692  else if (IDVal == ".setfp")
7693    return parseDirectiveSetFP(DirectiveID.getLoc());
7694  else if (IDVal == ".pad")
7695    return parseDirectivePad(DirectiveID.getLoc());
7696  else if (IDVal == ".save")
7697    return parseDirectiveRegSave(DirectiveID.getLoc(), false);
7698  else if (IDVal == ".vsave")
7699    return parseDirectiveRegSave(DirectiveID.getLoc(), true);
7700  return true;
7701}
7702
7703/// parseDirectiveWord
7704///  ::= .word [ expression (, expression)* ]
7705bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7706  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7707    for (;;) {
7708      const MCExpr *Value;
7709      if (getParser().parseExpression(Value))
7710        return true;
7711
7712      getParser().getStreamer().EmitValue(Value, Size);
7713
7714      if (getLexer().is(AsmToken::EndOfStatement))
7715        break;
7716
7717      // FIXME: Improve diagnostic.
7718      if (getLexer().isNot(AsmToken::Comma))
7719        return Error(L, "unexpected token in directive");
7720      Parser.Lex();
7721    }
7722  }
7723
7724  Parser.Lex();
7725  return false;
7726}
7727
7728/// parseDirectiveThumb
7729///  ::= .thumb
7730bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7731  if (getLexer().isNot(AsmToken::EndOfStatement))
7732    return Error(L, "unexpected token in directive");
7733  Parser.Lex();
7734
7735  if (!isThumb())
7736    SwitchMode();
7737  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7738  return false;
7739}
7740
7741/// parseDirectiveARM
7742///  ::= .arm
7743bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7744  if (getLexer().isNot(AsmToken::EndOfStatement))
7745    return Error(L, "unexpected token in directive");
7746  Parser.Lex();
7747
7748  if (isThumb())
7749    SwitchMode();
7750  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7751  return false;
7752}
7753
7754/// parseDirectiveThumbFunc
7755///  ::= .thumb_func symbol_name
7756bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7757  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7758  bool isMachO = MAI.hasSubsectionsViaSymbols();
7759  StringRef Name;
7760  bool needFuncName = true;
7761
7762  // Darwin asm has an (optional) function name after the .thumb_func
7763  // directive; ELF doesn't.
7764  if (isMachO) {
7765    const AsmToken &Tok = Parser.getTok();
7766    if (Tok.isNot(AsmToken::EndOfStatement)) {
7767      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7768        return Error(L, "unexpected token in .thumb_func directive");
7769      Name = Tok.getIdentifier();
7770      Parser.Lex(); // Consume the identifier token.
7771      needFuncName = false;
7772    }
7773  }
7774
7775  if (getLexer().isNot(AsmToken::EndOfStatement))
7776    return Error(L, "unexpected token in directive");
7777
7778  // Eat the end of statement and any blank lines that follow.
7779  while (getLexer().is(AsmToken::EndOfStatement))
7780    Parser.Lex();
7781
7782  // FIXME: assuming function name will be the line following .thumb_func
7783  // We really should be checking the next symbol definition even if there's
7784  // stuff in between.
7785  if (needFuncName) {
7786    Name = Parser.getTok().getIdentifier();
7787  }
7788
7789  // Mark symbol as a thumb symbol.
7790  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7791  getParser().getStreamer().EmitThumbFunc(Func);
7792  return false;
7793}
7794
7795/// parseDirectiveSyntax
7796///  ::= .syntax unified | divided
7797bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7798  const AsmToken &Tok = Parser.getTok();
7799  if (Tok.isNot(AsmToken::Identifier))
7800    return Error(L, "unexpected token in .syntax directive");
7801  StringRef Mode = Tok.getString();
7802  if (Mode == "unified" || Mode == "UNIFIED")
7803    Parser.Lex();
7804  else if (Mode == "divided" || Mode == "DIVIDED")
7805    return Error(L, "'.syntax divided' arm asssembly not supported");
7806  else
7807    return Error(L, "unrecognized syntax mode in .syntax directive");
7808
7809  if (getLexer().isNot(AsmToken::EndOfStatement))
7810    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7811  Parser.Lex();
7812
7813  // TODO tell the MC streamer the mode
7814  // getParser().getStreamer().Emit???();
7815  return false;
7816}
7817
7818/// parseDirectiveCode
7819///  ::= .code 16 | 32
7820bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7821  const AsmToken &Tok = Parser.getTok();
7822  if (Tok.isNot(AsmToken::Integer))
7823    return Error(L, "unexpected token in .code directive");
7824  int64_t Val = Parser.getTok().getIntVal();
7825  if (Val == 16)
7826    Parser.Lex();
7827  else if (Val == 32)
7828    Parser.Lex();
7829  else
7830    return Error(L, "invalid operand to .code directive");
7831
7832  if (getLexer().isNot(AsmToken::EndOfStatement))
7833    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7834  Parser.Lex();
7835
7836  if (Val == 16) {
7837    if (!isThumb())
7838      SwitchMode();
7839    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7840  } else {
7841    if (isThumb())
7842      SwitchMode();
7843    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7844  }
7845
7846  return false;
7847}
7848
7849/// parseDirectiveReq
7850///  ::= name .req registername
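///  e.g., "acc .req r4" lets "acc" be used wherever r4 would be accepted.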
7851bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7852  Parser.Lex(); // Eat the '.req' token.
7853  unsigned Reg;
7854  SMLoc SRegLoc, ERegLoc;
7855  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7856    Parser.eatToEndOfStatement();
7857    return Error(SRegLoc, "register name expected");
7858  }
7859
7860  // Shouldn't be anything else.
7861  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7862    Parser.eatToEndOfStatement();
7863    return Error(Parser.getTok().getLoc(),
7864                 "unexpected input in .req directive.");
7865  }
7866
7867  Parser.Lex(); // Consume the EndOfStatement
7868
7869  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7870    return Error(SRegLoc, "redefinition of '" + Name +
7871                          "' does not match original.");
7872
7873  return false;
7874}
7875
7876/// parseDirectiveUnreq
7877///  ::= .unreq registername
7878bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7879  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7880    Parser.eatToEndOfStatement();
7881    return Error(L, "unexpected input in .unreq directive.");
7882  }
7883  RegisterReqs.erase(Parser.getTok().getIdentifier());
7884  Parser.Lex(); // Eat the identifier.
7885  return false;
7886}
7887
7888/// parseDirectiveArch
7889///  ::= .arch token
7890bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7891  return true;
7892}
7893
7894/// parseDirectiveEabiAttr
7895///  ::= .eabi_attribute int, int
7896bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7897  return true;
7898}
7899
7900/// parseDirectiveFnStart
7901///  ::= .fnstart
7902bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
7903  if (FnStartLoc.isValid()) {
7904    Error(L, ".fnstart starts before the end of previous one");
7905    Error(FnStartLoc, "previous .fnstart starts here");
7906    return true;
7907  }
7908
7909  FnStartLoc = L;
7910  getParser().getStreamer().EmitFnStart();
7911  return false;
7912}
7913
7914/// parseDirectiveFnEnd
7915///  ::= .fnend
7916bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
7917  // Check the ordering of unwind directives
7918  if (!FnStartLoc.isValid())
7919    return Error(L, ".fnstart must precede .fnend directive");
7920
7921  // Reset the unwind directives parser state
7922  resetUnwindDirectiveParserState();
7923
7924  getParser().getStreamer().EmitFnEnd();
7925  return false;
7926}
7927
7928/// parseDirectiveCantUnwind
7929///  ::= .cantunwind
7930bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
7931  // Check the ordering of unwind directives
7932  CantUnwindLoc = L;
7933  if (!FnStartLoc.isValid())
7934    return Error(L, ".fnstart must precede .cantunwind directive");
7935  if (HandlerDataLoc.isValid()) {
7936    Error(L, ".cantunwind can't be used with .handlerdata directive");
7937    Error(HandlerDataLoc, ".handlerdata was specified here");
7938    return true;
7939  }
7940  if (PersonalityLoc.isValid()) {
7941    Error(L, ".cantunwind can't be used with .personality directive");
7942    Error(PersonalityLoc, ".personality was specified here");
7943    return true;
7944  }
7945
7946  getParser().getStreamer().EmitCantUnwind();
7947  return false;
7948}
7949
7950/// parseDirectivePersonality
7951///  ::= .personality name
7952bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
7953  // Check the ordering of unwind directives
7954  PersonalityLoc = L;
7955  if (!FnStartLoc.isValid())
7956    return Error(L, ".fnstart must precede .personality directive");
7957  if (CantUnwindLoc.isValid()) {
7958    Error(L, ".personality can't be used with .cantunwind directive");
7959    Error(CantUnwindLoc, ".cantunwind was specified here");
7960    return true;
7961  }
7962  if (HandlerDataLoc.isValid()) {
7963    Error(L, ".personality must precede .handlerdata directive");
7964    Error(HandlerDataLoc, ".handlerdata was specified here");
7965    return true;
7966  }
7967
7968  // Parse the name of the personality routine
7969  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7970    Parser.eatToEndOfStatement();
7971    return Error(L, "unexpected input in .personality directive.");
7972  }
7973  StringRef Name(Parser.getTok().getIdentifier());
7974  Parser.Lex();
7975
7976  MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name);
7977  getParser().getStreamer().EmitPersonality(PR);
7978  return false;
7979}
7980
7981/// parseDirectiveHandlerData
7982///  ::= .handlerdata
7983bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
7984  // Check the ordering of unwind directives
7985  HandlerDataLoc = L;
7986  if (!FnStartLoc.isValid())
7987    return Error(L, ".fnstart must precede .personality directive");
7988  if (CantUnwindLoc.isValid()) {
7989    Error(L, ".handlerdata can't be used with .cantunwind directive");
7990    Error(CantUnwindLoc, ".cantunwind was specified here");
7991    return true;
7992  }
7993
7994  getParser().getStreamer().EmitHandlerData();
7995  return false;
7996}
7997
7998/// parseDirectiveSetFP
7999///  ::= .setfp fpreg, spreg [, offset]
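///  e.g., ".setfp fp, sp, #8"; the offset is optional and defaults to 0.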
8000bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
8001  // Check the ordering of unwind directives
8002  if (!FnStartLoc.isValid())
8003    return Error(L, ".fnstart must precede .setfp directive");
8004  if (HandlerDataLoc.isValid())
8005    return Error(L, ".setfp must precede .handlerdata directive");
8006
8007  // Parse fpreg
8008  SMLoc NewFPRegLoc = Parser.getTok().getLoc();
8009  int NewFPReg = tryParseRegister();
8010  if (NewFPReg == -1)
8011    return Error(NewFPRegLoc, "frame pointer register expected");
8012
8013  // Consume comma
8014  if (!Parser.getTok().is(AsmToken::Comma))
8015    return Error(Parser.getTok().getLoc(), "comma expected");
8016  Parser.Lex(); // skip comma
8017
8018  // Parse spreg
8019  SMLoc NewSPRegLoc = Parser.getTok().getLoc();
8020  int NewSPReg = tryParseRegister();
8021  if (NewSPReg == -1)
8022    return Error(NewSPRegLoc, "stack pointer register expected");
8023
8024  if (NewSPReg != ARM::SP && NewSPReg != FPReg)
8025    return Error(NewSPRegLoc,
8026                 "register should be either $sp or the latest fp register");
8027
8028  // Update the frame pointer register
8029  FPReg = NewFPReg;
8030
8031  // Parse offset
8032  int64_t Offset = 0;
8033  if (Parser.getTok().is(AsmToken::Comma)) {
8034    Parser.Lex(); // skip comma
8035
8036    if (Parser.getTok().isNot(AsmToken::Hash) &&
8037        Parser.getTok().isNot(AsmToken::Dollar)) {
8038      return Error(Parser.getTok().getLoc(), "'#' expected");
8039    }
8040    Parser.Lex(); // skip hash token.
8041
8042    const MCExpr *OffsetExpr;
8043    SMLoc ExLoc = Parser.getTok().getLoc();
8044    SMLoc EndLoc;
8045    if (getParser().parseExpression(OffsetExpr, EndLoc))
8046      return Error(ExLoc, "malformed setfp offset");
8047    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8048    if (!CE)
8049      return Error(ExLoc, "setfp offset must be an immediate");
8050
8051    Offset = CE->getValue();
8052  }
8053
8054  getParser().getStreamer().EmitSetFP(static_cast<unsigned>(NewFPReg),
8055                                      static_cast<unsigned>(NewSPReg),
8056                                      Offset);
8057  return false;
8058}
8059
8060/// parseDirectivePad
8061///  ::= .pad offset
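///  e.g., ".pad #16" records a 16-byte stack adjustment for the unwinder.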
8062bool ARMAsmParser::parseDirectivePad(SMLoc L) {
8063  // Check the ordering of unwind directives
8064  if (!FnStartLoc.isValid())
8065    return Error(L, ".fnstart must precede .pad directive");
8066  if (HandlerDataLoc.isValid())
8067    return Error(L, ".pad must precede .handlerdata directive");
8068
8069  // Parse the offset
8070  if (Parser.getTok().isNot(AsmToken::Hash) &&
8071      Parser.getTok().isNot(AsmToken::Dollar)) {
8072    return Error(Parser.getTok().getLoc(), "'#' expected");
8073  }
8074  Parser.Lex(); // skip hash token.
8075
8076  const MCExpr *OffsetExpr;
8077  SMLoc ExLoc = Parser.getTok().getLoc();
8078  SMLoc EndLoc;
8079  if (getParser().parseExpression(OffsetExpr, EndLoc))
8080    return Error(ExLoc, "malformed pad offset");
8081  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
8082  if (!CE)
8083    return Error(ExLoc, "pad offset must be an immediate");
8084
8085  getParser().getStreamer().EmitPad(CE->getValue());
8086  return false;
8087}
8088
8089/// parseDirectiveRegSave
8090///  ::= .save  { registers }
8091///  ::= .vsave { registers }
8092bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
8093  // Check the ordering of unwind directives
8094  if (!FnStartLoc.isValid())
8095    return Error(L, ".fnstart must precede .save or .vsave directives");
8096  if (HandlerDataLoc.isValid())
8097    return Error(L, ".save or .vsave must precede .handlerdata directive");
8098
8099  // Parse the register list
8100  SmallVector<MCParsedAsmOperand*, 1> Operands;
8101  if (parseRegisterList(Operands))
8102    return true;
8103  ARMOperand *Op = (ARMOperand*)Operands[0];
8104  if (!IsVector && !Op->isRegList())
8105    return Error(L, ".save expects GPR registers");
8106  if (IsVector && !Op->isDPRRegList())
8107    return Error(L, ".vsave expects DPR registers");
8108
8109  getParser().getStreamer().EmitRegSave(Op->getRegList(), IsVector);
8110  return false;
8111}
8112
8113/// Force static initialization.
8114extern "C" void LLVMInitializeARMAsmParser() {
8115  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
8116  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
8117}
8118
8119#define GET_REGISTER_MATCHER
8120#define GET_SUBTARGET_FEATURE_NAME
8121#define GET_MATCHER_IMPLEMENTATION
8122#include "ARMGenAsmMatcher.inc"
8123
8124// Define this matcher function after the auto-generated include so we
8125// have the match class enum definitions.
8126unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
8127                                                  unsigned Kind) {
8128  ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
8129  // If the kind is a token for a literal immediate, check if our asm
8130  // operand matches. This is for InstAliases which have a fixed-value
8131  // immediate in the syntax.
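  // MCK__35_0 is the generated match class for the literal token "#0"
  // (35 is the ASCII code of '#').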
8132  if (Kind == MCK__35_0 && Op->isImm()) {
8133    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
8134    if (!CE)
8135      return Match_InvalidOperand;
8136    if (CE->getValue() == 0)
8137      return Match_Success;
8138  }
8139  return Match_InvalidOperand;
8140}
8141