ARMAsmParser.cpp revision 88eb89b89f9426feb7be9b19d1a664b37c590bdb
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCTargetAsmParser.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class ARMOperand;

enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;
  const MCRegisterInfo *MRI;

  // Map of register aliases registered via the .req directive.
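  // For example, "fpreg .req r7" would let "fpreg" stand for r7 in later
  // operands (the alias name here is only illustrative).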
  StringMap<unsigned> RegisterReqs;

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
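                              // E.g. a mask with 3 trailing zeroes
                              // describes a 1-instruction block; one
                              // with 0 trailing zeroes describes a
                              // 4-instruction block.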

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // first instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  bool Warning(SMLoc L, const Twine &Msg,
               ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
    return Parser.Warning(L, Msg, Ranges);
  }
  bool Error(SMLoc L, const Twine &Msg,
             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
    return Parser.Error(L, Msg, Ranges);
  }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
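  // PKHBT takes an "lsl #0..#31" shift and PKHTB an "asr #1..#32", hence the
  // differing ranges in the two wrappers below.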
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtStWriteBackRegAddrMode2(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtStWriteBackRegAddrMode3(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtLdExtTWriteBackImm(MCInst &Inst,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtLdExtTWriteBackReg(MCInst &Inst,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtStExtTWriteBackImm(MCInst &Inst,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtStExtTWriteBackReg(MCInst &Inst,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtThumbMultiply(MCInst &Inst,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtVLDwbFixed(MCInst &Inst,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtVLDwbRegister(MCInst &Inst,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtVSTwbFixed(MCInst &Inst,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtVSTwbRegister(MCInst &Inst,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Cache the MCRegisterInfo.
    MRI = &getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    // Set ELF header flags.
    // FIXME: This should eventually end up somewhere else where more
    // intelligent flag decisions can be made. For now we are just maintaining
    // the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
    if (MCELFStreamer *MES = dyn_cast<MCELFStreamer>(&Parser.getStreamer()))
      MES->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5);
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
class ARMOperand : public MCParsedAsmOperand {
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  SmallVector<unsigned, 8> Registers;

  struct CCOp {
    ARMCC::CondCodes Val;
  };

  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  struct ITMaskOp {
    unsigned Mask:4;
  };

  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };

  struct IFlagsOp {
    ARM_PROC::IFlags Val;
  };

  struct MMaskOp {
    unsigned Val;
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned LaneIndex;
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  /// Combined record for all forms of ARM address expressions.
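  /// For example, an operand written as "[r0, r1, lsl #2]" would be held here
  /// roughly as BaseRegNum = R0, OffsetRegNum = R1, ShiftType = lsl and
  /// ShiftImm = 2, with OffsetImm null and Alignment 0 (illustrative sketch,
  /// not an exhaustive description of the fields).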
  struct MemoryOp {
    unsigned BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCConstantExpr *OffsetImm;  // Offset immediate value
    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    unsigned ShiftImm;        // shift for OffsetReg.
    unsigned Alignment;       // 0 = no alignment specified
    // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
  };

  struct PostIdxRegOp {
    unsigned RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  struct ShifterImmOp {
    bool isASR;
    unsigned Imm;
  };

  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftReg;
    unsigned ShiftImm;
  };

  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  union {
    struct CCOp CC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct BitfieldOp Bitfield;
  };

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }
  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }
  bool isFBits16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 16;
  }
  bool isFBits32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 1 && Value <= 32;
  }
  bool isImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 5;
  }
  bool isImm0_1020s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // Explicitly exclude zero; we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_4095() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4096;
  }
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    return Value > 0 && Value < 4096;
  }
  bool isImm0_1() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 2;
  }
  bool isImm0_3() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4;
  }
  bool isImm0_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm0_63() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 64;
  }
  bool isImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 8;
  }
  bool isImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 16;
  }
  bool isImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 32;
  }
  bool isShrImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 8;
  }
  bool isShrImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 16;
  }
  bool isShrImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isShrImm64() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 64;
  }
  bool isImm1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 8;
  }
  bool isImm1_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 16;
  }
  bool isImm1_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant that doesn't fit the
    // shifter-operand immediate (so_imm) encoding, we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
    else return (isARMSOImm() || isARMSOImmNeg());
  }
  bool isARMSOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isARMSOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }
  bool isARMSOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }
  bool isT2SOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(~Value) != -1;
  }
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMem() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isMemPCRelImm12() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
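    // As elsewhere in this file, INT32_MIN is the sentinel for a "#-0" offset.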
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // The #-0 offset is encoded as INT32_MIN, and we have to check
    // for this too.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  bool isMemUImm12Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListDPairSpaced() const {
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }

  bool isNEONi8splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  bool isNEONi32splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }

  bool isNEONi32vmov() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }

  bool isNEONi64splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    uint64_t Value = CE->getValue();
    // i64 value with each byte being either 0 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    // Shift of #32 is encoded as 0 where permitted
1504    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1505    Inst.addOperand(MCOperand::CreateImm(
1506      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1507  }
1508
1509  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1510    assert(N == 1 && "Invalid number of operands!");
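    // The shift kind lives in bit 5 (1 = asr, 0 = lsl); the low bits hold
    // the shift amount.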
1511    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1512                                         ShifterImm.Imm));
1513  }
1514
1515  void addRegListOperands(MCInst &Inst, unsigned N) const {
1516    assert(N == 1 && "Invalid number of operands!");
1517    const SmallVectorImpl<unsigned> &RegList = getRegList();
1518    for (SmallVectorImpl<unsigned>::const_iterator
1519           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1520      Inst.addOperand(MCOperand::CreateReg(*I));
1521  }
1522
1523  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1524    addRegListOperands(Inst, N);
1525  }
1526
1527  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1528    addRegListOperands(Inst, N);
1529  }
1530
1531  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1532    assert(N == 1 && "Invalid number of operands!");
1533    // Encoded as val>>3. The printer handles display as 8, 16, 24.
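    // e.g. 'ror #16' is stored here as 2.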
1534    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1535  }
1536
1537  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1538    assert(N == 1 && "Invalid number of operands!");
1539    // Munge the lsb/width into a bitfield mask.
1540    unsigned lsb = Bitfield.LSB;
1541    unsigned width = Bitfield.Width;
1542    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
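    // e.g. lsb=8, width=4 yields 0xfffff0ff (bits [11:8] clear).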
1543    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1544                      (32 - (lsb + width)));
1545    Inst.addOperand(MCOperand::CreateImm(Mask));
1546  }
1547
1548  void addImmOperands(MCInst &Inst, unsigned N) const {
1549    assert(N == 1 && "Invalid number of operands!");
1550    addExpr(Inst, getImm());
1551  }
1552
1553  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1554    assert(N == 1 && "Invalid number of operands!");
1555    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1556    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1557  }
1558
1559  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1560    assert(N == 1 && "Invalid number of operands!");
1561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1562    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1563  }
1564
1565  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1566    assert(N == 1 && "Invalid number of operands!");
1567    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1568    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1569    Inst.addOperand(MCOperand::CreateImm(Val));
1570  }
1571
1572  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1573    assert(N == 1 && "Invalid number of operands!");
1574    // FIXME: We really want to scale the value here, but the LDRD/STRD
1575    // instructions don't encode operands that way yet.
1576    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1577    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1578  }
1579
1580  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1581    assert(N == 1 && "Invalid number of operands!");
1582    // The immediate is scaled by four in the encoding and is stored
1583    // in the MCInst as such. Lop off the low two bits here.
1584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1585    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1586  }
1587
1588  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1589    assert(N == 1 && "Invalid number of operands!");
1590    // The immediate is scaled by four in the encoding and is stored
1591    // in the MCInst as such. Lop off the low two bits here.
1592    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1593    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1594  }
1595
1596  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1597    assert(N == 1 && "Invalid number of operands!");
1598    // The immediate is scaled by four in the encoding and is stored
1599    // in the MCInst as such. Lop off the low two bits here.
1600    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1601    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1602  }
1603
1604  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1605    assert(N == 1 && "Invalid number of operands!");
1606    // The constant encodes as the immediate-1, and we store in the instruction
1607    // the bits as encoded, so subtract off one here.
1608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1609    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1610  }
1611
1612  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1613    assert(N == 1 && "Invalid number of operands!");
1614    // The constant encodes as the immediate-1, and we store in the instruction
1615    // the bits as encoded, so subtract off one here.
1616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1617    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1618  }
1619
1620  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1621    assert(N == 1 && "Invalid number of operands!");
1622    // The constant encodes as the immediate, except for 32, which encodes as
1623    // zero.
1624    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1625    unsigned Imm = CE->getValue();
1626    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1627  }
1628
1629  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1630    assert(N == 1 && "Invalid number of operands!");
1631    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1632    // the instruction as well.
1633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1634    int Val = CE->getValue();
1635    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1636  }
1637
1638  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1639    assert(N == 1 && "Invalid number of operands!");
1640    // The operand is actually a t2_so_imm, but we have its bitwise
1641    // negation in the assembly source, so twiddle it here.
1642    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1643    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1644  }
1645
1646  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1647    assert(N == 1 && "Invalid number of operands!");
1648    // The operand is actually a t2_so_imm, but we have its
1649    // negation in the assembly source, so twiddle it here.
1650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1651    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1652  }
1653
1654  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1655    assert(N == 1 && "Invalid number of operands!");
1656    // The operand is actually an imm0_4095, but we have its
1657    // negation in the assembly source, so twiddle it here.
1658    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1659    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1660  }
1661
1662  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1663    assert(N == 1 && "Invalid number of operands!");
1664    // The operand is actually a so_imm, but we have its bitwise
1665    // negation in the assembly source, so twiddle it here.
1666    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1667    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1668  }
1669
1670  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1671    assert(N == 1 && "Invalid number of operands!");
1672    // The operand is actually a so_imm, but we have its
1673    // negation in the assembly source, so twiddle it here.
1674    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1675    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1676  }
1677
1678  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1679    assert(N == 1 && "Invalid number of operands!");
1680    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1681  }
1682
1683  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1684    assert(N == 1 && "Invalid number of operands!");
1685    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1686  }
1687
1688  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1689    assert(N == 1 && "Invalid number of operands!");
1690    int32_t Imm = Memory.OffsetImm->getValue();
1691    // FIXME: Handle #-0
1692    if (Imm == INT32_MIN) Imm = 0;
1693    Inst.addOperand(MCOperand::CreateImm(Imm));
1694  }
1695
1696  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1697    assert(N == 1 && "Invalid number of operands!");
1698    assert(isImm() && "Not an immediate!");
1699
1700    // If we have an immediate that's not a constant, treat it as a label
1701    // reference needing a fixup.
1702    if (!isa<MCConstantExpr>(getImm())) {
1703      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1704      return;
1705    }
1706
1707    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1708    int Val = CE->getValue();
1709    Inst.addOperand(MCOperand::CreateImm(Val));
1710  }
1711
1712  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1713    assert(N == 2 && "Invalid number of operands!");
1714    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1715    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1716  }
1717
1718  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1719    assert(N == 3 && "Invalid number of operands!");
1720    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1721    if (!Memory.OffsetRegNum) {
1722      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1723      // Special case for #-0
1724      if (Val == INT32_MIN) Val = 0;
1725      if (Val < 0) Val = -Val;
1726      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1727    } else {
1728      // For register offset, we encode the shift type and negation flag
1729      // here.
1730      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1731                              Memory.ShiftImm, Memory.ShiftType);
1732    }
1733    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1734    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1735    Inst.addOperand(MCOperand::CreateImm(Val));
1736  }
1737
1738  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1739    assert(N == 2 && "Invalid number of operands!");
1740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1741    assert(CE && "non-constant AM2OffsetImm operand!");
1742    int32_t Val = CE->getValue();
1743    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1744    // Special case for #-0
1745    if (Val == INT32_MIN) Val = 0;
1746    if (Val < 0) Val = -Val;
1747    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1748    Inst.addOperand(MCOperand::CreateReg(0));
1749    Inst.addOperand(MCOperand::CreateImm(Val));
1750  }
1751
1752  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1753    assert(N == 3 && "Invalid number of operands!");
1754    // If we have an immediate that's not a constant, treat it as a label
1755    // reference needing a fixup. If it is a constant, it's something else
1756    // and we reject it.
1757    if (isImm()) {
1758      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1759      Inst.addOperand(MCOperand::CreateReg(0));
1760      Inst.addOperand(MCOperand::CreateImm(0));
1761      return;
1762    }
1763
1764    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1765    if (!Memory.OffsetRegNum) {
1766      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1767      // Special case for #-0
1768      if (Val == INT32_MIN) Val = 0;
1769      if (Val < 0) Val = -Val;
1770      Val = ARM_AM::getAM3Opc(AddSub, Val);
1771    } else {
1772      // For register offset, we encode the shift type and negation flag
1773      // here.
1774      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1775    }
1776    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1777    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1778    Inst.addOperand(MCOperand::CreateImm(Val));
1779  }
1780
1781  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1782    assert(N == 2 && "Invalid number of operands!");
1783    if (Kind == k_PostIndexRegister) {
1784      int32_t Val =
1785        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1786      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1787      Inst.addOperand(MCOperand::CreateImm(Val));
1788      return;
1789    }
1790
1791    // Constant offset.
1792    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1793    int32_t Val = CE->getValue();
1794    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1795    // Special case for #-0
1796    if (Val == INT32_MIN) Val = 0;
1797    if (Val < 0) Val = -Val;
1798    Val = ARM_AM::getAM3Opc(AddSub, Val);
1799    Inst.addOperand(MCOperand::CreateReg(0));
1800    Inst.addOperand(MCOperand::CreateImm(Val));
1801  }
1802
1803  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1804    assert(N == 2 && "Invalid number of operands!");
1805    // If we have an immediate that's not a constant, treat it as a label
1806    // reference needing a fixup. If it is a constant, it's something else
1807    // and we reject it.
1808    if (isImm()) {
1809      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1810      Inst.addOperand(MCOperand::CreateImm(0));
1811      return;
1812    }
1813
1814    // The lower two bits are always zero and as such are not encoded.
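    // e.g. a '#8' byte offset is stored as 2. (addrmode5 is used by the
    // VLDR/VSTR/LDC family, where offsets are multiples of 4.)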
1815    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1816    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1817    // Special case for #-0
1818    if (Val == INT32_MIN) Val = 0;
1819    if (Val < 0) Val = -Val;
1820    Val = ARM_AM::getAM5Opc(AddSub, Val);
1821    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1822    Inst.addOperand(MCOperand::CreateImm(Val));
1823  }
1824
1825  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1826    assert(N == 2 && "Invalid number of operands!");
1827    // If we have an immediate that's not a constant, treat it as a label
1828    // reference needing a fixup. If it is a constant, it's something else
1829    // and we reject it.
1830    if (isImm()) {
1831      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1832      Inst.addOperand(MCOperand::CreateImm(0));
1833      return;
1834    }
1835
1836    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1837    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1838    Inst.addOperand(MCOperand::CreateImm(Val));
1839  }
1840
1841  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1842    assert(N == 2 && "Invalid number of operands!");
1843    // The lower two bits are always zero and as such are not encoded.
1844    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1845    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1846    Inst.addOperand(MCOperand::CreateImm(Val));
1847  }
1848
1849  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1850    assert(N == 2 && "Invalid number of operands!");
1851    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1852    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1853    Inst.addOperand(MCOperand::CreateImm(Val));
1854  }
1855
1856  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1857    addMemImm8OffsetOperands(Inst, N);
1858  }
1859
1860  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1861    addMemImm8OffsetOperands(Inst, N);
1862  }
1863
1864  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1865    assert(N == 2 && "Invalid number of operands!");
1866    // If this is an immediate, it's a label reference.
1867    if (isImm()) {
1868      addExpr(Inst, getImm());
1869      Inst.addOperand(MCOperand::CreateImm(0));
1870      return;
1871    }
1872
1873    // Otherwise, it's a normal memory reg+offset.
1874    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1875    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1876    Inst.addOperand(MCOperand::CreateImm(Val));
1877  }
1878
1879  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1880    assert(N == 2 && "Invalid number of operands!");
1881    // If this is an immediate, it's a label reference.
1882    if (isImm()) {
1883      addExpr(Inst, getImm());
1884      Inst.addOperand(MCOperand::CreateImm(0));
1885      return;
1886    }
1887
1888    // Otherwise, it's a normal memory reg+offset.
1889    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1890    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1891    Inst.addOperand(MCOperand::CreateImm(Val));
1892  }
1893
1894  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1895    assert(N == 2 && "Invalid number of operands!");
1896    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1897    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1898  }
1899
1900  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1901    assert(N == 2 && "Invalid number of operands!");
1902    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1903    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1904  }
1905
1906  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1907    assert(N == 3 && "Invalid number of operands!");
1908    unsigned Val =
1909      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1910                        Memory.ShiftImm, Memory.ShiftType);
1911    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1912    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1913    Inst.addOperand(MCOperand::CreateImm(Val));
1914  }
1915
1916  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1917    assert(N == 3 && "Invalid number of operands!");
1918    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1919    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1920    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1921  }
1922
1923  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1924    assert(N == 2 && "Invalid number of operands!");
1925    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1926    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1927  }
1928
1929  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1930    assert(N == 2 && "Invalid number of operands!");
1931    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1932    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1933    Inst.addOperand(MCOperand::CreateImm(Val));
1934  }
1935
1936  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1937    assert(N == 2 && "Invalid number of operands!");
1938    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1939    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1940    Inst.addOperand(MCOperand::CreateImm(Val));
1941  }
1942
1943  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1944    assert(N == 2 && "Invalid number of operands!");
1945    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1946    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1947    Inst.addOperand(MCOperand::CreateImm(Val));
1948  }
1949
1950  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1951    assert(N == 2 && "Invalid number of operands!");
1952    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1953    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1954    Inst.addOperand(MCOperand::CreateImm(Val));
1955  }
1956
1957  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1958    assert(N == 1 && "Invalid number of operands!");
1959    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1960    assert(CE && "non-constant post-idx-imm8 operand!");
1961    int Imm = CE->getValue();
1962    bool isAdd = Imm >= 0;
1963    if (Imm == INT32_MIN) Imm = 0;
1964    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1965    Inst.addOperand(MCOperand::CreateImm(Imm));
1966  }
1967
1968  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1969    assert(N == 1 && "Invalid number of operands!");
1970    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1971    assert(CE && "non-constant post-idx-imm8s4 operand!");
1972    int Imm = CE->getValue();
1973    bool isAdd = Imm >= 0;
1974    if (Imm == INT32_MIN) Imm = 0;
1975    // Immediate is scaled by 4.
1976    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1977    Inst.addOperand(MCOperand::CreateImm(Imm));
1978  }
1979
1980  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1981    assert(N == 2 && "Invalid number of operands!");
1982    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1983    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1984  }
1985
1986  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1987    assert(N == 2 && "Invalid number of operands!");
1988    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1989    // The sign, shift type, and shift amount are encoded in a single operand
1990    // using the AM2 encoding helpers.
1991    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1992    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1993                                     PostIdxReg.ShiftTy);
1994    Inst.addOperand(MCOperand::CreateImm(Imm));
1995  }
1996
1997  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1998    assert(N == 1 && "Invalid number of operands!");
1999    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
2000  }
2001
2002  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2003    assert(N == 1 && "Invalid number of operands!");
2004    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2005  }
2006
2007  void addVecListOperands(MCInst &Inst, unsigned N) const {
2008    assert(N == 1 && "Invalid number of operands!");
2009    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2010  }
2011
2012  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2013    assert(N == 2 && "Invalid number of operands!");
2014    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
2015    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
2016  }
2017
2018  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2019    assert(N == 1 && "Invalid number of operands!");
2020    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2021  }
2022
2023  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2024    assert(N == 1 && "Invalid number of operands!");
2025    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2026  }
2027
2028  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2029    assert(N == 1 && "Invalid number of operands!");
2030    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2031  }
2032
2033  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2034    assert(N == 1 && "Invalid number of operands!");
2035    // The immediate encodes the type of constant as well as the value.
2036    // Mask in that this is an i8 splat.
2037    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2038    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2039  }
2040
2041  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2042    assert(N == 1 && "Invalid number of operands!");
2043    // The immediate encodes the type of constant as well as the value.
2044    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2045    unsigned Value = CE->getValue();
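    // A value that fits in the low byte keeps its position and is tagged
    // with 0x800; a value in the high byte is shifted down and tagged with
    // 0xa00, recording which byte of the halfword holds the data.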
2046    if (Value >= 256)
2047      Value = (Value >> 8) | 0xa00;
2048    else
2049      Value |= 0x800;
2050    Inst.addOperand(MCOperand::CreateImm(Value));
2051  }
2052
2053  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2054    assert(N == 1 && "Invalid number of operands!");
2055    // The immediate encodes the type of constant as well as the value.
2056    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2057    unsigned Value = CE->getValue();
2058    if (Value >= 256 && Value <= 0xff00)
2059      Value = (Value >> 8) | 0x200;
2060    else if (Value > 0xffff && Value <= 0xff0000)
2061      Value = (Value >> 16) | 0x400;
2062    else if (Value > 0xffffff)
2063      Value = (Value >> 24) | 0x600;
2064    Inst.addOperand(MCOperand::CreateImm(Value));
2065  }
2066
2067  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2068    assert(N == 1 && "Invalid number of operands!");
2069    // The immediate encodes the type of constant as well as the value.
2070    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2071    unsigned Value = CE->getValue();
2072    if (Value >= 256 && Value <= 0xffff)
2073      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2074    else if (Value > 0xffff && Value <= 0xffffff)
2075      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2076    else if (Value > 0xffffff)
2077      Value = (Value >> 24) | 0x600;
2078    Inst.addOperand(MCOperand::CreateImm(Value));
2079  }
2080
2081  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2082    assert(N == 1 && "Invalid number of operands!");
2083    // The immediate encodes the type of constant as well as the value.
2084    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2085    unsigned Value = ~CE->getValue();
2086    if (Value >= 256 && Value <= 0xffff)
2087      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2088    else if (Value > 0xffff && Value <= 0xffffff)
2089      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2090    else if (Value > 0xffffff)
2091      Value = (Value >> 24) | 0x600;
2092    Inst.addOperand(MCOperand::CreateImm(Value));
2093  }
2094
2095  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2096    assert(N == 1 && "Invalid number of operands!");
2097    // The immediate encodes the type of constant as well as the value.
2098    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2099    uint64_t Value = CE->getValue();
2100    unsigned Imm = 0;
2101    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2102      Imm |= (Value & 1) << i;
2103    }
2104    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2105  }
2106
2107  virtual void print(raw_ostream &OS) const;
2108
2109  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2110    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2111    Op->ITMask.Mask = Mask;
2112    Op->StartLoc = S;
2113    Op->EndLoc = S;
2114    return Op;
2115  }
2116
2117  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2118    ARMOperand *Op = new ARMOperand(k_CondCode);
2119    Op->CC.Val = CC;
2120    Op->StartLoc = S;
2121    Op->EndLoc = S;
2122    return Op;
2123  }
2124
2125  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2126    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2127    Op->Cop.Val = CopVal;
2128    Op->StartLoc = S;
2129    Op->EndLoc = S;
2130    return Op;
2131  }
2132
2133  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2134    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2135    Op->Cop.Val = CopVal;
2136    Op->StartLoc = S;
2137    Op->EndLoc = S;
2138    return Op;
2139  }
2140
2141  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2142    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2143    Op->Cop.Val = Val;
2144    Op->StartLoc = S;
2145    Op->EndLoc = E;
2146    return Op;
2147  }
2148
2149  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2150    ARMOperand *Op = new ARMOperand(k_CCOut);
2151    Op->Reg.RegNum = RegNum;
2152    Op->StartLoc = S;
2153    Op->EndLoc = S;
2154    return Op;
2155  }
2156
2157  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2158    ARMOperand *Op = new ARMOperand(k_Token);
2159    Op->Tok.Data = Str.data();
2160    Op->Tok.Length = Str.size();
2161    Op->StartLoc = S;
2162    Op->EndLoc = S;
2163    return Op;
2164  }
2165
2166  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2167    ARMOperand *Op = new ARMOperand(k_Register);
2168    Op->Reg.RegNum = RegNum;
2169    Op->StartLoc = S;
2170    Op->EndLoc = E;
2171    return Op;
2172  }
2173
2174  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2175                                           unsigned SrcReg,
2176                                           unsigned ShiftReg,
2177                                           unsigned ShiftImm,
2178                                           SMLoc S, SMLoc E) {
2179    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2180    Op->RegShiftedReg.ShiftTy = ShTy;
2181    Op->RegShiftedReg.SrcReg = SrcReg;
2182    Op->RegShiftedReg.ShiftReg = ShiftReg;
2183    Op->RegShiftedReg.ShiftImm = ShiftImm;
2184    Op->StartLoc = S;
2185    Op->EndLoc = E;
2186    return Op;
2187  }
2188
2189  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2190                                            unsigned SrcReg,
2191                                            unsigned ShiftImm,
2192                                            SMLoc S, SMLoc E) {
2193    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2194    Op->RegShiftedImm.ShiftTy = ShTy;
2195    Op->RegShiftedImm.SrcReg = SrcReg;
2196    Op->RegShiftedImm.ShiftImm = ShiftImm;
2197    Op->StartLoc = S;
2198    Op->EndLoc = E;
2199    return Op;
2200  }
2201
2202  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2203                                   SMLoc S, SMLoc E) {
2204    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2205    Op->ShifterImm.isASR = isASR;
2206    Op->ShifterImm.Imm = Imm;
2207    Op->StartLoc = S;
2208    Op->EndLoc = E;
2209    return Op;
2210  }
2211
2212  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2213    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2214    Op->RotImm.Imm = Imm;
2215    Op->StartLoc = S;
2216    Op->EndLoc = E;
2217    return Op;
2218  }
2219
2220  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2221                                    SMLoc S, SMLoc E) {
2222    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2223    Op->Bitfield.LSB = LSB;
2224    Op->Bitfield.Width = Width;
2225    Op->StartLoc = S;
2226    Op->EndLoc = E;
2227    return Op;
2228  }
2229
2230  static ARMOperand *
2231  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2232                SMLoc StartLoc, SMLoc EndLoc) {
2233    KindTy Kind = k_RegisterList;
2234
2235    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2236      Kind = k_DPRRegisterList;
2237    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2238             contains(Regs.front().first))
2239      Kind = k_SPRRegisterList;
2240
2241    ARMOperand *Op = new ARMOperand(Kind);
2242    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2243           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2244      Op->Registers.push_back(I->first);
2245    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2246    Op->StartLoc = StartLoc;
2247    Op->EndLoc = EndLoc;
2248    return Op;
2249  }
2250
2251  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2252                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2253    ARMOperand *Op = new ARMOperand(k_VectorList);
2254    Op->VectorList.RegNum = RegNum;
2255    Op->VectorList.Count = Count;
2256    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2257    Op->StartLoc = S;
2258    Op->EndLoc = E;
2259    return Op;
2260  }
2261
2262  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2263                                              bool isDoubleSpaced,
2264                                              SMLoc S, SMLoc E) {
2265    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2266    Op->VectorList.RegNum = RegNum;
2267    Op->VectorList.Count = Count;
2268    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2269    Op->StartLoc = S;
2270    Op->EndLoc = E;
2271    return Op;
2272  }
2273
2274  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2275                                             unsigned Index,
2276                                             bool isDoubleSpaced,
2277                                             SMLoc S, SMLoc E) {
2278    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2279    Op->VectorList.RegNum = RegNum;
2280    Op->VectorList.Count = Count;
2281    Op->VectorList.LaneIndex = Index;
2282    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2283    Op->StartLoc = S;
2284    Op->EndLoc = E;
2285    return Op;
2286  }
2287
2288  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2289                                       MCContext &Ctx) {
2290    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2291    Op->VectorIndex.Val = Idx;
2292    Op->StartLoc = S;
2293    Op->EndLoc = E;
2294    return Op;
2295  }
2296
2297  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2298    ARMOperand *Op = new ARMOperand(k_Immediate);
2299    Op->Imm.Val = Val;
2300    Op->StartLoc = S;
2301    Op->EndLoc = E;
2302    return Op;
2303  }
2304
2305  static ARMOperand *CreateMem(unsigned BaseRegNum,
2306                               const MCConstantExpr *OffsetImm,
2307                               unsigned OffsetRegNum,
2308                               ARM_AM::ShiftOpc ShiftType,
2309                               unsigned ShiftImm,
2310                               unsigned Alignment,
2311                               bool isNegative,
2312                               SMLoc S, SMLoc E) {
2313    ARMOperand *Op = new ARMOperand(k_Memory);
2314    Op->Memory.BaseRegNum = BaseRegNum;
2315    Op->Memory.OffsetImm = OffsetImm;
2316    Op->Memory.OffsetRegNum = OffsetRegNum;
2317    Op->Memory.ShiftType = ShiftType;
2318    Op->Memory.ShiftImm = ShiftImm;
2319    Op->Memory.Alignment = Alignment;
2320    Op->Memory.isNegative = isNegative;
2321    Op->StartLoc = S;
2322    Op->EndLoc = E;
2323    return Op;
2324  }
2325
2326  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2327                                      ARM_AM::ShiftOpc ShiftTy,
2328                                      unsigned ShiftImm,
2329                                      SMLoc S, SMLoc E) {
2330    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2331    Op->PostIdxReg.RegNum = RegNum;
2332    Op->PostIdxReg.isAdd = isAdd;
2333    Op->PostIdxReg.ShiftTy = ShiftTy;
2334    Op->PostIdxReg.ShiftImm = ShiftImm;
2335    Op->StartLoc = S;
2336    Op->EndLoc = E;
2337    return Op;
2338  }
2339
2340  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2341    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2342    Op->MBOpt.Val = Opt;
2343    Op->StartLoc = S;
2344    Op->EndLoc = S;
2345    return Op;
2346  }
2347
2348  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2349    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2350    Op->IFlags.Val = IFlags;
2351    Op->StartLoc = S;
2352    Op->EndLoc = S;
2353    return Op;
2354  }
2355
2356  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2357    ARMOperand *Op = new ARMOperand(k_MSRMask);
2358    Op->MMask.Val = MMask;
2359    Op->StartLoc = S;
2360    Op->EndLoc = S;
2361    return Op;
2362  }
2363};
2364
2365} // end anonymous namespace.
2366
2367void ARMOperand::print(raw_ostream &OS) const {
2368  switch (Kind) {
2369  case k_CondCode:
2370    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2371    break;
2372  case k_CCOut:
2373    OS << "<ccout " << getReg() << ">";
2374    break;
2375  case k_ITCondMask: {
2376    static const char *const MaskStr[] = {
2377      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2378      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2379    };
2380    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2381    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2382    break;
2383  }
2384  case k_CoprocNum:
2385    OS << "<coprocessor number: " << getCoproc() << ">";
2386    break;
2387  case k_CoprocReg:
2388    OS << "<coprocessor register: " << getCoproc() << ">";
2389    break;
2390  case k_CoprocOption:
2391    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2392    break;
2393  case k_MSRMask:
2394    OS << "<mask: " << getMSRMask() << ">";
2395    break;
2396  case k_Immediate:
2397    getImm()->print(OS);
2398    break;
2399  case k_MemBarrierOpt:
2400    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2401    break;
2402  case k_Memory:
2403    OS << "<memory "
2404       << " base:" << Memory.BaseRegNum;
2405    OS << ">";
2406    break;
2407  case k_PostIndexRegister:
2408    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2409       << PostIdxReg.RegNum;
2410    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2411      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2412         << PostIdxReg.ShiftImm;
2413    OS << ">";
2414    break;
2415  case k_ProcIFlags: {
2416    OS << "<ARM_PROC::";
2417    unsigned IFlags = getProcIFlags();
2418    for (int i=2; i >= 0; --i)
2419      if (IFlags & (1 << i))
2420        OS << ARM_PROC::IFlagsToString(1 << i);
2421    OS << ">";
2422    break;
2423  }
2424  case k_Register:
2425    OS << "<register " << getReg() << ">";
2426    break;
2427  case k_ShifterImmediate:
2428    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2429       << " #" << ShifterImm.Imm << ">";
2430    break;
2431  case k_ShiftedRegister:
2432    OS << "<so_reg_reg "
2433       << RegShiftedReg.SrcReg << " "
2434       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2435       << " " << RegShiftedReg.ShiftReg << ">";
2436    break;
2437  case k_ShiftedImmediate:
2438    OS << "<so_reg_imm "
2439       << RegShiftedImm.SrcReg << " "
2440       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2441       << " #" << RegShiftedImm.ShiftImm << ">";
2442    break;
2443  case k_RotateImmediate:
2444    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2445    break;
2446  case k_BitfieldDescriptor:
2447    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2448       << ", width: " << Bitfield.Width << ">";
2449    break;
2450  case k_RegisterList:
2451  case k_DPRRegisterList:
2452  case k_SPRRegisterList: {
2453    OS << "<register_list ";
2454
2455    const SmallVectorImpl<unsigned> &RegList = getRegList();
2456    for (SmallVectorImpl<unsigned>::const_iterator
2457           I = RegList.begin(), E = RegList.end(); I != E; ) {
2458      OS << *I;
2459      if (++I < E) OS << ", ";
2460    }
2461
2462    OS << ">";
2463    break;
2464  }
2465  case k_VectorList:
2466    OS << "<vector_list " << VectorList.Count << " * "
2467       << VectorList.RegNum << ">";
2468    break;
2469  case k_VectorListAllLanes:
2470    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2471       << VectorList.RegNum << ">";
2472    break;
2473  case k_VectorListIndexed:
2474    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2475       << VectorList.Count << " * " << VectorList.RegNum << ">";
2476    break;
2477  case k_Token:
2478    OS << "'" << getToken() << "'";
2479    break;
2480  case k_VectorIndex:
2481    OS << "<vectorindex " << getVectorIndex() << ">";
2482    break;
2483  }
2484}
2485
2486/// @name Auto-generated Match Functions
2487/// {
2488
2489static unsigned MatchRegisterName(StringRef Name);
2490
2491/// }
2492
2493bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2494                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2495  StartLoc = Parser.getTok().getLoc();
2496  EndLoc = Parser.getTok().getEndLoc();
2497  RegNo = tryParseRegister();
2498
2499  return (RegNo == (unsigned)-1);
2500}
2501
2502/// Try to parse a register name.  The token must be an Identifier when called,
2503/// and if it is a register name the token is eaten and the register number is
2504/// returned.  Otherwise return -1.
2505///
2506int ARMAsmParser::tryParseRegister() {
2507  const AsmToken &Tok = Parser.getTok();
2508  if (Tok.isNot(AsmToken::Identifier)) return -1;
2509
2510  std::string lowerCase = Tok.getString().lower();
2511  unsigned RegNum = MatchRegisterName(lowerCase);
2512  if (!RegNum) {
2513    RegNum = StringSwitch<unsigned>(lowerCase)
2514      .Case("r13", ARM::SP)
2515      .Case("r14", ARM::LR)
2516      .Case("r15", ARM::PC)
2517      .Case("ip", ARM::R12)
2518      // Additional register name aliases for 'gas' compatibility.
2519      .Case("a1", ARM::R0)
2520      .Case("a2", ARM::R1)
2521      .Case("a3", ARM::R2)
2522      .Case("a4", ARM::R3)
2523      .Case("v1", ARM::R4)
2524      .Case("v2", ARM::R5)
2525      .Case("v3", ARM::R6)
2526      .Case("v4", ARM::R7)
2527      .Case("v5", ARM::R8)
2528      .Case("v6", ARM::R9)
2529      .Case("v7", ARM::R10)
2530      .Case("v8", ARM::R11)
2531      .Case("sb", ARM::R9)
2532      .Case("sl", ARM::R10)
2533      .Case("fp", ARM::R11)
2534      .Default(0);
2535  }
2536  if (!RegNum) {
2537    // Check for aliases registered via .req. Canonicalize to lower case.
2538    // That's more consistent since register names are case insensitive, and
2539    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2540    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2541    // If no match, return failure.
2542    if (Entry == RegisterReqs.end())
2543      return -1;
2544    Parser.Lex(); // Eat identifier token.
2545    return Entry->getValue();
2546  }
2547
2548  Parser.Lex(); // Eat identifier token.
2549
2550  return RegNum;
2551}
2552
2553// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2554// If a recoverable error occurs, return 1. If an irrecoverable error
2555// occurs, return -1. An irrecoverable error is one where tokens have been
2556// consumed in the process of trying to parse the shifter (i.e., when it is
2557// indeed a shifter operand, but malformed).
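// e.g. for 'mov r0, r1, lsl #2' this is called at 'lsl' and folds the shift
// together with the previously parsed 'r1' into one shifted-register operand.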
2558int ARMAsmParser::tryParseShiftRegister(
2559                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2560  SMLoc S = Parser.getTok().getLoc();
2561  const AsmToken &Tok = Parser.getTok();
2562  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2563
2564  std::string lowerCase = Tok.getString().lower();
2565  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2566      .Case("asl", ARM_AM::lsl)
2567      .Case("lsl", ARM_AM::lsl)
2568      .Case("lsr", ARM_AM::lsr)
2569      .Case("asr", ARM_AM::asr)
2570      .Case("ror", ARM_AM::ror)
2571      .Case("rrx", ARM_AM::rrx)
2572      .Default(ARM_AM::no_shift);
2573
2574  if (ShiftTy == ARM_AM::no_shift)
2575    return 1;
2576
2577  Parser.Lex(); // Eat the operator.
2578
2579  // The source register for the shift has already been added to the
2580  // operand list, so we need to pop it off and combine it into the shifted
2581  // register operand instead.
2582  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2583  if (!PrevOp->isReg())
2584    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2585  int SrcReg = PrevOp->getReg();
2586
2587  SMLoc EndLoc;
2588  int64_t Imm = 0;
2589  int ShiftReg = 0;
2590  if (ShiftTy == ARM_AM::rrx) {
2591    // RRX doesn't have an explicit shift amount. The encoder expects
2592    // the shift register to be the same as the source register. Seems odd,
2593    // but OK.
2594    ShiftReg = SrcReg;
2595  } else {
2596    // Figure out if this is shifted by a constant or a register (for non-RRX).
2597    if (Parser.getTok().is(AsmToken::Hash) ||
2598        Parser.getTok().is(AsmToken::Dollar)) {
2599      Parser.Lex(); // Eat hash.
2600      SMLoc ImmLoc = Parser.getTok().getLoc();
2601      const MCExpr *ShiftExpr = 0;
2602      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
2603        Error(ImmLoc, "invalid immediate shift value");
2604        return -1;
2605      }
2606      // The expression must be evaluatable as an immediate.
2607      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2608      if (!CE) {
2609        Error(ImmLoc, "invalid immediate shift value");
2610        return -1;
2611      }
2612      // Range check the immediate.
2613      // lsl, ror: 0 <= imm <= 31
2614      // lsr, asr: 0 <= imm <= 32
2615      Imm = CE->getValue();
2616      if (Imm < 0 ||
2617          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2618          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2619        Error(ImmLoc, "immediate shift value out of range");
2620        return -1;
2621      }
2622      // Shift by zero is a nop. Always send it through as lsl.
2623      // ('as' compatibility)
2624      if (Imm == 0)
2625        ShiftTy = ARM_AM::lsl;
2626    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2627      SMLoc L = Parser.getTok().getLoc();
2628      EndLoc = Parser.getTok().getEndLoc();
2629      ShiftReg = tryParseRegister();
2630      if (ShiftReg == -1) {
2631        Error (L, "expected immediate or register in shift operand");
2632        return -1;
2633      }
2634    } else {
2635      Error (Parser.getTok().getLoc(),
2636                    "expected immediate or register in shift operand");
2637      return -1;
2638    }
2639  }
2640
2641  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2642    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2643                                                         ShiftReg, Imm,
2644                                                         S, EndLoc));
2645  else
2646    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2647                                                          S, EndLoc));
2648
2649  return 0;
2650}
2651
2652
2653/// Try to parse a register name.  The token must be an Identifier when called.
2654/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2655/// if there is a "writeback". Returns 'true' if it's not a register.
2656///
2657/// TODO this is likely to change to allow different register types and or to
2658/// parse for a specific register type.
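/// e.g. this handles the 'r0!' in 'ldmia r0!, {r1-r3}' (writeback) and the
/// 'd3[1]' in 'vmov.32 d3[1], r2' (vector lane index).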
2659bool ARMAsmParser::
2660tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2661  const AsmToken &RegTok = Parser.getTok();
2662  int RegNo = tryParseRegister();
2663  if (RegNo == -1)
2664    return true;
2665
2666  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
2667                                           RegTok.getEndLoc()));
2668
2669  const AsmToken &ExclaimTok = Parser.getTok();
2670  if (ExclaimTok.is(AsmToken::Exclaim)) {
2671    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2672                                               ExclaimTok.getLoc()));
2673    Parser.Lex(); // Eat exclaim token
2674    return false;
2675  }
2676
2677  // Also check for an index operand. This is only legal for vector registers,
2678  // but that'll get caught OK in operand matching, so we don't need to
2679  // explicitly filter everything else out here.
2680  if (Parser.getTok().is(AsmToken::LBrac)) {
2681    SMLoc SIdx = Parser.getTok().getLoc();
2682    Parser.Lex(); // Eat left bracket token.
2683
2684    const MCExpr *ImmVal;
2685    if (getParser().parseExpression(ImmVal))
2686      return true;
2687    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2688    if (!MCE)
2689      return TokError("immediate value expected for vector index");
2690
2691    if (Parser.getTok().isNot(AsmToken::RBrac))
2692      return Error(Parser.getTok().getLoc(), "']' expected");
2693
2694    SMLoc E = Parser.getTok().getEndLoc();
2695    Parser.Lex(); // Eat right bracket token.
2696
2697    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2698                                                     SIdx, E,
2699                                                     getContext()));
2700  }
2701
2702  return false;
2703}
2704
2705/// MatchCoprocessorOperandName - Try to match a symbolic operand name for a
2706/// coprocessor-related instruction. Examples: "p1", "p7", "c3",
2707/// "c5", ...
2708static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2709  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2710  // but efficient.
2711  switch (Name.size()) {
2712  default: return -1;
2713  case 2:
2714    if (Name[0] != CoprocOp)
2715      return -1;
2716    switch (Name[1]) {
2717    default:  return -1;
2718    case '0': return 0;
2719    case '1': return 1;
2720    case '2': return 2;
2721    case '3': return 3;
2722    case '4': return 4;
2723    case '5': return 5;
2724    case '6': return 6;
2725    case '7': return 7;
2726    case '8': return 8;
2727    case '9': return 9;
2728    }
2729  case 3:
2730    if (Name[0] != CoprocOp || Name[1] != '1')
2731      return -1;
2732    switch (Name[2]) {
2733    default:  return -1;
2734    case '0': return 10;
2735    case '1': return 11;
2736    case '2': return 12;
2737    case '3': return 13;
2738    case '4': return 14;
2739    case '5': return 15;
2740    }
2741  }
2742}
2743
2744/// parseITCondCode - Try to parse a condition code for an IT instruction.
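/// e.g. the 'ne' in 'it ne' or the 'gt' in 'itte gt'.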
2745ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2746parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2747  SMLoc S = Parser.getTok().getLoc();
2748  const AsmToken &Tok = Parser.getTok();
2749  if (!Tok.is(AsmToken::Identifier))
2750    return MatchOperand_NoMatch;
2751  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2752    .Case("eq", ARMCC::EQ)
2753    .Case("ne", ARMCC::NE)
2754    .Case("hs", ARMCC::HS)
2755    .Case("cs", ARMCC::HS)
2756    .Case("lo", ARMCC::LO)
2757    .Case("cc", ARMCC::LO)
2758    .Case("mi", ARMCC::MI)
2759    .Case("pl", ARMCC::PL)
2760    .Case("vs", ARMCC::VS)
2761    .Case("vc", ARMCC::VC)
2762    .Case("hi", ARMCC::HI)
2763    .Case("ls", ARMCC::LS)
2764    .Case("ge", ARMCC::GE)
2765    .Case("lt", ARMCC::LT)
2766    .Case("gt", ARMCC::GT)
2767    .Case("le", ARMCC::LE)
2768    .Case("al", ARMCC::AL)
2769    .Default(~0U);
2770  if (CC == ~0U)
2771    return MatchOperand_NoMatch;
2772  Parser.Lex(); // Eat the token.
2773
2774  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2775
2776  return MatchOperand_Success;
2777}
2778
2779/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2780/// token must be an Identifier when called, and if it is a coprocessor
2781/// number, the token is eaten and the operand is added to the operand list.
2782ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2783parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2784  SMLoc S = Parser.getTok().getLoc();
2785  const AsmToken &Tok = Parser.getTok();
2786  if (Tok.isNot(AsmToken::Identifier))
2787    return MatchOperand_NoMatch;
2788
2789  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2790  if (Num == -1)
2791    return MatchOperand_NoMatch;
2792
2793  Parser.Lex(); // Eat identifier token.
2794  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2795  return MatchOperand_Success;
2796}
2797
2798/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2799/// token must be an Identifier when called, and if it is a coprocessor
2800/// register, the token is eaten and the operand is added to the operand list.
2801ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2802parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2803  SMLoc S = Parser.getTok().getLoc();
2804  const AsmToken &Tok = Parser.getTok();
2805  if (Tok.isNot(AsmToken::Identifier))
2806    return MatchOperand_NoMatch;
2807
2808  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2809  if (Reg == -1)
2810    return MatchOperand_NoMatch;
2811
2812  Parser.Lex(); // Eat identifier token.
2813  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2814  return MatchOperand_Success;
2815}
2816
2817/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2818/// coproc_option : '{' imm0_255 '}'
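/// e.g. the trailing '{4}' in 'ldc p5, c3, [r2], {4}'.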
2819ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2820parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2821  SMLoc S = Parser.getTok().getLoc();
2822
2823  // If this isn't a '{', this isn't a coprocessor immediate operand.
2824  if (Parser.getTok().isNot(AsmToken::LCurly))
2825    return MatchOperand_NoMatch;
2826  Parser.Lex(); // Eat the '{'
2827
2828  const MCExpr *Expr;
2829  SMLoc Loc = Parser.getTok().getLoc();
2830  if (getParser().parseExpression(Expr)) {
2831    Error(Loc, "illegal expression");
2832    return MatchOperand_ParseFail;
2833  }
2834  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2835  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2836    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2837    return MatchOperand_ParseFail;
2838  }
2839  int Val = CE->getValue();
2840
2841  // Check for and consume the closing '}'
2842  if (Parser.getTok().isNot(AsmToken::RCurly))
2843    return MatchOperand_ParseFail;
2844  SMLoc E = Parser.getTok().getEndLoc();
2845  Parser.Lex(); // Eat the '}'
2846
2847  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2848  return MatchOperand_Success;
2849}
2850
2851// For register list parsing, we need to map from raw GPR register numbering
2852// to the enumeration values. The enumeration values aren't sorted by
2853// register number due to our using "sp", "lr" and "pc" as canonical names.
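// e.g. the register after R12 is SP and the register after LR is PC, matching
// the architectural numbering rather than the enum ordering.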
2854static unsigned getNextRegister(unsigned Reg) {
2855  // If this is a GPR, we need to do it manually, otherwise we can rely
2856  // on the sort ordering of the enumeration since the other reg-classes
2857  // are sane.
2858  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2859    return Reg + 1;
2860  switch(Reg) {
2861  default: llvm_unreachable("Invalid GPR number!");
2862  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2863  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2864  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2865  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2866  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2867  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2868  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2869  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2870  }
2871}
2872
2873// Return the low-subreg of a given Q register.
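// Each Qn overlaps the adjacent pair D(2n) and D(2n+1), so the low subreg of
// Qn is D(2n).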
2874static unsigned getDRegFromQReg(unsigned QReg) {
2875  switch (QReg) {
2876  default: llvm_unreachable("expected a Q register!");
2877  case ARM::Q0:  return ARM::D0;
2878  case ARM::Q1:  return ARM::D2;
2879  case ARM::Q2:  return ARM::D4;
2880  case ARM::Q3:  return ARM::D6;
2881  case ARM::Q4:  return ARM::D8;
2882  case ARM::Q5:  return ARM::D10;
2883  case ARM::Q6:  return ARM::D12;
2884  case ARM::Q7:  return ARM::D14;
2885  case ARM::Q8:  return ARM::D16;
2886  case ARM::Q9:  return ARM::D18;
2887  case ARM::Q10: return ARM::D20;
2888  case ARM::Q11: return ARM::D22;
2889  case ARM::Q12: return ARM::D24;
2890  case ARM::Q13: return ARM::D26;
2891  case ARM::Q14: return ARM::D28;
2892  case ARM::Q15: return ARM::D30;
2893  }
2894}
2895
2896/// Parse a register list.
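/// Illustrative examples of the syntax accepted here (not exhaustive):
///     {r0, r4-r7, lr}    @ GPR list containing a range
///     {d0-d3}            @ DPR list
///     {q0, q1}           @ Q regs are expanded, i.e. treated as {d0, d1, d2, d3}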
2897bool ARMAsmParser::
2898parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2899  assert(Parser.getTok().is(AsmToken::LCurly) &&
2900         "Token is not a Left Curly Brace");
2901  SMLoc S = Parser.getTok().getLoc();
2902  Parser.Lex(); // Eat '{' token.
2903  SMLoc RegLoc = Parser.getTok().getLoc();
2904
2905  // Check the first register in the list to see what register class
2906  // this is a list of.
2907  int Reg = tryParseRegister();
2908  if (Reg == -1)
2909    return Error(RegLoc, "register expected");
2910
2911  // The reglist instructions have at most 16 registers, so reserve
2912  // space for that many.
2913  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2914
2915  // Allow Q regs and just interpret them as the two D sub-registers.
2916  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2917    Reg = getDRegFromQReg(Reg);
2918    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2919    ++Reg;
2920  }
2921  const MCRegisterClass *RC;
2922  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2923    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2924  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2925    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2926  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2927    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2928  else
2929    return Error(RegLoc, "invalid register in register list");
2930
2931  // Store the register.
2932  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2933
2934  // This starts immediately after the first register token in the list,
2935  // so we can see either a comma or a minus (range separator) as a legal
2936  // next token.
2937  while (Parser.getTok().is(AsmToken::Comma) ||
2938         Parser.getTok().is(AsmToken::Minus)) {
2939    if (Parser.getTok().is(AsmToken::Minus)) {
2940      Parser.Lex(); // Eat the minus.
2941      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
2942      int EndReg = tryParseRegister();
2943      if (EndReg == -1)
2944        return Error(AfterMinusLoc, "register expected");
2945      // Allow Q regs and just interpret them as the two D sub-registers.
2946      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2947        EndReg = getDRegFromQReg(EndReg) + 1;
2948      // If the register is the same as the start reg, there's nothing
2949      // more to do.
2950      if (Reg == EndReg)
2951        continue;
2952      // The register must be in the same register class as the first.
2953      if (!RC->contains(EndReg))
2954        return Error(AfterMinusLoc, "invalid register in register list");
2955      // Ranges must go from low to high.
2956      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
2957        return Error(AfterMinusLoc, "bad range in register list");
2958
2959      // Add all the registers in the range to the register list.
2960      while (Reg != EndReg) {
2961        Reg = getNextRegister(Reg);
2962        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2963      }
2964      continue;
2965    }
2966    Parser.Lex(); // Eat the comma.
2967    RegLoc = Parser.getTok().getLoc();
2968    int OldReg = Reg;
2969    const AsmToken RegTok = Parser.getTok();
2970    Reg = tryParseRegister();
2971    if (Reg == -1)
2972      return Error(RegLoc, "register expected");
2973    // Allow Q regs and just interpret them as the two D sub-registers.
2974    bool isQReg = false;
2975    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2976      Reg = getDRegFromQReg(Reg);
2977      isQReg = true;
2978    }
2979    // The register must be in the same register class as the first.
2980    if (!RC->contains(Reg))
2981      return Error(RegLoc, "invalid register in register list");
2982    // List must be monotonically increasing.
2983    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
2984      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2985        Warning(RegLoc, "register list not in ascending order");
2986      else
2987        return Error(RegLoc, "register list not in ascending order");
2988    }
2989    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
2990      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2991              ") in register list");
2992      continue;
2993    }
2994    // VFP register lists must also be contiguous.
2995    // It's OK to use the enumeration values directly here, as the VFP
2996    // register classes have their enums sorted properly.
2997    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2998        Reg != OldReg + 1)
2999      return Error(RegLoc, "non-contiguous register range");
3000    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
3001    if (isQReg)
3002      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
3003  }
3004
3005  if (Parser.getTok().isNot(AsmToken::RCurly))
3006    return Error(Parser.getTok().getLoc(), "'}' expected");
3007  SMLoc E = Parser.getTok().getEndLoc();
3008  Parser.Lex(); // Eat '}' token.
3009
3010  // Push the register list operand.
3011  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3012
3013  // The ARM system instruction variants for LDM/STM have a '^' token here.
3014  if (Parser.getTok().is(AsmToken::Caret)) {
3015    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3016    Parser.Lex(); // Eat '^' token.
3017  }
3018
3019  return false;
3020}
3021
3022// Helper function to parse the lane index for vector lists.
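// For illustration (examples assumed), the three forms distinguished here are:
//     d0      -> NoLanes
//     d0[]    -> AllLanes
//     d0[1]   -> IndexedLane with index 1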
3023ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3024parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3025  Index = 0; // Always return a defined index value.
3026  if (Parser.getTok().is(AsmToken::LBrac)) {
3027    Parser.Lex(); // Eat the '['.
3028    if (Parser.getTok().is(AsmToken::RBrac)) {
3029      // "Dn[]" is the 'all lanes' syntax.
3030      LaneKind = AllLanes;
3031      EndLoc = Parser.getTok().getEndLoc();
3032      Parser.Lex(); // Eat the ']'.
3033      return MatchOperand_Success;
3034    }
3035
3036    // There's an optional '#' token here. Normally there wouldn't be, but
3037    // inline assembly puts one in, and it's friendly to accept that.
3038    if (Parser.getTok().is(AsmToken::Hash))
3039      Parser.Lex(); // Eat the '#'
3040
3041    const MCExpr *LaneIndex;
3042    SMLoc Loc = Parser.getTok().getLoc();
3043    if (getParser().parseExpression(LaneIndex)) {
3044      Error(Loc, "illegal expression");
3045      return MatchOperand_ParseFail;
3046    }
3047    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3048    if (!CE) {
3049      Error(Loc, "lane index must be empty or an integer");
3050      return MatchOperand_ParseFail;
3051    }
3052    if (Parser.getTok().isNot(AsmToken::RBrac)) {
3053      Error(Parser.getTok().getLoc(), "']' expected");
3054      return MatchOperand_ParseFail;
3055    }
3056    EndLoc = Parser.getTok().getEndLoc();
3057    Parser.Lex(); // Eat the ']'.
3058    int64_t Val = CE->getValue();
3059
3060    // FIXME: Make this range check context sensitive for .8, .16, .32.
3061    if (Val < 0 || Val > 7) {
3062      Error(Parser.getTok().getLoc(), "lane index out of range");
3063      return MatchOperand_ParseFail;
3064    }
3065    Index = Val;
3066    LaneKind = IndexedLane;
3067    return MatchOperand_Success;
3068  }
3069  LaneKind = NoLanes;
3070  return MatchOperand_Success;
3071}
3072
3073// Parse a vector register list.
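// Illustrative examples of the NEON operand lists handled here (assumed):
//     {d0, d1, d2, d3}    @ single-spaced list
//     {d0-d3}             @ the same list written as a range
//     {d0, d2}            @ double-spaced list
//     {d0[], d1[]}        @ all-lanes list
//     {d0[1], d1[1]}      @ indexed-lane list
//     q0                  @ bare register, treated as {d0, d1} (gas extension)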
3074ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3075parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3076  VectorLaneTy LaneKind;
3077  unsigned LaneIndex;
3078  SMLoc S = Parser.getTok().getLoc();
3079  // As an extension (to match gas), support a plain D register or Q register
3080  // (without enclosing curly braces) as a single or double entry list,
3081  // respectively.
3082  if (Parser.getTok().is(AsmToken::Identifier)) {
3083    SMLoc E = Parser.getTok().getEndLoc();
3084    int Reg = tryParseRegister();
3085    if (Reg == -1)
3086      return MatchOperand_NoMatch;
3087    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3088      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3089      if (Res != MatchOperand_Success)
3090        return Res;
3091      switch (LaneKind) {
3092      case NoLanes:
3093        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3094        break;
3095      case AllLanes:
3096        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3097                                                                S, E));
3098        break;
3099      case IndexedLane:
3100        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3101                                                               LaneIndex,
3102                                                               false, S, E));
3103        break;
3104      }
3105      return MatchOperand_Success;
3106    }
3107    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3108      Reg = getDRegFromQReg(Reg);
3109      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3110      if (Res != MatchOperand_Success)
3111        return Res;
3112      switch (LaneKind) {
3113      case NoLanes:
3114        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3115                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3116        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3117        break;
3118      case AllLanes:
3119        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3120                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3121        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3122                                                                S, E));
3123        break;
3124      case IndexedLane:
3125        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3126                                                               LaneIndex,
3127                                                               false, S, E));
3128        break;
3129      }
3130      return MatchOperand_Success;
3131    }
3132    Error(S, "vector register expected");
3133    return MatchOperand_ParseFail;
3134  }
3135
3136  if (Parser.getTok().isNot(AsmToken::LCurly))
3137    return MatchOperand_NoMatch;
3138
3139  Parser.Lex(); // Eat '{' token.
3140  SMLoc RegLoc = Parser.getTok().getLoc();
3141
3142  int Reg = tryParseRegister();
3143  if (Reg == -1) {
3144    Error(RegLoc, "register expected");
3145    return MatchOperand_ParseFail;
3146  }
3147  unsigned Count = 1;
3148  int Spacing = 0;
3149  unsigned FirstReg = Reg;
3150  // The list is of D registers, but we also allow Q regs and just interpret
3151  // them as the two D sub-registers.
3152  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3153    FirstReg = Reg = getDRegFromQReg(Reg);
3154    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3155                 // it's ambiguous with four-register single spaced.
3156    ++Reg;
3157    ++Count;
3158  }
3159
3160  SMLoc E;
3161  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3162    return MatchOperand_ParseFail;
3163
3164  while (Parser.getTok().is(AsmToken::Comma) ||
3165         Parser.getTok().is(AsmToken::Minus)) {
3166    if (Parser.getTok().is(AsmToken::Minus)) {
3167      if (!Spacing)
3168        Spacing = 1; // Register range implies a single spaced list.
3169      else if (Spacing == 2) {
3170        Error(Parser.getTok().getLoc(),
3171              "sequential registers in double spaced list");
3172        return MatchOperand_ParseFail;
3173      }
3174      Parser.Lex(); // Eat the minus.
3175      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3176      int EndReg = tryParseRegister();
3177      if (EndReg == -1) {
3178        Error(AfterMinusLoc, "register expected");
3179        return MatchOperand_ParseFail;
3180      }
3181      // Allow Q regs and just interpret them as the two D sub-registers.
3182      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3183        EndReg = getDRegFromQReg(EndReg) + 1;
3184      // If the register is the same as the start reg, there's nothing
3185      // more to do.
3186      if (Reg == EndReg)
3187        continue;
3188      // The register must be in the same register class as the first.
3189      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3190        Error(AfterMinusLoc, "invalid register in register list");
3191        return MatchOperand_ParseFail;
3192      }
3193      // Ranges must go from low to high.
3194      if (Reg > EndReg) {
3195        Error(AfterMinusLoc, "bad range in register list");
3196        return MatchOperand_ParseFail;
3197      }
3198      // Parse the lane specifier if present.
3199      VectorLaneTy NextLaneKind;
3200      unsigned NextLaneIndex;
3201      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3202          MatchOperand_Success)
3203        return MatchOperand_ParseFail;
3204      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3205        Error(AfterMinusLoc, "mismatched lane index in register list");
3206        return MatchOperand_ParseFail;
3207      }
3208
3209      // Add all the registers in the range to the register list.
3210      Count += EndReg - Reg;
3211      Reg = EndReg;
3212      continue;
3213    }
3214    Parser.Lex(); // Eat the comma.
3215    RegLoc = Parser.getTok().getLoc();
3216    int OldReg = Reg;
3217    Reg = tryParseRegister();
3218    if (Reg == -1) {
3219      Error(RegLoc, "register expected");
3220      return MatchOperand_ParseFail;
3221    }
3222    // Vector register lists must be contiguous.
3223    // It's OK to use the enumeration values directly here, as the VFP
3224    // register classes have their enums sorted properly.
3225    //
3226    // The list is of D registers, but we also allow Q regs and just interpret
3227    // them as the two D sub-registers.
3228    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3229      if (!Spacing)
3230        Spacing = 1; // Register range implies a single spaced list.
3231      else if (Spacing == 2) {
3232        Error(RegLoc,
3233              "invalid register in double-spaced list (must be 'D' register)");
3234        return MatchOperand_ParseFail;
3235      }
3236      Reg = getDRegFromQReg(Reg);
3237      if (Reg != OldReg + 1) {
3238        Error(RegLoc, "non-contiguous register range");
3239        return MatchOperand_ParseFail;
3240      }
3241      ++Reg;
3242      Count += 2;
3243      // Parse the lane specifier if present.
3244      VectorLaneTy NextLaneKind;
3245      unsigned NextLaneIndex;
3246      SMLoc LaneLoc = Parser.getTok().getLoc();
3247      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3248          MatchOperand_Success)
3249        return MatchOperand_ParseFail;
3250      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3251        Error(LaneLoc, "mismatched lane index in register list");
3252        return MatchOperand_ParseFail;
3253      }
3254      continue;
3255    }
3256    // Normal D register.
3257    // Figure out the register spacing (single or double) of the list if
3258    // we don't know it already.
3259    if (!Spacing)
3260      Spacing = 1 + (Reg == OldReg + 2);
3261
3262    // Just check that it's contiguous and keep going.
3263    if (Reg != OldReg + Spacing) {
3264      Error(RegLoc, "non-contiguous register range");
3265      return MatchOperand_ParseFail;
3266    }
3267    ++Count;
3268    // Parse the lane specifier if present.
3269    VectorLaneTy NextLaneKind;
3270    unsigned NextLaneIndex;
3271    SMLoc EndLoc = Parser.getTok().getLoc();
3272    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3273      return MatchOperand_ParseFail;
3274    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3275      Error(EndLoc, "mismatched lane index in register list");
3276      return MatchOperand_ParseFail;
3277    }
3278  }
3279
3280  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3281    Error(Parser.getTok().getLoc(), "'}' expected");
3282    return MatchOperand_ParseFail;
3283  }
3284  E = Parser.getTok().getEndLoc();
3285  Parser.Lex(); // Eat '}' token.
3286
3287  switch (LaneKind) {
3288  case NoLanes:
3289    // Two-register operands have been converted to the
3290    // composite register classes.
3291    if (Count == 2) {
3292      const MCRegisterClass *RC = (Spacing == 1) ?
3293        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3294        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3295      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3296    }
3297
3298    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3299                                                    (Spacing == 2), S, E));
3300    break;
3301  case AllLanes:
3302    // Two-register operands have been converted to the
3303    // composite register classes.
3304    if (Count == 2) {
3305      const MCRegisterClass *RC = (Spacing == 1) ?
3306        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3307        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3308      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3309    }
3310    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3311                                                            (Spacing == 2),
3312                                                            S, E));
3313    break;
3314  case IndexedLane:
3315    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3316                                                           LaneIndex,
3317                                                           (Spacing == 2),
3318                                                           S, E));
3319    break;
3320  }
3321  return MatchOperand_Success;
3322}
3323
3324/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
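/// For example (assumed, not exhaustive): 'dmb ish', 'dsb sy', or 'dmb #15'.
/// Both the named options and a raw 4-bit immediate are accepted.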
3325ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3326parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3327  SMLoc S = Parser.getTok().getLoc();
3328  const AsmToken &Tok = Parser.getTok();
3329  unsigned Opt;
3330
3331  if (Tok.is(AsmToken::Identifier)) {
3332    StringRef OptStr = Tok.getString();
3333
3334    Opt = StringSwitch<unsigned>(OptStr.lower())
3335      .Case("sy",    ARM_MB::SY)
3336      .Case("st",    ARM_MB::ST)
3337      .Case("sh",    ARM_MB::ISH)
3338      .Case("ish",   ARM_MB::ISH)
3339      .Case("shst",  ARM_MB::ISHST)
3340      .Case("ishst", ARM_MB::ISHST)
3341      .Case("nsh",   ARM_MB::NSH)
3342      .Case("un",    ARM_MB::NSH)
3343      .Case("nshst", ARM_MB::NSHST)
3344      .Case("unst",  ARM_MB::NSHST)
3345      .Case("osh",   ARM_MB::OSH)
3346      .Case("oshst", ARM_MB::OSHST)
3347      .Default(~0U);
3348
3349    if (Opt == ~0U)
3350      return MatchOperand_NoMatch;
3351
3352    Parser.Lex(); // Eat identifier token.
3353  } else if (Tok.is(AsmToken::Hash) ||
3354             Tok.is(AsmToken::Dollar) ||
3355             Tok.is(AsmToken::Integer)) {
3356    if (Parser.getTok().isNot(AsmToken::Integer))
3357      Parser.Lex(); // Eat the '#'.
3358    SMLoc Loc = Parser.getTok().getLoc();
3359
3360    const MCExpr *MemBarrierID;
3361    if (getParser().parseExpression(MemBarrierID)) {
3362      Error(Loc, "illegal expression");
3363      return MatchOperand_ParseFail;
3364    }
3365
3366    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3367    if (!CE) {
3368      Error(Loc, "constant expression expected");
3369      return MatchOperand_ParseFail;
3370    }
3371
3372    int Val = CE->getValue();
3373    if (Val & ~0xf) {
3374      Error(Loc, "immediate value out of range");
3375      return MatchOperand_ParseFail;
3376    }
3377
3378    Opt = ARM_MB::RESERVED_0 + Val;
3379  } else
3380    return MatchOperand_ParseFail;
3381
3382  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3383  return MatchOperand_Success;
3384}
3385
3386/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
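/// For example (assumed): 'cpsie if' or 'cpsid aif'; the operand parsed here
/// is the "aif" flag string (or "none" for no flags).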
3387ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3388parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3389  SMLoc S = Parser.getTok().getLoc();
3390  const AsmToken &Tok = Parser.getTok();
3391  if (!Tok.is(AsmToken::Identifier))
3392    return MatchOperand_NoMatch;
3393  StringRef IFlagsStr = Tok.getString();
3394
3395  // An iflags string of "none" is interpreted to mean that none of the AIF
3396  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3397  unsigned IFlags = 0;
3398  if (IFlagsStr != "none") {
3399    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3400      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3401        .Case("a", ARM_PROC::A)
3402        .Case("i", ARM_PROC::I)
3403        .Case("f", ARM_PROC::F)
3404        .Default(~0U);
3405
3406      // If some specific iflag is already set, it means that some letter is
3407      // present more than once, which is not acceptable.
3408      if (Flag == ~0U || (IFlags & Flag))
3409        return MatchOperand_NoMatch;
3410
3411      IFlags |= Flag;
3412    }
3413  }
3414
3415  Parser.Lex(); // Eat identifier token.
3416  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3417  return MatchOperand_Success;
3418}
3419
3420/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
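/// For example (assumed): 'msr apsr_nzcvq, r0', 'msr cpsr_fc, r1', or, on
/// M-class cores, 'msr primask, r0'; the operand parsed here is the mask name.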
3421ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3422parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3423  SMLoc S = Parser.getTok().getLoc();
3424  const AsmToken &Tok = Parser.getTok();
3425  if (!Tok.is(AsmToken::Identifier))
3426    return MatchOperand_NoMatch;
3427  StringRef Mask = Tok.getString();
3428
3429  if (isMClass()) {
3430    // See ARMv6-M 10.1.1
3431    std::string Name = Mask.lower();
3432    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3433      // Note: in the documentation:
3434      //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3435      //  for MSR APSR_nzcvq.
3436      // but we do make it an alias here.  This is done to get the "mask encoding"
3437      // bits correct on MSR APSR writes.
3438      //
3439      // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3440      // should really only be allowed when writing a special register.  Note
3441      // they get dropped in the MRS instruction reading a special register as
3442      // the SYSm field is only 8 bits.
3443      //
3444      // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3445      // includes the DSP extension but that is not checked.
3446      .Case("apsr", 0x800)
3447      .Case("apsr_nzcvq", 0x800)
3448      .Case("apsr_g", 0x400)
3449      .Case("apsr_nzcvqg", 0xc00)
3450      .Case("iapsr", 0x801)
3451      .Case("iapsr_nzcvq", 0x801)
3452      .Case("iapsr_g", 0x401)
3453      .Case("iapsr_nzcvqg", 0xc01)
3454      .Case("eapsr", 0x802)
3455      .Case("eapsr_nzcvq", 0x802)
3456      .Case("eapsr_g", 0x402)
3457      .Case("eapsr_nzcvqg", 0xc02)
3458      .Case("xpsr", 0x803)
3459      .Case("xpsr_nzcvq", 0x803)
3460      .Case("xpsr_g", 0x403)
3461      .Case("xpsr_nzcvqg", 0xc03)
3462      .Case("ipsr", 0x805)
3463      .Case("epsr", 0x806)
3464      .Case("iepsr", 0x807)
3465      .Case("msp", 0x808)
3466      .Case("psp", 0x809)
3467      .Case("primask", 0x810)
3468      .Case("basepri", 0x811)
3469      .Case("basepri_max", 0x812)
3470      .Case("faultmask", 0x813)
3471      .Case("control", 0x814)
3472      .Default(~0U);
3473
3474    if (FlagsVal == ~0U)
3475      return MatchOperand_NoMatch;
3476
3477    if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3478      // basepri, basepri_max and faultmask are only valid for V7m.
3479      return MatchOperand_NoMatch;
3480
3481    Parser.Lex(); // Eat identifier token.
3482    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3483    return MatchOperand_Success;
3484  }
3485
3486  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3487  size_t Start = 0, Next = Mask.find('_');
3488  StringRef Flags = "";
3489  std::string SpecReg = Mask.slice(Start, Next).lower();
3490  if (Next != StringRef::npos)
3491    Flags = Mask.slice(Next+1, Mask.size());
3492
3493  // FlagsVal contains the complete mask:
3494  // 3-0: Mask
3495  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3496  unsigned FlagsVal = 0;
3497
3498  if (SpecReg == "apsr") {
3499    FlagsVal = StringSwitch<unsigned>(Flags)
3500    .Case("nzcvq",  0x8) // same as CPSR_f
3501    .Case("g",      0x4) // same as CPSR_s
3502    .Case("nzcvqg", 0xc) // same as CPSR_fs
3503    .Default(~0U);
3504
3505    if (FlagsVal == ~0U) {
3506      if (!Flags.empty())
3507        return MatchOperand_NoMatch;
3508      else
3509        FlagsVal = 8; // No flag
3510    }
3511  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3512    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3513    if (Flags == "all" || Flags == "")
3514      Flags = "fc";
3515    for (int i = 0, e = Flags.size(); i != e; ++i) {
3516      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3517      .Case("c", 1)
3518      .Case("x", 2)
3519      .Case("s", 4)
3520      .Case("f", 8)
3521      .Default(~0U);
3522
3523      // If some specific flag is already set, it means that some letter is
3524      // present more than once, which is not acceptable.
3525      if (FlagsVal == ~0U || (FlagsVal & Flag))
3526        return MatchOperand_NoMatch;
3527      FlagsVal |= Flag;
3528    }
3529  } else // No match for special register.
3530    return MatchOperand_NoMatch;
3531
3532  // Special register without flags is NOT equivalent to "fc" flags.
3533  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3534  // two lines would enable gas compatibility at the expense of breaking
3535  // round-tripping.
3536  //
3537  // if (!FlagsVal)
3538  //  FlagsVal = 0x9;
3539
3540  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3541  if (SpecReg == "spsr")
3542    FlagsVal |= 16;
3543
3544  Parser.Lex(); // Eat identifier token.
3545  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3546  return MatchOperand_Success;
3547}
3548
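/// parsePKHImm - Parse the shift operand of the PKHBT/PKHTB instructions,
/// e.g. the 'lsl #8' in 'pkhbt r0, r1, r2, lsl #8' (illustrative example).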
3549ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3550parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3551            int Low, int High) {
3552  const AsmToken &Tok = Parser.getTok();
3553  if (Tok.isNot(AsmToken::Identifier)) {
3554    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3555    return MatchOperand_ParseFail;
3556  }
3557  StringRef ShiftName = Tok.getString();
3558  std::string LowerOp = Op.lower();
3559  std::string UpperOp = Op.upper();
3560  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3561    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3562    return MatchOperand_ParseFail;
3563  }
3564  Parser.Lex(); // Eat shift type token.
3565
3566  // There must be a '#' and a shift amount.
3567  if (Parser.getTok().isNot(AsmToken::Hash) &&
3568      Parser.getTok().isNot(AsmToken::Dollar)) {
3569    Error(Parser.getTok().getLoc(), "'#' expected");
3570    return MatchOperand_ParseFail;
3571  }
3572  Parser.Lex(); // Eat hash token.
3573
3574  const MCExpr *ShiftAmount;
3575  SMLoc Loc = Parser.getTok().getLoc();
3576  SMLoc EndLoc;
3577  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3578    Error(Loc, "illegal expression");
3579    return MatchOperand_ParseFail;
3580  }
3581  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3582  if (!CE) {
3583    Error(Loc, "constant expression expected");
3584    return MatchOperand_ParseFail;
3585  }
3586  int Val = CE->getValue();
3587  if (Val < Low || Val > High) {
3588    Error(Loc, "immediate value out of range");
3589    return MatchOperand_ParseFail;
3590  }
3591
3592  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
3593
3594  return MatchOperand_Success;
3595}
3596
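/// parseSetEndImm - Parse the endianness operand of the SETEND instruction,
/// i.e. 'be' or 'le' as in 'setend be' (illustrative example).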
3597ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3598parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3599  const AsmToken &Tok = Parser.getTok();
3600  SMLoc S = Tok.getLoc();
3601  if (Tok.isNot(AsmToken::Identifier)) {
3602    Error(S, "'be' or 'le' operand expected");
3603    return MatchOperand_ParseFail;
3604  }
3605  int Val = StringSwitch<int>(Tok.getString())
3606    .Case("be", 1)
3607    .Case("le", 0)
3608    .Default(-1);
3609  Parser.Lex(); // Eat the token.
3610
3611  if (Val == -1) {
3612    Error(S, "'be' or 'le' operand expected");
3613    return MatchOperand_ParseFail;
3614  }
3615  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3616                                                                  getContext()),
3617                                           S, Tok.getEndLoc()));
3618  return MatchOperand_Success;
3619}
3620
3621/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3622/// instructions. Legal values are:
3623///     lsl #n  'n' in [0,31]
3624///     asr #n  'n' in [1,32]
3625///             n == 32 encoded as n == 0.
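/// For example (assumed): 'ssat r0, #8, r1, lsl #4' or 'usat r0, #7, r1, asr #2'.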
3626ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3627parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3628  const AsmToken &Tok = Parser.getTok();
3629  SMLoc S = Tok.getLoc();
3630  if (Tok.isNot(AsmToken::Identifier)) {
3631    Error(S, "shift operator 'asr' or 'lsl' expected");
3632    return MatchOperand_ParseFail;
3633  }
3634  StringRef ShiftName = Tok.getString();
3635  bool isASR;
3636  if (ShiftName == "lsl" || ShiftName == "LSL")
3637    isASR = false;
3638  else if (ShiftName == "asr" || ShiftName == "ASR")
3639    isASR = true;
3640  else {
3641    Error(S, "shift operator 'asr' or 'lsl' expected");
3642    return MatchOperand_ParseFail;
3643  }
3644  Parser.Lex(); // Eat the operator.
3645
3646  // A '#' and a shift amount.
3647  if (Parser.getTok().isNot(AsmToken::Hash) &&
3648      Parser.getTok().isNot(AsmToken::Dollar)) {
3649    Error(Parser.getTok().getLoc(), "'#' expected");
3650    return MatchOperand_ParseFail;
3651  }
3652  Parser.Lex(); // Eat hash token.
3653  SMLoc ExLoc = Parser.getTok().getLoc();
3654
3655  const MCExpr *ShiftAmount;
3656  SMLoc EndLoc;
3657  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3658    Error(ExLoc, "malformed shift expression");
3659    return MatchOperand_ParseFail;
3660  }
3661  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3662  if (!CE) {
3663    Error(ExLoc, "shift amount must be an immediate");
3664    return MatchOperand_ParseFail;
3665  }
3666
3667  int64_t Val = CE->getValue();
3668  if (isASR) {
3669    // Shift amount must be in [1,32]
3670    if (Val < 1 || Val > 32) {
3671      Error(ExLoc, "'asr' shift amount must be in range [1,32]");
3672      return MatchOperand_ParseFail;
3673    }
3674    // asr #32 is encoded as asr #0, but is not allowed in Thumb2 mode.
3675    if (isThumb() && Val == 32) {
3676      Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
3677      return MatchOperand_ParseFail;
3678    }
3679    if (Val == 32) Val = 0;
3680  } else {
3681    // Shift amount must be in [0,31]
3682    if (Val < 0 || Val > 31) {
3683      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
3684      return MatchOperand_ParseFail;
3685    }
3686  }
3687
3688  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
3689
3690  return MatchOperand_Success;
3691}
3692
3693/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3694/// of instructions. Legal values are:
3695///     ror #n  'n' in {0, 8, 16, 24}
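/// For example (assumed): 'sxtb r0, r1, ror #8' or 'uxtb16 r2, r3, ror #16'.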
3696ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3697parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3698  const AsmToken &Tok = Parser.getTok();
3699  SMLoc S = Tok.getLoc();
3700  if (Tok.isNot(AsmToken::Identifier))
3701    return MatchOperand_NoMatch;
3702  StringRef ShiftName = Tok.getString();
3703  if (ShiftName != "ror" && ShiftName != "ROR")
3704    return MatchOperand_NoMatch;
3705  Parser.Lex(); // Eat the operator.
3706
3707  // A '#' and a rotate amount.
3708  if (Parser.getTok().isNot(AsmToken::Hash) &&
3709      Parser.getTok().isNot(AsmToken::Dollar)) {
3710    Error(Parser.getTok().getLoc(), "'#' expected");
3711    return MatchOperand_ParseFail;
3712  }
3713  Parser.Lex(); // Eat hash token.
3714  SMLoc ExLoc = Parser.getTok().getLoc();
3715
3716  const MCExpr *ShiftAmount;
3717  SMLoc EndLoc;
3718  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
3719    Error(ExLoc, "malformed rotate expression");
3720    return MatchOperand_ParseFail;
3721  }
3722  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3723  if (!CE) {
3724    Error(ExLoc, "rotate amount must be an immediate");
3725    return MatchOperand_ParseFail;
3726  }
3727
3728  int64_t Val = CE->getValue();
3729  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3730  // normally, zero is represented in asm by omitting the rotate operand
3731  // entirely.
3732  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3733    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
3734    return MatchOperand_ParseFail;
3735  }
3736
3737  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
3738
3739  return MatchOperand_Success;
3740}
3741
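/// parseBitfield - Parse a '#lsb, #width' bitfield descriptor, as used by
/// instructions such as BFC/BFI, e.g. the '#4, #8' in 'bfi r0, r1, #4, #8'
/// (illustrative example).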
3742ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3743parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3744  SMLoc S = Parser.getTok().getLoc();
3745  // The bitfield descriptor is really two operands, the LSB and the width.
3746  if (Parser.getTok().isNot(AsmToken::Hash) &&
3747      Parser.getTok().isNot(AsmToken::Dollar)) {
3748    Error(Parser.getTok().getLoc(), "'#' expected");
3749    return MatchOperand_ParseFail;
3750  }
3751  Parser.Lex(); // Eat hash token.
3752
3753  const MCExpr *LSBExpr;
3754  SMLoc E = Parser.getTok().getLoc();
3755  if (getParser().parseExpression(LSBExpr)) {
3756    Error(E, "malformed immediate expression");
3757    return MatchOperand_ParseFail;
3758  }
3759  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3760  if (!CE) {
3761    Error(E, "'lsb' operand must be an immediate");
3762    return MatchOperand_ParseFail;
3763  }
3764
3765  int64_t LSB = CE->getValue();
3766  // The LSB must be in the range [0,31]
3767  if (LSB < 0 || LSB > 31) {
3768    Error(E, "'lsb' operand must be in the range [0,31]");
3769    return MatchOperand_ParseFail;
3770  }
3771  E = Parser.getTok().getLoc();
3772
3773  // Expect another immediate operand.
3774  if (Parser.getTok().isNot(AsmToken::Comma)) {
3775    Error(Parser.getTok().getLoc(), "too few operands");
3776    return MatchOperand_ParseFail;
3777  }
3778  Parser.Lex(); // Eat hash token.
3779  Parser.Lex(); // Eat comma token.
3780      Parser.getTok().isNot(AsmToken::Dollar)) {
3781    Error(Parser.getTok().getLoc(), "'#' expected");
3782    return MatchOperand_ParseFail;
3783  }
3784  Parser.Lex(); // Eat hash token.
3785
3786  const MCExpr *WidthExpr;
3787  SMLoc EndLoc;
3788  if (getParser().parseExpression(WidthExpr, EndLoc)) {
3789    Error(E, "malformed immediate expression");
3790    return MatchOperand_ParseFail;
3791  }
3792  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3793  if (!CE) {
3794    Error(E, "'width' operand must be an immediate");
3795    return MatchOperand_ParseFail;
3796  }
3797
3798  int64_t Width = CE->getValue();
3799  // The LSB must be in the range [1,32-lsb]
3800  // The width must be in the range [1,32-lsb]
3801    Error(E, "'width' operand must be in the range [1,32-lsb]");
3802    return MatchOperand_ParseFail;
3803  }
3804
3805  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
3806
3807  return MatchOperand_Success;
3808}
3809
3810ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3811parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3812  // Check for a post-index addressing register operand. Specifically:
3813  // postidx_reg := '+' register {, shift}
3814  //              | '-' register {, shift}
3815  //              | register {, shift}
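  // For illustration (assumed): the 'r2, lsl #2' in 'ldr r0, [r1], r2, lsl #2'
  // or the '-r2' in 'str r0, [r1], -r2'.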
3816
3817  // This method must return MatchOperand_NoMatch without consuming any tokens
3818  // in the case where there is no match, as other alternatives will then be
3819  // tried by other parse methods.
3820  AsmToken Tok = Parser.getTok();
3821  SMLoc S = Tok.getLoc();
3822  bool haveEaten = false;
3823  bool isAdd = true;
3824  if (Tok.is(AsmToken::Plus)) {
3825    Parser.Lex(); // Eat the '+' token.
3826    haveEaten = true;
3827  } else if (Tok.is(AsmToken::Minus)) {
3828    Parser.Lex(); // Eat the '-' token.
3829    isAdd = false;
3830    haveEaten = true;
3831  }
3832
3833  SMLoc E = Parser.getTok().getEndLoc();
3834  int Reg = tryParseRegister();
3835  if (Reg == -1) {
3836    if (!haveEaten)
3837      return MatchOperand_NoMatch;
3838    Error(Parser.getTok().getLoc(), "register expected");
3839    return MatchOperand_ParseFail;
3840  }
3841
3842  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3843  unsigned ShiftImm = 0;
3844  if (Parser.getTok().is(AsmToken::Comma)) {
3845    Parser.Lex(); // Eat the ','.
3846    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3847      return MatchOperand_ParseFail;
3848
3849    // FIXME: Only approximates end...may include intervening whitespace.
3850    E = Parser.getTok().getLoc();
3851  }
3852
3853  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3854                                                  ShiftImm, S, E));
3855
3856  return MatchOperand_Success;
3857}
3858
3859ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3860parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3861  // Check for a post-index addressing operand. Specifically:
3862  // am3offset := '+' register
3863  //              | '-' register
3864  //              | register
3865  //              | # imm
3866  //              | # + imm
3867  //              | # - imm
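  // For illustration (assumed): the '#8' in 'ldrd r0, r1, [r2], #8' or the
  // '-r3' in 'ldrh r0, [r1], -r3'.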
3868
3869  // This method must return MatchOperand_NoMatch without consuming any tokens
3870  // in the case where there is no match, as other alternatives will then be
3871  // tried by other parse methods.
3872  AsmToken Tok = Parser.getTok();
3873  SMLoc S = Tok.getLoc();
3874
3875  // Do immediates first, as we always parse those if we have a '#'.
3876  if (Parser.getTok().is(AsmToken::Hash) ||
3877      Parser.getTok().is(AsmToken::Dollar)) {
3878    Parser.Lex(); // Eat the '#'.
3879    // Explicitly look for a '-', as we need to encode negative zero
3880    // differently.
3881    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3882    const MCExpr *Offset;
3883    SMLoc E;
3884    if (getParser().parseExpression(Offset, E))
3885      return MatchOperand_ParseFail;
3886    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3887    if (!CE) {
3888      Error(S, "constant expression expected");
3889      return MatchOperand_ParseFail;
3890    }
3891    // Negative zero is encoded as the flag value INT32_MIN.
3892    int32_t Val = CE->getValue();
3893    if (isNegative && Val == 0)
3894      Val = INT32_MIN;
3895
3896    Operands.push_back(
3897      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3898
3899    return MatchOperand_Success;
3900  }
3901
3902
3903  bool haveEaten = false;
3904  bool isAdd = true;
3905  if (Tok.is(AsmToken::Plus)) {
3906    Parser.Lex(); // Eat the '+' token.
3907    haveEaten = true;
3908  } else if (Tok.is(AsmToken::Minus)) {
3909    Parser.Lex(); // Eat the '-' token.
3910    isAdd = false;
3911    haveEaten = true;
3912  }
3913
3914  Tok = Parser.getTok();
3915  int Reg = tryParseRegister();
3916  if (Reg == -1) {
3917    if (!haveEaten)
3918      return MatchOperand_NoMatch;
3919    Error(Tok.getLoc(), "register expected");
3920    return MatchOperand_ParseFail;
3921  }
3922
3923  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3924                                                  0, S, Tok.getEndLoc()));
3925
3926  return MatchOperand_Success;
3927}
3928
3929/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3930/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3931/// when they refer to multiple MIOperands inside a single one.
3932void ARMAsmParser::
3933cvtT2LdrdPre(MCInst &Inst,
3934             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3935  // Rt, Rt2
3936  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3937  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3938  // Create a writeback register dummy placeholder.
3939  Inst.addOperand(MCOperand::CreateReg(0));
3940  // addr
3941  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3942  // pred
3943  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3944}
3945
3946/// cvtT2StrdPre - Convert parsed operands to MCInst.
3947/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3948/// when they refer to multiple MIOperands inside a single one.
3949void ARMAsmParser::
3950cvtT2StrdPre(MCInst &Inst,
3951             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3952  // Create a writeback register dummy placeholder.
3953  Inst.addOperand(MCOperand::CreateReg(0));
3954  // Rt, Rt2
3955  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3956  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3957  // addr
3958  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3959  // pred
3960  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3961}
3962
3963/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3964/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3965/// when they refer to multiple MIOperands inside a single one.
3966void ARMAsmParser::
3967cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
3968                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3969  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3970
3971  // Create a writeback register dummy placeholder.
3972  Inst.addOperand(MCOperand::CreateImm(0));
3973
3974  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3975  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3976}
3977
3978/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3979/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3980/// when they refer to multiple MIOperands inside a single one.
3981void ARMAsmParser::
3982cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
3983                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3984  // Create a writeback register dummy placeholder.
3985  Inst.addOperand(MCOperand::CreateImm(0));
3986  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3987  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3988  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3989}
3990
3991/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3992/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3993/// when they refer to multiple MIOperands inside a single one.
3994void ARMAsmParser::
3995cvtLdWriteBackRegAddrMode2(MCInst &Inst,
3996                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3997  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3998
3999  // Create a writeback register dummy placeholder.
4000  Inst.addOperand(MCOperand::CreateImm(0));
4001
4002  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
4003  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4004}
4005
4006/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
4007/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4008/// when they refer to multiple MIOperands inside a single one.
4009void ARMAsmParser::
4010cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
4011                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4012  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4013
4014  // Create a writeback register dummy placeholder.
4015  Inst.addOperand(MCOperand::CreateImm(0));
4016
4017  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
4018  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4019}
4020
4021
4022/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
4023/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4024/// when they refer to multiple MIOperands inside a single one.
4025void ARMAsmParser::
4026cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
4027                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4028  // Create a writeback register dummy placeholder.
4029  Inst.addOperand(MCOperand::CreateImm(0));
4030  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4031  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
4032  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4033}
4034
4035/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
4036/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4037/// when they refer to multiple MIOperands inside a single one.
4038void ARMAsmParser::
4039cvtStWriteBackRegAddrMode2(MCInst &Inst,
4040                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4041  // Create a writeback register dummy placeholder.
4042  Inst.addOperand(MCOperand::CreateImm(0));
4043  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4044  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
4045  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4046}
4047
4048/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4049/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4050/// when they refer to multiple MIOperands inside a single one.
4051void ARMAsmParser::
4052cvtStWriteBackRegAddrMode3(MCInst &Inst,
4053                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4054  // Create a writeback register dummy placeholder.
4055  Inst.addOperand(MCOperand::CreateImm(0));
4056  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4057  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4058  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4059}
4060
4061/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
4062/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4063/// when they refer to multiple MIOperands inside a single one.
4064void ARMAsmParser::
4065cvtLdExtTWriteBackImm(MCInst &Inst,
4066                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4067  // Rt
4068  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4069  // Create a writeback register dummy placeholder.
4070  Inst.addOperand(MCOperand::CreateImm(0));
4071  // addr
4072  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4073  // offset
4074  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4075  // pred
4076  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4077}
4078
4079/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
4080/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4081/// when they refer to multiple MIOperands inside a single one.
4082void ARMAsmParser::
4083cvtLdExtTWriteBackReg(MCInst &Inst,
4084                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4085  // Rt
4086  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4087  // Create a writeback register dummy placeholder.
4088  Inst.addOperand(MCOperand::CreateImm(0));
4089  // addr
4090  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4091  // offset
4092  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4093  // pred
4094  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4095}
4096
4097/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
4098/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4099/// when they refer to multiple MIOperands inside a single one.
4100void ARMAsmParser::
4101cvtStExtTWriteBackImm(MCInst &Inst,
4102                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4103  // Create a writeback register dummy placeholder.
4104  Inst.addOperand(MCOperand::CreateImm(0));
4105  // Rt
4106  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4107  // addr
4108  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4109  // offset
4110  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4111  // pred
4112  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4113}
4114
4115/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
4116/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4117/// when they refer to multiple MIOperands inside a single one.
4118void ARMAsmParser::
4119cvtStExtTWriteBackReg(MCInst &Inst,
4120                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4121  // Create a writeback register dummy placeholder.
4122  Inst.addOperand(MCOperand::CreateImm(0));
4123  // Rt
4124  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4125  // addr
4126  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4127  // offset
4128  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4129  // pred
4130  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4131}
4132
4133/// cvtLdrdPre - Convert parsed operands to MCInst.
4134/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4135/// when they refer to multiple MIOperands inside a single one.
4136void ARMAsmParser::
4137cvtLdrdPre(MCInst &Inst,
4138           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4139  // Rt, Rt2
4140  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4141  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4142  // Create a writeback register dummy placeholder.
4143  Inst.addOperand(MCOperand::CreateImm(0));
4144  // addr
4145  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4146  // pred
4147  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4148}
4149
4150/// cvtStrdPre - Convert parsed operands to MCInst.
4151/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4152/// when they refer to multiple MIOperands inside a single one.
4153void ARMAsmParser::
4154cvtStrdPre(MCInst &Inst,
4155           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4156  // Create a writeback register dummy placeholder.
4157  Inst.addOperand(MCOperand::CreateImm(0));
4158  // Rt, Rt2
4159  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4160  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4161  // addr
4162  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4163  // pred
4164  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4165}
4166
4167/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4168/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4169/// when they refer to multiple MIOperands inside a single one.
4170void ARMAsmParser::
4171cvtLdWriteBackRegAddrMode3(MCInst &Inst,
4172                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4173  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4174  // Create a writeback register dummy placeholder.
4175  Inst.addOperand(MCOperand::CreateImm(0));
4176  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4177  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4178}
4179
4180/// cvtThumbMultiply - Convert parsed operands to MCInst.
4181/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4182/// when they refer to multiple MIOperands inside a single one.
4183void ARMAsmParser::
4184cvtThumbMultiply(MCInst &Inst,
4185           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4186  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4187  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4188  // If we have a three-operand form, make sure to set Rn to be the operand
4189  // that isn't the same as Rd.
4190  unsigned RegOp = 4;
4191  if (Operands.size() == 6 &&
4192      ((ARMOperand*)Operands[4])->getReg() ==
4193        ((ARMOperand*)Operands[3])->getReg())
4194    RegOp = 5;
4195  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4196  Inst.addOperand(Inst.getOperand(0));
4197  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4198}
4199
4200void ARMAsmParser::
4201cvtVLDwbFixed(MCInst &Inst,
4202              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4203  // Vd
4204  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4205  // Create a writeback register dummy placeholder.
4206  Inst.addOperand(MCOperand::CreateImm(0));
4207  // Vn
4208  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4209  // pred
4210  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4211}
4212
4213void ARMAsmParser::
4214cvtVLDwbRegister(MCInst &Inst,
4215                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4216  // Vd
4217  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4218  // Create a writeback register dummy placeholder.
4219  Inst.addOperand(MCOperand::CreateImm(0));
4220  // Vn
4221  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4222  // Vm
4223  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4224  // pred
4225  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4226}
4227
4228void ARMAsmParser::
4229cvtVSTwbFixed(MCInst &Inst,
4230              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4231  // Create a writeback register dummy placeholder.
4232  Inst.addOperand(MCOperand::CreateImm(0));
4233  // Vn
4234  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4235  // Vt
4236  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4237  // pred
4238  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4239}
4240
4241void ARMAsmParser::
4242cvtVSTwbRegister(MCInst &Inst,
4243                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4244  // Create a writeback register dummy placeholder.
4245  Inst.addOperand(MCOperand::CreateImm(0));
4246  // Vn
4247  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4248  // Vm
4249  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4250  // Vt
4251  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4252  // pred
4253  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4254}
4255
4256/// Parse an ARM memory expression. Returns false on success; otherwise it
4257/// reports an error and returns true.  The first token must be a '[' when called.
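/// Illustrative forms handled here (assumed, not exhaustive):
///     [r0]               @ no offset
///     [r0, #4]           @ immediate offset
///     [r0, r1, lsl #2]   @ register offset with optional shift
///     [r0, :128]         @ alignment specifier (NEON loads/stores)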
4258bool ARMAsmParser::
4259parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4260  SMLoc S, E;
4261  assert(Parser.getTok().is(AsmToken::LBrac) &&
4262         "Token is not a Left Bracket");
4263  S = Parser.getTok().getLoc();
4264  Parser.Lex(); // Eat left bracket token.
4265
4266  const AsmToken &BaseRegTok = Parser.getTok();
4267  int BaseRegNum = tryParseRegister();
4268  if (BaseRegNum == -1)
4269    return Error(BaseRegTok.getLoc(), "register expected");
4270
4271  // The next token must either be a comma, a colon or a closing bracket.
4272  const AsmToken &Tok = Parser.getTok();
4273  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4274      !Tok.is(AsmToken::RBrac))
4275    return Error(Tok.getLoc(), "malformed memory operand");
4276
4277  if (Tok.is(AsmToken::RBrac)) {
4278    E = Tok.getEndLoc();
4279    Parser.Lex(); // Eat right bracket token.
4280
4281    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4282                                             0, 0, false, S, E));
4283
4284    // If there's a pre-indexing writeback marker, '!', just add it as a token
4285    // operand. It's rather odd, but syntactically valid.
4286    if (Parser.getTok().is(AsmToken::Exclaim)) {
4287      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4288      Parser.Lex(); // Eat the '!'.
4289    }
4290
4291    return false;
4292  }
4293
4294  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4295         "Lost colon or comma in memory operand?!");
4296  if (Tok.is(AsmToken::Comma)) {
4297    Parser.Lex(); // Eat the comma.
4298  }
4299
4300  // If we have a ':', it's an alignment specifier.
4301  if (Parser.getTok().is(AsmToken::Colon)) {
4302    Parser.Lex(); // Eat the ':'.
4303    E = Parser.getTok().getLoc();
4304
4305    const MCExpr *Expr;
4306    if (getParser().parseExpression(Expr))
4307     return true;
4308
4309    // The expression has to be a constant. Memory references with relocations
4310    // don't come through here, as they use the <label> forms of the relevant
4311    // instructions.
4312    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4313    if (!CE)
4314      return Error (E, "constant expression expected");
4315
4316    unsigned Align = 0;
4317    switch (CE->getValue()) {
4318    default:
4319      return Error(E,
4320                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4321    case 16:  Align = 2; break;
4322    case 32:  Align = 4; break;
4323    case 64:  Align = 8; break;
4324    case 128: Align = 16; break;
4325    case 256: Align = 32; break;
4326    }
4327
4328    // Now we should have the closing ']'
4329    if (Parser.getTok().isNot(AsmToken::RBrac))
4330      return Error(Parser.getTok().getLoc(), "']' expected");
4331    E = Parser.getTok().getEndLoc();
4332    Parser.Lex(); // Eat right bracket token.
4333
4334    // Don't worry about range checking the value here. That's handled by
4335    // the is*() predicates.
4336    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4337                                             ARM_AM::no_shift, 0, Align,
4338                                             false, S, E));
4339
4340    // If there's a pre-indexing writeback marker, '!', just add it as a token
4341    // operand.
4342    if (Parser.getTok().is(AsmToken::Exclaim)) {
4343      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4344      Parser.Lex(); // Eat the '!'.
4345    }
4346
4347    return false;
4348  }
4349
4350  // If we have a '#', it's an immediate offset, else assume it's a register
4351  // offset. Be friendly and also accept a plain integer (without a leading
4352  // hash) for gas compatibility.
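  // For example (illustrative), "ldr r0, [r1, 4]" is accepted here exactly as
  // "ldr r0, [r1, #4]" would be, since a bare integer token is treated as an
  // immediate offset.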
4353  if (Parser.getTok().is(AsmToken::Hash) ||
4354      Parser.getTok().is(AsmToken::Dollar) ||
4355      Parser.getTok().is(AsmToken::Integer)) {
4356    if (Parser.getTok().isNot(AsmToken::Integer))
4357      Parser.Lex(); // Eat the '#'.
4358    E = Parser.getTok().getLoc();
4359
4360    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4361    const MCExpr *Offset;
4362    if (getParser().parseExpression(Offset))
4363     return true;
4364
4365    // The expression has to be a constant. Memory references with relocations
4366    // don't come through here, as they use the <label> forms of the relevant
4367    // instructions.
4368    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4369    if (!CE)
4370      return Error (E, "constant expression expected");
4371
4372    // If the constant was #-0, represent it as INT32_MIN.
4373    int32_t Val = CE->getValue();
4374    if (isNegative && Val == 0)
4375      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4376
4377    // Now we should have the closing ']'
4378    if (Parser.getTok().isNot(AsmToken::RBrac))
4379      return Error(Parser.getTok().getLoc(), "']' expected");
4380    E = Parser.getTok().getEndLoc();
4381    Parser.Lex(); // Eat right bracket token.
4382
4383    // Don't worry about range checking the value here. That's handled by
4384    // the is*() predicates.
4385    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4386                                             ARM_AM::no_shift, 0, 0,
4387                                             false, S, E));
4388
4389    // If there's a pre-indexing writeback marker, '!', just add it as a token
4390    // operand.
4391    if (Parser.getTok().is(AsmToken::Exclaim)) {
4392      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4393      Parser.Lex(); // Eat the '!'.
4394    }
4395
4396    return false;
4397  }
4398
4399  // The register offset is optionally preceded by a '+' or '-'
4400  bool isNegative = false;
4401  if (Parser.getTok().is(AsmToken::Minus)) {
4402    isNegative = true;
4403    Parser.Lex(); // Eat the '-'.
4404  } else if (Parser.getTok().is(AsmToken::Plus)) {
4405    // Nothing to do.
4406    Parser.Lex(); // Eat the '+'.
4407  }
4408
4409  E = Parser.getTok().getLoc();
4410  int OffsetRegNum = tryParseRegister();
4411  if (OffsetRegNum == -1)
4412    return Error(E, "register expected");
4413
4414  // If there's a shift operator, handle it.
4415  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4416  unsigned ShiftImm = 0;
4417  if (Parser.getTok().is(AsmToken::Comma)) {
4418    Parser.Lex(); // Eat the ','.
4419    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4420      return true;
4421  }
4422
4423  // Now we should have the closing ']'
4424  if (Parser.getTok().isNot(AsmToken::RBrac))
4425    return Error(Parser.getTok().getLoc(), "']' expected");
4426  E = Parser.getTok().getEndLoc();
4427  Parser.Lex(); // Eat right bracket token.
4428
4429  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4430                                           ShiftType, ShiftImm, 0, isNegative,
4431                                           S, E));
4432
4433  // If there's a pre-indexing writeback marker, '!', just add it as a token
4434  // operand.
4435  if (Parser.getTok().is(AsmToken::Exclaim)) {
4436    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4437    Parser.Lex(); // Eat the '!'.
4438  }
4439
4440  return false;
4441}
4442
4443/// parseMemRegOffsetShift - one of these two:
4444///   ( lsl | lsr | asr | ror ) , # shift_amount
4445///   rrx
4446/// Return false if it successfully parses a shift, true otherwise.
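/// For example, when parsing "[r0, r1, lsl #2]" this routine is handed the
/// "lsl #2" portion (the comma has already been consumed); "rrx" takes no amount.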
4447bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4448                                          unsigned &Amount) {
4449  SMLoc Loc = Parser.getTok().getLoc();
4450  const AsmToken &Tok = Parser.getTok();
4451  if (Tok.isNot(AsmToken::Identifier))
4452    return true;
4453  StringRef ShiftName = Tok.getString();
4454  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4455      ShiftName == "asl" || ShiftName == "ASL")
4456    St = ARM_AM::lsl;
4457  else if (ShiftName == "lsr" || ShiftName == "LSR")
4458    St = ARM_AM::lsr;
4459  else if (ShiftName == "asr" || ShiftName == "ASR")
4460    St = ARM_AM::asr;
4461  else if (ShiftName == "ror" || ShiftName == "ROR")
4462    St = ARM_AM::ror;
4463  else if (ShiftName == "rrx" || ShiftName == "RRX")
4464    St = ARM_AM::rrx;
4465  else
4466    return Error(Loc, "illegal shift operator");
4467  Parser.Lex(); // Eat shift type token.
4468
4469  // rrx stands alone.
4470  Amount = 0;
4471  if (St != ARM_AM::rrx) {
4472    Loc = Parser.getTok().getLoc();
4473    // A '#' and a shift amount.
4474    const AsmToken &HashTok = Parser.getTok();
4475    if (HashTok.isNot(AsmToken::Hash) &&
4476        HashTok.isNot(AsmToken::Dollar))
4477      return Error(HashTok.getLoc(), "'#' expected");
4478    Parser.Lex(); // Eat hash token.
4479
4480    const MCExpr *Expr;
4481    if (getParser().parseExpression(Expr))
4482      return true;
4483    // Range check the immediate.
4484    // lsl, ror: 0 <= imm <= 31
4485    // lsr, asr: 0 <= imm <= 32
4486    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4487    if (!CE)
4488      return Error(Loc, "shift amount must be an immediate");
4489    int64_t Imm = CE->getValue();
4490    if (Imm < 0 ||
4491        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4492        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4493      return Error(Loc, "immediate shift value out of range");
4494    // If <ShiftTy> #0, turn it into a no_shift.
4495    if (Imm == 0)
4496      St = ARM_AM::lsl;
4497    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4498    if (Imm == 32)
4499      Imm = 0;
4500    Amount = Imm;
4501  }
4502
4503  return false;
4504}
4505
4506/// parseFPImm - A floating point immediate expression operand.
4507ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4508parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4509  // Anything that can accept a floating point constant as an operand
4510  // needs to go through here, as the regular parseExpression is
4511  // integer only.
4512  //
4513  // This routine still creates a generic Immediate operand, containing
4514  // a bitcast of the 64-bit floating point value. The various operands
4515  // that accept floats can check whether the value is valid for them
4516  // via the standard is*() predicates.
4517
4518  SMLoc S = Parser.getTok().getLoc();
4519
4520  if (Parser.getTok().isNot(AsmToken::Hash) &&
4521      Parser.getTok().isNot(AsmToken::Dollar))
4522    return MatchOperand_NoMatch;
4523
4524  // Disambiguate the VMOV forms that can accept an FP immediate.
4525  // vmov.f32 <sreg>, #imm
4526  // vmov.f64 <dreg>, #imm
4527  // vmov.f32 <dreg>, #imm  @ vector f32x2
4528  // vmov.f32 <qreg>, #imm  @ vector f32x4
4529  //
4530  // There are also the NEON VMOV instructions which expect an
4531  // integer constant. Make sure we don't try to parse an FPImm
4532  // for these:
4533  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4534  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4535  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4536                           TyOp->getToken() != ".f64"))
4537    return MatchOperand_NoMatch;
4538
4539  Parser.Lex(); // Eat the '#'.
4540
4541  // Handle negation, as that still comes through as a separate token.
4542  bool isNegative = false;
4543  if (Parser.getTok().is(AsmToken::Minus)) {
4544    isNegative = true;
4545    Parser.Lex();
4546  }
4547  const AsmToken &Tok = Parser.getTok();
4548  SMLoc Loc = Tok.getLoc();
4549  if (Tok.is(AsmToken::Real)) {
4550    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4551    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4552    // If we had a '-' in front, toggle the sign bit.
4553    IntVal ^= (uint64_t)isNegative << 31;
4554    Parser.Lex(); // Eat the token.
4555    Operands.push_back(ARMOperand::CreateImm(
4556          MCConstantExpr::Create(IntVal, getContext()),
4557          S, Parser.getTok().getLoc()));
4558    return MatchOperand_Success;
4559  }
4560  // Also handle plain integers. Instructions which allow floating point
4561  // immediates also allow a raw encoded 8-bit value.
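  // Illustrative example (assumed syntax): "vmov.f32 s0, #16" supplies the raw
  // 8-bit encoded value 16, which is expanded via getFPImmFloat() below rather
  // than being read as the literal float 16.0.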
4562  if (Tok.is(AsmToken::Integer)) {
4563    int64_t Val = Tok.getIntVal();
4564    Parser.Lex(); // Eat the token.
4565    if (Val > 255 || Val < 0) {
4566      Error(Loc, "encoded floating point value out of range");
4567      return MatchOperand_ParseFail;
4568    }
4569    double RealVal = ARM_AM::getFPImmFloat(Val);
4570    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4571    Operands.push_back(ARMOperand::CreateImm(
4572        MCConstantExpr::Create(Val, getContext()), S,
4573        Parser.getTok().getLoc()));
4574    return MatchOperand_Success;
4575  }
4576
4577  Error(Loc, "invalid floating point immediate");
4578  return MatchOperand_ParseFail;
4579}
4580
4581/// Parse an ARM instruction operand.  For now this parses the operand regardless
4582/// of the mnemonic.
4583bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4584                                StringRef Mnemonic) {
4585  SMLoc S, E;
4586
4587  // Check if the current operand has a custom associated parser, if so, try to
4588  // custom parse the operand, or fallback to the general approach.
4589  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4590  if (ResTy == MatchOperand_Success)
4591    return false;
4592  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4593  // there was a match, but an error occurred, in which case, just return that
4594  // the operand parsing failed.
4595  if (ResTy == MatchOperand_ParseFail)
4596    return true;
4597
4598  switch (getLexer().getKind()) {
4599  default:
4600    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4601    return true;
4602  case AsmToken::Identifier: {
4603    // If we've seen a branch mnemonic, the next operand must be a label.  This
4604    // is true even if the label is a register name.  So "b r1" means branch to
4605    // label "r1".
4606    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
4607    if (!ExpectLabel) {
4608      if (!tryParseRegisterWithWriteBack(Operands))
4609        return false;
4610      int Res = tryParseShiftRegister(Operands);
4611      if (Res == 0) // success
4612        return false;
4613      else if (Res == -1) // irrecoverable error
4614        return true;
4615      // If this is VMRS, check for the apsr_nzcv operand.
4616      if (Mnemonic == "vmrs" &&
4617          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4618        S = Parser.getTok().getLoc();
4619        Parser.Lex();
4620        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4621        return false;
4622      }
4623    }
4624
4625    // Fall through for the Identifier case that is not a register or a
4626    // special name.
4627  }
4628  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4629  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4630  case AsmToken::String:  // quoted label names.
4631  case AsmToken::Dot: {   // . as a branch target
4632    // This was not a register so parse other operands that start with an
4633    // identifier (like labels) as expressions and create them as immediates.
4634    const MCExpr *IdVal;
4635    S = Parser.getTok().getLoc();
4636    if (getParser().parseExpression(IdVal))
4637      return true;
4638    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4639    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4640    return false;
4641  }
4642  case AsmToken::LBrac:
4643    return parseMemory(Operands);
4644  case AsmToken::LCurly:
4645    return parseRegisterList(Operands);
4646  case AsmToken::Dollar:
4647  case AsmToken::Hash: {
4648    // #42 -> immediate.
4649    S = Parser.getTok().getLoc();
4650    Parser.Lex();
4651
4652    if (Parser.getTok().isNot(AsmToken::Colon)) {
4653      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4654      const MCExpr *ImmVal;
4655      if (getParser().parseExpression(ImmVal))
4656        return true;
4657      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4658      if (CE) {
4659        int32_t Val = CE->getValue();
4660        if (isNegative && Val == 0)
4661          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4662      }
4663      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4664      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4665
4666      // There can be a trailing '!' on operands that we want as a separate
4667      // '!' Token operand. Handle that here. For example, the compatibility
4668      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
4669      if (Parser.getTok().is(AsmToken::Exclaim)) {
4670        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
4671                                                   Parser.getTok().getLoc()));
4672        Parser.Lex(); // Eat exclaim token
4673      }
4674      return false;
4675    }
4676    // w/ a ':' after the '#', it's just like a plain ':'.
4677    // FALLTHROUGH
4678  }
4679  case AsmToken::Colon: {
4680    // ":lower16:" and ":upper16:" expression prefixes
4681    // FIXME: Check it's an expression prefix,
4682    // e.g. (FOO - :lower16:BAR) isn't legal.
4683    ARMMCExpr::VariantKind RefKind;
4684    if (parsePrefix(RefKind))
4685      return true;
4686
4687    const MCExpr *SubExprVal;
4688    if (getParser().parseExpression(SubExprVal))
4689      return true;
4690
4691    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4692                                              getContext());
4693    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4694    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4695    return false;
4696  }
4697  }
4698}
4699
4700// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4701//  :lower16: and :upper16:.
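// For example (illustrative), "movw r0, :lower16:sym" and "movt r0, :upper16:sym"
// reach this point; the prefix selects the VK_ARM_LO16 / VK_ARM_HI16 variant kind.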
4702bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4703  RefKind = ARMMCExpr::VK_ARM_None;
4704
4705  // :lower16: and :upper16: modifiers
4706  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4707  Parser.Lex(); // Eat ':'
4708
4709  if (getLexer().isNot(AsmToken::Identifier)) {
4710    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4711    return true;
4712  }
4713
4714  StringRef IDVal = Parser.getTok().getIdentifier();
4715  if (IDVal == "lower16") {
4716    RefKind = ARMMCExpr::VK_ARM_LO16;
4717  } else if (IDVal == "upper16") {
4718    RefKind = ARMMCExpr::VK_ARM_HI16;
4719  } else {
4720    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4721    return true;
4722  }
4723  Parser.Lex();
4724
4725  if (getLexer().isNot(AsmToken::Colon)) {
4726    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4727    return true;
4728  }
4729  Parser.Lex(); // Eat the last ':'
4730  return false;
4731}
4732
4733/// \brief Given a mnemonic, split out possible predication code and carry
4734/// setting letters to form a canonical mnemonic and flags.
4735//
4736// FIXME: Would be nice to autogen this.
4737// FIXME: This is a bit of a maze of special cases.
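// A rough worked example: for "addseq" the trailing condition code "eq" is
// split off first (PredicationCode = ARMCC::EQ), leaving "adds"; the trailing
// 's' then sets CarrySetting, and the canonical mnemonic returned is "add".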
4738StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4739                                      unsigned &PredicationCode,
4740                                      bool &CarrySetting,
4741                                      unsigned &ProcessorIMod,
4742                                      StringRef &ITMask) {
4743  PredicationCode = ARMCC::AL;
4744  CarrySetting = false;
4745  ProcessorIMod = 0;
4746
4747  // Ignore some mnemonics we know aren't predicated forms.
4748  //
4749  // FIXME: Would be nice to autogen this.
4750  if ((Mnemonic == "movs" && isThumb()) ||
4751      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4752      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4753      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4754      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4755      Mnemonic == "vaclt" || Mnemonic == "vacle"  ||
4756      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4757      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4758      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4759      Mnemonic == "fmuls")
4760    return Mnemonic;
4761
4762  // First, split out any predication code. Ignore mnemonics we know aren't
4763  // predicated but do have a carry-set and so weren't caught above.
4764  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4765      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4766      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4767      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4768    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4769      .Case("eq", ARMCC::EQ)
4770      .Case("ne", ARMCC::NE)
4771      .Case("hs", ARMCC::HS)
4772      .Case("cs", ARMCC::HS)
4773      .Case("lo", ARMCC::LO)
4774      .Case("cc", ARMCC::LO)
4775      .Case("mi", ARMCC::MI)
4776      .Case("pl", ARMCC::PL)
4777      .Case("vs", ARMCC::VS)
4778      .Case("vc", ARMCC::VC)
4779      .Case("hi", ARMCC::HI)
4780      .Case("ls", ARMCC::LS)
4781      .Case("ge", ARMCC::GE)
4782      .Case("lt", ARMCC::LT)
4783      .Case("gt", ARMCC::GT)
4784      .Case("le", ARMCC::LE)
4785      .Case("al", ARMCC::AL)
4786      .Default(~0U);
4787    if (CC != ~0U) {
4788      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4789      PredicationCode = CC;
4790    }
4791  }
4792
4793  // Next, determine if we have a carry setting bit. We explicitly ignore all
4794  // the instructions we know end in 's'.
4795  if (Mnemonic.endswith("s") &&
4796      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4797        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4798        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4799        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4800        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4801        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4802        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4803        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4804        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4805        (Mnemonic == "movs" && isThumb()))) {
4806    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4807    CarrySetting = true;
4808  }
4809
4810  // The "cps" instruction can have a interrupt mode operand which is glued into
4811  // the mnemonic. Check if this is the case, split it and parse the imod op
4812  if (Mnemonic.startswith("cps")) {
4813    // Split out any imod code.
4814    unsigned IMod =
4815      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4816      .Case("ie", ARM_PROC::IE)
4817      .Case("id", ARM_PROC::ID)
4818      .Default(~0U);
4819    if (IMod != ~0U) {
4820      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4821      ProcessorIMod = IMod;
4822    }
4823  }
4824
4825  // The "it" instruction has the condition mask on the end of the mnemonic.
4826  if (Mnemonic.startswith("it")) {
4827    ITMask = Mnemonic.slice(2, Mnemonic.size());
4828    Mnemonic = Mnemonic.slice(0, 2);
4829  }
4830
4831  return Mnemonic;
4832}
4833
4834/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4835/// inclusion of carry set or predication code operands.
4836//
4837// FIXME: It would be nice to autogen this.
4838void ARMAsmParser::
4839getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4840                      bool &CanAcceptPredicationCode) {
4841  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4842      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4843      Mnemonic == "add" || Mnemonic == "adc" ||
4844      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4845      Mnemonic == "orr" || Mnemonic == "mvn" ||
4846      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4847      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4848      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4849      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4850                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4851                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4852    CanAcceptCarrySet = true;
4853  } else
4854    CanAcceptCarrySet = false;
4855
4856  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4857      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4858      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4859      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4860      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4861      (Mnemonic == "clrex" && !isThumb()) ||
4862      (Mnemonic == "nop" && isThumbOne()) ||
4863      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4864        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4865        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4866      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4867       !isThumb()) ||
4868      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4869    CanAcceptPredicationCode = false;
4870  } else
4871    CanAcceptPredicationCode = true;
4872
4873  if (isThumb()) {
4874    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4875        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4876      CanAcceptPredicationCode = false;
4877  }
4878}
4879
4880bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4881                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4882  // FIXME: This is all horribly hacky. We really need a better way to deal
4883  // with optional operands like this in the matcher table.
4884
4885  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4886  // another does not. Specifically, the MOVW instruction does not. So we
4887  // special case it here and remove the defaulted (non-setting) cc_out
4888  // operand if that's the instruction we're trying to match.
4889  //
4890  // We do this as post-processing of the explicit operands rather than just
4891  // conditionally adding the cc_out in the first place because we need
4892  // to check the type of the parsed immediate operand.
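  // For illustration (hypothetical input): in ARM mode "mov r0, #0xabcd" can
  // only encode as MOVW, since 0xabcd is not a valid modified immediate but does
  // fit imm0_65535; MOVW has no cc_out, so the defaulted cc_out is removed.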
4893  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4894      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4895      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4896      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4897    return true;
4898
4899  // Register-register 'add' for thumb does not have a cc_out operand
4900  // when there are only two register operands.
4901  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4902      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4903      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4904      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4905    return true;
4906  // Register-register 'add' for thumb does not have a cc_out operand
4907  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4908  // have to check the immediate range here since Thumb2 has a variant
4909  // that can handle a different range and has a cc_out operand.
4910  if (((isThumb() && Mnemonic == "add") ||
4911       (isThumbTwo() && Mnemonic == "sub")) &&
4912      Operands.size() == 6 &&
4913      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4914      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4915      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4916      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4917      ((Mnemonic == "add" && static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4918       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4919    return true;
4920  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4921  // imm0_4095 variant. That's the least-preferred variant when
4922  // selecting via the generic "add" mnemonic, so to know that we
4923  // should remove the cc_out operand, we have to explicitly check that
4924  // it's not one of the other variants. Ugh.
4925  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4926      Operands.size() == 6 &&
4927      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4928      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4929      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4930    // Nest conditions rather than one big 'if' statement for readability.
4931    //
4932    // If either register is a high reg, it's either one of the SP
4933    // variants (handled above) or a 32-bit encoding, so we just
4934    // check against T3. If the second register is the PC, this is an
4935    // alternate form of ADR, which uses encoding T4, so check for that too.
4936    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4937         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4938        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4939        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4940      return false;
4941    // If both registers are low, we're in an IT block, and the immediate is
4942    // in range, we should use encoding T1 instead, which has a cc_out.
4943    if (inITBlock() &&
4944        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4945        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4946        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4947      return false;
4948
4949    // Otherwise, we use encoding T4, which does not have a cc_out
4950    // operand.
4951    return true;
4952  }
4953
4954  // The thumb2 multiply instruction doesn't have a CCOut register, so
4955  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4956  // use the 16-bit encoding or not.
4957  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4958      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4959      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4960      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4961      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4962      // If the registers aren't low regs, the destination reg isn't the
4963      // same as one of the source regs, or the cc_out operand is zero
4964      // outside of an IT block, we have to use the 32-bit encoding, so
4965      // remove the cc_out operand.
4966      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4967       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4968       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4969       !inITBlock() ||
4970       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4971        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4972        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4973        static_cast<ARMOperand*>(Operands[4])->getReg())))
4974    return true;
4975
4976  // Also check the 'mul' syntax variant that doesn't specify an explicit
4977  // destination register.
4978  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4979      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4980      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4981      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4982      // If the registers aren't low regs  or the cc_out operand is zero
4983      // outside of an IT block, we have to use the 32-bit encoding, so
4984      // remove the cc_out operand.
4985      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4986       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4987       !inITBlock()))
4988    return true;
4989
4990
4991
4992  // Register-register 'add/sub' for thumb does not have a cc_out operand
4993  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4994  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4995  // right, this will result in better diagnostics (which operand is off)
4996  // anyway.
4997  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4998      (Operands.size() == 5 || Operands.size() == 6) &&
4999      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5000      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
5001      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
5002      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
5003       (Operands.size() == 6 &&
5004        static_cast<ARMOperand*>(Operands[5])->isImm())))
5005    return true;
5006
5007  return false;
5008}
5009
5010static bool isDataTypeToken(StringRef Tok) {
5011  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5012    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5013    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5014    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5015    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5016    Tok == ".f" || Tok == ".d";
5017}
5018
5019// FIXME: This bit should probably be handled via an explicit match class
5020// in the .td files that matches the suffix instead of having it be
5021// a literal string token the way it is now.
5022static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5023  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5024}
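// The check above lets ParseInstruction() drop ignorable datatype tokens; for
// example, a ".32" suffix on a "vldm" mnemonic is parsed but deliberately never
// emitted as an operand token.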
5025static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features,
5026                                 unsigned VariantID);
5027/// Parse an ARM instruction mnemonic followed by its operands.
5028bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5029                                    SMLoc NameLoc,
5030                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5031  // Apply mnemonic aliases before doing anything else, as the destination
5032  // mnemonic may include suffixes and we want to handle them normally.
5033  // The generic tblgen'erated code does this later, at the start of
5034  // MatchInstructionImpl(), but that's too late for aliases that include
5035  // any sort of suffix.
5036  unsigned AvailableFeatures = getAvailableFeatures();
5037  unsigned AssemblerDialect = getParser().getAssemblerDialect();
5038  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5039
5040  // First check for the ARM-specific .req directive.
5041  if (Parser.getTok().is(AsmToken::Identifier) &&
5042      Parser.getTok().getIdentifier() == ".req") {
5043    parseDirectiveReq(Name, NameLoc);
5044    // We always return 'error' for this, as we're done with this
5045    // statement and don't need to match the instruction.
5046    return true;
5047  }
5048
5049  // Create the leading tokens for the mnemonic, split by '.' characters.
5050  size_t Start = 0, Next = Name.find('.');
5051  StringRef Mnemonic = Name.slice(Start, Next);
5052
5053  // Split out the predication code and carry setting flag from the mnemonic.
5054  unsigned PredicationCode;
5055  unsigned ProcessorIMod;
5056  bool CarrySetting;
5057  StringRef ITMask;
5058  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5059                           ProcessorIMod, ITMask);
5060
5061  // In Thumb1, only the branch (B) instruction can be predicated.
5062  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5063    Parser.eatToEndOfStatement();
5064    return Error(NameLoc, "conditional execution not supported in Thumb1");
5065  }
5066
5067  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5068
5069  // Handle the IT instruction ITMask. Convert it to a bitmask. This
5070  // is the mask as it will be for the IT encoding if the conditional
5071  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5072  // where the conditional bit0 is zero, the instruction post-processing
5073  // will adjust the mask accordingly.
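  // Roughly, tracing the loop below: "itt" produces Mask == 0b1100 and "ite"
  // produces Mask == 0b0100 at this point; later post-processing adjusts the
  // mask when the condition's bit0 is actually 0.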
5074  if (Mnemonic == "it") {
5075    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5076    if (ITMask.size() > 3) {
5077      Parser.eatToEndOfStatement();
5078      return Error(Loc, "too many conditions on IT instruction");
5079    }
5080    unsigned Mask = 8;
5081    for (unsigned i = ITMask.size(); i != 0; --i) {
5082      char pos = ITMask[i - 1];
5083      if (pos != 't' && pos != 'e') {
5084        Parser.eatToEndOfStatement();
5085        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5086      }
5087      Mask >>= 1;
5088      if (ITMask[i - 1] == 't')
5089        Mask |= 8;
5090    }
5091    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5092  }
5093
5094  // FIXME: This is all a pretty gross hack. We should automatically handle
5095  // optional operands like this via tblgen.
5096
5097  // Next, add the CCOut and ConditionCode operands, if needed.
5098  //
5099  // For mnemonics which can ever incorporate a carry setting bit or predication
5100  // code, our matching model involves us always generating CCOut and
5101  // ConditionCode operands to match the mnemonic "as written" and then we let
5102  // the matcher deal with finding the right instruction or generating an
5103  // appropriate error.
5104  bool CanAcceptCarrySet, CanAcceptPredicationCode;
5105  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
5106
5107  // If we had a carry-set on an instruction that can't do that, issue an
5108  // error.
5109  if (!CanAcceptCarrySet && CarrySetting) {
5110    Parser.eatToEndOfStatement();
5111    return Error(NameLoc, "instruction '" + Mnemonic +
5112                 "' can not set flags, but 's' suffix specified");
5113  }
5114  // If we had a predication code on an instruction that can't do that, issue an
5115  // error.
5116  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5117    Parser.eatToEndOfStatement();
5118    return Error(NameLoc, "instruction '" + Mnemonic +
5119                 "' is not predicable, but condition code specified");
5120  }
5121
5122  // Add the carry setting operand, if necessary.
5123  if (CanAcceptCarrySet) {
5124    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5125    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5126                                               Loc));
5127  }
5128
5129  // Add the predication code operand, if necessary.
5130  if (CanAcceptPredicationCode) {
5131    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5132                                      CarrySetting);
5133    Operands.push_back(ARMOperand::CreateCondCode(
5134                         ARMCC::CondCodes(PredicationCode), Loc));
5135  }
5136
5137  // Add the processor imod operand, if necessary.
5138  if (ProcessorIMod) {
5139    Operands.push_back(ARMOperand::CreateImm(
5140          MCConstantExpr::Create(ProcessorIMod, getContext()),
5141                                 NameLoc, NameLoc));
5142  }
5143
5144  // Add the remaining tokens in the mnemonic.
5145  while (Next != StringRef::npos) {
5146    Start = Next;
5147    Next = Name.find('.', Start + 1);
5148    StringRef ExtraToken = Name.slice(Start, Next);
5149
5150    // Some NEON instructions have an optional datatype suffix that is
5151    // completely ignored. Check for that.
5152    if (isDataTypeToken(ExtraToken) &&
5153        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5154      continue;
5155
5156    if (ExtraToken != ".n") {
5157      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5158      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5159    }
5160  }
5161
5162  // Read the remaining operands.
5163  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5164    // Read the first operand.
5165    if (parseOperand(Operands, Mnemonic)) {
5166      Parser.eatToEndOfStatement();
5167      return true;
5168    }
5169
5170    while (getLexer().is(AsmToken::Comma)) {
5171      Parser.Lex();  // Eat the comma.
5172
5173      // Parse and remember the operand.
5174      if (parseOperand(Operands, Mnemonic)) {
5175        Parser.eatToEndOfStatement();
5176        return true;
5177      }
5178    }
5179  }
5180
5181  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5182    SMLoc Loc = getLexer().getLoc();
5183    Parser.eatToEndOfStatement();
5184    return Error(Loc, "unexpected token in argument list");
5185  }
5186
5187  Parser.Lex(); // Consume the EndOfStatement
5188
5189  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5190  // do and don't have a cc_out optional-def operand. With some spot-checks
5191  // of the operand list, we can figure out which variant we're trying to
5192  // parse and adjust accordingly before actually matching. We shouldn't ever
5193  // try to remove a cc_out operand that was explicitly set on the
5194  // mnemonic, of course (CarrySetting == true). Reason number #317 the
5195  // table driven matcher doesn't fit well with the ARM instruction set.
5196  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5197    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5198    Operands.erase(Operands.begin() + 1);
5199    delete Op;
5200  }
5201
5202  // ARM mode 'blx' needs special handling, as the register operand version
5203  // is predicable, but the label operand version is not. So, we can't rely
5204  // on the Mnemonic based checking to correctly figure out when to put
5205  // a k_CondCode operand in the list. If we're trying to match the label
5206  // version, remove the k_CondCode operand here.
5207  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5208      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5209    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5210    Operands.erase(Operands.begin() + 1);
5211    delete Op;
5212  }
5213
5214  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5215  // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
5216  // a single GPRPair reg operand is used in the .td file to replace the two
5217  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5218  // expressed as a GPRPair, so we have to manually merge them.
5219  // FIXME: We would really like to be able to tablegen'erate this.
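  // For example, "ldrexd r0, r1, [r2]" parses as two GPR operands (r0 and r1);
  // the code below checks that the pair is sequential with an even first register
  // and then folds the two into the single covering GPRPair super-register.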
5220  if (!isThumb() && Operands.size() > 4 &&
5221      (Mnemonic == "ldrexd" || Mnemonic == "strexd")) {
5222    bool isLoad = (Mnemonic == "ldrexd");
5223    unsigned Idx = isLoad ? 2 : 3;
5224    ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
5225    ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
5226
5227    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5228    // Adjust only if Op1 and Op2 are GPRs.
5229    if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
5230        MRC.contains(Op2->getReg())) {
5231      unsigned Reg1 = Op1->getReg();
5232      unsigned Reg2 = Op2->getReg();
5233      unsigned Rt = MRI->getEncodingValue(Reg1);
5234      unsigned Rt2 = MRI->getEncodingValue(Reg2);
5235
5236      // Rt2 must be Rt + 1 and Rt must be even.
5237      if (Rt + 1 != Rt2 || (Rt & 1)) {
5238        Error(Op2->getStartLoc(), isLoad ?
5239            "destination operands must be sequential" :
5240            "source operands must be sequential");
5241        return true;
5242      }
5243      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5244          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5245      Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
5246      Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
5247            NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
5248      delete Op1;
5249      delete Op2;
5250    }
5251  }
5252
5253  return false;
5254}
5255
5256// Validate context-sensitive operand constraints.
5257
5258// return 'true' if register list contains non-low GPR registers,
5259// 'false' otherwise. If Reg is in the register list, set 'containsReg'
5260// to true.
5261static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5262                                 unsigned HiReg, bool &containsReg) {
5263  containsReg = false;
5264  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5265    unsigned OpReg = Inst.getOperand(i).getReg();
5266    if (OpReg == Reg)
5267      containsReg = true;
5268    // Anything other than a low register isn't legal here.
5269    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5270      return true;
5271  }
5272  return false;
5273}
5274
5275// Check if the specified register is in the register list of the inst,
5276// starting at the indicated operand number.
5277static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5278  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5279    unsigned OpReg = Inst.getOperand(i).getReg();
5280    if (OpReg == Reg)
5281      return true;
5282  }
5283  return false;
5284}
5285
5286// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5287// the ARMInsts array) instead. Getting that here requires awkward
5288// API changes, though. Better way?
5289namespace llvm {
5290extern const MCInstrDesc ARMInsts[];
5291}
5292static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5293  return ARMInsts[Opcode];
5294}
5295
5296// FIXME: We would really like to be able to tablegen'erate this.
5297bool ARMAsmParser::
5298validateInstruction(MCInst &Inst,
5299                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5300  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5301  SMLoc Loc = Operands[0]->getStartLoc();
5302  // Check the IT block state first.
5303  // NOTE: BKPT instruction has the interesting property of being
5304  // allowed in IT blocks, but not being predicable.  It just always
5305  // executes.
5306  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5307      Inst.getOpcode() != ARM::BKPT) {
5308    unsigned bit = 1;
5309    if (ITState.FirstCond)
5310      ITState.FirstCond = false;
5311    else
5312      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5313    // The instruction must be predicable.
5314    if (!MCID.isPredicable())
5315      return Error(Loc, "instructions in IT block must be predicable");
5316    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5317    unsigned ITCond = bit ? ITState.Cond :
5318      ARMCC::getOppositeCondition(ITState.Cond);
5319    if (Cond != ITCond) {
5320      // Find the condition code Operand to get its SMLoc information.
5321      SMLoc CondLoc;
5322      for (unsigned i = 1; i < Operands.size(); ++i)
5323        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5324          CondLoc = Operands[i]->getStartLoc();
5325      return Error(CondLoc, "incorrect condition in IT block; got '" +
5326                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5327                   "', but expected '" +
5328                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5329    }
5330  // Check for non-'al' condition codes outside of the IT block.
5331  } else if (isThumbTwo() && MCID.isPredicable() &&
5332             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5333             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5334             Inst.getOpcode() != ARM::t2B)
5335    return Error(Loc, "predicated instructions must be in IT block");
5336
5337  switch (Inst.getOpcode()) {
5338  case ARM::LDRD:
5339  case ARM::LDRD_PRE:
5340  case ARM::LDRD_POST: {
5341    // Rt2 must be Rt + 1.
5342    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5343    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5344    if (Rt2 != Rt + 1)
5345      return Error(Operands[3]->getStartLoc(),
5346                   "destination operands must be sequential");
5347    return false;
5348  }
5349  case ARM::STRD: {
5350    // Rt2 must be Rt + 1.
5351    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5352    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5353    if (Rt2 != Rt + 1)
5354      return Error(Operands[3]->getStartLoc(),
5355                   "source operands must be sequential");
5356    return false;
5357  }
5358  case ARM::STRD_PRE:
5359  case ARM::STRD_POST: {
5360    // Rt2 must be Rt + 1.
5361    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5362    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5363    if (Rt2 != Rt + 1)
5364      return Error(Operands[3]->getStartLoc(),
5365                   "source operands must be sequential");
5366    return false;
5367  }
5368  case ARM::SBFX:
5369  case ARM::UBFX: {
5370    // width must be in range [1, 32-lsb]
5371    unsigned lsb = Inst.getOperand(2).getImm();
5372    unsigned widthm1 = Inst.getOperand(3).getImm();
5373    if (widthm1 >= 32 - lsb)
5374      return Error(Operands[5]->getStartLoc(),
5375                   "bitfield width must be in range [1,32-lsb]");
5376    return false;
5377  }
5378  case ARM::tLDMIA: {
5379    // If we're parsing Thumb2, the .w variant is available and handles
5380    // most cases that are normally illegal for a Thumb1 LDM
5381    // instruction. We'll make the transformation in processInstruction()
5382    // if necessary.
5383    //
5384    // Thumb LDM instructions are writeback iff the base register is not
5385    // in the register list.
5386    unsigned Rn = Inst.getOperand(0).getReg();
5387    bool hasWritebackToken =
5388      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5389       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5390    bool listContainsBase;
5391    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5392      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5393                   "registers must be in range r0-r7");
5394    // If we should have writeback, then there should be a '!' token.
5395    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5396      return Error(Operands[2]->getStartLoc(),
5397                   "writeback operator '!' expected");
5398    // If we should not have writeback, there must not be a '!'. This is
5399    // true even for the 32-bit wide encodings.
5400    if (listContainsBase && hasWritebackToken)
5401      return Error(Operands[3]->getStartLoc(),
5402                   "writeback operator '!' not allowed when base register "
5403                   "in register list");
5404
5405    break;
5406  }
5407  case ARM::t2LDMIA_UPD: {
5408    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5409      return Error(Operands[4]->getStartLoc(),
5410                   "writeback operator '!' not allowed when base register "
5411                   "in register list");
5412    break;
5413  }
5414  case ARM::tMUL: {
5415    // The second source operand must be the same register as the destination
5416    // operand.
5417    //
5418    // In this case, we must directly check the parsed operands because the
5419    // cvtThumbMultiply() function is written in such a way that it guarantees
5420    // this first statement is always true for the new Inst.  Essentially, the
5421    // destination is unconditionally copied into the second source operand
5422    // without checking to see if it matches what we actually parsed.
5423    if (Operands.size() == 6 &&
5424        (((ARMOperand*)Operands[3])->getReg() !=
5425         ((ARMOperand*)Operands[5])->getReg()) &&
5426        (((ARMOperand*)Operands[3])->getReg() !=
5427         ((ARMOperand*)Operands[4])->getReg())) {
5428      return Error(Operands[3]->getStartLoc(),
5429                   "destination register must match source register");
5430    }
5431    break;
5432  }
5433  // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5434  // so only issue a diagnostic for thumb1. The instructions will be
5435  // switched to the t2 encodings in processInstruction() if necessary.
5436  case ARM::tPOP: {
5437    bool listContainsBase;
5438    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5439        !isThumbTwo())
5440      return Error(Operands[2]->getStartLoc(),
5441                   "registers must be in range r0-r7 or pc");
5442    break;
5443  }
5444  case ARM::tPUSH: {
5445    bool listContainsBase;
5446    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5447        !isThumbTwo())
5448      return Error(Operands[2]->getStartLoc(),
5449                   "registers must be in range r0-r7 or lr");
5450    break;
5451  }
5452  case ARM::tSTMIA_UPD: {
5453    bool listContainsBase;
5454    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5455      return Error(Operands[4]->getStartLoc(),
5456                   "registers must be in range r0-r7");
5457    break;
5458  }
5459  case ARM::tADDrSP: {
5460    // If the non-SP source operand and the destination operand are not the
5461    // same, we need thumb2 (for the wide encoding), or we have an error.
5462    if (!isThumbTwo() &&
5463        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5464      return Error(Operands[4]->getStartLoc(),
5465                   "source register must be the same as destination");
5466    }
5467    break;
5468  }
5469  }
5470
5471  return false;
5472}
5473
5474static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5475  switch(Opc) {
5476  default: llvm_unreachable("unexpected opcode!");
5477  // VST1LN
5478  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5479  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5480  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5481  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5482  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5483  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5484  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5485  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5486  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5487
5488  // VST2LN
5489  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5490  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5491  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5492  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5493  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5494
5495  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5496  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5497  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5498  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5499  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5500
5501  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5502  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5503  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5504  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5505  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5506
5507  // VST3LN
5508  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5509  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5510  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5511  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5512  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5513  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5514  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5515  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5516  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5517  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5518  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5519  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5520  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5521  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5522  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5523
5524  // VST3
5525  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5526  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5527  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5528  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5529  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5530  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5531  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5532  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5533  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5534  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5535  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5536  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5537  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5538  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5539  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5540  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5541  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5542  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5543
5544  // VST4LN
5545  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5546  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5547  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5548  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5549  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5550  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5551  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5552  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5553  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5554  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5555  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5556  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5557  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5558  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5559  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5560
5561  // VST4
5562  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5563  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5564  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5565  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5566  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5567  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5568  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5569  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5570  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5571  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5572  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5573  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5574  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5575  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5576  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5577  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5578  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5579  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5580  }
5581}
5582
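// getRealVLDOpcode - Map one of the VLD*Asm_* pseudo opcodes (parse-only
// pseudos that let the asm matcher accept the whole family of NEON load
// syntaxes) onto the real instruction opcode, and report the spacing of the
// register list: 1 for consecutive D registers, 2 for the every-other-D form.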
5583static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5584  switch(Opc) {
5585  default: llvm_unreachable("unexpected opcode!");
5586  // VLD1LN
5587  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5588  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5589  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5590  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5591  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5592  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5593  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5594  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5595  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5596
5597  // VLD2LN
5598  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5599  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5600  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5601  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5602  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5603  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5604  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5605  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5606  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5607  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5608  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5609  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5610  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5611  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5612  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5613
5614  // VLD3DUP
5615  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5616  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5617  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5618  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5619  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5620  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5621  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5622  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5623  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5624  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5625  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5626  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5627  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5628  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5629  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5630  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5631  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5632  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5633
5634  // VLD3LN
5635  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5636  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5637  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5638  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5639  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5640  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5641  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5642  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5643  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5644  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5645  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5646  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5647  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5648  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5649  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5650
5651  // VLD3
5652  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5653  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5654  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5655  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5656  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5657  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5658  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5659  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5660  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5661  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5662  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5663  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5664  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5665  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5666  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5667  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5668  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5669  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5670
5671  // VLD4LN
5672  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5673  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5674  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5675  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5676  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5677  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5678  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5679  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5680  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5681  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5682  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5683  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5684  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5685  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5686  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5687
5688  // VLD4DUP
5689  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5690  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5691  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5692  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5693  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5694  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5695  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5696  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5697  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5698  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5699  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5700  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5701  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5702  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5703  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5704  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5705  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5706  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5707
5708  // VLD4
5709  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5710  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5711  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5712  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5713  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5714  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5715  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5716  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5717  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5718  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5719  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5720  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5721  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5722  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5723  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5724  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5725  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5726  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5727  }
5728}
5729
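// processInstruction - Perform target-specific post-match fix-ups: expand the
// parse-only pseudo instructions and instruction aliases matched above into
// real MCInsts. Returns true if Inst was rewritten, false if it was left
// untouched.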
5730bool ARMAsmParser::
5731processInstruction(MCInst &Inst,
5732                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5733  switch (Inst.getOpcode()) {
5734  // Alias for alternate form of 'ADR Rd, #imm' instruction.
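  // E.g. 'add r0, pc, #8' (without an 's' suffix) is rewritten as 'adr r0, #8'.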
5735  case ARM::ADDri: {
5736    if (Inst.getOperand(1).getReg() != ARM::PC ||
5737        Inst.getOperand(5).getReg() != 0)
5738      return false;
5739    MCInst TmpInst;
5740    TmpInst.setOpcode(ARM::ADR);
5741    TmpInst.addOperand(Inst.getOperand(0));
5742    TmpInst.addOperand(Inst.getOperand(2));
5743    TmpInst.addOperand(Inst.getOperand(3));
5744    TmpInst.addOperand(Inst.getOperand(4));
5745    Inst = TmpInst;
5746    return true;
5747  }
5748  // Aliases for alternate PC+imm syntax of LDR instructions.
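  // E.g. 'ldr r0, [pc, #8]' written with an explicit PC-relative offset rather
  // than a literal-pool label.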
5749  case ARM::t2LDRpcrel:
5750    // Select the narrow version if the immediate will fit.
5751    if (Inst.getOperand(1).getImm() > 0 &&
5752        Inst.getOperand(1).getImm() <= 0xff)
5753      Inst.setOpcode(ARM::tLDRpci);
5754    else
5755      Inst.setOpcode(ARM::t2LDRpci);
5756    return true;
5757  case ARM::t2LDRBpcrel:
5758    Inst.setOpcode(ARM::t2LDRBpci);
5759    return true;
5760  case ARM::t2LDRHpcrel:
5761    Inst.setOpcode(ARM::t2LDRHpci);
5762    return true;
5763  case ARM::t2LDRSBpcrel:
5764    Inst.setOpcode(ARM::t2LDRSBpci);
5765    return true;
5766  case ARM::t2LDRSHpcrel:
5767    Inst.setOpcode(ARM::t2LDRSHpci);
5768    return true;
5769  // Handle NEON VST complex aliases.
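  // The parser builds these pseudos with operands in source order (Vd, lane,
  // Rn, alignment[, Rm], pred); the real lane-store instructions expect the
  // addressing operands first, so each case rebuilds the MCInst in that order.
  // E.g. 'vst1.32 {d2[1]}, [r3], r4' for the register-writeback form.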
5770  case ARM::VST1LNdWB_register_Asm_8:
5771  case ARM::VST1LNdWB_register_Asm_16:
5772  case ARM::VST1LNdWB_register_Asm_32: {
5773    MCInst TmpInst;
5774    // Shuffle the operands around so the lane index operand is in the
5775    // right place.
5776    unsigned Spacing;
5777    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5778    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5779    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5780    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5781    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5782    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5783    TmpInst.addOperand(Inst.getOperand(1)); // lane
5784    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5785    TmpInst.addOperand(Inst.getOperand(6));
5786    Inst = TmpInst;
5787    return true;
5788  }
5789
5790  case ARM::VST2LNdWB_register_Asm_8:
5791  case ARM::VST2LNdWB_register_Asm_16:
5792  case ARM::VST2LNdWB_register_Asm_32:
5793  case ARM::VST2LNqWB_register_Asm_16:
5794  case ARM::VST2LNqWB_register_Asm_32: {
5795    MCInst TmpInst;
5796    // Shuffle the operands around so the lane index operand is in the
5797    // right place.
5798    unsigned Spacing;
5799    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5800    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5801    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5802    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5803    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5804    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5805    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5806                                            Spacing));
5807    TmpInst.addOperand(Inst.getOperand(1)); // lane
5808    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5809    TmpInst.addOperand(Inst.getOperand(6));
5810    Inst = TmpInst;
5811    return true;
5812  }
5813
5814  case ARM::VST3LNdWB_register_Asm_8:
5815  case ARM::VST3LNdWB_register_Asm_16:
5816  case ARM::VST3LNdWB_register_Asm_32:
5817  case ARM::VST3LNqWB_register_Asm_16:
5818  case ARM::VST3LNqWB_register_Asm_32: {
5819    MCInst TmpInst;
5820    // Shuffle the operands around so the lane index operand is in the
5821    // right place.
5822    unsigned Spacing;
5823    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5824    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5825    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5826    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5827    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5828    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5829    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5830                                            Spacing));
5831    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5832                                            Spacing * 2));
5833    TmpInst.addOperand(Inst.getOperand(1)); // lane
5834    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5835    TmpInst.addOperand(Inst.getOperand(6));
5836    Inst = TmpInst;
5837    return true;
5838  }
5839
5840  case ARM::VST4LNdWB_register_Asm_8:
5841  case ARM::VST4LNdWB_register_Asm_16:
5842  case ARM::VST4LNdWB_register_Asm_32:
5843  case ARM::VST4LNqWB_register_Asm_16:
5844  case ARM::VST4LNqWB_register_Asm_32: {
5845    MCInst TmpInst;
5846    // Shuffle the operands around so the lane index operand is in the
5847    // right place.
5848    unsigned Spacing;
5849    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5850    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5851    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5852    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5853    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5854    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5855    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5856                                            Spacing));
5857    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5858                                            Spacing * 2));
5859    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5860                                            Spacing * 3));
5861    TmpInst.addOperand(Inst.getOperand(1)); // lane
5862    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5863    TmpInst.addOperand(Inst.getOperand(6));
5864    Inst = TmpInst;
5865    return true;
5866  }
5867
5868  case ARM::VST1LNdWB_fixed_Asm_8:
5869  case ARM::VST1LNdWB_fixed_Asm_16:
5870  case ARM::VST1LNdWB_fixed_Asm_32: {
5871    MCInst TmpInst;
5872    // Shuffle the operands around so the lane index operand is in the
5873    // right place.
5874    unsigned Spacing;
5875    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5876    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5877    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5878    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5879    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5880    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5881    TmpInst.addOperand(Inst.getOperand(1)); // lane
5882    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5883    TmpInst.addOperand(Inst.getOperand(5));
5884    Inst = TmpInst;
5885    return true;
5886  }
5887
5888  case ARM::VST2LNdWB_fixed_Asm_8:
5889  case ARM::VST2LNdWB_fixed_Asm_16:
5890  case ARM::VST2LNdWB_fixed_Asm_32:
5891  case ARM::VST2LNqWB_fixed_Asm_16:
5892  case ARM::VST2LNqWB_fixed_Asm_32: {
5893    MCInst TmpInst;
5894    // Shuffle the operands around so the lane index operand is in the
5895    // right place.
5896    unsigned Spacing;
5897    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5898    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5899    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5900    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5901    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5902    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5903    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5904                                            Spacing));
5905    TmpInst.addOperand(Inst.getOperand(1)); // lane
5906    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5907    TmpInst.addOperand(Inst.getOperand(5));
5908    Inst = TmpInst;
5909    return true;
5910  }
5911
5912  case ARM::VST3LNdWB_fixed_Asm_8:
5913  case ARM::VST3LNdWB_fixed_Asm_16:
5914  case ARM::VST3LNdWB_fixed_Asm_32:
5915  case ARM::VST3LNqWB_fixed_Asm_16:
5916  case ARM::VST3LNqWB_fixed_Asm_32: {
5917    MCInst TmpInst;
5918    // Shuffle the operands around so the lane index operand is in the
5919    // right place.
5920    unsigned Spacing;
5921    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5922    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5923    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5924    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5925    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5926    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5927    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5928                                            Spacing));
5929    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5930                                            Spacing * 2));
5931    TmpInst.addOperand(Inst.getOperand(1)); // lane
5932    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5933    TmpInst.addOperand(Inst.getOperand(5));
5934    Inst = TmpInst;
5935    return true;
5936  }
5937
5938  case ARM::VST4LNdWB_fixed_Asm_8:
5939  case ARM::VST4LNdWB_fixed_Asm_16:
5940  case ARM::VST4LNdWB_fixed_Asm_32:
5941  case ARM::VST4LNqWB_fixed_Asm_16:
5942  case ARM::VST4LNqWB_fixed_Asm_32: {
5943    MCInst TmpInst;
5944    // Shuffle the operands around so the lane index operand is in the
5945    // right place.
5946    unsigned Spacing;
5947    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5948    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5949    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5950    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5951    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5952    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5953    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5954                                            Spacing));
5955    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5956                                            Spacing * 2));
5957    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5958                                            Spacing * 3));
5959    TmpInst.addOperand(Inst.getOperand(1)); // lane
5960    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5961    TmpInst.addOperand(Inst.getOperand(5));
5962    Inst = TmpInst;
5963    return true;
5964  }
5965
5966  case ARM::VST1LNdAsm_8:
5967  case ARM::VST1LNdAsm_16:
5968  case ARM::VST1LNdAsm_32: {
5969    MCInst TmpInst;
5970    // Shuffle the operands around so the lane index operand is in the
5971    // right place.
5972    unsigned Spacing;
5973    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5974    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5975    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5976    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5977    TmpInst.addOperand(Inst.getOperand(1)); // lane
5978    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5979    TmpInst.addOperand(Inst.getOperand(5));
5980    Inst = TmpInst;
5981    return true;
5982  }
5983
5984  case ARM::VST2LNdAsm_8:
5985  case ARM::VST2LNdAsm_16:
5986  case ARM::VST2LNdAsm_32:
5987  case ARM::VST2LNqAsm_16:
5988  case ARM::VST2LNqAsm_32: {
5989    MCInst TmpInst;
5990    // Shuffle the operands around so the lane index operand is in the
5991    // right place.
5992    unsigned Spacing;
5993    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5994    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5995    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5996    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5997    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5998                                            Spacing));
5999    TmpInst.addOperand(Inst.getOperand(1)); // lane
6000    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6001    TmpInst.addOperand(Inst.getOperand(5));
6002    Inst = TmpInst;
6003    return true;
6004  }
6005
6006  case ARM::VST3LNdAsm_8:
6007  case ARM::VST3LNdAsm_16:
6008  case ARM::VST3LNdAsm_32:
6009  case ARM::VST3LNqAsm_16:
6010  case ARM::VST3LNqAsm_32: {
6011    MCInst TmpInst;
6012    // Shuffle the operands around so the lane index operand is in the
6013    // right place.
6014    unsigned Spacing;
6015    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6016    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6017    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6018    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6019    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6020                                            Spacing));
6021    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6022                                            Spacing * 2));
6023    TmpInst.addOperand(Inst.getOperand(1)); // lane
6024    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6025    TmpInst.addOperand(Inst.getOperand(5));
6026    Inst = TmpInst;
6027    return true;
6028  }
6029
6030  case ARM::VST4LNdAsm_8:
6031  case ARM::VST4LNdAsm_16:
6032  case ARM::VST4LNdAsm_32:
6033  case ARM::VST4LNqAsm_16:
6034  case ARM::VST4LNqAsm_32: {
6035    MCInst TmpInst;
6036    // Shuffle the operands around so the lane index operand is in the
6037    // right place.
6038    unsigned Spacing;
6039    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6040    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6041    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6042    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6043    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6044                                            Spacing));
6045    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6046                                            Spacing * 2));
6047    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6048                                            Spacing * 3));
6049    TmpInst.addOperand(Inst.getOperand(1)); // lane
6050    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6051    TmpInst.addOperand(Inst.getOperand(5));
6052    Inst = TmpInst;
6053    return true;
6054  }
6055
6056  // Handle NEON VLD complex aliases.
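  // As with the VST aliases above, except that the loaded registers are defs
  // and the register list is repeated as the tied source operands.
  // E.g. 'vld2.16 {d0[1], d1[1]}, [r2]!' for the fixed-writeback form.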
6057  case ARM::VLD1LNdWB_register_Asm_8:
6058  case ARM::VLD1LNdWB_register_Asm_16:
6059  case ARM::VLD1LNdWB_register_Asm_32: {
6060    MCInst TmpInst;
6061    // Shuffle the operands around so the lane index operand is in the
6062    // right place.
6063    unsigned Spacing;
6064    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6065    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6066    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6067    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6068    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6069    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6070    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6071    TmpInst.addOperand(Inst.getOperand(1)); // lane
6072    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6073    TmpInst.addOperand(Inst.getOperand(6));
6074    Inst = TmpInst;
6075    return true;
6076  }
6077
6078  case ARM::VLD2LNdWB_register_Asm_8:
6079  case ARM::VLD2LNdWB_register_Asm_16:
6080  case ARM::VLD2LNdWB_register_Asm_32:
6081  case ARM::VLD2LNqWB_register_Asm_16:
6082  case ARM::VLD2LNqWB_register_Asm_32: {
6083    MCInst TmpInst;
6084    // Shuffle the operands around so the lane index operand is in the
6085    // right place.
6086    unsigned Spacing;
6087    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6088    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6089    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6090                                            Spacing));
6091    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6092    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6093    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6094    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6095    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6096    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6097                                            Spacing));
6098    TmpInst.addOperand(Inst.getOperand(1)); // lane
6099    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6100    TmpInst.addOperand(Inst.getOperand(6));
6101    Inst = TmpInst;
6102    return true;
6103  }
6104
6105  case ARM::VLD3LNdWB_register_Asm_8:
6106  case ARM::VLD3LNdWB_register_Asm_16:
6107  case ARM::VLD3LNdWB_register_Asm_32:
6108  case ARM::VLD3LNqWB_register_Asm_16:
6109  case ARM::VLD3LNqWB_register_Asm_32: {
6110    MCInst TmpInst;
6111    // Shuffle the operands around so the lane index operand is in the
6112    // right place.
6113    unsigned Spacing;
6114    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6115    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6116    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6117                                            Spacing));
6118    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6119                                            Spacing * 2));
6120    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6121    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6122    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6123    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6124    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6125    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6126                                            Spacing));
6127    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6128                                            Spacing * 2));
6129    TmpInst.addOperand(Inst.getOperand(1)); // lane
6130    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6131    TmpInst.addOperand(Inst.getOperand(6));
6132    Inst = TmpInst;
6133    return true;
6134  }
6135
6136  case ARM::VLD4LNdWB_register_Asm_8:
6137  case ARM::VLD4LNdWB_register_Asm_16:
6138  case ARM::VLD4LNdWB_register_Asm_32:
6139  case ARM::VLD4LNqWB_register_Asm_16:
6140  case ARM::VLD4LNqWB_register_Asm_32: {
6141    MCInst TmpInst;
6142    // Shuffle the operands around so the lane index operand is in the
6143    // right place.
6144    unsigned Spacing;
6145    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6146    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6147    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6148                                            Spacing));
6149    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6150                                            Spacing * 2));
6151    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6152                                            Spacing * 3));
6153    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6154    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6155    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6156    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6157    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6158    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6159                                            Spacing));
6160    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6161                                            Spacing * 2));
6162    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6163                                            Spacing * 3));
6164    TmpInst.addOperand(Inst.getOperand(1)); // lane
6165    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6166    TmpInst.addOperand(Inst.getOperand(6));
6167    Inst = TmpInst;
6168    return true;
6169  }
6170
6171  case ARM::VLD1LNdWB_fixed_Asm_8:
6172  case ARM::VLD1LNdWB_fixed_Asm_16:
6173  case ARM::VLD1LNdWB_fixed_Asm_32: {
6174    MCInst TmpInst;
6175    // Shuffle the operands around so the lane index operand is in the
6176    // right place.
6177    unsigned Spacing;
6178    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6179    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6180    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6181    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6182    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6183    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6184    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6185    TmpInst.addOperand(Inst.getOperand(1)); // lane
6186    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6187    TmpInst.addOperand(Inst.getOperand(5));
6188    Inst = TmpInst;
6189    return true;
6190  }
6191
6192  case ARM::VLD2LNdWB_fixed_Asm_8:
6193  case ARM::VLD2LNdWB_fixed_Asm_16:
6194  case ARM::VLD2LNdWB_fixed_Asm_32:
6195  case ARM::VLD2LNqWB_fixed_Asm_16:
6196  case ARM::VLD2LNqWB_fixed_Asm_32: {
6197    MCInst TmpInst;
6198    // Shuffle the operands around so the lane index operand is in the
6199    // right place.
6200    unsigned Spacing;
6201    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6202    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6203    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6204                                            Spacing));
6205    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6206    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6207    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6208    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6209    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6210    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6211                                            Spacing));
6212    TmpInst.addOperand(Inst.getOperand(1)); // lane
6213    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6214    TmpInst.addOperand(Inst.getOperand(5));
6215    Inst = TmpInst;
6216    return true;
6217  }
6218
6219  case ARM::VLD3LNdWB_fixed_Asm_8:
6220  case ARM::VLD3LNdWB_fixed_Asm_16:
6221  case ARM::VLD3LNdWB_fixed_Asm_32:
6222  case ARM::VLD3LNqWB_fixed_Asm_16:
6223  case ARM::VLD3LNqWB_fixed_Asm_32: {
6224    MCInst TmpInst;
6225    // Shuffle the operands around so the lane index operand is in the
6226    // right place.
6227    unsigned Spacing;
6228    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6229    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6230    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6231                                            Spacing));
6232    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6233                                            Spacing * 2));
6234    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6235    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6236    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6237    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6238    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6239    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6240                                            Spacing));
6241    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6242                                            Spacing * 2));
6243    TmpInst.addOperand(Inst.getOperand(1)); // lane
6244    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6245    TmpInst.addOperand(Inst.getOperand(5));
6246    Inst = TmpInst;
6247    return true;
6248  }
6249
6250  case ARM::VLD4LNdWB_fixed_Asm_8:
6251  case ARM::VLD4LNdWB_fixed_Asm_16:
6252  case ARM::VLD4LNdWB_fixed_Asm_32:
6253  case ARM::VLD4LNqWB_fixed_Asm_16:
6254  case ARM::VLD4LNqWB_fixed_Asm_32: {
6255    MCInst TmpInst;
6256    // Shuffle the operands around so the lane index operand is in the
6257    // right place.
6258    unsigned Spacing;
6259    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6260    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6261    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6262                                            Spacing));
6263    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6264                                            Spacing * 2));
6265    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6266                                            Spacing * 3));
6267    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6268    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6269    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6270    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6271    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6272    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6273                                            Spacing));
6274    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6275                                            Spacing * 2));
6276    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6277                                            Spacing * 3));
6278    TmpInst.addOperand(Inst.getOperand(1)); // lane
6279    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6280    TmpInst.addOperand(Inst.getOperand(5));
6281    Inst = TmpInst;
6282    return true;
6283  }
6284
6285  case ARM::VLD1LNdAsm_8:
6286  case ARM::VLD1LNdAsm_16:
6287  case ARM::VLD1LNdAsm_32: {
6288    MCInst TmpInst;
6289    // Shuffle the operands around so the lane index operand is in the
6290    // right place.
6291    unsigned Spacing;
6292    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6293    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6294    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6295    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6296    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6297    TmpInst.addOperand(Inst.getOperand(1)); // lane
6298    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6299    TmpInst.addOperand(Inst.getOperand(5));
6300    Inst = TmpInst;
6301    return true;
6302  }
6303
6304  case ARM::VLD2LNdAsm_8:
6305  case ARM::VLD2LNdAsm_16:
6306  case ARM::VLD2LNdAsm_32:
6307  case ARM::VLD2LNqAsm_16:
6308  case ARM::VLD2LNqAsm_32: {
6309    MCInst TmpInst;
6310    // Shuffle the operands around so the lane index operand is in the
6311    // right place.
6312    unsigned Spacing;
6313    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6314    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6315    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6316                                            Spacing));
6317    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6318    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6319    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6320    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6321                                            Spacing));
6322    TmpInst.addOperand(Inst.getOperand(1)); // lane
6323    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6324    TmpInst.addOperand(Inst.getOperand(5));
6325    Inst = TmpInst;
6326    return true;
6327  }
6328
6329  case ARM::VLD3LNdAsm_8:
6330  case ARM::VLD3LNdAsm_16:
6331  case ARM::VLD3LNdAsm_32:
6332  case ARM::VLD3LNqAsm_16:
6333  case ARM::VLD3LNqAsm_32: {
6334    MCInst TmpInst;
6335    // Shuffle the operands around so the lane index operand is in the
6336    // right place.
6337    unsigned Spacing;
6338    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6339    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6340    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6341                                            Spacing));
6342    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6343                                            Spacing * 2));
6344    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6345    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6346    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6347    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6348                                            Spacing));
6349    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6350                                            Spacing * 2));
6351    TmpInst.addOperand(Inst.getOperand(1)); // lane
6352    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6353    TmpInst.addOperand(Inst.getOperand(5));
6354    Inst = TmpInst;
6355    return true;
6356  }
6357
6358  case ARM::VLD4LNdAsm_8:
6359  case ARM::VLD4LNdAsm_16:
6360  case ARM::VLD4LNdAsm_32:
6361  case ARM::VLD4LNqAsm_16:
6362  case ARM::VLD4LNqAsm_32: {
6363    MCInst TmpInst;
6364    // Shuffle the operands around so the lane index operand is in the
6365    // right place.
6366    unsigned Spacing;
6367    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6368    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6369    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6370                                            Spacing));
6371    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6372                                            Spacing * 2));
6373    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6374                                            Spacing * 3));
6375    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6376    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6377    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6378    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6379                                            Spacing));
6380    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6381                                            Spacing * 2));
6382    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6383                                            Spacing * 3));
6384    TmpInst.addOperand(Inst.getOperand(1)); // lane
6385    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6386    TmpInst.addOperand(Inst.getOperand(5));
6387    Inst = TmpInst;
6388    return true;
6389  }
6390
6391  // VLD3DUP single 3-element structure to all lanes instructions.
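  // E.g. 'vld3.8 {d0[], d1[], d2[]}, [r1]' (or {d0[], d2[], d4[]} for the
  // double-spaced register list).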
6392  case ARM::VLD3DUPdAsm_8:
6393  case ARM::VLD3DUPdAsm_16:
6394  case ARM::VLD3DUPdAsm_32:
6395  case ARM::VLD3DUPqAsm_8:
6396  case ARM::VLD3DUPqAsm_16:
6397  case ARM::VLD3DUPqAsm_32: {
6398    MCInst TmpInst;
6399    unsigned Spacing;
6400    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6401    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6402    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6403                                            Spacing));
6404    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6405                                            Spacing * 2));
6406    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6407    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6408    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6409    TmpInst.addOperand(Inst.getOperand(4));
6410    Inst = TmpInst;
6411    return true;
6412  }
6413
6414  case ARM::VLD3DUPdWB_fixed_Asm_8:
6415  case ARM::VLD3DUPdWB_fixed_Asm_16:
6416  case ARM::VLD3DUPdWB_fixed_Asm_32:
6417  case ARM::VLD3DUPqWB_fixed_Asm_8:
6418  case ARM::VLD3DUPqWB_fixed_Asm_16:
6419  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6420    MCInst TmpInst;
6421    unsigned Spacing;
6422    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6423    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6424    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6425                                            Spacing));
6426    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6427                                            Spacing * 2));
6428    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6429    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6430    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6431    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6432    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6433    TmpInst.addOperand(Inst.getOperand(4));
6434    Inst = TmpInst;
6435    return true;
6436  }
6437
6438  case ARM::VLD3DUPdWB_register_Asm_8:
6439  case ARM::VLD3DUPdWB_register_Asm_16:
6440  case ARM::VLD3DUPdWB_register_Asm_32:
6441  case ARM::VLD3DUPqWB_register_Asm_8:
6442  case ARM::VLD3DUPqWB_register_Asm_16:
6443  case ARM::VLD3DUPqWB_register_Asm_32: {
6444    MCInst TmpInst;
6445    unsigned Spacing;
6446    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6447    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6448    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6449                                            Spacing));
6450    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6451                                            Spacing * 2));
6452    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6453    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6454    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6455    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6456    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6457    TmpInst.addOperand(Inst.getOperand(5));
6458    Inst = TmpInst;
6459    return true;
6460  }
6461
6462  // VLD3 multiple 3-element structure instructions.
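  // E.g. 'vld3.8 {d0, d1, d2}, [r1]' (or the double-spaced {d0, d2, d4} form).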
6463  case ARM::VLD3dAsm_8:
6464  case ARM::VLD3dAsm_16:
6465  case ARM::VLD3dAsm_32:
6466  case ARM::VLD3qAsm_8:
6467  case ARM::VLD3qAsm_16:
6468  case ARM::VLD3qAsm_32: {
6469    MCInst TmpInst;
6470    unsigned Spacing;
6471    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6472    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6473    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6474                                            Spacing));
6475    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6476                                            Spacing * 2));
6477    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6478    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6479    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6480    TmpInst.addOperand(Inst.getOperand(4));
6481    Inst = TmpInst;
6482    return true;
6483  }
6484
6485  case ARM::VLD3dWB_fixed_Asm_8:
6486  case ARM::VLD3dWB_fixed_Asm_16:
6487  case ARM::VLD3dWB_fixed_Asm_32:
6488  case ARM::VLD3qWB_fixed_Asm_8:
6489  case ARM::VLD3qWB_fixed_Asm_16:
6490  case ARM::VLD3qWB_fixed_Asm_32: {
6491    MCInst TmpInst;
6492    unsigned Spacing;
6493    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6494    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6495    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6496                                            Spacing));
6497    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6498                                            Spacing * 2));
6499    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6500    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6501    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6502    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6503    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6504    TmpInst.addOperand(Inst.getOperand(4));
6505    Inst = TmpInst;
6506    return true;
6507  }
6508
6509  case ARM::VLD3dWB_register_Asm_8:
6510  case ARM::VLD3dWB_register_Asm_16:
6511  case ARM::VLD3dWB_register_Asm_32:
6512  case ARM::VLD3qWB_register_Asm_8:
6513  case ARM::VLD3qWB_register_Asm_16:
6514  case ARM::VLD3qWB_register_Asm_32: {
6515    MCInst TmpInst;
6516    unsigned Spacing;
6517    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6518    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6519    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6520                                            Spacing));
6521    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6522                                            Spacing * 2));
6523    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6524    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6525    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6526    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6527    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6528    TmpInst.addOperand(Inst.getOperand(5));
6529    Inst = TmpInst;
6530    return true;
6531  }
6532
6533  // VLD4DUP single 4-element structure to all lanes instructions.
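  // E.g. 'vld4.8 {d0[], d1[], d2[], d3[]}, [r1]' (or {d0[], d2[], d4[], d6[]}).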
6534  case ARM::VLD4DUPdAsm_8:
6535  case ARM::VLD4DUPdAsm_16:
6536  case ARM::VLD4DUPdAsm_32:
6537  case ARM::VLD4DUPqAsm_8:
6538  case ARM::VLD4DUPqAsm_16:
6539  case ARM::VLD4DUPqAsm_32: {
6540    MCInst TmpInst;
6541    unsigned Spacing;
6542    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6543    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6544    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6545                                            Spacing));
6546    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6547                                            Spacing * 2));
6548    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6549                                            Spacing * 3));
6550    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6551    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6552    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6553    TmpInst.addOperand(Inst.getOperand(4));
6554    Inst = TmpInst;
6555    return true;
6556  }
6557
6558  case ARM::VLD4DUPdWB_fixed_Asm_8:
6559  case ARM::VLD4DUPdWB_fixed_Asm_16:
6560  case ARM::VLD4DUPdWB_fixed_Asm_32:
6561  case ARM::VLD4DUPqWB_fixed_Asm_8:
6562  case ARM::VLD4DUPqWB_fixed_Asm_16:
6563  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6564    MCInst TmpInst;
6565    unsigned Spacing;
6566    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6567    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6568    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6569                                            Spacing));
6570    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6571                                            Spacing * 2));
6572    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6573                                            Spacing * 3));
6574    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6575    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6576    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6577    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6578    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6579    TmpInst.addOperand(Inst.getOperand(4));
6580    Inst = TmpInst;
6581    return true;
6582  }
6583
6584  case ARM::VLD4DUPdWB_register_Asm_8:
6585  case ARM::VLD4DUPdWB_register_Asm_16:
6586  case ARM::VLD4DUPdWB_register_Asm_32:
6587  case ARM::VLD4DUPqWB_register_Asm_8:
6588  case ARM::VLD4DUPqWB_register_Asm_16:
6589  case ARM::VLD4DUPqWB_register_Asm_32: {
6590    MCInst TmpInst;
6591    unsigned Spacing;
6592    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6593    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6594    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6595                                            Spacing));
6596    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6597                                            Spacing * 2));
6598    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6599                                            Spacing * 3));
6600    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6601    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6602    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6603    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6604    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6605    TmpInst.addOperand(Inst.getOperand(5));
6606    Inst = TmpInst;
6607    return true;
6608  }
6609
6610  // VLD4 multiple 4-element structure instructions.
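  // E.g. 'vld4.8 {d0, d1, d2, d3}, [r1]' (or the double-spaced form).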
6611  case ARM::VLD4dAsm_8:
6612  case ARM::VLD4dAsm_16:
6613  case ARM::VLD4dAsm_32:
6614  case ARM::VLD4qAsm_8:
6615  case ARM::VLD4qAsm_16:
6616  case ARM::VLD4qAsm_32: {
6617    MCInst TmpInst;
6618    unsigned Spacing;
6619    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6620    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6621    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6622                                            Spacing));
6623    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6624                                            Spacing * 2));
6625    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6626                                            Spacing * 3));
6627    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6628    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6629    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6630    TmpInst.addOperand(Inst.getOperand(4));
6631    Inst = TmpInst;
6632    return true;
6633  }
6634
6635  case ARM::VLD4dWB_fixed_Asm_8:
6636  case ARM::VLD4dWB_fixed_Asm_16:
6637  case ARM::VLD4dWB_fixed_Asm_32:
6638  case ARM::VLD4qWB_fixed_Asm_8:
6639  case ARM::VLD4qWB_fixed_Asm_16:
6640  case ARM::VLD4qWB_fixed_Asm_32: {
6641    MCInst TmpInst;
6642    unsigned Spacing;
6643    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6644    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6645    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6646                                            Spacing));
6647    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6648                                            Spacing * 2));
6649    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6650                                            Spacing * 3));
6651    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6652    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6653    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6654    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6655    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6656    TmpInst.addOperand(Inst.getOperand(4));
6657    Inst = TmpInst;
6658    return true;
6659  }
6660
6661  case ARM::VLD4dWB_register_Asm_8:
6662  case ARM::VLD4dWB_register_Asm_16:
6663  case ARM::VLD4dWB_register_Asm_32:
6664  case ARM::VLD4qWB_register_Asm_8:
6665  case ARM::VLD4qWB_register_Asm_16:
6666  case ARM::VLD4qWB_register_Asm_32: {
6667    MCInst TmpInst;
6668    unsigned Spacing;
6669    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6670    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6671    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6672                                            Spacing));
6673    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6674                                            Spacing * 2));
6675    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6676                                            Spacing * 3));
6677    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6678    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6679    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6680    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6681    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6682    TmpInst.addOperand(Inst.getOperand(5));
6683    Inst = TmpInst;
6684    return true;
6685  }
6686
6687  // VST3 multiple 3-element structure instructions.
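  // E.g. 'vst3.8 {d0, d1, d2}, [r1]' (or the double-spaced {d0, d2, d4} form).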
6688  case ARM::VST3dAsm_8:
6689  case ARM::VST3dAsm_16:
6690  case ARM::VST3dAsm_32:
6691  case ARM::VST3qAsm_8:
6692  case ARM::VST3qAsm_16:
6693  case ARM::VST3qAsm_32: {
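    // For stores the real instruction takes the address operands (Rn and
    // alignment) before the source register list, the reverse of the VLD
    // expansions above. E.g. "vst3.8 {d0, d1, d2}, [r1]".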
6694    MCInst TmpInst;
6695    unsigned Spacing;
6696    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6697    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6698    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6699    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6700    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6701                                            Spacing));
6702    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6703                                            Spacing * 2));
6704    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6705    TmpInst.addOperand(Inst.getOperand(4));
6706    Inst = TmpInst;
6707    return true;
6708  }
6709
6710  case ARM::VST3dWB_fixed_Asm_8:
6711  case ARM::VST3dWB_fixed_Asm_16:
6712  case ARM::VST3dWB_fixed_Asm_32:
6713  case ARM::VST3qWB_fixed_Asm_8:
6714  case ARM::VST3qWB_fixed_Asm_16:
6715  case ARM::VST3qWB_fixed_Asm_32: {
6716    MCInst TmpInst;
6717    unsigned Spacing;
6718    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6719    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6720    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6721    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6722    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6723    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6724    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6725                                            Spacing));
6726    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6727                                            Spacing * 2));
6728    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6729    TmpInst.addOperand(Inst.getOperand(4));
6730    Inst = TmpInst;
6731    return true;
6732  }
6733
6734  case ARM::VST3dWB_register_Asm_8:
6735  case ARM::VST3dWB_register_Asm_16:
6736  case ARM::VST3dWB_register_Asm_32:
6737  case ARM::VST3qWB_register_Asm_8:
6738  case ARM::VST3qWB_register_Asm_16:
6739  case ARM::VST3qWB_register_Asm_32: {
6740    MCInst TmpInst;
6741    unsigned Spacing;
6742    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6743    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6744    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6745    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6746    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6747    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6748    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6749                                            Spacing));
6750    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6751                                            Spacing * 2));
6752    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6753    TmpInst.addOperand(Inst.getOperand(5));
6754    Inst = TmpInst;
6755    return true;
6756  }
6757
6758  // VST4 multiple 4-element structure instructions.
6759  case ARM::VST4dAsm_8:
6760  case ARM::VST4dAsm_16:
6761  case ARM::VST4dAsm_32:
6762  case ARM::VST4qAsm_8:
6763  case ARM::VST4qAsm_16:
6764  case ARM::VST4qAsm_32: {
6765    MCInst TmpInst;
6766    unsigned Spacing;
6767    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6768    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6769    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6770    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6771    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6772                                            Spacing));
6773    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6774                                            Spacing * 2));
6775    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6776                                            Spacing * 3));
6777    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6778    TmpInst.addOperand(Inst.getOperand(4));
6779    Inst = TmpInst;
6780    return true;
6781  }
6782
6783  case ARM::VST4dWB_fixed_Asm_8:
6784  case ARM::VST4dWB_fixed_Asm_16:
6785  case ARM::VST4dWB_fixed_Asm_32:
6786  case ARM::VST4qWB_fixed_Asm_8:
6787  case ARM::VST4qWB_fixed_Asm_16:
6788  case ARM::VST4qWB_fixed_Asm_32: {
6789    MCInst TmpInst;
6790    unsigned Spacing;
6791    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6792    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6793    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6794    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6795    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6796    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6797    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6798                                            Spacing));
6799    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6800                                            Spacing * 2));
6801    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6802                                            Spacing * 3));
6803    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6804    TmpInst.addOperand(Inst.getOperand(4));
6805    Inst = TmpInst;
6806    return true;
6807  }
6808
6809  case ARM::VST4dWB_register_Asm_8:
6810  case ARM::VST4dWB_register_Asm_16:
6811  case ARM::VST4dWB_register_Asm_32:
6812  case ARM::VST4qWB_register_Asm_8:
6813  case ARM::VST4qWB_register_Asm_16:
6814  case ARM::VST4qWB_register_Asm_32: {
6815    MCInst TmpInst;
6816    unsigned Spacing;
6817    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6818    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6819    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6820    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6821    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6822    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6823    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6824                                            Spacing));
6825    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6826                                            Spacing * 2));
6827    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6828                                            Spacing * 3));
6829    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6830    TmpInst.addOperand(Inst.getOperand(5));
6831    Inst = TmpInst;
6832    return true;
6833  }
6834
6835  // Handle encoding choice for the shift-immediate instructions.
6836  case ARM::t2LSLri:
6837  case ARM::t2LSRri:
6838  case ARM::t2ASRri: {
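    // Use the 16-bit encoding when the operands allow it, e.g.
    // "lsls r0, r0, #2" outside an IT block (or the non-flag-setting form
    // inside one), provided the wide ".w" qualifier wasn't written.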
6839    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6840        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6841        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6842        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6843         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6844      unsigned NewOpc;
6845      switch (Inst.getOpcode()) {
6846      default: llvm_unreachable("unexpected opcode");
6847      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6848      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6849      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6850      }
6851      // The Thumb1 operands aren't in the same order. Awesome, eh?
6852      MCInst TmpInst;
6853      TmpInst.setOpcode(NewOpc);
6854      TmpInst.addOperand(Inst.getOperand(0));
6855      TmpInst.addOperand(Inst.getOperand(5));
6856      TmpInst.addOperand(Inst.getOperand(1));
6857      TmpInst.addOperand(Inst.getOperand(2));
6858      TmpInst.addOperand(Inst.getOperand(3));
6859      TmpInst.addOperand(Inst.getOperand(4));
6860      Inst = TmpInst;
6861      return true;
6862    }
6863    return false;
6864  }
6865
6866  // Handle the Thumb2 mode MOV complex aliases.
6867  case ARM::t2MOVsr:
6868  case ARM::t2MOVSsr: {
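    // These are the register-shifted-register MOV aliases; e.g.
    // "movs r0, r0, lsl r1" is really a shift and, with low registers
    // outside an IT block, narrows to the 16-bit tLSLrr.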
6869    // Which instruction to expand to depends on the CCOut operand and,
6870    // when the register operands are low registers, on whether we're
6871    // inside an IT block.
6872    bool isNarrow = false;
6873    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6874        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6875        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6876        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6877        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6878      isNarrow = true;
6879    MCInst TmpInst;
6880    unsigned newOpc;
6881    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6882    default: llvm_unreachable("unexpected opcode!");
6883    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6884    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6885    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6886    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6887    }
6888    TmpInst.setOpcode(newOpc);
6889    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6890    if (isNarrow)
6891      TmpInst.addOperand(MCOperand::CreateReg(
6892          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6893    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6894    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6895    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6896    TmpInst.addOperand(Inst.getOperand(5));
6897    if (!isNarrow)
6898      TmpInst.addOperand(MCOperand::CreateReg(
6899          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6900    Inst = TmpInst;
6901    return true;
6902  }
6903  case ARM::t2MOVsi:
6904  case ARM::t2MOVSsi: {
6905    // Which instruction to expand to depends on the CCOut operand and,
6906    // when the register operands are low registers, on whether we're
6907    // inside an IT block.
6908    bool isNarrow = false;
6909    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6910        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6911        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6912      isNarrow = true;
6913    MCInst TmpInst;
6914    unsigned newOpc;
6915    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6916    default: llvm_unreachable("unexpected opcode!");
6917    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6918    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6919    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6920    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6921    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6922    }
6923    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
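    // asr #32 and lsr #32 are encoded with an immediate of 0.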
6924    if (Amount == 32) Amount = 0;
6925    TmpInst.setOpcode(newOpc);
6926    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6927    if (isNarrow)
6928      TmpInst.addOperand(MCOperand::CreateReg(
6929          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6930    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6931    if (newOpc != ARM::t2RRX)
6932      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6933    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6934    TmpInst.addOperand(Inst.getOperand(4));
6935    if (!isNarrow)
6936      TmpInst.addOperand(MCOperand::CreateReg(
6937          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6938    Inst = TmpInst;
6939    return true;
6940  }
6941  // Handle the ARM mode MOV complex aliases.
6942  case ARM::ASRr:
6943  case ARM::LSRr:
6944  case ARM::LSLr:
6945  case ARM::RORr: {
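    // In ARM mode "asr/lsr/lsl/ror Rd, Rm, Rs" are aliases for a MOV with a
    // register-shifted-register operand, e.g. "asr r0, r1, r2" becomes
    // "mov r0, r1, asr r2" (MOVsr).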
6946    ARM_AM::ShiftOpc ShiftTy;
6947    switch(Inst.getOpcode()) {
6948    default: llvm_unreachable("unexpected opcode!");
6949    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6950    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6951    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6952    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6953    }
6954    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6955    MCInst TmpInst;
6956    TmpInst.setOpcode(ARM::MOVsr);
6957    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6958    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6959    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6960    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6961    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6962    TmpInst.addOperand(Inst.getOperand(4));
6963    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6964    Inst = TmpInst;
6965    return true;
6966  }
6967  case ARM::ASRi:
6968  case ARM::LSRi:
6969  case ARM::LSLi:
6970  case ARM::RORi: {
6971    ARM_AM::ShiftOpc ShiftTy;
6972    switch(Inst.getOpcode()) {
6973    default: llvm_unreachable("unexpected opcode!");
6974    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6975    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6976    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6977    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6978    }
6979    // A shift by zero is a plain MOVr, not a MOVsi.
6980    unsigned Amt = Inst.getOperand(2).getImm();
6981    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6982    // A shift by 32 should be encoded as 0 when permitted
6983    if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
6984      Amt = 0;
6985    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6986    MCInst TmpInst;
6987    TmpInst.setOpcode(Opc);
6988    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6989    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6990    if (Opc == ARM::MOVsi)
6991      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6992    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6993    TmpInst.addOperand(Inst.getOperand(4));
6994    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6995    Inst = TmpInst;
6996    return true;
6997  }
6998  case ARM::RRXi: {
6999    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
7000    MCInst TmpInst;
7001    TmpInst.setOpcode(ARM::MOVsi);
7002    TmpInst.addOperand(Inst.getOperand(0)); // Rd
7003    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7004    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
7005    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7006    TmpInst.addOperand(Inst.getOperand(3));
7007    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
7008    Inst = TmpInst;
7009    return true;
7010  }
7011  case ARM::t2LDMIA_UPD: {
7012    // If this is a load of a single register, then we should use
7013    // a post-indexed LDR instruction instead, per the ARM ARM.
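    // E.g. "ldmia.w r0!, {r1}" becomes "ldr.w r1, [r0], #4".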
7014    if (Inst.getNumOperands() != 5)
7015      return false;
7016    MCInst TmpInst;
7017    TmpInst.setOpcode(ARM::t2LDR_POST);
7018    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7019    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7020    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7021    TmpInst.addOperand(MCOperand::CreateImm(4));
7022    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7023    TmpInst.addOperand(Inst.getOperand(3));
7024    Inst = TmpInst;
7025    return true;
7026  }
7027  case ARM::t2STMDB_UPD: {
7028    // If this is a store of a single register, then we should use
7029    // a pre-indexed STR instruction instead, per the ARM ARM.
7030    if (Inst.getNumOperands() != 5)
7031      return false;
7032    MCInst TmpInst;
7033    TmpInst.setOpcode(ARM::t2STR_PRE);
7034    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7035    TmpInst.addOperand(Inst.getOperand(4)); // Rt
7036    TmpInst.addOperand(Inst.getOperand(1)); // Rn
7037    TmpInst.addOperand(MCOperand::CreateImm(-4));
7038    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7039    TmpInst.addOperand(Inst.getOperand(3));
7040    Inst = TmpInst;
7041    return true;
7042  }
7043  case ARM::LDMIA_UPD:
7044    // If this is a load of a single register via a 'pop', then we should use
7045    // a post-indexed LDR instruction instead, per the ARM ARM.
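    // E.g. "pop {r0}" in ARM mode becomes "ldr r0, [sp], #4".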
7046    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
7047        Inst.getNumOperands() == 5) {
7048      MCInst TmpInst;
7049      TmpInst.setOpcode(ARM::LDR_POST_IMM);
7050      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7051      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7052      TmpInst.addOperand(Inst.getOperand(1)); // Rn
7053      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
7054      TmpInst.addOperand(MCOperand::CreateImm(4));
7055      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7056      TmpInst.addOperand(Inst.getOperand(3));
7057      Inst = TmpInst;
7058      return true;
7059    }
7060    break;
7061  case ARM::STMDB_UPD:
7062    // If this is a store of a single register via a 'push', then we should use
7063    // a pre-indexed STR instruction instead, per the ARM ARM.
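    // E.g. "push {r0}" in ARM mode becomes "str r0, [sp, #-4]!".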
7064    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
7065        Inst.getNumOperands() == 5) {
7066      MCInst TmpInst;
7067      TmpInst.setOpcode(ARM::STR_PRE_IMM);
7068      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7069      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7070      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7071      TmpInst.addOperand(MCOperand::CreateImm(-4));
7072      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7073      TmpInst.addOperand(Inst.getOperand(3));
7074      Inst = TmpInst;
7075    }
7076    break;
7077  case ARM::t2ADDri12:
7078    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7079    // mnemonic was used (not "addw"), encoding T3 is preferred.
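    // E.g. "add r0, r1, #16" can use the modified-immediate t2ADDri form,
    // while "addw r0, r1, #16" keeps the 12-bit-immediate t2ADDri12.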
7080    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
7081        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7082      break;
7083    Inst.setOpcode(ARM::t2ADDri);
7084    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7085    break;
7086  case ARM::t2SUBri12:
7087    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7088    // mnemonic was used (not "subw"), encoding T3 is preferred.
7089    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
7090        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7091      break;
7092    Inst.setOpcode(ARM::t2SUBri);
7093    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7094    break;
7095  case ARM::tADDi8:
7096    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7097    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7098    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7099    // to encoding T1 if <Rd> is omitted."
7100    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7101      Inst.setOpcode(ARM::tADDi3);
7102      return true;
7103    }
7104    break;
7105  case ARM::tSUBi8:
7106    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
7107    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7108    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7109    // to encoding T1 if <Rd> is omitted."
7110    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7111      Inst.setOpcode(ARM::tSUBi3);
7112      return true;
7113    }
7114    break;
7115  case ARM::t2ADDri:
7116  case ARM::t2SUBri: {
7117    // If the destination and first source operand are the same, and
7118    // the flags are compatible with the current IT status, use encoding T2
7119    // instead of T3. For compatibility with the system 'as'. Make sure the
7120    // wide encoding wasn't explicit.
7121    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7122        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7123        (unsigned)Inst.getOperand(2).getImm() > 255 ||
7124        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7125        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7126        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7127         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7128      break;
7129    MCInst TmpInst;
7130    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7131                      ARM::tADDi8 : ARM::tSUBi8);
7132    TmpInst.addOperand(Inst.getOperand(0));
7133    TmpInst.addOperand(Inst.getOperand(5));
7134    TmpInst.addOperand(Inst.getOperand(0));
7135    TmpInst.addOperand(Inst.getOperand(2));
7136    TmpInst.addOperand(Inst.getOperand(3));
7137    TmpInst.addOperand(Inst.getOperand(4));
7138    Inst = TmpInst;
7139    return true;
7140  }
7141  case ARM::t2ADDrr: {
7142    // If the destination and first source operand are the same, and
7143    // there's no setting of the flags, use encoding T2 instead of T3.
7144    // Note that this is only for ADD, not SUB. This mirrors the system
7145    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
7146    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7147        Inst.getOperand(5).getReg() != 0 ||
7148        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7149         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7150      break;
7151    MCInst TmpInst;
7152    TmpInst.setOpcode(ARM::tADDhirr);
7153    TmpInst.addOperand(Inst.getOperand(0));
7154    TmpInst.addOperand(Inst.getOperand(0));
7155    TmpInst.addOperand(Inst.getOperand(2));
7156    TmpInst.addOperand(Inst.getOperand(3));
7157    TmpInst.addOperand(Inst.getOperand(4));
7158    Inst = TmpInst;
7159    return true;
7160  }
7161  case ARM::tADDrSP: {
7162    // If the non-SP source operand and the destination operand are not the
7163    // same, we need to use the 32-bit encoding if it's available.
7164    if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7165      Inst.setOpcode(ARM::t2ADDrr);
7166      Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7167      return true;
7168    }
7169    break;
7170  }
7171  case ARM::tB:
7172    // A Thumb conditional branch outside of an IT block is a tBcc.
7173    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7174      Inst.setOpcode(ARM::tBcc);
7175      return true;
7176    }
7177    break;
7178  case ARM::t2B:
7179    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7180    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7181      Inst.setOpcode(ARM::t2Bcc);
7182      return true;
7183    }
7184    break;
7185  case ARM::t2Bcc:
7186    // If the conditional is AL or we're in an IT block, we really want t2B.
7187    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7188      Inst.setOpcode(ARM::t2B);
7189      return true;
7190    }
7191    break;
7192  case ARM::tBcc:
7193    // If the conditional is AL, we really want tB.
7194    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7195      Inst.setOpcode(ARM::tB);
7196      return true;
7197    }
7198    break;
7199  case ARM::tLDMIA: {
7200    // If the register list contains any high registers, or if the writeback
7201    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7202    // instead if we're in Thumb2. Otherwise, this should have generated
7203    // an error in validateInstruction().
7204    unsigned Rn = Inst.getOperand(0).getReg();
7205    bool hasWritebackToken =
7206      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7207       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7208    bool listContainsBase;
7209    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7210        (!listContainsBase && !hasWritebackToken) ||
7211        (listContainsBase && hasWritebackToken)) {
7212      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7213      assert (isThumbTwo());
7214      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7215      // If we're switching to the updating version, we need to insert
7216      // the writeback tied operand.
7217      if (hasWritebackToken)
7218        Inst.insert(Inst.begin(),
7219                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7220      return true;
7221    }
7222    break;
7223  }
7224  case ARM::tSTMIA_UPD: {
7225    // If the register list contains any high registers, we need to use
7226    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7227    // should have generated an error in validateInstruction().
7228    unsigned Rn = Inst.getOperand(0).getReg();
7229    bool listContainsBase;
7230    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7231      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7232      assert (isThumbTwo());
7233      Inst.setOpcode(ARM::t2STMIA_UPD);
7234      return true;
7235    }
7236    break;
7237  }
7238  case ARM::tPOP: {
7239    bool listContainsBase;
7240    // If the register list contains any high registers, we need to use
7241    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7242    // should have generated an error in validateInstruction().
7243    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7244      return false;
7245    assert (isThumbTwo());
7246    Inst.setOpcode(ARM::t2LDMIA_UPD);
7247    // Add the base register and writeback operands.
7248    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7249    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7250    return true;
7251  }
7252  case ARM::tPUSH: {
7253    bool listContainsBase;
7254    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7255      return false;
7256    assert (isThumbTwo());
7257    Inst.setOpcode(ARM::t2STMDB_UPD);
7258    // Add the base register and writeback operands.
7259    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7260    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7261    return true;
7262  }
7263  case ARM::t2MOVi: {
7264    // If we can use the 16-bit encoding and the user didn't explicitly
7265    // request the 32-bit variant, transform it here.
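    // E.g. "movs r0, #1" (with no ".w") becomes the 16-bit tMOVi8 when not
    // in an IT block.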
7266    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7267        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7268        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7269         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7270        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7271        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7272         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7273      // The operands aren't in the same order for tMOVi8...
7274      MCInst TmpInst;
7275      TmpInst.setOpcode(ARM::tMOVi8);
7276      TmpInst.addOperand(Inst.getOperand(0));
7277      TmpInst.addOperand(Inst.getOperand(4));
7278      TmpInst.addOperand(Inst.getOperand(1));
7279      TmpInst.addOperand(Inst.getOperand(2));
7280      TmpInst.addOperand(Inst.getOperand(3));
7281      Inst = TmpInst;
7282      return true;
7283    }
7284    break;
7285  }
7286  case ARM::t2MOVr: {
7287    // If we can use the 16-bit encoding and the user didn't explicitly
7288    // request the 32-bit variant, transform it here.
7289    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7290        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7291        Inst.getOperand(2).getImm() == ARMCC::AL &&
7292        Inst.getOperand(4).getReg() == ARM::CPSR &&
7293        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7294         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7295      // The operands aren't the same for tMOV[S]r... (no cc_out)
7296      MCInst TmpInst;
7297      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7298      TmpInst.addOperand(Inst.getOperand(0));
7299      TmpInst.addOperand(Inst.getOperand(1));
7300      TmpInst.addOperand(Inst.getOperand(2));
7301      TmpInst.addOperand(Inst.getOperand(3));
7302      Inst = TmpInst;
7303      return true;
7304    }
7305    break;
7306  }
7307  case ARM::t2SXTH:
7308  case ARM::t2SXTB:
7309  case ARM::t2UXTH:
7310  case ARM::t2UXTB: {
7311    // If we can use the 16-bit encoding and the user didn't explicitly
7312    // request the 32-bit variant, transform it here.
7313    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7314        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7315        Inst.getOperand(2).getImm() == 0 &&
7316        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7317         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7318      unsigned NewOpc;
7319      switch (Inst.getOpcode()) {
7320      default: llvm_unreachable("Illegal opcode!");
7321      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7322      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7323      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7324      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7325      }
7326      // The operands aren't the same for thumb1 (no rotate operand).
7327      MCInst TmpInst;
7328      TmpInst.setOpcode(NewOpc);
7329      TmpInst.addOperand(Inst.getOperand(0));
7330      TmpInst.addOperand(Inst.getOperand(1));
7331      TmpInst.addOperand(Inst.getOperand(3));
7332      TmpInst.addOperand(Inst.getOperand(4));
7333      Inst = TmpInst;
7334      return true;
7335    }
7336    break;
7337  }
7338  case ARM::MOVsi: {
7339    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7340    // rrx shifts and asr/lsr of #32 are encoded as 0
7341    if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7342      return false;
7343    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7344      // Shifting by zero is accepted as a vanilla 'MOVr'
7345      MCInst TmpInst;
7346      TmpInst.setOpcode(ARM::MOVr);
7347      TmpInst.addOperand(Inst.getOperand(0));
7348      TmpInst.addOperand(Inst.getOperand(1));
7349      TmpInst.addOperand(Inst.getOperand(3));
7350      TmpInst.addOperand(Inst.getOperand(4));
7351      TmpInst.addOperand(Inst.getOperand(5));
7352      Inst = TmpInst;
7353      return true;
7354    }
7355    return false;
7356  }
7357  case ARM::ANDrsi:
7358  case ARM::ORRrsi:
7359  case ARM::EORrsi:
7360  case ARM::BICrsi:
7361  case ARM::SUBrsi:
7362  case ARM::ADDrsi: {
7363    unsigned newOpc;
7364    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7365    if (SOpc == ARM_AM::rrx) return false;
7366    switch (Inst.getOpcode()) {
7367    default: llvm_unreachable("unexpected opcode!");
7368    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7369    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7370    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7371    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7372    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7373    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7374    }
7375    // If the shift is by zero, use the non-shifted instruction definition.
7376    // The exception is for right shifts, where 0 == 32
7377    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7378        !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7379      MCInst TmpInst;
7380      TmpInst.setOpcode(newOpc);
7381      TmpInst.addOperand(Inst.getOperand(0));
7382      TmpInst.addOperand(Inst.getOperand(1));
7383      TmpInst.addOperand(Inst.getOperand(2));
7384      TmpInst.addOperand(Inst.getOperand(4));
7385      TmpInst.addOperand(Inst.getOperand(5));
7386      TmpInst.addOperand(Inst.getOperand(6));
7387      Inst = TmpInst;
7388      return true;
7389    }
7390    return false;
7391  }
7392  case ARM::ITasm:
7393  case ARM::t2IT: {
7394    // In the encoding, a mask bit equal to the low bit of the condition
7395    // code means 't' and its inverse means 'e'. The parser builds the mask
7396    // with 1 meaning 't', so XOR-toggle the bits above the trailing
7397    // terminator bit when the low bit of the condition code is zero.
7398    MCOperand &MO = Inst.getOperand(1);
7399    unsigned Mask = MO.getImm();
7400    unsigned OrigMask = Mask;
7401    unsigned TZ = CountTrailingZeros_32(Mask);
7402    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7403      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7404      for (unsigned i = 3; i != TZ; --i)
7405        Mask ^= 1 << i;
7406    }
7407    MO.setImm(Mask);
7408
7409    // Set up the IT block state according to the IT instruction we just
7410    // matched.
7411    assert(!inITBlock() && "nested IT blocks?!");
7412    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7413    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7414    ITState.CurPosition = 0;
7415    ITState.FirstCond = true;
7416    break;
7417  }
7418  case ARM::t2LSLrr:
7419  case ARM::t2LSRrr:
7420  case ARM::t2ASRrr:
7421  case ARM::t2SBCrr:
7422  case ARM::t2RORrr:
7423  case ARM::t2BICrr:
7424  {
7425    // Assemblers should use the narrow encodings of these instructions when permissible.
7426    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7427         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7428        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7429        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7430         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7431        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7432         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7433      unsigned NewOpc;
7434      switch (Inst.getOpcode()) {
7435        default: llvm_unreachable("unexpected opcode");
7436        case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7437        case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7438        case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7439        case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7440        case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7441        case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7442      }
7443      MCInst TmpInst;
7444      TmpInst.setOpcode(NewOpc);
7445      TmpInst.addOperand(Inst.getOperand(0));
7446      TmpInst.addOperand(Inst.getOperand(5));
7447      TmpInst.addOperand(Inst.getOperand(1));
7448      TmpInst.addOperand(Inst.getOperand(2));
7449      TmpInst.addOperand(Inst.getOperand(3));
7450      TmpInst.addOperand(Inst.getOperand(4));
7451      Inst = TmpInst;
7452      return true;
7453    }
7454    return false;
7455  }
7456  case ARM::t2ANDrr:
7457  case ARM::t2EORrr:
7458  case ARM::t2ADCrr:
7459  case ARM::t2ORRrr:
7460  {
7461    // Assemblers should use the narrow encodings of these instructions when permissible.
7462    // These instructions are special in that they are commutable, so shorter encodings
7463    // are available more often.
7464    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7465         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7466        (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7467         Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7468        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7469         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7470        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7471         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7472      unsigned NewOpc;
7473      switch (Inst.getOpcode()) {
7474        default: llvm_unreachable("unexpected opcode");
7475        case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7476        case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7477        case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7478        case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7479      }
7480      MCInst TmpInst;
7481      TmpInst.setOpcode(NewOpc);
7482      TmpInst.addOperand(Inst.getOperand(0));
7483      TmpInst.addOperand(Inst.getOperand(5));
7484      if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7485        TmpInst.addOperand(Inst.getOperand(1));
7486        TmpInst.addOperand(Inst.getOperand(2));
7487      } else {
7488        TmpInst.addOperand(Inst.getOperand(2));
7489        TmpInst.addOperand(Inst.getOperand(1));
7490      }
7491      TmpInst.addOperand(Inst.getOperand(3));
7492      TmpInst.addOperand(Inst.getOperand(4));
7493      Inst = TmpInst;
7494      return true;
7495    }
7496    return false;
7497  }
7498  }
7499  return false;
7500}
7501
7502unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7503  // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
7504  // suffix depending on whether they're in an IT block or not.
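  // E.g. "adds r3, r3, #1" selects a 16-bit encoding that always sets flags,
  // so it is rejected inside an IT block, while the non-flag-setting "add"
  // spelling of the same encoding is only usable inside one.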
7505  unsigned Opc = Inst.getOpcode();
7506  const MCInstrDesc &MCID = getInstDesc(Opc);
7507  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7508    assert(MCID.hasOptionalDef() &&
7509           "optionally flag setting instruction missing optional def operand");
7510    assert(MCID.NumOperands == Inst.getNumOperands() &&
7511           "operand count mismatch!");
7512    // Find the optional-def operand (cc_out).
7513    unsigned OpNo;
7514    for (OpNo = 0;
7515         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7516         ++OpNo)
7517      ;
7518    // If we're parsing Thumb1, reject it completely.
7519    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7520      return Match_MnemonicFail;
7521    // If we're parsing Thumb2, which form is legal depends on whether we're
7522    // in an IT block.
7523    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7524        !inITBlock())
7525      return Match_RequiresITBlock;
7526    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7527        inITBlock())
7528      return Match_RequiresNotITBlock;
7529  }
7530  // Some high-register supporting Thumb1 encodings only allow both registers
7531  // to be from r0-r7 when in Thumb2.
7532  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7533           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7534           isARMLowRegister(Inst.getOperand(2).getReg()))
7535    return Match_RequiresThumb2;
7536  // Others only require ARMv6 or later.
7537  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7538           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7539           isARMLowRegister(Inst.getOperand(1).getReg()))
7540    return Match_RequiresV6;
7541  return Match_Success;
7542}
7543
7544static const char *getSubtargetFeatureName(unsigned Val);
7545bool ARMAsmParser::
7546MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
7547                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7548                        MCStreamer &Out, unsigned &ErrorInfo,
7549                        bool MatchingInlineAsm) {
7550  MCInst Inst;
7551  unsigned MatchResult;
7552
7553  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
7554                                     MatchingInlineAsm);
7555  switch (MatchResult) {
7556  default: break;
7557  case Match_Success:
7558    // Context sensitive operand constraints aren't handled by the matcher,
7559    // so check them here.
7560    if (validateInstruction(Inst, Operands)) {
7561      // Still progress the IT block, otherwise one wrong condition causes
7562      // nasty cascading errors.
7563      forwardITPosition();
7564      return true;
7565    }
7566
7567    // Some instructions need post-processing to, for example, tweak which
7568    // encoding is selected. Loop on it while changes happen so the
7569    // individual transformations can chain off each other. E.g.,
7570    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(r8,sp)
7571    while (processInstruction(Inst, Operands))
7572      ;
7573
7574    // Only move forward at the very end so that everything in validate
7575    // and process gets a consistent answer about whether we're in an IT
7576    // block.
7577    forwardITPosition();
7578
7579    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7580    // doesn't actually encode.
7581    if (Inst.getOpcode() == ARM::ITasm)
7582      return false;
7583
7584    Inst.setLoc(IDLoc);
7585    Out.EmitInstruction(Inst);
7586    return false;
7587  case Match_MissingFeature: {
7588    assert(ErrorInfo && "Unknown missing feature!");
7589    // Special case the error message for the very common case where only
7590    // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7591    std::string Msg = "instruction requires:";
7592    unsigned Mask = 1;
7593    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7594      if (ErrorInfo & Mask) {
7595        Msg += " ";
7596        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7597      }
7598      Mask <<= 1;
7599    }
7600    return Error(IDLoc, Msg);
7601  }
7602  case Match_InvalidOperand: {
7603    SMLoc ErrorLoc = IDLoc;
7604    if (ErrorInfo != ~0U) {
7605      if (ErrorInfo >= Operands.size())
7606        return Error(IDLoc, "too few operands for instruction");
7607
7608      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7609      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7610    }
7611
7612    return Error(ErrorLoc, "invalid operand for instruction");
7613  }
7614  case Match_MnemonicFail:
7615    return Error(IDLoc, "invalid instruction",
7616                 ((ARMOperand*)Operands[0])->getLocRange());
7617  case Match_RequiresNotITBlock:
7618    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7619  case Match_RequiresITBlock:
7620    return Error(IDLoc, "instruction only valid inside IT block");
7621  case Match_RequiresV6:
7622    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7623  case Match_RequiresThumb2:
7624    return Error(IDLoc, "instruction variant requires Thumb2");
7625  case Match_ImmRange0_15: {
7626    SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7627    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7628    return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
7629  }
7630  }
7631
7632  llvm_unreachable("Implement any new match types added!");
7633}
7634
7635/// ParseDirective parses the ARM-specific directives.
7636bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7637  StringRef IDVal = DirectiveID.getIdentifier();
7638  if (IDVal == ".word")
7639    return parseDirectiveWord(4, DirectiveID.getLoc());
7640  else if (IDVal == ".thumb")
7641    return parseDirectiveThumb(DirectiveID.getLoc());
7642  else if (IDVal == ".arm")
7643    return parseDirectiveARM(DirectiveID.getLoc());
7644  else if (IDVal == ".thumb_func")
7645    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7646  else if (IDVal == ".code")
7647    return parseDirectiveCode(DirectiveID.getLoc());
7648  else if (IDVal == ".syntax")
7649    return parseDirectiveSyntax(DirectiveID.getLoc());
7650  else if (IDVal == ".unreq")
7651    return parseDirectiveUnreq(DirectiveID.getLoc());
7652  else if (IDVal == ".arch")
7653    return parseDirectiveArch(DirectiveID.getLoc());
7654  else if (IDVal == ".eabi_attribute")
7655    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7656  return true;
7657}
7658
7659/// parseDirectiveWord
7660///  ::= .word [ expression (, expression)* ]
7661bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7662  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7663    for (;;) {
7664      const MCExpr *Value;
7665      if (getParser().parseExpression(Value))
7666        return true;
7667
7668      getParser().getStreamer().EmitValue(Value, Size);
7669
7670      if (getLexer().is(AsmToken::EndOfStatement))
7671        break;
7672
7673      // FIXME: Improve diagnostic.
7674      if (getLexer().isNot(AsmToken::Comma))
7675        return Error(L, "unexpected token in directive");
7676      Parser.Lex();
7677    }
7678  }
7679
7680  Parser.Lex();
7681  return false;
7682}
7683
7684/// parseDirectiveThumb
7685///  ::= .thumb
7686bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7687  if (getLexer().isNot(AsmToken::EndOfStatement))
7688    return Error(L, "unexpected token in directive");
7689  Parser.Lex();
7690
7691  if (!isThumb())
7692    SwitchMode();
7693  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7694  return false;
7695}
7696
7697/// parseDirectiveARM
7698///  ::= .arm
7699bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7700  if (getLexer().isNot(AsmToken::EndOfStatement))
7701    return Error(L, "unexpected token in directive");
7702  Parser.Lex();
7703
7704  if (isThumb())
7705    SwitchMode();
7706  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7707  return false;
7708}
7709
7710/// parseDirectiveThumbFunc
7711///  ::= .thumb_func symbol_name
7712bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7713  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7714  bool isMachO = MAI.hasSubsectionsViaSymbols();
7715  StringRef Name;
7716  bool needFuncName = true;
7717
7718  // Darwin asm has an (optional) function name after the .thumb_func
7719  // directive; ELF doesn't.
7720  if (isMachO) {
7721    const AsmToken &Tok = Parser.getTok();
7722    if (Tok.isNot(AsmToken::EndOfStatement)) {
7723      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7724        return Error(L, "unexpected token in .thumb_func directive");
7725      Name = Tok.getIdentifier();
7726      Parser.Lex(); // Consume the identifier token.
7727      needFuncName = false;
7728    }
7729  }
7730
7731  if (getLexer().isNot(AsmToken::EndOfStatement))
7732    return Error(L, "unexpected token in directive");
7733
7734  // Eat the end of statement and any blank lines that follow.
7735  while (getLexer().is(AsmToken::EndOfStatement))
7736    Parser.Lex();
7737
7738  // FIXME: assuming function name will be the line following .thumb_func
7739  // We really should be checking the next symbol definition even if there's
7740  // stuff in between.
7741  if (needFuncName) {
7742    Name = Parser.getTok().getIdentifier();
7743  }
7744
7745  // Mark symbol as a thumb symbol.
7746  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7747  getParser().getStreamer().EmitThumbFunc(Func);
7748  return false;
7749}
7750
7751/// parseDirectiveSyntax
7752///  ::= .syntax unified | divided
7753bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7754  const AsmToken &Tok = Parser.getTok();
7755  if (Tok.isNot(AsmToken::Identifier))
7756    return Error(L, "unexpected token in .syntax directive");
7757  StringRef Mode = Tok.getString();
7758  if (Mode == "unified" || Mode == "UNIFIED")
7759    Parser.Lex();
7760  else if (Mode == "divided" || Mode == "DIVIDED")
7761    return Error(L, "'.syntax divided' arm asssembly not supported");
7762  else
7763    return Error(L, "unrecognized syntax mode in .syntax directive");
7764
7765  if (getLexer().isNot(AsmToken::EndOfStatement))
7766    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7767  Parser.Lex();
7768
7769  // TODO tell the MC streamer the mode
7770  // getParser().getStreamer().Emit???();
7771  return false;
7772}
7773
7774/// parseDirectiveCode
7775///  ::= .code 16 | 32
7776bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7777  const AsmToken &Tok = Parser.getTok();
7778  if (Tok.isNot(AsmToken::Integer))
7779    return Error(L, "unexpected token in .code directive");
7780  int64_t Val = Parser.getTok().getIntVal();
7781  if (Val == 16)
7782    Parser.Lex();
7783  else if (Val == 32)
7784    Parser.Lex();
7785  else
7786    return Error(L, "invalid operand to .code directive");
7787
7788  if (getLexer().isNot(AsmToken::EndOfStatement))
7789    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7790  Parser.Lex();
7791
7792  if (Val == 16) {
7793    if (!isThumb())
7794      SwitchMode();
7795    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7796  } else {
7797    if (isThumb())
7798      SwitchMode();
7799    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7800  }
7801
7802  return false;
7803}
7804
7805/// parseDirectiveReq
7806///  ::= name .req registername
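///  e.g. "fp .req r11" lets 'fp' be used wherever r11 is accepted.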
7807bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7808  Parser.Lex(); // Eat the '.req' token.
7809  unsigned Reg;
7810  SMLoc SRegLoc, ERegLoc;
7811  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7812    Parser.eatToEndOfStatement();
7813    return Error(SRegLoc, "register name expected");
7814  }
7815
7816  // Shouldn't be anything else.
7817  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7818    Parser.eatToEndOfStatement();
7819    return Error(Parser.getTok().getLoc(),
7820                 "unexpected input in .req directive.");
7821  }
7822
7823  Parser.Lex(); // Consume the EndOfStatement
7824
7825  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7826    return Error(SRegLoc, "redefinition of '" + Name +
7827                          "' does not match original.");
7828
7829  return false;
7830}
7831
7832/// parseDirectiveUnreq
7833///  ::= .unreq registername
7834bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7835  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7836    Parser.eatToEndOfStatement();
7837    return Error(L, "unexpected input in .unreq directive.");
7838  }
7839  RegisterReqs.erase(Parser.getTok().getIdentifier());
7840  Parser.Lex(); // Eat the identifier.
7841  return false;
7842}
7843
7844/// parseDirectiveArch
7845///  ::= .arch token
7846bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7847  return true;
7848}
7849
7850/// parseDirectiveEabiAttr
7851///  ::= .eabi_attribute int, int
7852bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7853  return true;
7854}
7855
7856/// Force static initialization.
7857extern "C" void LLVMInitializeARMAsmParser() {
7858  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7859  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7860}
7861
7862#define GET_REGISTER_MATCHER
7863#define GET_SUBTARGET_FEATURE_NAME
7864#define GET_MATCHER_IMPLEMENTATION
7865#include "ARMGenAsmMatcher.inc"
7866
7867// Define this matcher function after the auto-generated include so we
7868// have the match class enum definitions.
7869unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
7870                                                  unsigned Kind) {
7871  ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
7872  // If the kind is a token for a literal immediate, check if our asm
7873  // operand matches. This is for InstAliases which have a fixed-value
7874  // immediate in the syntax.
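  // MCK__35_0 is the autogenerated match class name for the literal token
  // "#0" ('#' is ASCII 35).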
7875  if (Kind == MCK__35_0 && Op->isImm()) {
7876    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
7877    if (!CE)
7878      return Match_InvalidOperand;
7879    if (CE->getValue() == 0)
7880      return Match_Success;
7881  }
7882  return Match_InvalidOperand;
7883}
7884